@n8n/n8n-nodes-langchain 1.107.0 → 1.109.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js +2 -2
- package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js +4 -2
- package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js +4 -2
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/audio/transcribe.operation.js +10 -8
- package/dist/nodes/vendors/OpenAi/actions/audio/transcribe.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/audio/translate.operation.js +10 -8
- package/dist/nodes/vendors/OpenAi/actions/audio/translate.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/file/upload.operation.js +10 -8
- package/dist/nodes/vendors/OpenAi/actions/file/upload.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/text/message.operation.js +30 -0
- package/dist/nodes/vendors/OpenAi/actions/text/message.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/helpers/binary-data.js +38 -0
- package/dist/nodes/vendors/OpenAi/helpers/binary-data.js.map +1 -0
- package/dist/types/nodes.json +4 -4
- package/dist/utils/tokenizer/tiktoken.js +26 -22
- package/dist/utils/tokenizer/tiktoken.js.map +1 -1
- package/dist/utils/tokenizer/token-estimator.js +1 -1
- package/dist/utils/tokenizer/token-estimator.js.map +1 -1
- package/package.json +6 -6
package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js
CHANGED

@@ -290,8 +290,8 @@ class LmChatOpenAi {
              ],
              displayOptions: {
                show: {
-                 // reasoning_effort is only available on o1, o1-versioned, or on o3-mini and beyond. Not on o1-mini or other GPT-models.
-                 "/model": [{ _cnd: { regex: "(^o1([-\\d]+)?$)|(^o[3-9].*)" } }]
+                 // reasoning_effort is only available on o1, o1-versioned, or on o3-mini and beyond, and gpt-5 models. Not on o1-mini or other GPT-models.
+                 "/model": [{ _cnd: { regex: "(^o1([-\\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)" } }]
                }
              }
            },
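The only functional change in this node is the widened displayOptions regex, which now shows the Reasoning Effort option for gpt-5 models as well. As a quick standalone check of what the new pattern matches (not part of the package; the sample model IDs are illustrative):

// Sketch: which model IDs satisfy the updated Reasoning Effort condition.
// The pattern is copied verbatim from the hunk above.
const reasoningEffortModels = /(^o1([-\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)/;

const samples = ["o1", "o1-2024-12-17", "o1-mini", "o3-mini", "gpt-4o", "gpt-5", "gpt-5-mini"];
for (const id of samples) {
  console.log(id, reasoningEffortModels.test(id));
}
// o1            -> true   (anchored first branch: "o1" plus optional digits/dashes)
// o1-2024-12-17 -> true
// o1-mini       -> false  ("mini" is not digits/dashes, so the anchored o1 branch fails)
// o3-mini       -> true   (^o[3-9].*)
// gpt-4o        -> false
// gpt-5         -> true   (new ^gpt-5.* branch)
// gpt-5-mini    -> true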
package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map
CHANGED

@@ -1 +1 @@
(one-line source map regenerated; its embedded sourcesContent carries the same reasoning_effort comment and regex change shown above)
package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js
CHANGED

@@ -66,13 +66,15 @@ class TextSplitterCharacterTextSplitter {
        displayName: "Chunk Size",
        name: "chunkSize",
        type: "number",
-       default: 1e3
+       default: 1e3,
+       description: "Maximum number of characters per chunk"
      },
      {
        displayName: "Chunk Overlap",
        name: "chunkOverlap",
        type: "number",
-       default: 0
+       default: 0,
+       description: "Number of characters shared between consecutive chunks to preserve context"
      }
    ]
  };
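These additions only document parameters that previously shipped without descriptions. For readers unfamiliar with how the two numbers interact, here is a minimal usage sketch with @langchain/textsplitters (the same class this node wraps); the separator, sizes, and sample text are made-up values, not the node's defaults:

import { CharacterTextSplitter } from "@langchain/textsplitters";

// Hypothetical values: split on blank lines, cap each chunk at 100 characters,
// and repeat up to 20 trailing characters at the start of the next chunk.
const splitter = new CharacterTextSplitter({
  separator: "\n\n",
  chunkSize: 100,
  chunkOverlap: 20,
});

const text = ["First paragraph.", "Second paragraph.", "Third paragraph."].join("\n\n");
const chunks = await splitter.splitText(text); // string[] of <=100-character chunks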
package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js.map
CHANGED

@@ -1 +1 @@
(one-line source map regenerated; the embedded source adds the chunkSize and chunkOverlap descriptions shown above)
package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js
CHANGED
@@ -60,13 +60,15 @@ class TextSplitterTokenSplitter {
        displayName: "Chunk Size",
        name: "chunkSize",
        type: "number",
-       default: 1e3
+       default: 1e3,
+       description: "Maximum number of tokens per chunk"
      },
      {
        displayName: "Chunk Overlap",
        name: "chunkOverlap",
        type: "number",
-       default: 0
+       default: 0,
+       description: "Number of tokens shared between consecutive chunks to preserve context"
      }
    ]
  };
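Here the same two parameters count tokens rather than characters: the splitter walks the encoded token ID array in fixed-size windows, stepping back by chunkOverlap before opening each new window (see the TokenTextSplitter.js hunk below). A sketch of that windowing arithmetic with made-up numbers:

// Sketch of the token windowing the splitter applies (illustrative numbers).
// Assumes chunkOverlap < chunkSize, as with the node's defaults.
function tokenWindows(totalTokens: number, chunkSize: number, chunkOverlap: number): Array<[number, number]> {
  const windows: Array<[number, number]> = [];
  let start = 0;
  while (start < totalTokens) {
    if (start > 0) start = Math.max(0, start - chunkOverlap); // back up to create overlap
    const end = Math.min(start + chunkSize, totalTokens);
    windows.push([start, end]);
    start = end;
  }
  return windows;
}

// With chunkSize = 1000 and chunkOverlap = 100, a 2500-token input yields:
console.log(tokenWindows(2500, 1000, 100)); // [[0,1000],[900,1900],[1800,2500]]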
package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["../../../../nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.ts"],"sourcesContent":["import {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { logWrapper } from '@utils/logWrapper';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { TokenTextSplitter } from './TokenTextSplitter';\n\nexport class TextSplitterTokenSplitter implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Token Splitter',\n\t\tname: 'textSplitterTokenSplitter',\n\t\ticon: 'fa:grip-lines-vertical',\n\t\ticonColor: 'black',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Split text into chunks by tokens',\n\t\tdefaults: {\n\t\t\tname: 'Token Splitter',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Text Splitters'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.textsplittertokensplitter/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiTextSplitter],\n\t\toutputNames: ['Text Splitter'],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiDocument]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Chunk Size',\n\t\t\t\tname: 'chunkSize',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 1000,\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Chunk Overlap',\n\t\t\t\tname: 'chunkOverlap',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tthis.logger.debug('Supply Data for Text Splitter');\n\n\t\tconst chunkSize = this.getNodeParameter('chunkSize', itemIndex) as number;\n\t\tconst chunkOverlap = this.getNodeParameter('chunkOverlap', itemIndex) as number;\n\n\t\tconst splitter = new TokenTextSplitter({\n\t\t\tchunkSize,\n\t\t\tchunkOverlap,\n\t\t\tallowedSpecial: 'all',\n\t\t\tdisallowedSpecial: 'all',\n\t\t\tencodingName: 'cl100k_base',\n\t\t\tkeepSeparator: false,\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: logWrapper(splitter, this),\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAMO;AAEP,wBAA2B;AAC3B,0BAA6C;AAE7C,+BAAkC;AAE3B,MAAM,0BAA+C;AAAA,EAArD;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,gBAAgB;AAAA,QACtB;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,cAAc;AAAA,MAC5C,aAAa,CAAC,eAAe;AAAA,MAC7B,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,UAAU,CAAC;AAAA,QAC7D;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,
|
|
1
|
+
{"version":3,"sources":["../../../../nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.ts"],"sourcesContent":["import {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { logWrapper } from '@utils/logWrapper';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { TokenTextSplitter } from './TokenTextSplitter';\n\nexport class TextSplitterTokenSplitter implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Token Splitter',\n\t\tname: 'textSplitterTokenSplitter',\n\t\ticon: 'fa:grip-lines-vertical',\n\t\ticonColor: 'black',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Split text into chunks by tokens',\n\t\tdefaults: {\n\t\t\tname: 'Token Splitter',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Text Splitters'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.textsplittertokensplitter/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiTextSplitter],\n\t\toutputNames: ['Text Splitter'],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiDocument]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Chunk Size',\n\t\t\t\tname: 'chunkSize',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 1000,\n\t\t\t\tdescription: 'Maximum number of tokens per chunk',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Chunk Overlap',\n\t\t\t\tname: 'chunkOverlap',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\tdescription: 'Number of tokens shared between consecutive chunks to preserve context',\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tthis.logger.debug('Supply Data for Text Splitter');\n\n\t\tconst chunkSize = this.getNodeParameter('chunkSize', itemIndex) as number;\n\t\tconst chunkOverlap = this.getNodeParameter('chunkOverlap', itemIndex) as number;\n\n\t\tconst splitter = new TokenTextSplitter({\n\t\t\tchunkSize,\n\t\t\tchunkOverlap,\n\t\t\tallowedSpecial: 'all',\n\t\t\tdisallowedSpecial: 'all',\n\t\t\tencodingName: 'cl100k_base',\n\t\t\tkeepSeparator: false,\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: logWrapper(splitter, 
this),\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAMO;AAEP,wBAA2B;AAC3B,0BAA6C;AAE7C,+BAAkC;AAE3B,MAAM,0BAA+C;AAAA,EAArD;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,gBAAgB;AAAA,QACtB;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,cAAc;AAAA,MAC5C,aAAa,CAAC,eAAe;AAAA,MAC7B,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,UAAU,CAAC;AAAA,QAC7D;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,aAAa;AAAA,QACd;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,aAAa;AAAA,QACd;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,SAAK,OAAO,MAAM,+BAA+B;AAEjD,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAC9D,UAAM,eAAe,KAAK,iBAAiB,gBAAgB,SAAS;AAEpE,UAAM,WAAW,IAAI,2CAAkB;AAAA,MACtC;AAAA,MACA;AAAA,MACA,gBAAgB;AAAA,MAChB,mBAAmB;AAAA,MACnB,cAAc;AAAA,MACd,eAAe;AAAA,IAChB,CAAC;AAED,WAAO;AAAA,MACN,cAAU,8BAAW,UAAU,IAAI;AAAA,IACpC;AAAA,EACD;AACD;","names":[]}
@@ -50,7 +50,7 @@ class TokenTextSplitter extends import_textsplitters.TextSplitter {
         return splits;
       }
       try {
-        this.tokenizer ??= (0, import_tiktoken.getEncoding)(this.encodingName);
+        this.tokenizer ??= await (0, import_tiktoken.getEncoding)(this.encodingName);
         const splits = [];
         const input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);
         let start_idx = 0;
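The only change in this hunk is that `getEncoding` is now awaited: the tokenizer utility in `dist/utils/tokenizer/tiktoken.js` became async, presumably so encoding data can be loaded lazily. The utility's new body is not shown here, so the following is only a minimal sketch of a Promise-returning, memoized `getEncoding`, assuming js-tiktoken's `lite` entry point and per-encoding JSON ranks; the diff itself only implies the async signature.

```ts
import type { TiktokenBPE, TiktokenEncoding } from 'js-tiktoken';
import { Tiktoken } from 'js-tiktoken/lite';

// Cache the in-flight promise so concurrent callers share a single load.
const encoders = new Map<TiktokenEncoding, Promise<Tiktoken>>();

export async function getEncoding(name: TiktokenEncoding): Promise<Tiktoken> {
  let encoder = encoders.get(name);
  if (!encoder) {
    // Hypothetical lazy load of the BPE ranks; the real module layout
    // inside the package is not visible in this diff.
    encoder = import(`./encodings/${name}.json`).then(
      (mod: { default: TiktokenBPE }) => new Tiktoken(mod.default),
    );
    encoders.set(name, encoder);
  }
  return await encoder;
}
```

Callers that previously assigned the result synchronously, like `this.tokenizer ??= getEncoding(...)` above, must now `await` it, which is exactly what the hunk changes.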
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.ts"],"sourcesContent":["import type { TokenTextSplitterParams } from '@langchain/textsplitters';\nimport { TextSplitter } from '@langchain/textsplitters';\nimport { hasLongSequentialRepeat } from '@utils/helpers';\nimport { getEncoding } from '@utils/tokenizer/tiktoken';\nimport { estimateTextSplitsByTokens } from '@utils/tokenizer/token-estimator';\nimport type * as tiktoken from 'js-tiktoken';\n\n/**\n * Implementation of splitter which looks at tokens.\n * This is override of the LangChain TokenTextSplitter\n * to use the n8n tokenizer utility which uses local JSON encodings\n */\nexport class TokenTextSplitter extends TextSplitter implements TokenTextSplitterParams {\n\tstatic lc_name() {\n\t\treturn 'TokenTextSplitter';\n\t}\n\n\tencodingName: tiktoken.TiktokenEncoding;\n\n\tallowedSpecial: 'all' | string[];\n\n\tdisallowedSpecial: 'all' | string[];\n\n\tprivate tokenizer: tiktoken.Tiktoken | undefined;\n\n\tconstructor(fields?: Partial<TokenTextSplitterParams>) {\n\t\tsuper(fields);\n\n\t\tthis.encodingName = fields?.encodingName ?? 'cl100k_base';\n\t\tthis.allowedSpecial = fields?.allowedSpecial ?? [];\n\t\tthis.disallowedSpecial = fields?.disallowedSpecial ?? 'all';\n\t}\n\n\tasync splitText(text: string): Promise<string[]> {\n\t\ttry {\n\t\t\t// Validate input\n\t\t\tif (!text || typeof text !== 'string') {\n\t\t\t\treturn [];\n\t\t\t}\n\n\t\t\t// Check for repetitive content\n\t\t\tif (hasLongSequentialRepeat(text)) {\n\t\t\t\tconst splits = estimateTextSplitsByTokens(\n\t\t\t\t\ttext,\n\t\t\t\t\tthis.chunkSize,\n\t\t\t\t\tthis.chunkOverlap,\n\t\t\t\t\tthis.encodingName,\n\t\t\t\t);\n\t\t\t\treturn splits;\n\t\t\t}\n\n\t\t\t// Use tiktoken for normal text\n\t\t\ttry {\n\t\t\t\tthis.tokenizer ??= getEncoding(this.encodingName);\n\n\t\t\t\tconst splits: string[] = [];\n\t\t\t\tconst input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);\n\n\t\t\t\tlet start_idx = 0;\n\t\t\t\tlet chunkCount = 0;\n\n\t\t\t\twhile (start_idx < input_ids.length) {\n\t\t\t\t\tif (start_idx > 0) {\n\t\t\t\t\t\tstart_idx = Math.max(0, start_idx - this.chunkOverlap);\n\t\t\t\t\t}\n\t\t\t\t\tconst end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);\n\t\t\t\t\tconst chunk_ids = input_ids.slice(start_idx, end_idx);\n\n\t\t\t\t\tsplits.push(this.tokenizer.decode(chunk_ids));\n\n\t\t\t\t\tchunkCount++;\n\t\t\t\t\tstart_idx = end_idx;\n\t\t\t\t}\n\n\t\t\t\treturn splits;\n\t\t\t} catch (tiktokenError) {\n\t\t\t\t// Fall back to character-based splitting if tiktoken fails\n\t\t\t\treturn estimateTextSplitsByTokens(\n\t\t\t\t\ttext,\n\t\t\t\t\tthis.chunkSize,\n\t\t\t\t\tthis.chunkOverlap,\n\t\t\t\t\tthis.encodingName,\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Return empty array on complete failure\n\t\t\treturn [];\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,2BAA6B;AAC7B,qBAAwC;AACxC,sBAA4B;AAC5B,6BAA2C;AAQpC,MAAM,0BAA0B,kCAAgD;AAAA,EACtF,OAAO,UAAU;AAChB,WAAO;AAAA,EACR;AAAA,EAUA,YAAY,QAA2C;AACtD,UAAM,MAAM;AAEZ,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,iBAAiB,QAAQ,kBAAkB,CAAC;AACjD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EACvD;AAAA,EAEA,MAAM,UAAU,MAAiC;AAChD,QAAI;AAEH,UAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACtC,eAAO,CAAC;AAAA,MACT;AAGA,cAAI,wCAAwB,IAAI,GAAG;AAClC,cAAM,aAAS;AAAA,UACd;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,QACN;AACA,eAAO;AAAA,MACR;AAGA,UAAI;AACH,aAAK,
+
{"version":3,"sources":["../../../../nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.ts"],"sourcesContent":["import type { TokenTextSplitterParams } from '@langchain/textsplitters';\nimport { TextSplitter } from '@langchain/textsplitters';\nimport { hasLongSequentialRepeat } from '@utils/helpers';\nimport { getEncoding } from '@utils/tokenizer/tiktoken';\nimport { estimateTextSplitsByTokens } from '@utils/tokenizer/token-estimator';\nimport type * as tiktoken from 'js-tiktoken';\n\n/**\n * Implementation of splitter which looks at tokens.\n * This is override of the LangChain TokenTextSplitter\n * to use the n8n tokenizer utility which uses local JSON encodings\n */\nexport class TokenTextSplitter extends TextSplitter implements TokenTextSplitterParams {\n\tstatic lc_name() {\n\t\treturn 'TokenTextSplitter';\n\t}\n\n\tencodingName: tiktoken.TiktokenEncoding;\n\n\tallowedSpecial: 'all' | string[];\n\n\tdisallowedSpecial: 'all' | string[];\n\n\tprivate tokenizer: tiktoken.Tiktoken | undefined;\n\n\tconstructor(fields?: Partial<TokenTextSplitterParams>) {\n\t\tsuper(fields);\n\n\t\tthis.encodingName = fields?.encodingName ?? 'cl100k_base';\n\t\tthis.allowedSpecial = fields?.allowedSpecial ?? [];\n\t\tthis.disallowedSpecial = fields?.disallowedSpecial ?? 'all';\n\t}\n\n\tasync splitText(text: string): Promise<string[]> {\n\t\ttry {\n\t\t\t// Validate input\n\t\t\tif (!text || typeof text !== 'string') {\n\t\t\t\treturn [];\n\t\t\t}\n\n\t\t\t// Check for repetitive content\n\t\t\tif (hasLongSequentialRepeat(text)) {\n\t\t\t\tconst splits = estimateTextSplitsByTokens(\n\t\t\t\t\ttext,\n\t\t\t\t\tthis.chunkSize,\n\t\t\t\t\tthis.chunkOverlap,\n\t\t\t\t\tthis.encodingName,\n\t\t\t\t);\n\t\t\t\treturn splits;\n\t\t\t}\n\n\t\t\t// Use tiktoken for normal text\n\t\t\ttry {\n\t\t\t\tthis.tokenizer ??= await getEncoding(this.encodingName);\n\n\t\t\t\tconst splits: string[] = [];\n\t\t\t\tconst input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);\n\n\t\t\t\tlet start_idx = 0;\n\t\t\t\tlet chunkCount = 0;\n\n\t\t\t\twhile (start_idx < input_ids.length) {\n\t\t\t\t\tif (start_idx > 0) {\n\t\t\t\t\t\tstart_idx = Math.max(0, start_idx - this.chunkOverlap);\n\t\t\t\t\t}\n\t\t\t\t\tconst end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);\n\t\t\t\t\tconst chunk_ids = input_ids.slice(start_idx, end_idx);\n\n\t\t\t\t\tsplits.push(this.tokenizer.decode(chunk_ids));\n\n\t\t\t\t\tchunkCount++;\n\t\t\t\t\tstart_idx = end_idx;\n\t\t\t\t}\n\n\t\t\t\treturn splits;\n\t\t\t} catch (tiktokenError) {\n\t\t\t\t// Fall back to character-based splitting if tiktoken fails\n\t\t\t\treturn estimateTextSplitsByTokens(\n\t\t\t\t\ttext,\n\t\t\t\t\tthis.chunkSize,\n\t\t\t\t\tthis.chunkOverlap,\n\t\t\t\t\tthis.encodingName,\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Return empty array on complete failure\n\t\t\treturn 
[];\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,2BAA6B;AAC7B,qBAAwC;AACxC,sBAA4B;AAC5B,6BAA2C;AAQpC,MAAM,0BAA0B,kCAAgD;AAAA,EACtF,OAAO,UAAU;AAChB,WAAO;AAAA,EACR;AAAA,EAUA,YAAY,QAA2C;AACtD,UAAM,MAAM;AAEZ,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,iBAAiB,QAAQ,kBAAkB,CAAC;AACjD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EACvD;AAAA,EAEA,MAAM,UAAU,MAAiC;AAChD,QAAI;AAEH,UAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACtC,eAAO,CAAC;AAAA,MACT;AAGA,cAAI,wCAAwB,IAAI,GAAG;AAClC,cAAM,aAAS;AAAA,UACd;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,QACN;AACA,eAAO;AAAA,MACR;AAGA,UAAI;AACH,aAAK,cAAc,UAAM,6BAAY,KAAK,YAAY;AAEtD,cAAM,SAAmB,CAAC;AAC1B,cAAM,YAAY,KAAK,UAAU,OAAO,MAAM,KAAK,gBAAgB,KAAK,iBAAiB;AAEzF,YAAI,YAAY;AAChB,YAAI,aAAa;AAEjB,eAAO,YAAY,UAAU,QAAQ;AACpC,cAAI,YAAY,GAAG;AAClB,wBAAY,KAAK,IAAI,GAAG,YAAY,KAAK,YAAY;AAAA,UACtD;AACA,gBAAM,UAAU,KAAK,IAAI,YAAY,KAAK,WAAW,UAAU,MAAM;AACrE,gBAAM,YAAY,UAAU,MAAM,WAAW,OAAO;AAEpD,iBAAO,KAAK,KAAK,UAAU,OAAO,SAAS,CAAC;AAE5C;AACA,sBAAY;AAAA,QACb;AAEA,eAAO;AAAA,MACR,SAAS,eAAe;AAEvB,mBAAO;AAAA,UACN;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,QACN;AAAA,MACD;AAAA,IACD,SAAS,OAAO;AAEf,aAAO,CAAC;AAAA,IACT;AAAA,EACD;AACD;","names":[]}
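For reference, the windowing logic in `TokenTextSplitter.splitText` (fully visible in the sourcesContent above) steps back by `chunkOverlap` tokens before taking each next `chunkSize`-token slice. A standalone rendering of just that loop, with a worked example; the function name is ours, the arithmetic is copied from the source:

```ts
// Same windowing arithmetic as TokenTextSplitter.splitText, extracted for
// illustration: returns [start, end) token index pairs for each chunk.
function chunkWindows(
  totalTokens: number,
  chunkSize: number,
  chunkOverlap: number,
): Array<[number, number]> {
  const windows: Array<[number, number]> = [];
  let start_idx = 0;
  while (start_idx < totalTokens) {
    if (start_idx > 0) {
      start_idx = Math.max(0, start_idx - chunkOverlap);
    }
    const end_idx = Math.min(start_idx + chunkSize, totalTokens);
    windows.push([start_idx, end_idx]);
    start_idx = end_idx;
  }
  return windows;
}

console.log(chunkWindows(250, 100, 20));
// [ [ 0, 100 ], [ 80, 180 ], [ 160, 250 ] ]
```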
@@ -34,6 +34,7 @@ __export(transcribe_operation_exports, {
 module.exports = __toCommonJS(transcribe_operation_exports);
 var import_form_data = __toESM(require("form-data"));
 var import_n8n_workflow = require("n8n-workflow");
+var import_binary_data = require("../../helpers/binary-data");
 var import_transport = require("../../transport");
 const properties = [
   {
@@ -92,17 +93,18 @@ async function execute(i) {
   if (options.temperature) {
     formData.append("temperature", options.temperature.toString());
   }
-  const
-
-
-
-
+  const { filename, contentType, fileContent } = await (0, import_binary_data.getBinaryDataFile)(
+    this,
+    i,
+    binaryPropertyName
+  );
+  formData.append("file", fileContent, {
+    filename,
+    contentType
   });
   const response = await import_transport.apiRequest.call(this, "POST", "/audio/transcriptions", {
     option: { formData },
-    headers:
-      "Content-Type": "multipart/form-data"
-    }
+    headers: formData.getHeaders()
   });
   return [
     {
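Both audio operations (and the file upload below) now resolve the input binary through the new shared `helpers/binary-data.js` module instead of inline logic, and they send `formData.getHeaders()` rather than a hardcoded `"Content-Type": "multipart/form-data"`. The `form-data` package's `getHeaders()` includes the generated multipart boundary, which the hardcoded header omitted. The helper's body is not included in this diff; the following is a hedged sketch consistent with its call signature and return shape, using standard n8n binary helpers:

```ts
import type { IExecuteFunctions } from 'n8n-workflow';

// Sketch only: the real dist/nodes/vendors/OpenAi/helpers/binary-data.js is
// not shown in this diff. The { filename, contentType, fileContent } shape
// and the (context, itemIndex, propertyName) arguments come from the hunks.
export async function getBinaryDataFile(
  context: IExecuteFunctions,
  itemIndex: number,
  binaryPropertyName: string,
): Promise<{ filename?: string; contentType: string; fileContent: Buffer }> {
  // Throws a descriptive error if the named binary property is missing.
  const binaryData = context.helpers.assertBinaryData(itemIndex, binaryPropertyName);
  // Resolves the content whether it is stored inline (base64) or held by the
  // external binary-data manager (filesystem/S3 modes).
  const fileContent = await context.helpers.getBinaryDataBuffer(itemIndex, binaryPropertyName);
  return {
    filename: binaryData.fileName,
    contentType: binaryData.mimeType,
    fileContent,
  };
}
```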
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/audio/transcribe.operation.ts"],"sourcesContent":["import FormData from 'form-data';\nimport type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\n\nimport { apiRequest } from '../../transport';\n\nconst properties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Input Data Field Name',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\tplaceholder: 'e.g. data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tdescription:\n\t\t\t'Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Language of the Audio File',\n\t\t\t\tname: 'language',\n\t\t\t\ttype: 'string',\n\t\t\t\tdescription:\n\t\t\t\t\t'The language of the input audio. Supplying the input language in <a href=\"https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes\" target=\"_blank\">ISO-639-1</a> format will improve accuracy and latency.',\n\t\t\t\tdefault: '',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Temperature)',\n\t\t\t\tname: 'temperature',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 1,\n\t\t\t\t\tnumberPrecision: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['transcribe'],\n\t\tresource: ['audio'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = 'whisper-1';\n\tconst binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tconst formData = new FormData();\n\n\tformData.append('model', model);\n\n\tif (options.language) {\n\t\tformData.append('language', options.language);\n\t}\n\n\tif (options.temperature) {\n\t\tformData.append('temperature', options.temperature.toString());\n\t}\n\n\tconst
+
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/audio/transcribe.operation.ts"],"sourcesContent":["import FormData from 'form-data';\nimport type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\n\nimport { getBinaryDataFile } from '../../helpers/binary-data';\nimport { apiRequest } from '../../transport';\n\nconst properties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Input Data Field Name',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\tplaceholder: 'e.g. data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tdescription:\n\t\t\t'Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Language of the Audio File',\n\t\t\t\tname: 'language',\n\t\t\t\ttype: 'string',\n\t\t\t\tdescription:\n\t\t\t\t\t'The language of the input audio. Supplying the input language in <a href=\"https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes\" target=\"_blank\">ISO-639-1</a> format will improve accuracy and latency.',\n\t\t\t\tdefault: '',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Temperature)',\n\t\t\t\tname: 'temperature',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 1,\n\t\t\t\t\tnumberPrecision: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['transcribe'],\n\t\tresource: ['audio'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = 'whisper-1';\n\tconst binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tconst formData = new FormData();\n\n\tformData.append('model', model);\n\n\tif (options.language) {\n\t\tformData.append('language', options.language);\n\t}\n\n\tif (options.temperature) {\n\t\tformData.append('temperature', options.temperature.toString());\n\t}\n\n\tconst { filename, contentType, fileContent } = await getBinaryDataFile(\n\t\tthis,\n\t\ti,\n\t\tbinaryPropertyName,\n\t);\n\tformData.append('file', fileContent, {\n\t\tfilename,\n\t\tcontentType,\n\t});\n\n\tconst response = await apiRequest.call(this, 'POST', '/audio/transcriptions', {\n\t\toption: { formData },\n\t\theaders: formData.getHeaders(),\n\t});\n\n\treturn [\n\t\t{\n\t\t\tjson: response,\n\t\t\tpairedItem: { item: i 
},\n\t\t},\n\t];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAqB;AAErB,0BAAqC;AAErC,yBAAkC;AAClC,uBAA2B;AAE3B,MAAM,aAAgC;AAAA,EACrC;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aACC;AAAA,EACF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aACC;AAAA,QACD,SAAS;AAAA,MACV;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,YAAY;AAAA,IACxB,UAAU,CAAC,OAAO;AAAA,EACnB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,QAAQ;AACd,QAAM,qBAAqB,KAAK,iBAAiB,sBAAsB,CAAC;AACxE,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AAEtD,QAAM,WAAW,IAAI,iBAAAA,QAAS;AAE9B,WAAS,OAAO,SAAS,KAAK;AAE9B,MAAI,QAAQ,UAAU;AACrB,aAAS,OAAO,YAAY,QAAQ,QAAQ;AAAA,EAC7C;AAEA,MAAI,QAAQ,aAAa;AACxB,aAAS,OAAO,eAAe,QAAQ,YAAY,SAAS,CAAC;AAAA,EAC9D;AAEA,QAAM,EAAE,UAAU,aAAa,YAAY,IAAI,UAAM;AAAA,IACpD;AAAA,IACA;AAAA,IACA;AAAA,EACD;AACA,WAAS,OAAO,QAAQ,aAAa;AAAA,IACpC;AAAA,IACA;AAAA,EACD,CAAC;AAED,QAAM,WAAW,MAAM,4BAAW,KAAK,MAAM,QAAQ,yBAAyB;AAAA,IAC7E,QAAQ,EAAE,SAAS;AAAA,IACnB,SAAS,SAAS,WAAW;AAAA,EAC9B,CAAC;AAED,SAAO;AAAA,IACN;AAAA,MACC,MAAM;AAAA,MACN,YAAY,EAAE,MAAM,EAAE;AAAA,IACvB;AAAA,EACD;AACD;","names":["FormData"]}
@@ -34,6 +34,7 @@ __export(translate_operation_exports, {
 module.exports = __toCommonJS(translate_operation_exports);
 var import_form_data = __toESM(require("form-data"));
 var import_n8n_workflow = require("n8n-workflow");
+var import_binary_data = require("../../helpers/binary-data");
 var import_transport = require("../../transport");
 const properties = [
   {
@@ -82,17 +83,18 @@ async function execute(i) {
   if (options.temperature) {
     formData.append("temperature", options.temperature.toString());
   }
-  const
-
-
-
-
+  const { filename, contentType, fileContent } = await (0, import_binary_data.getBinaryDataFile)(
+    this,
+    i,
+    binaryPropertyName
+  );
+  formData.append("file", fileContent, {
+    filename,
+    contentType
   });
   const response = await import_transport.apiRequest.call(this, "POST", "/audio/translations", {
     option: { formData },
-    headers:
-      "Content-Type": "multipart/form-data"
-    }
+    headers: formData.getHeaders()
   });
   return [
     {
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/audio/translate.operation.ts"],"sourcesContent":["import FormData from 'form-data';\nimport type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\n\nimport { apiRequest } from '../../transport';\n\nconst properties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Input Data Field Name',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tplaceholder: 'e.g. data',\n\t\tdescription:\n\t\t\t'Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Temperature)',\n\t\t\t\tname: 'temperature',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 1,\n\t\t\t\t\tnumberPrecision: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['translate'],\n\t\tresource: ['audio'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = 'whisper-1';\n\tconst binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tconst formData = new FormData();\n\n\tformData.append('model', model);\n\n\tif (options.temperature) {\n\t\tformData.append('temperature', options.temperature.toString());\n\t}\n\n\tconst
+
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/audio/translate.operation.ts"],"sourcesContent":["import FormData from 'form-data';\nimport type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\n\nimport { getBinaryDataFile } from '../../helpers/binary-data';\nimport { apiRequest } from '../../transport';\n\nconst properties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Input Data Field Name',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tplaceholder: 'e.g. data',\n\t\tdescription:\n\t\t\t'Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Temperature)',\n\t\t\t\tname: 'temperature',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tminValue: 0,\n\t\t\t\t\tmaxValue: 1,\n\t\t\t\t\tnumberPrecision: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['translate'],\n\t\tresource: ['audio'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = 'whisper-1';\n\tconst binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tconst formData = new FormData();\n\n\tformData.append('model', model);\n\n\tif (options.temperature) {\n\t\tformData.append('temperature', options.temperature.toString());\n\t}\n\n\tconst { filename, contentType, fileContent } = await getBinaryDataFile(\n\t\tthis,\n\t\ti,\n\t\tbinaryPropertyName,\n\t);\n\tformData.append('file', fileContent, {\n\t\tfilename,\n\t\tcontentType,\n\t});\n\n\tconst response = await apiRequest.call(this, 'POST', '/audio/translations', {\n\t\toption: { formData },\n\t\theaders: formData.getHeaders(),\n\t});\n\n\treturn [\n\t\t{\n\t\t\tjson: response,\n\t\t\tpairedItem: { item: i 
},\n\t\t},\n\t];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAqB;AAErB,0BAAqC;AAErC,yBAAkC;AAClC,uBAA2B;AAE3B,MAAM,aAAgC;AAAA,EACrC;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,MAAM;AAAA,IACN,aAAa;AAAA,IACb,aACC;AAAA,EACF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,UACZ,UAAU;AAAA,UACV,UAAU;AAAA,UACV,iBAAiB;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,WAAW;AAAA,IACvB,UAAU,CAAC,OAAO;AAAA,EACnB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,QAAQ;AACd,QAAM,qBAAqB,KAAK,iBAAiB,sBAAsB,CAAC;AACxE,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AAEtD,QAAM,WAAW,IAAI,iBAAAA,QAAS;AAE9B,WAAS,OAAO,SAAS,KAAK;AAE9B,MAAI,QAAQ,aAAa;AACxB,aAAS,OAAO,eAAe,QAAQ,YAAY,SAAS,CAAC;AAAA,EAC9D;AAEA,QAAM,EAAE,UAAU,aAAa,YAAY,IAAI,UAAM;AAAA,IACpD;AAAA,IACA;AAAA,IACA;AAAA,EACD;AACA,WAAS,OAAO,QAAQ,aAAa;AAAA,IACpC;AAAA,IACA;AAAA,EACD,CAAC;AAED,QAAM,WAAW,MAAM,4BAAW,KAAK,MAAM,QAAQ,uBAAuB;AAAA,IAC3E,QAAQ,EAAE,SAAS;AAAA,IACnB,SAAS,SAAS,WAAW;AAAA,EAC9B,CAAC;AAED,SAAO;AAAA,IACN;AAAA,MACC,MAAM;AAAA,MACN,YAAY,EAAE,MAAM,EAAE;AAAA,IACvB;AAAA,EACD;AACD;","names":["FormData"]}
@@ -34,6 +34,7 @@ __export(upload_operation_exports, {
 module.exports = __toCommonJS(upload_operation_exports);
 var import_form_data = __toESM(require("form-data"));
 var import_n8n_workflow = require("n8n-workflow");
+var import_binary_data = require("../../helpers/binary-data");
 var import_transport = require("../../transport");
 const properties = [
   {
@@ -84,18 +85,19 @@ async function execute(i) {
   const options = this.getNodeParameter("options", i, {});
   const formData = new import_form_data.default();
   formData.append("purpose", options.purpose || "assistants");
-  const
-
-
-
-
+  const { filename, contentType, fileContent } = await (0, import_binary_data.getBinaryDataFile)(
+    this,
+    i,
+    binaryPropertyName
+  );
+  formData.append("file", fileContent, {
+    filename,
+    contentType
   });
   try {
     const response = await import_transport.apiRequest.call(this, "POST", "/files", {
       option: { formData },
-      headers:
-        "Content-Type": "multipart/form-data"
-      }
+      headers: formData.getHeaders()
     });
     return [
       {
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/file/upload.operation.ts"],"sourcesContent":["import FormData from 'form-data';\nimport type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport { updateDisplayOptions, NodeOperationError } from 'n8n-workflow';\n\nimport { apiRequest } from '../../transport';\n\nconst properties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Input Data Field Name',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tplaceholder: 'e.g. data',\n\t\tdescription:\n\t\t\t'Name of the binary property which contains the file. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants.',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Purpose',\n\t\t\t\tname: 'purpose',\n\t\t\t\ttype: 'options',\n\t\t\t\tdefault: 'assistants',\n\t\t\t\tdescription:\n\t\t\t\t\t\"The intended purpose of the uploaded file, the 'Fine-tuning' only supports .jsonl files\",\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Assistants',\n\t\t\t\t\t\tvalue: 'assistants',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Fine-Tune',\n\t\t\t\t\t\tvalue: 'fine-tune',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['upload'],\n\t\tresource: ['file'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tconst formData = new FormData();\n\n\tformData.append('purpose', options.purpose || 'assistants');\n\n\tconst
+
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/file/upload.operation.ts"],"sourcesContent":["import FormData from 'form-data';\nimport type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';\nimport { updateDisplayOptions, NodeOperationError } from 'n8n-workflow';\n\nimport { getBinaryDataFile } from '../../helpers/binary-data';\nimport { apiRequest } from '../../transport';\n\nconst properties: INodeProperties[] = [\n\t{\n\t\tdisplayName: 'Input Data Field Name',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tplaceholder: 'e.g. data',\n\t\tdescription:\n\t\t\t'Name of the binary property which contains the file. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants.',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Purpose',\n\t\t\t\tname: 'purpose',\n\t\t\t\ttype: 'options',\n\t\t\t\tdefault: 'assistants',\n\t\t\t\tdescription:\n\t\t\t\t\t\"The intended purpose of the uploaded file, the 'Fine-tuning' only supports .jsonl files\",\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Assistants',\n\t\t\t\t\t\tvalue: 'assistants',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Fine-Tune',\n\t\t\t\t\t\tvalue: 'fine-tune',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['upload'],\n\t\tresource: ['file'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tconst formData = new FormData();\n\n\tformData.append('purpose', options.purpose || 'assistants');\n\n\tconst { filename, contentType, fileContent } = await getBinaryDataFile(\n\t\tthis,\n\t\ti,\n\t\tbinaryPropertyName,\n\t);\n\tformData.append('file', fileContent, {\n\t\tfilename,\n\t\tcontentType,\n\t});\n\n\ttry {\n\t\tconst response = await apiRequest.call(this, 'POST', '/files', {\n\t\t\toption: { formData },\n\t\t\theaders: formData.getHeaders(),\n\t\t});\n\n\t\treturn [\n\t\t\t{\n\t\t\t\tjson: response,\n\t\t\t\tpairedItem: { item: i },\n\t\t\t},\n\t\t];\n\t} catch (error) {\n\t\tif (\n\t\t\terror.message.includes('Bad request') &&\n\t\t\terror.description?.includes('Expected file to have JSONL format')\n\t\t) {\n\t\t\tthrow new NodeOperationError(this.getNode(), 'The file content is not in JSONL format', {\n\t\t\t\tdescription:\n\t\t\t\t\t'Fine-tuning accepts only files in JSONL format, where every line is a valid JSON dictionary',\n\t\t\t});\n\t\t}\n\t\tthrow 
error;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBAAqB;AAErB,0BAAyD;AAEzD,yBAAkC;AAClC,uBAA2B;AAE3B,MAAM,aAAgC;AAAA,EACrC;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,MAAM;AAAA,IACN,aAAa;AAAA,IACb,aACC;AAAA,EACF;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,SAAS;AAAA,UACR;AAAA,YACC,MAAM;AAAA,YACN,OAAO;AAAA,UACR;AAAA,UACA;AAAA,YACC,MAAM;AAAA,YACN,OAAO;AAAA,UACR;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,QAAQ;AAAA,IACpB,UAAU,CAAC,MAAM;AAAA,EAClB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,qBAAqB,KAAK,iBAAiB,sBAAsB,CAAC;AACxE,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AAEtD,QAAM,WAAW,IAAI,iBAAAA,QAAS;AAE9B,WAAS,OAAO,WAAW,QAAQ,WAAW,YAAY;AAE1D,QAAM,EAAE,UAAU,aAAa,YAAY,IAAI,UAAM;AAAA,IACpD;AAAA,IACA;AAAA,IACA;AAAA,EACD;AACA,WAAS,OAAO,QAAQ,aAAa;AAAA,IACpC;AAAA,IACA;AAAA,EACD,CAAC;AAED,MAAI;AACH,UAAM,WAAW,MAAM,4BAAW,KAAK,MAAM,QAAQ,UAAU;AAAA,MAC9D,QAAQ,EAAE,SAAS;AAAA,MACnB,SAAS,SAAS,WAAW;AAAA,IAC9B,CAAC;AAED,WAAO;AAAA,MACN;AAAA,QACC,MAAM;AAAA,QACN,YAAY,EAAE,MAAM,EAAE;AAAA,MACvB;AAAA,IACD;AAAA,EACD,SAAS,OAAO;AACf,QACC,MAAM,QAAQ,SAAS,aAAa,KACpC,MAAM,aAAa,SAAS,oCAAoC,GAC/D;AACD,YAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,2CAA2C;AAAA,QACvF,aACC;AAAA,MACF,CAAC;AAAA,IACF;AACA,UAAM;AAAA,EACP;AACD;","names":["FormData"]}
@@ -188,6 +188,36 @@ const properties = [
         description: "An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.",
         type: "number"
       },
+      {
+        displayName: "Reasoning Effort",
+        name: "reasoning_effort",
+        default: "medium",
+        description: 'Controls the amount of reasoning tokens to use. A value of "low" will favor speed and economical token usage, "high" will favor more complete reasoning at the cost of more tokens generated and slower responses.',
+        type: "options",
+        options: [
+          {
+            name: "Low",
+            value: "low",
+            description: "Favors speed and economical token usage"
+          },
+          {
+            name: "Medium",
+            value: "medium",
+            description: "Balance between speed and reasoning accuracy"
+          },
+          {
+            name: "High",
+            value: "high",
+            description: "Favors more complete reasoning at the cost of more tokens generated and slower responses"
+          }
+        ],
+        displayOptions: {
+          show: {
+            // reasoning_effort is only available on o1, o1-versioned, or on o3-mini and beyond, and gpt-5 models. Not on o1-mini or other GPT-models.
+            "/modelId": [{ _cnd: { regex: "(^o1([-\\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)" } }]
+          }
+        }
+      },
       {
         displayName: "Max Tool Calls Iterations",
         name: "maxToolsIterations",
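The new Reasoning Effort option is gated on `/modelId` by the displayOptions regex in the hunk above. A quick check of what that pattern does and does not match, using the regex verbatim:

```ts
// Regex copied verbatim from the displayOptions gate above.
const reasoningEffortModels = /(^o1([-\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)/;

const samples = ['o1', 'o1-2024-12-17', 'o1-mini', 'o3-mini', 'o4-mini', 'gpt-5', 'gpt-5-mini', 'gpt-4o'];
for (const model of samples) {
  console.log(`${model}: ${reasoningEffortModels.test(model)}`);
}
// o1: true          o1-2024-12-17: true   o1-mini: false
// o3-mini: true     o4-mini: true         gpt-5: true
// gpt-5-mini: true  gpt-4o: false
```

Note that `o1-mini` fails because `([-\d]+)?$` only allows dashes and digits after `o1`, while any `o3`-through-`o9` or `gpt-5` variant matches via the open-ended `.*` alternatives.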
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/text/message.operation.ts"],"sourcesContent":["import type { Tool } from '@langchain/core/tools';\nimport _omit from 'lodash/omit';\nimport type {\n\tINodeProperties,\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tIDataObject,\n} from 'n8n-workflow';\nimport { jsonParse, updateDisplayOptions } from 'n8n-workflow';\n\nimport { getConnectedTools } from '@utils/helpers';\n\nimport { MODELS_NOT_SUPPORT_FUNCTION_CALLS } from '../../helpers/constants';\nimport type { ChatCompletion } from '../../helpers/interfaces';\nimport { formatToOpenAIAssistantTool } from '../../helpers/utils';\nimport { apiRequest } from '../../transport';\nimport { modelRLC } from '../descriptions';\n\nconst properties: INodeProperties[] = [\n\tmodelRLC('modelSearch'),\n\t{\n\t\tdisplayName: 'Messages',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tsortable: true,\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tplaceholder: 'Add Message',\n\t\tdefault: { values: [{ content: '' }] },\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Values',\n\t\t\t\tname: 'values',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Prompt',\n\t\t\t\t\t\tname: 'content',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdescription: 'The content of the message to be send',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\tplaceholder: 'e.g. Hello, how can you help me?',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\trows: 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Role',\n\t\t\t\t\t\tname: 'role',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Role in shaping the model's response, it tells the model how it should behave and interact with the user\",\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: 'user',\n\t\t\t\t\t\t\t\tdescription: 'Send a message as a user and get a response from the model',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Assistant',\n\t\t\t\t\t\t\t\tvalue: 'assistant',\n\t\t\t\t\t\t\t\tdescription: 'Tell the model to adopt a specific tone or personality',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'System',\n\t\t\t\t\t\t\t\tvalue: 'system',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t\"Usually used to set the model's behavior or context for the next user message\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'user',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\t{\n\t\tdisplayName: 'Simplify Output',\n\t\tname: 'simplify',\n\t\ttype: 'boolean',\n\t\tdefault: true,\n\t\tdescription: 'Whether to return a simplified version of the response instead of the raw data',\n\t},\n\t{\n\t\tdisplayName: 'Output Content as JSON',\n\t\tname: 'jsonOutput',\n\t\ttype: 'boolean',\n\t\tdescription:\n\t\t\t'Whether to attempt to return the response in JSON format. 
Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.',\n\t\tdefault: false,\n\t},\n\t{\n\t\tdisplayName: 'Hide Tools',\n\t\tname: 'hideTools',\n\t\ttype: 'hidden',\n\t\tdefault: 'hide',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tmodelId: MODELS_NOT_SUPPORT_FUNCTION_CALLS,\n\t\t\t\t'@version': [{ _cnd: { gte: 1.2 } }],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Connect your own custom n8n tools to this node on the canvas',\n\t\tname: 'noticeTools',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\thideTools: ['hide'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\tname: 'frequency_penalty',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\tname: 'maxTokens',\n\t\t\t\tdefault: 16,\n\t\t\t\tdescription:\n\t\t\t\t\t'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\ttype: 'number',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Number of Completions',\n\t\t\t\tname: 'n',\n\t\t\t\tdefault: 1,\n\t\t\t\tdescription:\n\t\t\t\t\t'How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\tname: 'presence_penalty',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Temperature)',\n\t\t\t\tname: 'temperature',\n\t\t\t\tdefault: 1,\n\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. We generally recommend altering this or temperature but not both.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Top P)',\n\t\t\t\tname: 'topP',\n\t\t\t\tdefault: 1,\n\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Max Tool Calls Iterations',\n\t\t\t\tname: 'maxToolsIterations',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 15,\n\t\t\t\tdescription:\n\t\t\t\t\t'The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit.',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'@version': [{ _cnd: { gte: 1.5 } }],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['message'],\n\t\tresource: ['text'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst nodeVersion = this.getNode().typeVersion;\n\tconst model = this.getNodeParameter('modelId', i, '', { extractValue: true });\n\tlet messages = this.getNodeParameter('messages.values', i, []) as IDataObject[];\n\tconst options = this.getNodeParameter('options', i, {});\n\tconst jsonOutput = this.getNodeParameter('jsonOutput', i, false) as boolean;\n\tconst maxToolsIterations =\n\t\tnodeVersion >= 1.5 ? (this.getNodeParameter('options.maxToolsIterations', i, 15) as number) : 0;\n\n\tconst abortSignal = this.getExecutionCancelSignal();\n\n\tif (options.maxTokens !== undefined) {\n\t\toptions.max_completion_tokens = options.maxTokens;\n\t\tdelete options.maxTokens;\n\t}\n\n\tif (options.topP !== undefined) {\n\t\toptions.top_p = options.topP;\n\t\tdelete options.topP;\n\t}\n\n\tlet response_format;\n\tif (jsonOutput) {\n\t\tresponse_format = { type: 'json_object' };\n\t\tmessages = [\n\t\t\t{\n\t\t\t\trole: 'system',\n\t\t\t\tcontent: 'You are a helpful assistant designed to output JSON.',\n\t\t\t},\n\t\t\t...messages,\n\t\t];\n\t}\n\n\tconst hideTools = this.getNodeParameter('hideTools', i, '') as string;\n\n\tlet tools;\n\tlet externalTools: Tool[] = [];\n\n\tif (hideTools !== 'hide') {\n\t\tconst enforceUniqueNames = nodeVersion > 1;\n\t\texternalTools = await getConnectedTools(this, enforceUniqueNames, false);\n\t}\n\n\tif (externalTools.length) {\n\t\ttools = externalTools.length ? externalTools?.map(formatToOpenAIAssistantTool) : undefined;\n\t}\n\n\tconst body: IDataObject = {\n\t\tmodel,\n\t\tmessages,\n\t\ttools,\n\t\tresponse_format,\n\t\t..._omit(options, ['maxToolsIterations']),\n\t};\n\n\tlet response = (await apiRequest.call(this, 'POST', '/chat/completions', {\n\t\tbody,\n\t})) as ChatCompletion;\n\n\tif (!response) return [];\n\n\tlet currentIteration = 1;\n\tlet toolCalls = response?.choices[0]?.message?.tool_calls;\n\n\twhile (toolCalls?.length) {\n\t\t// Break the loop if the max iterations is reached or the execution is canceled\n\t\tif (\n\t\t\tabortSignal?.aborted ||\n\t\t\t(maxToolsIterations > 0 && currentIteration >= maxToolsIterations)\n\t\t) {\n\t\t\tbreak;\n\t\t}\n\t\tmessages.push(response.choices[0].message);\n\n\t\tfor (const toolCall of toolCalls) {\n\t\t\tconst functionName = toolCall.function.name;\n\t\t\tconst functionArgs = toolCall.function.arguments;\n\n\t\t\tlet functionResponse;\n\t\t\tfor (const tool of externalTools ?? []) {\n\t\t\t\tif (tool.name === functionName) {\n\t\t\t\t\tconst parsedArgs: { input: string } = jsonParse(functionArgs);\n\t\t\t\t\tconst functionInput = parsedArgs.input ?? parsedArgs ?? 
functionArgs;\n\t\t\t\t\tfunctionResponse = await tool.invoke(functionInput);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (typeof functionResponse === 'object') {\n\t\t\t\tfunctionResponse = JSON.stringify(functionResponse);\n\t\t\t}\n\n\t\t\tmessages.push({\n\t\t\t\ttool_call_id: toolCall.id,\n\t\t\t\trole: 'tool',\n\t\t\t\tcontent: functionResponse,\n\t\t\t});\n\t\t}\n\n\t\tresponse = (await apiRequest.call(this, 'POST', '/chat/completions', {\n\t\t\tbody,\n\t\t})) as ChatCompletion;\n\n\t\ttoolCalls = response.choices[0].message.tool_calls;\n\t\tcurrentIteration += 1;\n\t}\n\n\tif (response_format) {\n\t\tresponse.choices = response.choices.map((choice) => {\n\t\t\ttry {\n\t\t\t\tchoice.message.content = JSON.parse(choice.message.content);\n\t\t\t} catch (error) {}\n\t\t\treturn choice;\n\t\t});\n\t}\n\n\tconst simplify = this.getNodeParameter('simplify', i) as boolean;\n\n\tconst returnData: INodeExecutionData[] = [];\n\n\tif (simplify) {\n\t\tfor (const entry of response.choices) {\n\t\t\treturnData.push({\n\t\t\t\tjson: entry,\n\t\t\t\tpairedItem: { item: i },\n\t\t\t});\n\t\t}\n\t} else {\n\t\treturnData.push({ json: response, pairedItem: { item: i } });\n\t}\n\n\treturn returnData;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,kBAAkB;AAOlB,0BAAgD;AAEhD,qBAAkC;AAElC,uBAAkD;AAElD,mBAA4C;AAC5C,uBAA2B;AAC3B,0BAAyB;AAEzB,MAAM,aAAgC;AAAA,MACrC,8BAAS,aAAa;AAAA,EACtB;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,UAAU;AAAA,MACV,gBAAgB;AAAA,IACjB;AAAA,IACA,aAAa;AAAA,IACb,SAAS,EAAE,QAAQ,CAAC,EAAE,SAAS,GAAG,CAAC,EAAE;AAAA,IACrC,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,aAAa;AAAA,YACb,SAAS;AAAA,YACT,aAAa;AAAA,YACb,aAAa;AAAA,cACZ,MAAM;AAAA,YACP;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,aACC;AAAA,YACD,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,EACd;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aACC;AAAA,IACD,SAAS;AAAA,EACV;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,SAAS;AAAA,QACT,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,MACpC;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,WAAW,CAAC,MAAM;AAAA,MACnB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,QAC7D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,MAAM;AAAA,QACN,aAAa;AAAA,UACZ,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,QAC7D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,QAC5D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAA
S;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,QAC5D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,gBAAgB;AAAA,UACf,MAAM;AAAA,YACL,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,UACpC;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,SAAS;AAAA,IACrB,UAAU,CAAC,MAAM;AAAA,EAClB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,cAAc,KAAK,QAAQ,EAAE;AACnC,QAAM,QAAQ,KAAK,iBAAiB,WAAW,GAAG,IAAI,EAAE,cAAc,KAAK,CAAC;AAC5E,MAAI,WAAW,KAAK,iBAAiB,mBAAmB,GAAG,CAAC,CAAC;AAC7D,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AACtD,QAAM,aAAa,KAAK,iBAAiB,cAAc,GAAG,KAAK;AAC/D,QAAM,qBACL,eAAe,MAAO,KAAK,iBAAiB,8BAA8B,GAAG,EAAE,IAAe;AAE/F,QAAM,cAAc,KAAK,yBAAyB;AAElD,MAAI,QAAQ,cAAc,QAAW;AACpC,YAAQ,wBAAwB,QAAQ;AACxC,WAAO,QAAQ;AAAA,EAChB;AAEA,MAAI,QAAQ,SAAS,QAAW;AAC/B,YAAQ,QAAQ,QAAQ;AACxB,WAAO,QAAQ;AAAA,EAChB;AAEA,MAAI;AACJ,MAAI,YAAY;AACf,sBAAkB,EAAE,MAAM,cAAc;AACxC,eAAW;AAAA,MACV;AAAA,QACC,MAAM;AAAA,QACN,SAAS;AAAA,MACV;AAAA,MACA,GAAG;AAAA,IACJ;AAAA,EACD;AAEA,QAAM,YAAY,KAAK,iBAAiB,aAAa,GAAG,EAAE;AAE1D,MAAI;AACJ,MAAI,gBAAwB,CAAC;AAE7B,MAAI,cAAc,QAAQ;AACzB,UAAM,qBAAqB,cAAc;AACzC,oBAAgB,UAAM,kCAAkB,MAAM,oBAAoB,KAAK;AAAA,EACxE;AAEA,MAAI,cAAc,QAAQ;AACzB,YAAQ,cAAc,SAAS,eAAe,IAAI,wCAA2B,IAAI;AAAA,EAClF;AAEA,QAAM,OAAoB;AAAA,IACzB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAG,YAAAA,SAAM,SAAS,CAAC,oBAAoB,CAAC;AAAA,EACzC;AAEA,MAAI,WAAY,MAAM,4BAAW,KAAK,MAAM,QAAQ,qBAAqB;AAAA,IACxE;AAAA,EACD,CAAC;AAED,MAAI,CAAC,SAAU,QAAO,CAAC;AAEvB,MAAI,mBAAmB;AACvB,MAAI,YAAY,UAAU,QAAQ,CAAC,GAAG,SAAS;AAE/C,SAAO,WAAW,QAAQ;AAEzB,QACC,aAAa,WACZ,qBAAqB,KAAK,oBAAoB,oBAC9C;AACD;AAAA,IACD;AACA,aAAS,KAAK,SAAS,QAAQ,CAAC,EAAE,OAAO;AAEzC,eAAW,YAAY,WAAW;AACjC,YAAM,eAAe,SAAS,SAAS;AACvC,YAAM,eAAe,SAAS,SAAS;AAEvC,UAAI;AACJ,iBAAW,QAAQ,iBAAiB,CAAC,GAAG;AACvC,YAAI,KAAK,SAAS,cAAc;AAC/B,gBAAM,iBAAgC,+BAAU,YAAY;AAC5D,gBAAM,gBAAgB,WAAW,SAAS,cAAc;AACxD,6BAAmB,MAAM,KAAK,OAAO,aAAa;AAAA,QACnD;AAAA,MACD;AAEA,UAAI,OAAO,qBAAqB,UAAU;AACzC,2BAAmB,KAAK,UAAU,gBAAgB;AAAA,MACnD;AAEA,eAAS,KAAK;AAAA,QACb,cAAc,SAAS;AAAA,QACvB,MAAM;AAAA,QACN,SAAS;AAAA,MACV,CAAC;AAAA,IACF;AAEA,eAAY,MAAM,4BAAW,KAAK,MAAM,QAAQ,qBAAqB;AAAA,MACpE;AAAA,IACD,CAAC;AAED,gBAAY,SAAS,QAAQ,CAAC,EAAE,QAAQ;AACxC,wBAAoB;AAAA,EACrB;AAEA,MAAI,iBAAiB;AACpB,aAAS,UAAU,SAAS,QAAQ,IAAI,CAAC,WAAW;AACnD,UAAI;AACH,eAAO,QAAQ,UAAU,KAAK,MAAM,OAAO,QAAQ,OAAO;AAAA,MAC3D,SAAS,OAAO;AAAA,MAAC;AACjB,aAAO;AAAA,IACR,CAAC;AAAA,EACF;AAEA,QAAM,WAAW,KAAK,iBAAiB,YAAY,CAAC;AAEpD,QAAM,aAAmC,CAAC;AAE1C,MAAI,UAAU;AACb,eAAW,SAAS,SAAS,SAAS;AACrC,iBAAW,KAAK;AAAA,QACf,MAAM;AAAA,QACN,YAAY,EAAE,MAAM,EAAE;AAAA,MACvB,CAAC;AAAA,IACF;AAAA,EACD,OAAO;AACN,eAAW,KAAK,EAAE,MAAM,UAAU,YAAY,EAAE,MAAM,EAAE,EAAE,CAAC;AAAA,EAC5D;AAEA,SAAO;AACR;","names":["_omit"]}
+
{"version":3,"sources":["../../../../../../nodes/vendors/OpenAi/actions/text/message.operation.ts"],"sourcesContent":["import type { Tool } from '@langchain/core/tools';\nimport _omit from 'lodash/omit';\nimport type {\n\tINodeProperties,\n\tIExecuteFunctions,\n\tINodeExecutionData,\n\tIDataObject,\n} from 'n8n-workflow';\nimport { jsonParse, updateDisplayOptions } from 'n8n-workflow';\n\nimport { getConnectedTools } from '@utils/helpers';\n\nimport { MODELS_NOT_SUPPORT_FUNCTION_CALLS } from '../../helpers/constants';\nimport type { ChatCompletion } from '../../helpers/interfaces';\nimport { formatToOpenAIAssistantTool } from '../../helpers/utils';\nimport { apiRequest } from '../../transport';\nimport { modelRLC } from '../descriptions';\n\nconst properties: INodeProperties[] = [\n\tmodelRLC('modelSearch'),\n\t{\n\t\tdisplayName: 'Messages',\n\t\tname: 'messages',\n\t\ttype: 'fixedCollection',\n\t\ttypeOptions: {\n\t\t\tsortable: true,\n\t\t\tmultipleValues: true,\n\t\t},\n\t\tplaceholder: 'Add Message',\n\t\tdefault: { values: [{ content: '' }] },\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Values',\n\t\t\t\tname: 'values',\n\t\t\t\tvalues: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Prompt',\n\t\t\t\t\t\tname: 'content',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdescription: 'The content of the message to be send',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\tplaceholder: 'e.g. Hello, how can you help me?',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\trows: 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Role',\n\t\t\t\t\t\tname: 'role',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Role in shaping the model's response, it tells the model how it should behave and interact with the user\",\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'User',\n\t\t\t\t\t\t\t\tvalue: 'user',\n\t\t\t\t\t\t\t\tdescription: 'Send a message as a user and get a response from the model',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Assistant',\n\t\t\t\t\t\t\t\tvalue: 'assistant',\n\t\t\t\t\t\t\t\tdescription: 'Tell the model to adopt a specific tone or personality',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'System',\n\t\t\t\t\t\t\t\tvalue: 'system',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t\"Usually used to set the model's behavior or context for the next user message\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t\tdefault: 'user',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t},\n\t{\n\t\tdisplayName: 'Simplify Output',\n\t\tname: 'simplify',\n\t\ttype: 'boolean',\n\t\tdefault: true,\n\t\tdescription: 'Whether to return a simplified version of the response instead of the raw data',\n\t},\n\t{\n\t\tdisplayName: 'Output Content as JSON',\n\t\tname: 'jsonOutput',\n\t\ttype: 'boolean',\n\t\tdescription:\n\t\t\t'Whether to attempt to return the response in JSON format. 
Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.',\n\t\tdefault: false,\n\t},\n\t{\n\t\tdisplayName: 'Hide Tools',\n\t\tname: 'hideTools',\n\t\ttype: 'hidden',\n\t\tdefault: 'hide',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tmodelId: MODELS_NOT_SUPPORT_FUNCTION_CALLS,\n\t\t\t\t'@version': [{ _cnd: { gte: 1.2 } }],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Connect your own custom n8n tools to this node on the canvas',\n\t\tname: 'noticeTools',\n\t\ttype: 'notice',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\thide: {\n\t\t\t\thideTools: ['hide'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\tname: 'frequency_penalty',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\tname: 'maxTokens',\n\t\t\t\tdefault: 16,\n\t\t\t\tdescription:\n\t\t\t\t\t'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\ttype: 'number',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Number of Completions',\n\t\t\t\tname: 'n',\n\t\t\t\tdefault: 1,\n\t\t\t\tdescription:\n\t\t\t\t\t'How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\tname: 'presence_penalty',\n\t\t\t\tdefault: 0,\n\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Temperature)',\n\t\t\t\tname: 'temperature',\n\t\t\t\tdefault: 1,\n\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. We generally recommend altering this or temperature but not both.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Output Randomness (Top P)',\n\t\t\t\tname: 'topP',\n\t\t\t\tdefault: 1,\n\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Reasoning Effort',\n\t\t\t\tname: 'reasoning_effort',\n\t\t\t\tdefault: 'medium',\n\t\t\t\tdescription:\n\t\t\t\t\t'Controls the amount of reasoning tokens to use. 
A value of \"low\" will favor speed and economical token usage, \"high\" will favor more complete reasoning at the cost of more tokens generated and slower responses.',\n\t\t\t\ttype: 'options',\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Low',\n\t\t\t\t\t\tvalue: 'low',\n\t\t\t\t\t\tdescription: 'Favors speed and economical token usage',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Medium',\n\t\t\t\t\t\tvalue: 'medium',\n\t\t\t\t\t\tdescription: 'Balance between speed and reasoning accuracy',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'High',\n\t\t\t\t\t\tvalue: 'high',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Favors more complete reasoning at the cost of more tokens generated and slower responses',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t// reasoning_effort is only available on o1, o1-versioned, or on o3-mini and beyond, and gpt-5 models. Not on o1-mini or other GPT-models.\n\t\t\t\t\t\t'/modelId': [{ _cnd: { regex: '(^o1([-\\\\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)' } }],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Max Tool Calls Iterations',\n\t\t\t\tname: 'maxToolsIterations',\n\t\t\t\ttype: 'number',\n\t\t\t\tdefault: 15,\n\t\t\t\tdescription:\n\t\t\t\t\t'The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit.',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'@version': [{ _cnd: { gte: 1.5 } }],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['message'],\n\t\tresource: ['text'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst nodeVersion = this.getNode().typeVersion;\n\tconst model = this.getNodeParameter('modelId', i, '', { extractValue: true });\n\tlet messages = this.getNodeParameter('messages.values', i, []) as IDataObject[];\n\tconst options = this.getNodeParameter('options', i, {});\n\tconst jsonOutput = this.getNodeParameter('jsonOutput', i, false) as boolean;\n\tconst maxToolsIterations =\n\t\tnodeVersion >= 1.5 ? (this.getNodeParameter('options.maxToolsIterations', i, 15) as number) : 0;\n\n\tconst abortSignal = this.getExecutionCancelSignal();\n\n\tif (options.maxTokens !== undefined) {\n\t\toptions.max_completion_tokens = options.maxTokens;\n\t\tdelete options.maxTokens;\n\t}\n\n\tif (options.topP !== undefined) {\n\t\toptions.top_p = options.topP;\n\t\tdelete options.topP;\n\t}\n\n\tlet response_format;\n\tif (jsonOutput) {\n\t\tresponse_format = { type: 'json_object' };\n\t\tmessages = [\n\t\t\t{\n\t\t\t\trole: 'system',\n\t\t\t\tcontent: 'You are a helpful assistant designed to output JSON.',\n\t\t\t},\n\t\t\t...messages,\n\t\t];\n\t}\n\n\tconst hideTools = this.getNodeParameter('hideTools', i, '') as string;\n\n\tlet tools;\n\tlet externalTools: Tool[] = [];\n\n\tif (hideTools !== 'hide') {\n\t\tconst enforceUniqueNames = nodeVersion > 1;\n\t\texternalTools = await getConnectedTools(this, enforceUniqueNames, false);\n\t}\n\n\tif (externalTools.length) {\n\t\ttools = externalTools.length ? 
externalTools?.map(formatToOpenAIAssistantTool) : undefined;\n\t}\n\n\tconst body: IDataObject = {\n\t\tmodel,\n\t\tmessages,\n\t\ttools,\n\t\tresponse_format,\n\t\t..._omit(options, ['maxToolsIterations']),\n\t};\n\n\tlet response = (await apiRequest.call(this, 'POST', '/chat/completions', {\n\t\tbody,\n\t})) as ChatCompletion;\n\n\tif (!response) return [];\n\n\tlet currentIteration = 1;\n\tlet toolCalls = response?.choices[0]?.message?.tool_calls;\n\n\twhile (toolCalls?.length) {\n\t\t// Break the loop if the max iterations is reached or the execution is canceled\n\t\tif (\n\t\t\tabortSignal?.aborted ||\n\t\t\t(maxToolsIterations > 0 && currentIteration >= maxToolsIterations)\n\t\t) {\n\t\t\tbreak;\n\t\t}\n\t\tmessages.push(response.choices[0].message);\n\n\t\tfor (const toolCall of toolCalls) {\n\t\t\tconst functionName = toolCall.function.name;\n\t\t\tconst functionArgs = toolCall.function.arguments;\n\n\t\t\tlet functionResponse;\n\t\t\tfor (const tool of externalTools ?? []) {\n\t\t\t\tif (tool.name === functionName) {\n\t\t\t\t\tconst parsedArgs: { input: string } = jsonParse(functionArgs);\n\t\t\t\t\tconst functionInput = parsedArgs.input ?? parsedArgs ?? functionArgs;\n\t\t\t\t\tfunctionResponse = await tool.invoke(functionInput);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (typeof functionResponse === 'object') {\n\t\t\t\tfunctionResponse = JSON.stringify(functionResponse);\n\t\t\t}\n\n\t\t\tmessages.push({\n\t\t\t\ttool_call_id: toolCall.id,\n\t\t\t\trole: 'tool',\n\t\t\t\tcontent: functionResponse,\n\t\t\t});\n\t\t}\n\n\t\tresponse = (await apiRequest.call(this, 'POST', '/chat/completions', {\n\t\t\tbody,\n\t\t})) as ChatCompletion;\n\n\t\ttoolCalls = response.choices[0].message.tool_calls;\n\t\tcurrentIteration += 1;\n\t}\n\n\tif (response_format) {\n\t\tresponse.choices = response.choices.map((choice) => {\n\t\t\ttry {\n\t\t\t\tchoice.message.content = JSON.parse(choice.message.content);\n\t\t\t} catch (error) {}\n\t\t\treturn choice;\n\t\t});\n\t}\n\n\tconst simplify = this.getNodeParameter('simplify', i) as boolean;\n\n\tconst returnData: INodeExecutionData[] = [];\n\n\tif (simplify) {\n\t\tfor (const entry of response.choices) {\n\t\t\treturnData.push({\n\t\t\t\tjson: entry,\n\t\t\t\tpairedItem: { item: i },\n\t\t\t});\n\t\t}\n\t} else {\n\t\treturnData.push({ json: response, pairedItem: { item: i } });\n\t}\n\n\treturn 
returnData;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,kBAAkB;AAOlB,0BAAgD;AAEhD,qBAAkC;AAElC,uBAAkD;AAElD,mBAA4C;AAC5C,uBAA2B;AAC3B,0BAAyB;AAEzB,MAAM,aAAgC;AAAA,MACrC,8BAAS,aAAa;AAAA,EACtB;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,MACZ,UAAU;AAAA,MACV,gBAAgB;AAAA,IACjB;AAAA,IACA,aAAa;AAAA,IACb,SAAS,EAAE,QAAQ,CAAC,EAAE,SAAS,GAAG,CAAC,EAAE;AAAA,IACrC,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,QAAQ;AAAA,UACP;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,aAAa;AAAA,YACb,SAAS;AAAA,YACT,aAAa;AAAA,YACb,aAAa;AAAA,cACZ,MAAM;AAAA,YACP;AAAA,UACD;AAAA,UACA;AAAA,YACC,aAAa;AAAA,YACb,MAAM;AAAA,YACN,MAAM;AAAA,YACN,aACC;AAAA,YACD,SAAS;AAAA,cACR;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aAAa;AAAA,cACd;AAAA,cACA;AAAA,gBACC,MAAM;AAAA,gBACN,OAAO;AAAA,gBACP,aACC;AAAA,cACF;AAAA,YACD;AAAA,YACA,SAAS;AAAA,UACV;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,EACd;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aACC;AAAA,IACD,SAAS;AAAA,EACV;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,SAAS;AAAA,QACT,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,MACpC;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,WAAW,CAAC,MAAM;AAAA,MACnB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,QAC7D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,MAAM;AAAA,QACN,aAAa;AAAA,UACZ,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,QAC7D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,QAC5D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,QAC5D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,MAAM;AAAA,QACN,SAAS;AAAA,UACR;AAAA,YACC,MAAM;AAAA,YACN,OAAO;AAAA,YACP,aAAa;AAAA,UACd;AAAA,UACA;AAAA,YACC,MAAM;AAAA,YACN,OAAO;AAAA,YACP,aAAa;AAAA,UACd;AAAA,UACA;AAAA,YACC,MAAM;AAAA,YACN,OAAO;AAAA,YACP,aACC;AAAA,UACF;AAAA,QACD;AAAA,QACA,gBAAgB;AAAA,UACf,MAAM;AAAA;AAAA,YAEL,YAAY,CAAC,EAAE,MAAM,EAAE,OAAO,0CAA0C,EAAE,CAAC;AAAA,UAC5E;AAAA,QACD;AAAA,MACD;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aACC;AAAA,QACD,gBAAgB;AAAA,UACf,MAAM;AAAA,YACL,YAAY,CAAC,EAAE,MAAM,EAAE,KAAK,IAAI,EAAE,CAAC;AAAA,UACpC;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,SAAS;AAAA,IACrB,UAAU,CAAC,MAAM;AAAA,EAClB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,cAAc,KAAK,QAAQ,EAAE;AACnC,QAAM,QAAQ,KAAK,iBAAiB,WAAW,GAAG,IAAI,EAAE,cAAc,KAAK,CAAC;AAC5E,MAAI,WAAW,KAAK,iBAAiB,mBAAmB,GAAG,CAAC,CAAC;AAC7D,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AACtD,QAAM,aAAa,KAAK,iBAAiB,cAAc,GAAG,KAAK;AAC/D,QAAM,qBACL,eAAe,MAAO,KAAK,iBAAiB
,8BAA8B,GAAG,EAAE,IAAe;AAE/F,QAAM,cAAc,KAAK,yBAAyB;AAElD,MAAI,QAAQ,cAAc,QAAW;AACpC,YAAQ,wBAAwB,QAAQ;AACxC,WAAO,QAAQ;AAAA,EAChB;AAEA,MAAI,QAAQ,SAAS,QAAW;AAC/B,YAAQ,QAAQ,QAAQ;AACxB,WAAO,QAAQ;AAAA,EAChB;AAEA,MAAI;AACJ,MAAI,YAAY;AACf,sBAAkB,EAAE,MAAM,cAAc;AACxC,eAAW;AAAA,MACV;AAAA,QACC,MAAM;AAAA,QACN,SAAS;AAAA,MACV;AAAA,MACA,GAAG;AAAA,IACJ;AAAA,EACD;AAEA,QAAM,YAAY,KAAK,iBAAiB,aAAa,GAAG,EAAE;AAE1D,MAAI;AACJ,MAAI,gBAAwB,CAAC;AAE7B,MAAI,cAAc,QAAQ;AACzB,UAAM,qBAAqB,cAAc;AACzC,oBAAgB,UAAM,kCAAkB,MAAM,oBAAoB,KAAK;AAAA,EACxE;AAEA,MAAI,cAAc,QAAQ;AACzB,YAAQ,cAAc,SAAS,eAAe,IAAI,wCAA2B,IAAI;AAAA,EAClF;AAEA,QAAM,OAAoB;AAAA,IACzB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAG,YAAAA,SAAM,SAAS,CAAC,oBAAoB,CAAC;AAAA,EACzC;AAEA,MAAI,WAAY,MAAM,4BAAW,KAAK,MAAM,QAAQ,qBAAqB;AAAA,IACxE;AAAA,EACD,CAAC;AAED,MAAI,CAAC,SAAU,QAAO,CAAC;AAEvB,MAAI,mBAAmB;AACvB,MAAI,YAAY,UAAU,QAAQ,CAAC,GAAG,SAAS;AAE/C,SAAO,WAAW,QAAQ;AAEzB,QACC,aAAa,WACZ,qBAAqB,KAAK,oBAAoB,oBAC9C;AACD;AAAA,IACD;AACA,aAAS,KAAK,SAAS,QAAQ,CAAC,EAAE,OAAO;AAEzC,eAAW,YAAY,WAAW;AACjC,YAAM,eAAe,SAAS,SAAS;AACvC,YAAM,eAAe,SAAS,SAAS;AAEvC,UAAI;AACJ,iBAAW,QAAQ,iBAAiB,CAAC,GAAG;AACvC,YAAI,KAAK,SAAS,cAAc;AAC/B,gBAAM,iBAAgC,+BAAU,YAAY;AAC5D,gBAAM,gBAAgB,WAAW,SAAS,cAAc;AACxD,6BAAmB,MAAM,KAAK,OAAO,aAAa;AAAA,QACnD;AAAA,MACD;AAEA,UAAI,OAAO,qBAAqB,UAAU;AACzC,2BAAmB,KAAK,UAAU,gBAAgB;AAAA,MACnD;AAEA,eAAS,KAAK;AAAA,QACb,cAAc,SAAS;AAAA,QACvB,MAAM;AAAA,QACN,SAAS;AAAA,MACV,CAAC;AAAA,IACF;AAEA,eAAY,MAAM,4BAAW,KAAK,MAAM,QAAQ,qBAAqB;AAAA,MACpE;AAAA,IACD,CAAC;AAED,gBAAY,SAAS,QAAQ,CAAC,EAAE,QAAQ;AACxC,wBAAoB;AAAA,EACrB;AAEA,MAAI,iBAAiB;AACpB,aAAS,UAAU,SAAS,QAAQ,IAAI,CAAC,WAAW;AACnD,UAAI;AACH,eAAO,QAAQ,UAAU,KAAK,MAAM,OAAO,QAAQ,OAAO;AAAA,MAC3D,SAAS,OAAO;AAAA,MAAC;AACjB,aAAO;AAAA,IACR,CAAC;AAAA,EACF;AAEA,QAAM,WAAW,KAAK,iBAAiB,YAAY,CAAC;AAEpD,QAAM,aAAmC,CAAC;AAE1C,MAAI,UAAU;AACb,eAAW,SAAS,SAAS,SAAS;AACrC,iBAAW,KAAK;AAAA,QACf,MAAM;AAAA,QACN,YAAY,EAAE,MAAM,EAAE;AAAA,MACvB,CAAC;AAAA,IACF;AAAA,EACD,OAAO;AACN,eAAW,KAAK,EAAE,MAAM,UAAU,YAAY,EAAE,MAAM,EAAE,EAAE,CAAC;AAAA,EAC5D;AAEA,SAAO;AACR;","names":["_omit"]}
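The notable change in the embedded source above is the widened display condition for the Reasoning Effort option: its model regex now also admits gpt-5 models, alongside bare o1, versioned o1, and o3-and-beyond. A quick sketch of which model IDs pass the new pattern (the regex is copied from the diff; the sample IDs and the assertion loop are illustrative only):

// Which model IDs reveal the Reasoning Effort option after this change?
// The pattern is taken from the diff; the harness below is illustrative.
const reasoningEffortPattern = /(^o1([-\d]+)?$)|(^o[3-9].*)|(^gpt-5.*)/;

const samples: Array<[string, boolean]> = [
  ['o1', true],            // bare o1
  ['o1-2024-12-17', true], // versioned o1: [-\d]+ admits digits and hyphens
  ['o1-mini', false],      // '-mini' contains letters, so no branch matches
  ['o3-mini', true],       // ^o[3-9].* covers o3-mini and beyond
  ['gpt-5-mini', true],    // newly admitted: any gpt-5 model
  ['gpt-4o', false],       // other GPT models keep the option hidden
];

for (const [model, expected] of samples) {
  console.assert(reasoningEffortPattern.test(model) === expected, model);
}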
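The same embedded source also shows the message operation renaming two camelCase UI options to the snake_case fields the Chat Completions API expects before the request body is built. A condensed sketch of that remapping (types narrowed for brevity; the function name is mine, the logic is lifted from the sourcesContent above):

type MessageOptions = {
  maxTokens?: number;
  topP?: number;
  max_completion_tokens?: number;
  top_p?: number;
};

// Rename UI option keys in place to match the Chat Completions body fields.
function remapOptions(options: MessageOptions): MessageOptions {
  if (options.maxTokens !== undefined) {
    options.max_completion_tokens = options.maxTokens;
    delete options.maxTokens;
  }
  if (options.topP !== undefined) {
    options.top_p = options.topP;
    delete options.topP;
  }
  return options;
}

Mapping to max_completion_tokens rather than the legacy max_tokens keeps the body accepted by reasoning models, which reject the older field.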
@@ -0,0 +1,38 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var binary_data_exports = {};
+__export(binary_data_exports, {
+  getBinaryDataFile: () => getBinaryDataFile
+});
+module.exports = __toCommonJS(binary_data_exports);
+const CHUNK_SIZE = 256 * 1024;
+async function getBinaryDataFile(ctx, itemIdx, binaryPropertyName) {
+  const binaryData = ctx.helpers.assertBinaryData(itemIdx, binaryPropertyName);
+  const fileContent = binaryData.id ? await ctx.helpers.getBinaryStream(binaryData.id, CHUNK_SIZE) : await ctx.helpers.getBinaryDataBuffer(itemIdx, binaryPropertyName);
+  return {
+    filename: binaryData.fileName,
+    contentType: binaryData.mimeType,
+    fileContent
+  };
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  getBinaryDataFile
+});
+//# sourceMappingURL=binary-data.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../../../../nodes/vendors/OpenAi/helpers/binary-data.ts"],"sourcesContent":["import type { IExecuteFunctions } from 'n8n-workflow';\n\n/** Chunk size to use for streaming. 256Kb */\nconst CHUNK_SIZE = 256 * 1024;\n\n/**\n * Gets the binary data file for the given item index and given property name.\n * Returns the file name, content type and the file content. Uses streaming\n * when possible.\n */\nexport async function getBinaryDataFile(\n\tctx: IExecuteFunctions,\n\titemIdx: number,\n\tbinaryPropertyName: string,\n) {\n\tconst binaryData = ctx.helpers.assertBinaryData(itemIdx, binaryPropertyName);\n\n\tconst fileContent = binaryData.id\n\t\t? await ctx.helpers.getBinaryStream(binaryData.id, CHUNK_SIZE)\n\t\t: await ctx.helpers.getBinaryDataBuffer(itemIdx, binaryPropertyName);\n\n\treturn {\n\t\tfilename: binaryData.fileName,\n\t\tcontentType: binaryData.mimeType,\n\t\tfileContent,\n\t};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAGA,MAAM,aAAa,MAAM;AAOzB,eAAsB,kBACrB,KACA,SACA,oBACC;AACD,QAAM,aAAa,IAAI,QAAQ,iBAAiB,SAAS,kBAAkB;AAE3E,QAAM,cAAc,WAAW,KAC5B,MAAM,IAAI,QAAQ,gBAAgB,WAAW,IAAI,UAAU,IAC3D,MAAM,IAAI,QAAQ,oBAAoB,SAAS,kBAAkB;AAEpE,SAAO;AAAA,IACN,UAAU,WAAW;AAAA,IACrB,aAAa,WAAW;AAAA,IACxB;AAAA,EACD;AACD;","names":[]}
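The new binary-data helper added above exports a single getBinaryDataFile function: it prefers a 256 KiB chunked stream when the binary item carries a storage id and falls back to an in-memory buffer otherwise, so large audio and upload payloads need not be held fully in memory. A minimal usage sketch, assuming an n8n IExecuteFunctions context; the multipart wrapper is hypothetical, and only getBinaryDataFile itself comes from this release:

import FormData from 'form-data';
import type { IExecuteFunctions } from 'n8n-workflow';
import { getBinaryDataFile } from '../../helpers/binary-data';

// Sketch: build a multipart body for an OpenAI file upload from an item's
// binary data. Streams when the data lives in n8n's binary store.
async function buildUploadForm(ctx: IExecuteFunctions, itemIdx: number) {
  const { filename, contentType, fileContent } = await getBinaryDataFile(
    ctx,
    itemIdx,
    'data', // binary property name; 'data' is the common n8n default
  );

  const form = new FormData();
  form.append('file', fileContent, { filename, contentType });
  return form;
}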