@n8n/n8n-nodes-langchain 1.111.1 → 1.112.1

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as published in their public registry.
Files changed (25)
  1. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js +8 -2
  2. package/dist/nodes/agents/Agent/agents/ToolsAgent/V2/execute.js.map +1 -1
  3. package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js +3 -1
  4. package/dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js.map +1 -1
  5. package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js +19 -4
  6. package/dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js.map +1 -1
  7. package/dist/nodes/llms/LmChatGoogleVertex/error-handling.js +1 -1
  8. package/dist/nodes/llms/LmChatGoogleVertex/error-handling.js.map +1 -1
  9. package/dist/nodes/llms/gemini-common/additional-options.js +98 -80
  10. package/dist/nodes/llms/gemini-common/additional-options.js.map +1 -1
  11. package/dist/nodes/vendors/GoogleGemini/actions/audio/transcribe.operation.js.map +1 -1
  12. package/dist/nodes/vendors/GoogleGemini/actions/file/upload.operation.js +11 -28
  13. package/dist/nodes/vendors/GoogleGemini/actions/file/upload.operation.js.map +1 -1
  14. package/dist/nodes/vendors/GoogleGemini/actions/image/generate.operation.js +2 -1
  15. package/dist/nodes/vendors/GoogleGemini/actions/image/generate.operation.js.map +1 -1
  16. package/dist/nodes/vendors/GoogleGemini/actions/text/message.operation.js +35 -1
  17. package/dist/nodes/vendors/GoogleGemini/actions/text/message.operation.js.map +1 -1
  18. package/dist/nodes/vendors/GoogleGemini/helpers/baseAnalyze.js +6 -0
  19. package/dist/nodes/vendors/GoogleGemini/helpers/baseAnalyze.js.map +1 -1
  20. package/dist/nodes/vendors/GoogleGemini/helpers/interfaces.js +12 -0
  21. package/dist/nodes/vendors/GoogleGemini/helpers/interfaces.js.map +1 -1
  22. package/dist/nodes/vendors/GoogleGemini/helpers/utils.js +76 -0
  23. package/dist/nodes/vendors/GoogleGemini/helpers/utils.js.map +1 -1
  24. package/dist/types/nodes.json +2 -2
  25. package/package.json +8 -8
@@ -88,7 +88,9 @@ async function processEventStream(ctx, eventStream, itemIndex, returnIntermediat
  let chunkText = "";
  if (Array.isArray(chunkContent)) {
    for (const message of chunkContent) {
-     chunkText += message?.text;
+     if (message?.type === "text") {
+       chunkText += message?.text;
+     }
    }
  } else if (typeof chunkContent === "string") {
    chunkText = chunkContent;
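This hunk hardens streamed-chunk handling: when the model returns its content as an array of parts, only parts whose `type` is `"text"` are appended to the streamed output, so non-text parts (for example tool-call chunks, whose `text` is `undefined`) no longer end up concatenated as the literal string `"undefined"`. A minimal sketch of the same guard; the `StreamedPart` type is an assumption for illustration, not the package's actual type:

```ts
// Hypothetical shape of a streamed content part; real chunks come from
// @langchain/core's AIMessageChunk.content and may contain non-text parts.
type StreamedPart = { type: string; text?: string };

function collectText(chunkContent: string | StreamedPart[]): string {
  if (typeof chunkContent === "string") return chunkContent;

  let chunkText = "";
  for (const part of chunkContent) {
    // Only text parts carry a usable `text` field; skipping the rest avoids
    // appending "undefined" for tool-call or other structured parts.
    if (part?.type === "text") {
      chunkText += part.text ?? "";
    }
  }
  return chunkText;
}
```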
@@ -197,7 +199,11 @@ async function toolsAgentExecute() {
  const executeOptions = { signal: this.getExecutionCancelSignal() };
  const isStreamingAvailable = "isStreaming" in this ? this.isStreaming?.() : void 0;
  if ("isStreaming" in this && enableStreaming && isStreamingAvailable && this.getNode().typeVersion >= 2.1) {
-   const chatHistory = await memory?.chatHistory.getMessages();
+   let chatHistory;
+   if (memory) {
+     const memoryVariables = await memory.loadMemoryVariables({});
+     chatHistory = memoryVariables["chat_history"];
+   }
    const eventStream = executor.streamEvents(
      {
        ...invokeParams,
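The streaming path now pulls chat history via `memory.loadMemoryVariables({})` instead of `memory.chatHistory.getMessages()`; per the source comment, this is so the memory can apply its configured context-window length rather than returning every stored message. A hedged sketch of the difference, using LangChain's `BufferWindowMemory` as one example of a memory that trims its returned history:

```ts
import { BufferWindowMemory } from "langchain/memory";

// Keep only the last 2 exchanges in the returned chat_history.
const memory = new BufferWindowMemory({
  k: 2,
  returnMessages: true,
  memoryKey: "chat_history",
});

async function getHistory() {
  // Previous approach: returns every stored message, ignoring the window size.
  const allMessages = await memory.chatHistory.getMessages();

  // New approach: the memory applies its own trimming (here, the last k turns).
  const memoryVariables = await memory.loadMemoryVariables({});
  const windowedHistory = memoryVariables["chat_history"];

  return { allMessages, windowedHistory };
}
```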
@@ -1 +1 @@
- {"version":3,"sources":["../../../../../../../nodes/agents/Agent/agents/ToolsAgent/V2/execute.ts"],"sourcesContent":["import type { StreamEvent } from '@langchain/core/dist/tracers/event_stream';\nimport type { IterableReadableStream } from '@langchain/core/dist/utils/stream';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { AIMessageChunk, MessageContentText } from '@langchain/core/messages';\nimport type { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { RunnableSequence } from '@langchain/core/runnables';\nimport {\n\tAgentExecutor,\n\ttype AgentRunnableSequence,\n\tcreateToolCallingAgent,\n} from 'langchain/agents';\nimport type { BaseChatMemory } from 'langchain/memory';\nimport type { DynamicStructuredTool, Tool } from 'langchain/tools';\nimport omit from 'lodash/omit';\nimport { jsonParse, NodeOperationError, sleep } from 'n8n-workflow';\nimport type { IExecuteFunctions, INodeExecutionData, ISupplyDataFunctions } from 'n8n-workflow';\nimport assert from 'node:assert';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport {\n\tgetOptionalOutputParser,\n\ttype N8nOutputParser,\n} from '@utils/output_parsers/N8nOutputParser';\n\nimport {\n\tfixEmptyContentMessage,\n\tgetAgentStepsParser,\n\tgetChatModel,\n\tgetOptionalMemory,\n\tgetTools,\n\tprepareMessages,\n\tpreparePrompt,\n} from '../common';\nimport { SYSTEM_MESSAGE } from '../prompt';\n\n/**\n * Creates an agent executor with the given configuration\n */\nfunction createAgentExecutor(\n\tmodel: BaseChatModel,\n\ttools: Array<DynamicStructuredTool | Tool>,\n\tprompt: ChatPromptTemplate,\n\toptions: { maxIterations?: number; returnIntermediateSteps?: boolean },\n\toutputParser?: N8nOutputParser,\n\tmemory?: BaseChatMemory,\n\tfallbackModel?: BaseChatModel | null,\n) {\n\tconst agent = createToolCallingAgent({\n\t\tllm: model,\n\t\ttools,\n\t\tprompt,\n\t\tstreamRunnable: false,\n\t});\n\n\tlet fallbackAgent: AgentRunnableSequence | undefined;\n\tif (fallbackModel) {\n\t\tfallbackAgent = createToolCallingAgent({\n\t\t\tllm: fallbackModel,\n\t\t\ttools,\n\t\t\tprompt,\n\t\t\tstreamRunnable: false,\n\t\t});\n\t}\n\tconst runnableAgent = RunnableSequence.from([\n\t\tfallbackAgent ? agent.withFallbacks([fallbackAgent]) : agent,\n\t\tgetAgentStepsParser(outputParser, memory),\n\t\tfixEmptyContentMessage,\n\t]) as AgentRunnableSequence;\n\n\trunnableAgent.singleAction = false;\n\trunnableAgent.streamRunnable = false;\n\n\treturn AgentExecutor.fromAgentAndTools({\n\t\tagent: runnableAgent,\n\t\tmemory,\n\t\ttools,\n\t\treturnIntermediateSteps: options.returnIntermediateSteps === true,\n\t\tmaxIterations: options.maxIterations ?? 
10,\n\t});\n}\n\nasync function processEventStream(\n\tctx: IExecuteFunctions,\n\teventStream: IterableReadableStream<StreamEvent>,\n\titemIndex: number,\n\treturnIntermediateSteps: boolean = false,\n): Promise<{ output: string; intermediateSteps?: any[] }> {\n\tconst agentResult: { output: string; intermediateSteps?: any[] } = {\n\t\toutput: '',\n\t};\n\n\tif (returnIntermediateSteps) {\n\t\tagentResult.intermediateSteps = [];\n\t}\n\n\tctx.sendChunk('begin', itemIndex);\n\tfor await (const event of eventStream) {\n\t\t// Stream chat model tokens as they come in\n\t\tswitch (event.event) {\n\t\t\tcase 'on_chat_model_stream':\n\t\t\t\tconst chunk = event.data?.chunk as AIMessageChunk;\n\t\t\t\tif (chunk?.content) {\n\t\t\t\t\tconst chunkContent = chunk.content;\n\t\t\t\t\tlet chunkText = '';\n\t\t\t\t\tif (Array.isArray(chunkContent)) {\n\t\t\t\t\t\tfor (const message of chunkContent) {\n\t\t\t\t\t\t\tchunkText += (message as MessageContentText)?.text;\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (typeof chunkContent === 'string') {\n\t\t\t\t\t\tchunkText = chunkContent;\n\t\t\t\t\t}\n\t\t\t\t\tctx.sendChunk('item', itemIndex, chunkText);\n\n\t\t\t\t\tagentResult.output += chunkText;\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 'on_chat_model_end':\n\t\t\t\t// Capture full LLM response with tool calls for intermediate steps\n\t\t\t\tif (returnIntermediateSteps && event.data) {\n\t\t\t\t\tconst chatModelData = event.data as any;\n\t\t\t\t\tconst output = chatModelData.output;\n\n\t\t\t\t\t// Check if this LLM response contains tool calls\n\t\t\t\t\tif (output?.tool_calls && output.tool_calls.length > 0) {\n\t\t\t\t\t\tfor (const toolCall of output.tool_calls) {\n\t\t\t\t\t\t\tagentResult.intermediateSteps!.push({\n\t\t\t\t\t\t\t\taction: {\n\t\t\t\t\t\t\t\t\ttool: toolCall.name,\n\t\t\t\t\t\t\t\t\ttoolInput: toolCall.args,\n\t\t\t\t\t\t\t\t\tlog:\n\t\t\t\t\t\t\t\t\t\toutput.content ||\n\t\t\t\t\t\t\t\t\t\t`Calling ${toolCall.name} with input: ${JSON.stringify(toolCall.args)}`,\n\t\t\t\t\t\t\t\t\tmessageLog: [output], // Include the full LLM response\n\t\t\t\t\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\t\t\t\t\ttype: toolCall.type,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 'on_tool_end':\n\t\t\t\t// Capture tool execution results and match with action\n\t\t\t\tif (returnIntermediateSteps && event.data && agentResult.intermediateSteps!.length > 0) {\n\t\t\t\t\tconst toolData = event.data as any;\n\t\t\t\t\t// Find the matching intermediate step for this tool call\n\t\t\t\t\tconst matchingStep = agentResult.intermediateSteps!.find(\n\t\t\t\t\t\t(step) => !step.observation && step.action.tool === event.name,\n\t\t\t\t\t);\n\t\t\t\t\tif (matchingStep) {\n\t\t\t\t\t\tmatchingStep.observation = toolData.output;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t}\n\t}\n\tctx.sendChunk('end', itemIndex);\n\n\treturn agentResult;\n}\n\n/* -----------------------------------------------------------\n Main Executor Function\n----------------------------------------------------------- */\n/**\n * The main executor method for the Tools Agent.\n *\n * This function retrieves necessary components (model, memory, tools), prepares the prompt,\n * creates the agent, and processes each input item. The error handling for each item is also\n * managed here based on the node's continueOnFail setting.\n *\n * @param this Execute context. 
SupplyDataContext is passed when agent is as a tool\n *\n * @returns The array of execution data for all processed items\n */\nexport async function toolsAgentExecute(\n\tthis: IExecuteFunctions | ISupplyDataFunctions,\n): Promise<INodeExecutionData[][]> {\n\tthis.logger.debug('Executing Tools Agent V2');\n\n\tconst returnData: INodeExecutionData[] = [];\n\tconst items = this.getInputData();\n\tconst batchSize = this.getNodeParameter('options.batching.batchSize', 0, 1) as number;\n\tconst delayBetweenBatches = this.getNodeParameter(\n\t\t'options.batching.delayBetweenBatches',\n\t\t0,\n\t\t0,\n\t) as number;\n\tconst needsFallback = this.getNodeParameter('needsFallback', 0, false) as boolean;\n\tconst memory = await getOptionalMemory(this);\n\tconst model = await getChatModel(this, 0);\n\tassert(model, 'Please connect a model to the Chat Model input');\n\tconst fallbackModel = needsFallback ? await getChatModel(this, 1) : null;\n\n\tif (needsFallback && !fallbackModel) {\n\t\tthrow new NodeOperationError(\n\t\t\tthis.getNode(),\n\t\t\t'Please connect a model to the Fallback Model input or disable the fallback option',\n\t\t);\n\t}\n\n\t// Check if streaming is enabled\n\tconst enableStreaming = this.getNodeParameter('options.enableStreaming', 0, true) as boolean;\n\n\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\tconst batch = items.slice(i, i + batchSize);\n\t\tconst batchPromises = batch.map(async (_item, batchItemIndex) => {\n\t\t\tconst itemIndex = i + batchItemIndex;\n\n\t\t\tconst input = getPromptInputByType({\n\t\t\t\tctx: this,\n\t\t\t\ti: itemIndex,\n\t\t\t\tinputKey: 'text',\n\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t});\n\t\t\tif (input === undefined) {\n\t\t\t\tthrow new NodeOperationError(this.getNode(), 'The \"text\" parameter is empty.');\n\t\t\t}\n\t\t\tconst outputParser = await getOptionalOutputParser(this, itemIndex);\n\t\t\tconst tools = await getTools(this, outputParser);\n\t\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\t\tsystemMessage?: string;\n\t\t\t\tmaxIterations?: number;\n\t\t\t\treturnIntermediateSteps?: boolean;\n\t\t\t\tpassthroughBinaryImages?: boolean;\n\t\t\t};\n\n\t\t\t// Prepare the prompt messages and prompt template.\n\t\t\tconst messages = await prepareMessages(this, itemIndex, {\n\t\t\t\tsystemMessage: options.systemMessage,\n\t\t\t\tpassthroughBinaryImages: options.passthroughBinaryImages ?? true,\n\t\t\t\toutputParser,\n\t\t\t});\n\t\t\tconst prompt: ChatPromptTemplate = preparePrompt(messages);\n\n\t\t\t// Create executors for primary and fallback models\n\t\t\tconst executor = createAgentExecutor(\n\t\t\t\tmodel,\n\t\t\t\ttools,\n\t\t\t\tprompt,\n\t\t\t\toptions,\n\t\t\t\toutputParser,\n\t\t\t\tmemory,\n\t\t\t\tfallbackModel,\n\t\t\t);\n\t\t\t// Invoke with fallback logic\n\t\t\tconst invokeParams = {\n\t\t\t\tinput,\n\t\t\t\tsystem_message: options.systemMessage ?? SYSTEM_MESSAGE,\n\t\t\t\tformatting_instructions:\n\t\t\t\t\t'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer.',\n\t\t\t};\n\t\t\tconst executeOptions = { signal: this.getExecutionCancelSignal() };\n\n\t\t\t// Check if streaming is actually available\n\t\t\tconst isStreamingAvailable = 'isStreaming' in this ? 
this.isStreaming?.() : undefined;\n\n\t\t\tif (\n\t\t\t\t'isStreaming' in this &&\n\t\t\t\tenableStreaming &&\n\t\t\t\tisStreamingAvailable &&\n\t\t\t\tthis.getNode().typeVersion >= 2.1\n\t\t\t) {\n\t\t\t\tconst chatHistory = await memory?.chatHistory.getMessages();\n\t\t\t\tconst eventStream = executor.streamEvents(\n\t\t\t\t\t{\n\t\t\t\t\t\t...invokeParams,\n\t\t\t\t\t\tchat_history: chatHistory ?? undefined,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tversion: 'v2',\n\t\t\t\t\t\t...executeOptions,\n\t\t\t\t\t},\n\t\t\t\t);\n\n\t\t\t\treturn await processEventStream(\n\t\t\t\t\tthis,\n\t\t\t\t\teventStream,\n\t\t\t\t\titemIndex,\n\t\t\t\t\toptions.returnIntermediateSteps,\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Handle regular execution\n\t\t\t\treturn await executor.invoke(invokeParams, executeOptions);\n\t\t\t}\n\t\t});\n\n\t\tconst batchResults = await Promise.allSettled(batchPromises);\n\t\t// This is only used to check if the output parser is connected\n\t\t// so we can parse the output if needed. Actual output parsing is done in the loop above\n\t\tconst outputParser = await getOptionalOutputParser(this, 0);\n\t\tbatchResults.forEach((result, index) => {\n\t\t\tconst itemIndex = i + index;\n\t\t\tif (result.status === 'rejected') {\n\t\t\t\tconst error = result.reason as Error;\n\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: { error: error.message },\n\t\t\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t\t\t});\n\t\t\t\t\treturn;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error);\n\t\t\t\t}\n\t\t\t}\n\t\t\tconst response = result.value;\n\t\t\t// If memory and outputParser are connected, parse the output.\n\t\t\tif (memory && outputParser) {\n\t\t\t\tconst parsedOutput = jsonParse<{ output: Record<string, unknown> }>(\n\t\t\t\t\tresponse.output as string,\n\t\t\t\t);\n\t\t\t\tresponse.output = parsedOutput?.output ?? 
parsedOutput;\n\t\t\t}\n\n\t\t\t// Omit internal keys before returning the result.\n\t\t\tconst itemResult = {\n\t\t\t\tjson: omit(\n\t\t\t\t\tresponse,\n\t\t\t\t\t'system_message',\n\t\t\t\t\t'formatting_instructions',\n\t\t\t\t\t'input',\n\t\t\t\t\t'chat_history',\n\t\t\t\t\t'agent_scratchpad',\n\t\t\t\t),\n\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t};\n\n\t\t\treturnData.push(itemResult);\n\t\t});\n\n\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\tawait sleep(delayBetweenBatches);\n\t\t}\n\t}\n\n\treturn [returnData];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAKA,uBAAiC;AACjC,oBAIO;AAGP,kBAAiB;AACjB,0BAAqD;AAErD,yBAAmB;AAEnB,qBAAqC;AACrC,6BAGO;AAEP,oBAQO;AACP,oBAA+B;AAK/B,SAAS,oBACR,OACA,OACA,QACA,SACA,cACA,QACA,eACC;AACD,QAAM,YAAQ,sCAAuB;AAAA,IACpC,KAAK;AAAA,IACL;AAAA,IACA;AAAA,IACA,gBAAgB;AAAA,EACjB,CAAC;AAED,MAAI;AACJ,MAAI,eAAe;AAClB,wBAAgB,sCAAuB;AAAA,MACtC,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,gBAAgB;AAAA,IACjB,CAAC;AAAA,EACF;AACA,QAAM,gBAAgB,kCAAiB,KAAK;AAAA,IAC3C,gBAAgB,MAAM,cAAc,CAAC,aAAa,CAAC,IAAI;AAAA,QACvD,mCAAoB,cAAc,MAAM;AAAA,IACxC;AAAA,EACD,CAAC;AAED,gBAAc,eAAe;AAC7B,gBAAc,iBAAiB;AAE/B,SAAO,4BAAc,kBAAkB;AAAA,IACtC,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA,yBAAyB,QAAQ,4BAA4B;AAAA,IAC7D,eAAe,QAAQ,iBAAiB;AAAA,EACzC,CAAC;AACF;AAEA,eAAe,mBACd,KACA,aACA,WACA,0BAAmC,OACsB;AACzD,QAAM,cAA6D;AAAA,IAClE,QAAQ;AAAA,EACT;AAEA,MAAI,yBAAyB;AAC5B,gBAAY,oBAAoB,CAAC;AAAA,EAClC;AAEA,MAAI,UAAU,SAAS,SAAS;AAChC,mBAAiB,SAAS,aAAa;AAEtC,YAAQ,MAAM,OAAO;AAAA,MACpB,KAAK;AACJ,cAAM,QAAQ,MAAM,MAAM;AAC1B,YAAI,OAAO,SAAS;AACnB,gBAAM,eAAe,MAAM;AAC3B,cAAI,YAAY;AAChB,cAAI,MAAM,QAAQ,YAAY,GAAG;AAChC,uBAAW,WAAW,cAAc;AACnC,2BAAc,SAAgC;AAAA,YAC/C;AAAA,UACD,WAAW,OAAO,iBAAiB,UAAU;AAC5C,wBAAY;AAAA,UACb;AACA,cAAI,UAAU,QAAQ,WAAW,SAAS;AAE1C,sBAAY,UAAU;AAAA,QACvB;AACA;AAAA,MACD,KAAK;AAEJ,YAAI,2BAA2B,MAAM,MAAM;AAC1C,gBAAM,gBAAgB,MAAM;AAC5B,gBAAM,SAAS,cAAc;AAG7B,cAAI,QAAQ,cAAc,OAAO,WAAW,SAAS,GAAG;AACvD,uBAAW,YAAY,OAAO,YAAY;AACzC,0BAAY,kBAAmB,KAAK;AAAA,gBACnC,QAAQ;AAAA,kBACP,MAAM,SAAS;AAAA,kBACf,WAAW,SAAS;AAAA,kBACpB,KACC,OAAO,WACP,WAAW,SAAS,IAAI,gBAAgB,KAAK,UAAU,SAAS,IAAI,CAAC;AAAA,kBACtE,YAAY,CAAC,MAAM;AAAA;AAAA,kBACnB,YAAY,SAAS;AAAA,kBACrB,MAAM,SAAS;AAAA,gBAChB;AAAA,cACD,CAAC;AAAA,YACF;AAAA,UACD;AAAA,QACD;AACA;AAAA,MACD,KAAK;AAEJ,YAAI,2BAA2B,MAAM,QAAQ,YAAY,kBAAmB,SAAS,GAAG;AACvF,gBAAM,WAAW,MAAM;AAEvB,gBAAM,eAAe,YAAY,kBAAmB;AAAA,YACnD,CAAC,SAAS,CAAC,KAAK,eAAe,KAAK,OAAO,SAAS,MAAM;AAAA,UAC3D;AACA,cAAI,cAAc;AACjB,yBAAa,cAAc,SAAS;AAAA,UACrC;AAAA,QACD;AACA;AAAA,MACD;AACC;AAAA,IACF;AAAA,EACD;AACA,MAAI,UAAU,OAAO,SAAS;AAE9B,SAAO;AACR;AAgBA,eAAsB,oBAEa;AAClC,OAAK,OAAO,MAAM,0BAA0B;AAE5C,QAAM,aAAmC,CAAC;AAC1C,QAAM,QAAQ,KAAK,aAAa;AAChC,QAAM,YAAY,KAAK,iBAAiB,8BAA8B,GAAG,CAAC;AAC1E,QAAM,sBAAsB,KAAK;AAAA,IAChC;AAAA,IACA;AAAA,IACA;AAAA,EACD;AACA,QAAM,gBAAgB,KAAK,iBAAiB,iBAAiB,GAAG,KAAK;AACrE,QAAM,SAAS,UAAM,iCAAkB,IAAI;AAC3C,QAAM,QAAQ,UAAM,4BAAa,MAAM,CAAC;AACxC,yBAAAA,SAAO,OAAO,gDAAgD;AAC9D,QAAM,gBAAgB,gBAAgB,UAAM,4BAAa,MAAM,CAAC,IAAI;AAEpE,MAAI,iBAAiB,CAAC,eAAe;AACpC,UAAM,IAAI;AAAA,MACT,KAAK,QAAQ;AAAA,MACb;AAAA,IACD;AAAA,EACD;AAGA,QAAM,kBAAkB,KAAK,iBAAiB,2BAA2B,GAAG,IAAI;AAEhF,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,UAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAC1C,UAAM,gBAAgB,MAAM,IAAI,OAAO,OAAO,mBAAmB;AAChE,YAAM,YAAY,IAAI;AAEtB,YAAM,YAAQ,qCAAqB;AAAA,QAClC,KAAK;AAAA,QACL,GAAG;AAAA,QACH,UAAU;AAAA,QACV,eAAe;AAAA,MAChB,CAAC;AACD,UAAI,UAAU,QAAW;AACxB,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,gCAAgC;AAAA,MAC9E;AACA,YAAMC,gBAAe,UAAM,gDAAwB,MAAM,SAAS;AAClE,YAAM,QAAQ,UAAM,wBAAS,MAAMA,aAAY;AAC/C,YAAM,UAAU,KAA
K,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAQ9D,YAAM,WAAW,UAAM,+BAAgB,MAAM,WAAW;AAAA,QACvD,eAAe,QAAQ;AAAA,QACvB,yBAAyB,QAAQ,2BAA2B;AAAA,QAC5D,cAAAA;AAAA,MACD,CAAC;AACD,YAAM,aAA6B,6BAAc,QAAQ;AAGzD,YAAM,WAAW;AAAA,QAChB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACAA;AAAA,QACA;AAAA,QACA;AAAA,MACD;AAEA,YAAM,eAAe;AAAA,QACpB;AAAA,QACA,gBAAgB,QAAQ,iBAAiB;AAAA,QACzC,yBACC;AAAA,MACF;AACA,YAAM,iBAAiB,EAAE,QAAQ,KAAK,yBAAyB,EAAE;AAGjE,YAAM,uBAAuB,iBAAiB,OAAO,KAAK,cAAc,IAAI;AAE5E,UACC,iBAAiB,QACjB,mBACA,wBACA,KAAK,QAAQ,EAAE,eAAe,KAC7B;AACD,cAAM,cAAc,MAAM,QAAQ,YAAY,YAAY;AAC1D,cAAM,cAAc,SAAS;AAAA,UAC5B;AAAA,YACC,GAAG;AAAA,YACH,cAAc,eAAe;AAAA,UAC9B;AAAA,UACA;AAAA,YACC,SAAS;AAAA,YACT,GAAG;AAAA,UACJ;AAAA,QACD;AAEA,eAAO,MAAM;AAAA,UACZ;AAAA,UACA;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,QACT;AAAA,MACD,OAAO;AAEN,eAAO,MAAM,SAAS,OAAO,cAAc,cAAc;AAAA,MAC1D;AAAA,IACD,CAAC;AAED,UAAM,eAAe,MAAM,QAAQ,WAAW,aAAa;AAG3D,UAAM,eAAe,UAAM,gDAAwB,MAAM,CAAC;AAC1D,iBAAa,QAAQ,CAAC,QAAQ,UAAU;AACvC,YAAM,YAAY,IAAI;AACtB,UAAI,OAAO,WAAW,YAAY;AACjC,cAAM,QAAQ,OAAO;AACrB,YAAI,KAAK,eAAe,GAAG;AAC1B,qBAAW,KAAK;AAAA,YACf,MAAM,EAAE,OAAO,MAAM,QAAQ;AAAA,YAC7B,YAAY,EAAE,MAAM,UAAU;AAAA,UAC/B,CAAC;AACD;AAAA,QACD,OAAO;AACN,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,KAAK;AAAA,QACnD;AAAA,MACD;AACA,YAAM,WAAW,OAAO;AAExB,UAAI,UAAU,cAAc;AAC3B,cAAM,mBAAe;AAAA,UACpB,SAAS;AAAA,QACV;AACA,iBAAS,SAAS,cAAc,UAAU;AAAA,MAC3C;AAGA,YAAM,aAAa;AAAA,QAClB,UAAM,YAAAC;AAAA,UACL;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACD;AAAA,QACA,YAAY,EAAE,MAAM,UAAU;AAAA,MAC/B;AAEA,iBAAW,KAAK,UAAU;AAAA,IAC3B,CAAC;AAED,QAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,gBAAM,2BAAM,mBAAmB;AAAA,IAChC;AAAA,EACD;AAEA,SAAO,CAAC,UAAU;AACnB;","names":["assert","outputParser","omit"]}
+ {"version":3,"sources":["../../../../../../../nodes/agents/Agent/agents/ToolsAgent/V2/execute.ts"],"sourcesContent":["import type { StreamEvent } from '@langchain/core/dist/tracers/event_stream';\nimport type { IterableReadableStream } from '@langchain/core/dist/utils/stream';\nimport type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { AIMessageChunk, MessageContentText } from '@langchain/core/messages';\nimport type { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { RunnableSequence } from '@langchain/core/runnables';\nimport {\n\tAgentExecutor,\n\ttype AgentRunnableSequence,\n\tcreateToolCallingAgent,\n} from 'langchain/agents';\nimport type { BaseChatMemory } from 'langchain/memory';\nimport type { DynamicStructuredTool, Tool } from 'langchain/tools';\nimport omit from 'lodash/omit';\nimport { jsonParse, NodeOperationError, sleep } from 'n8n-workflow';\nimport type { IExecuteFunctions, INodeExecutionData, ISupplyDataFunctions } from 'n8n-workflow';\nimport assert from 'node:assert';\n\nimport { getPromptInputByType } from '@utils/helpers';\nimport {\n\tgetOptionalOutputParser,\n\ttype N8nOutputParser,\n} from '@utils/output_parsers/N8nOutputParser';\n\nimport {\n\tfixEmptyContentMessage,\n\tgetAgentStepsParser,\n\tgetChatModel,\n\tgetOptionalMemory,\n\tgetTools,\n\tprepareMessages,\n\tpreparePrompt,\n} from '../common';\nimport { SYSTEM_MESSAGE } from '../prompt';\n\n/**\n * Creates an agent executor with the given configuration\n */\nfunction createAgentExecutor(\n\tmodel: BaseChatModel,\n\ttools: Array<DynamicStructuredTool | Tool>,\n\tprompt: ChatPromptTemplate,\n\toptions: { maxIterations?: number; returnIntermediateSteps?: boolean },\n\toutputParser?: N8nOutputParser,\n\tmemory?: BaseChatMemory,\n\tfallbackModel?: BaseChatModel | null,\n) {\n\tconst agent = createToolCallingAgent({\n\t\tllm: model,\n\t\ttools,\n\t\tprompt,\n\t\tstreamRunnable: false,\n\t});\n\n\tlet fallbackAgent: AgentRunnableSequence | undefined;\n\tif (fallbackModel) {\n\t\tfallbackAgent = createToolCallingAgent({\n\t\t\tllm: fallbackModel,\n\t\t\ttools,\n\t\t\tprompt,\n\t\t\tstreamRunnable: false,\n\t\t});\n\t}\n\tconst runnableAgent = RunnableSequence.from([\n\t\tfallbackAgent ? agent.withFallbacks([fallbackAgent]) : agent,\n\t\tgetAgentStepsParser(outputParser, memory),\n\t\tfixEmptyContentMessage,\n\t]) as AgentRunnableSequence;\n\n\trunnableAgent.singleAction = false;\n\trunnableAgent.streamRunnable = false;\n\n\treturn AgentExecutor.fromAgentAndTools({\n\t\tagent: runnableAgent,\n\t\tmemory,\n\t\ttools,\n\t\treturnIntermediateSteps: options.returnIntermediateSteps === true,\n\t\tmaxIterations: options.maxIterations ?? 
10,\n\t});\n}\n\nasync function processEventStream(\n\tctx: IExecuteFunctions,\n\teventStream: IterableReadableStream<StreamEvent>,\n\titemIndex: number,\n\treturnIntermediateSteps: boolean = false,\n): Promise<{ output: string; intermediateSteps?: any[] }> {\n\tconst agentResult: { output: string; intermediateSteps?: any[] } = {\n\t\toutput: '',\n\t};\n\n\tif (returnIntermediateSteps) {\n\t\tagentResult.intermediateSteps = [];\n\t}\n\n\tctx.sendChunk('begin', itemIndex);\n\tfor await (const event of eventStream) {\n\t\t// Stream chat model tokens as they come in\n\t\tswitch (event.event) {\n\t\t\tcase 'on_chat_model_stream':\n\t\t\t\tconst chunk = event.data?.chunk as AIMessageChunk;\n\t\t\t\tif (chunk?.content) {\n\t\t\t\t\tconst chunkContent = chunk.content;\n\t\t\t\t\tlet chunkText = '';\n\t\t\t\t\tif (Array.isArray(chunkContent)) {\n\t\t\t\t\t\tfor (const message of chunkContent) {\n\t\t\t\t\t\t\tif (message?.type === 'text') {\n\t\t\t\t\t\t\t\tchunkText += (message as MessageContentText)?.text;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (typeof chunkContent === 'string') {\n\t\t\t\t\t\tchunkText = chunkContent;\n\t\t\t\t\t}\n\t\t\t\t\tctx.sendChunk('item', itemIndex, chunkText);\n\n\t\t\t\t\tagentResult.output += chunkText;\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 'on_chat_model_end':\n\t\t\t\t// Capture full LLM response with tool calls for intermediate steps\n\t\t\t\tif (returnIntermediateSteps && event.data) {\n\t\t\t\t\tconst chatModelData = event.data as any;\n\t\t\t\t\tconst output = chatModelData.output;\n\n\t\t\t\t\t// Check if this LLM response contains tool calls\n\t\t\t\t\tif (output?.tool_calls && output.tool_calls.length > 0) {\n\t\t\t\t\t\tfor (const toolCall of output.tool_calls) {\n\t\t\t\t\t\t\tagentResult.intermediateSteps!.push({\n\t\t\t\t\t\t\t\taction: {\n\t\t\t\t\t\t\t\t\ttool: toolCall.name,\n\t\t\t\t\t\t\t\t\ttoolInput: toolCall.args,\n\t\t\t\t\t\t\t\t\tlog:\n\t\t\t\t\t\t\t\t\t\toutput.content ||\n\t\t\t\t\t\t\t\t\t\t`Calling ${toolCall.name} with input: ${JSON.stringify(toolCall.args)}`,\n\t\t\t\t\t\t\t\t\tmessageLog: [output], // Include the full LLM response\n\t\t\t\t\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\t\t\t\t\ttype: toolCall.type,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase 'on_tool_end':\n\t\t\t\t// Capture tool execution results and match with action\n\t\t\t\tif (returnIntermediateSteps && event.data && agentResult.intermediateSteps!.length > 0) {\n\t\t\t\t\tconst toolData = event.data as any;\n\t\t\t\t\t// Find the matching intermediate step for this tool call\n\t\t\t\t\tconst matchingStep = agentResult.intermediateSteps!.find(\n\t\t\t\t\t\t(step) => !step.observation && step.action.tool === event.name,\n\t\t\t\t\t);\n\t\t\t\t\tif (matchingStep) {\n\t\t\t\t\t\tmatchingStep.observation = toolData.output;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t}\n\t}\n\tctx.sendChunk('end', itemIndex);\n\n\treturn agentResult;\n}\n\n/* -----------------------------------------------------------\n Main Executor Function\n----------------------------------------------------------- */\n/**\n * The main executor method for the Tools Agent.\n *\n * This function retrieves necessary components (model, memory, tools), prepares the prompt,\n * creates the agent, and processes each input item. The error handling for each item is also\n * managed here based on the node's continueOnFail setting.\n *\n * @param this Execute context. 
SupplyDataContext is passed when agent is as a tool\n *\n * @returns The array of execution data for all processed items\n */\nexport async function toolsAgentExecute(\n\tthis: IExecuteFunctions | ISupplyDataFunctions,\n): Promise<INodeExecutionData[][]> {\n\tthis.logger.debug('Executing Tools Agent V2');\n\n\tconst returnData: INodeExecutionData[] = [];\n\tconst items = this.getInputData();\n\tconst batchSize = this.getNodeParameter('options.batching.batchSize', 0, 1) as number;\n\tconst delayBetweenBatches = this.getNodeParameter(\n\t\t'options.batching.delayBetweenBatches',\n\t\t0,\n\t\t0,\n\t) as number;\n\tconst needsFallback = this.getNodeParameter('needsFallback', 0, false) as boolean;\n\tconst memory = await getOptionalMemory(this);\n\tconst model = await getChatModel(this, 0);\n\tassert(model, 'Please connect a model to the Chat Model input');\n\tconst fallbackModel = needsFallback ? await getChatModel(this, 1) : null;\n\n\tif (needsFallback && !fallbackModel) {\n\t\tthrow new NodeOperationError(\n\t\t\tthis.getNode(),\n\t\t\t'Please connect a model to the Fallback Model input or disable the fallback option',\n\t\t);\n\t}\n\n\t// Check if streaming is enabled\n\tconst enableStreaming = this.getNodeParameter('options.enableStreaming', 0, true) as boolean;\n\n\tfor (let i = 0; i < items.length; i += batchSize) {\n\t\tconst batch = items.slice(i, i + batchSize);\n\t\tconst batchPromises = batch.map(async (_item, batchItemIndex) => {\n\t\t\tconst itemIndex = i + batchItemIndex;\n\n\t\t\tconst input = getPromptInputByType({\n\t\t\t\tctx: this,\n\t\t\t\ti: itemIndex,\n\t\t\t\tinputKey: 'text',\n\t\t\t\tpromptTypeKey: 'promptType',\n\t\t\t});\n\t\t\tif (input === undefined) {\n\t\t\t\tthrow new NodeOperationError(this.getNode(), 'The \"text\" parameter is empty.');\n\t\t\t}\n\t\t\tconst outputParser = await getOptionalOutputParser(this, itemIndex);\n\t\t\tconst tools = await getTools(this, outputParser);\n\t\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\t\tsystemMessage?: string;\n\t\t\t\tmaxIterations?: number;\n\t\t\t\treturnIntermediateSteps?: boolean;\n\t\t\t\tpassthroughBinaryImages?: boolean;\n\t\t\t};\n\n\t\t\t// Prepare the prompt messages and prompt template.\n\t\t\tconst messages = await prepareMessages(this, itemIndex, {\n\t\t\t\tsystemMessage: options.systemMessage,\n\t\t\t\tpassthroughBinaryImages: options.passthroughBinaryImages ?? true,\n\t\t\t\toutputParser,\n\t\t\t});\n\t\t\tconst prompt: ChatPromptTemplate = preparePrompt(messages);\n\n\t\t\t// Create executors for primary and fallback models\n\t\t\tconst executor = createAgentExecutor(\n\t\t\t\tmodel,\n\t\t\t\ttools,\n\t\t\t\tprompt,\n\t\t\t\toptions,\n\t\t\t\toutputParser,\n\t\t\t\tmemory,\n\t\t\t\tfallbackModel,\n\t\t\t);\n\t\t\t// Invoke with fallback logic\n\t\t\tconst invokeParams = {\n\t\t\t\tinput,\n\t\t\t\tsystem_message: options.systemMessage ?? SYSTEM_MESSAGE,\n\t\t\t\tformatting_instructions:\n\t\t\t\t\t'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer.',\n\t\t\t};\n\t\t\tconst executeOptions = { signal: this.getExecutionCancelSignal() };\n\n\t\t\t// Check if streaming is actually available\n\t\t\tconst isStreamingAvailable = 'isStreaming' in this ? 
this.isStreaming?.() : undefined;\n\n\t\t\tif (\n\t\t\t\t'isStreaming' in this &&\n\t\t\t\tenableStreaming &&\n\t\t\t\tisStreamingAvailable &&\n\t\t\t\tthis.getNode().typeVersion >= 2.1\n\t\t\t) {\n\t\t\t\t// Get chat history respecting the context window length configured in memory\n\t\t\t\tlet chatHistory;\n\t\t\t\tif (memory) {\n\t\t\t\t\t// Load memory variables to respect context window length\n\t\t\t\t\tconst memoryVariables = await memory.loadMemoryVariables({});\n\t\t\t\t\tchatHistory = memoryVariables['chat_history'];\n\t\t\t\t}\n\t\t\t\tconst eventStream = executor.streamEvents(\n\t\t\t\t\t{\n\t\t\t\t\t\t...invokeParams,\n\t\t\t\t\t\tchat_history: chatHistory ?? undefined,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tversion: 'v2',\n\t\t\t\t\t\t...executeOptions,\n\t\t\t\t\t},\n\t\t\t\t);\n\n\t\t\t\treturn await processEventStream(\n\t\t\t\t\tthis,\n\t\t\t\t\teventStream,\n\t\t\t\t\titemIndex,\n\t\t\t\t\toptions.returnIntermediateSteps,\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Handle regular execution\n\t\t\t\treturn await executor.invoke(invokeParams, executeOptions);\n\t\t\t}\n\t\t});\n\n\t\tconst batchResults = await Promise.allSettled(batchPromises);\n\t\t// This is only used to check if the output parser is connected\n\t\t// so we can parse the output if needed. Actual output parsing is done in the loop above\n\t\tconst outputParser = await getOptionalOutputParser(this, 0);\n\t\tbatchResults.forEach((result, index) => {\n\t\t\tconst itemIndex = i + index;\n\t\t\tif (result.status === 'rejected') {\n\t\t\t\tconst error = result.reason as Error;\n\t\t\t\tif (this.continueOnFail()) {\n\t\t\t\t\treturnData.push({\n\t\t\t\t\t\tjson: { error: error.message },\n\t\t\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t\t\t});\n\t\t\t\t\treturn;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error);\n\t\t\t\t}\n\t\t\t}\n\t\t\tconst response = result.value;\n\t\t\t// If memory and outputParser are connected, parse the output.\n\t\t\tif (memory && outputParser) {\n\t\t\t\tconst parsedOutput = jsonParse<{ output: Record<string, unknown> }>(\n\t\t\t\t\tresponse.output as string,\n\t\t\t\t);\n\t\t\t\tresponse.output = parsedOutput?.output ?? 
parsedOutput;\n\t\t\t}\n\n\t\t\t// Omit internal keys before returning the result.\n\t\t\tconst itemResult = {\n\t\t\t\tjson: omit(\n\t\t\t\t\tresponse,\n\t\t\t\t\t'system_message',\n\t\t\t\t\t'formatting_instructions',\n\t\t\t\t\t'input',\n\t\t\t\t\t'chat_history',\n\t\t\t\t\t'agent_scratchpad',\n\t\t\t\t),\n\t\t\t\tpairedItem: { item: itemIndex },\n\t\t\t};\n\n\t\t\treturnData.push(itemResult);\n\t\t});\n\n\t\tif (i + batchSize < items.length && delayBetweenBatches > 0) {\n\t\t\tawait sleep(delayBetweenBatches);\n\t\t}\n\t}\n\n\treturn [returnData];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAKA,uBAAiC;AACjC,oBAIO;AAGP,kBAAiB;AACjB,0BAAqD;AAErD,yBAAmB;AAEnB,qBAAqC;AACrC,6BAGO;AAEP,oBAQO;AACP,oBAA+B;AAK/B,SAAS,oBACR,OACA,OACA,QACA,SACA,cACA,QACA,eACC;AACD,QAAM,YAAQ,sCAAuB;AAAA,IACpC,KAAK;AAAA,IACL;AAAA,IACA;AAAA,IACA,gBAAgB;AAAA,EACjB,CAAC;AAED,MAAI;AACJ,MAAI,eAAe;AAClB,wBAAgB,sCAAuB;AAAA,MACtC,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,gBAAgB;AAAA,IACjB,CAAC;AAAA,EACF;AACA,QAAM,gBAAgB,kCAAiB,KAAK;AAAA,IAC3C,gBAAgB,MAAM,cAAc,CAAC,aAAa,CAAC,IAAI;AAAA,QACvD,mCAAoB,cAAc,MAAM;AAAA,IACxC;AAAA,EACD,CAAC;AAED,gBAAc,eAAe;AAC7B,gBAAc,iBAAiB;AAE/B,SAAO,4BAAc,kBAAkB;AAAA,IACtC,OAAO;AAAA,IACP;AAAA,IACA;AAAA,IACA,yBAAyB,QAAQ,4BAA4B;AAAA,IAC7D,eAAe,QAAQ,iBAAiB;AAAA,EACzC,CAAC;AACF;AAEA,eAAe,mBACd,KACA,aACA,WACA,0BAAmC,OACsB;AACzD,QAAM,cAA6D;AAAA,IAClE,QAAQ;AAAA,EACT;AAEA,MAAI,yBAAyB;AAC5B,gBAAY,oBAAoB,CAAC;AAAA,EAClC;AAEA,MAAI,UAAU,SAAS,SAAS;AAChC,mBAAiB,SAAS,aAAa;AAEtC,YAAQ,MAAM,OAAO;AAAA,MACpB,KAAK;AACJ,cAAM,QAAQ,MAAM,MAAM;AAC1B,YAAI,OAAO,SAAS;AACnB,gBAAM,eAAe,MAAM;AAC3B,cAAI,YAAY;AAChB,cAAI,MAAM,QAAQ,YAAY,GAAG;AAChC,uBAAW,WAAW,cAAc;AACnC,kBAAI,SAAS,SAAS,QAAQ;AAC7B,6BAAc,SAAgC;AAAA,cAC/C;AAAA,YACD;AAAA,UACD,WAAW,OAAO,iBAAiB,UAAU;AAC5C,wBAAY;AAAA,UACb;AACA,cAAI,UAAU,QAAQ,WAAW,SAAS;AAE1C,sBAAY,UAAU;AAAA,QACvB;AACA;AAAA,MACD,KAAK;AAEJ,YAAI,2BAA2B,MAAM,MAAM;AAC1C,gBAAM,gBAAgB,MAAM;AAC5B,gBAAM,SAAS,cAAc;AAG7B,cAAI,QAAQ,cAAc,OAAO,WAAW,SAAS,GAAG;AACvD,uBAAW,YAAY,OAAO,YAAY;AACzC,0BAAY,kBAAmB,KAAK;AAAA,gBACnC,QAAQ;AAAA,kBACP,MAAM,SAAS;AAAA,kBACf,WAAW,SAAS;AAAA,kBACpB,KACC,OAAO,WACP,WAAW,SAAS,IAAI,gBAAgB,KAAK,UAAU,SAAS,IAAI,CAAC;AAAA,kBACtE,YAAY,CAAC,MAAM;AAAA;AAAA,kBACnB,YAAY,SAAS;AAAA,kBACrB,MAAM,SAAS;AAAA,gBAChB;AAAA,cACD,CAAC;AAAA,YACF;AAAA,UACD;AAAA,QACD;AACA;AAAA,MACD,KAAK;AAEJ,YAAI,2BAA2B,MAAM,QAAQ,YAAY,kBAAmB,SAAS,GAAG;AACvF,gBAAM,WAAW,MAAM;AAEvB,gBAAM,eAAe,YAAY,kBAAmB;AAAA,YACnD,CAAC,SAAS,CAAC,KAAK,eAAe,KAAK,OAAO,SAAS,MAAM;AAAA,UAC3D;AACA,cAAI,cAAc;AACjB,yBAAa,cAAc,SAAS;AAAA,UACrC;AAAA,QACD;AACA;AAAA,MACD;AACC;AAAA,IACF;AAAA,EACD;AACA,MAAI,UAAU,OAAO,SAAS;AAE9B,SAAO;AACR;AAgBA,eAAsB,oBAEa;AAClC,OAAK,OAAO,MAAM,0BAA0B;AAE5C,QAAM,aAAmC,CAAC;AAC1C,QAAM,QAAQ,KAAK,aAAa;AAChC,QAAM,YAAY,KAAK,iBAAiB,8BAA8B,GAAG,CAAC;AAC1E,QAAM,sBAAsB,KAAK;AAAA,IAChC;AAAA,IACA;AAAA,IACA;AAAA,EACD;AACA,QAAM,gBAAgB,KAAK,iBAAiB,iBAAiB,GAAG,KAAK;AACrE,QAAM,SAAS,UAAM,iCAAkB,IAAI;AAC3C,QAAM,QAAQ,UAAM,4BAAa,MAAM,CAAC;AACxC,yBAAAA,SAAO,OAAO,gDAAgD;AAC9D,QAAM,gBAAgB,gBAAgB,UAAM,4BAAa,MAAM,CAAC,IAAI;AAEpE,MAAI,iBAAiB,CAAC,eAAe;AACpC,UAAM,IAAI;AAAA,MACT,KAAK,QAAQ;AAAA,MACb;AAAA,IACD;AAAA,EACD;AAGA,QAAM,kBAAkB,KAAK,iBAAiB,2BAA2B,GAAG,IAAI;AAEhF,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AACjD,UAAM,QAAQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAC1C,UAAM,gBAAgB,MAAM,IAAI,OAAO,OAAO,mBAAmB;AAChE,YAAM,YAAY,IAAI;AAEtB,YAAM,YAAQ,qCAAqB;AAAA,QAClC,KAAK;AAAA,QACL,GAAG;AAAA,QACH,UAAU;AAAA,QACV,eAAe;AAAA,MAChB,CAAC;AACD,UAAI,UAAU,QAAW;AACxB,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,gCAAgC;AAAA,MAC9E;AACA,YAAMC,gBAAe,UAAM,gDAAwB,MAAM,SAAS;AAClE,YAAM,QAAQ,UAAM
,wBAAS,MAAMA,aAAY;AAC/C,YAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAQ9D,YAAM,WAAW,UAAM,+BAAgB,MAAM,WAAW;AAAA,QACvD,eAAe,QAAQ;AAAA,QACvB,yBAAyB,QAAQ,2BAA2B;AAAA,QAC5D,cAAAA;AAAA,MACD,CAAC;AACD,YAAM,aAA6B,6BAAc,QAAQ;AAGzD,YAAM,WAAW;AAAA,QAChB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACAA;AAAA,QACA;AAAA,QACA;AAAA,MACD;AAEA,YAAM,eAAe;AAAA,QACpB;AAAA,QACA,gBAAgB,QAAQ,iBAAiB;AAAA,QACzC,yBACC;AAAA,MACF;AACA,YAAM,iBAAiB,EAAE,QAAQ,KAAK,yBAAyB,EAAE;AAGjE,YAAM,uBAAuB,iBAAiB,OAAO,KAAK,cAAc,IAAI;AAE5E,UACC,iBAAiB,QACjB,mBACA,wBACA,KAAK,QAAQ,EAAE,eAAe,KAC7B;AAED,YAAI;AACJ,YAAI,QAAQ;AAEX,gBAAM,kBAAkB,MAAM,OAAO,oBAAoB,CAAC,CAAC;AAC3D,wBAAc,gBAAgB,cAAc;AAAA,QAC7C;AACA,cAAM,cAAc,SAAS;AAAA,UAC5B;AAAA,YACC,GAAG;AAAA,YACH,cAAc,eAAe;AAAA,UAC9B;AAAA,UACA;AAAA,YACC,SAAS;AAAA,YACT,GAAG;AAAA,UACJ;AAAA,QACD;AAEA,eAAO,MAAM;AAAA,UACZ;AAAA,UACA;AAAA,UACA;AAAA,UACA,QAAQ;AAAA,QACT;AAAA,MACD,OAAO;AAEN,eAAO,MAAM,SAAS,OAAO,cAAc,cAAc;AAAA,MAC1D;AAAA,IACD,CAAC;AAED,UAAM,eAAe,MAAM,QAAQ,WAAW,aAAa;AAG3D,UAAM,eAAe,UAAM,gDAAwB,MAAM,CAAC;AAC1D,iBAAa,QAAQ,CAAC,QAAQ,UAAU;AACvC,YAAM,YAAY,IAAI;AACtB,UAAI,OAAO,WAAW,YAAY;AACjC,cAAM,QAAQ,OAAO;AACrB,YAAI,KAAK,eAAe,GAAG;AAC1B,qBAAW,KAAK;AAAA,YACf,MAAM,EAAE,OAAO,MAAM,QAAQ;AAAA,YAC7B,YAAY,EAAE,MAAM,UAAU;AAAA,UAC/B,CAAC;AACD;AAAA,QACD,OAAO;AACN,gBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,KAAK;AAAA,QACnD;AAAA,MACD;AACA,YAAM,WAAW,OAAO;AAExB,UAAI,UAAU,cAAc;AAC3B,cAAM,mBAAe;AAAA,UACpB,SAAS;AAAA,QACV;AACA,iBAAS,SAAS,cAAc,UAAU;AAAA,MAC3C;AAGA,YAAM,aAAa;AAAA,QAClB,UAAM,YAAAC;AAAA,UACL;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACD;AAAA,QACA,YAAY,EAAE,MAAM,UAAU;AAAA,MAC/B;AAEA,iBAAW,KAAK,UAAU;AAAA,IAC3B,CAAC;AAED,QAAI,IAAI,YAAY,MAAM,UAAU,sBAAsB,GAAG;AAC5D,gBAAM,2BAAM,mBAAmB;AAAA,IAChC;AAAA,EACD;AAEA,SAAO,CAAC,UAAU;AACnB;","names":["assert","outputParser","omit"]}
@@ -127,7 +127,9 @@ class LmChatGoogleGemini {
    },
    default: "models/gemini-2.5-flash"
  },
- import_additional_options.additionalOptions
+ // thinking budget not supported in @langchain/google-genai
+ // as it utilises the old google generative ai SDK
+ (0, import_additional_options.getAdditionalOptions)({ supportsThinkingBudget: false })
  ]
  };
  }
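In `LmChatGoogleGemini.node.js` the shared `additionalOptions` property is replaced by a `getAdditionalOptions({ supportsThinkingBudget: false })` factory call, because this node uses `@langchain/google-genai` (the older Google Generative AI SDK), which lacks thinking-budget support; the Vertex node calls the same factory with `supportsThinkingBudget: true`. The factory lives in `gemini-common/additional-options.js`, whose contents are not shown here; the following is only a rough, hypothetical sketch of such a factory, with all field names and defaults beyond `supportsThinkingBudget` being assumptions:

```ts
import type { INodeProperties } from "n8n-workflow";

// Hypothetical factory: returns the shared "Options" collection, optionally
// including a Thinking Budget field when the underlying SDK supports it.
export function getAdditionalOptions({
  supportsThinkingBudget,
}: { supportsThinkingBudget: boolean }): INodeProperties {
  // Assumed option fields for illustration only.
  const extraFields: INodeProperties[] = [
    { displayName: "Maximum Number of Tokens", name: "maxOutputTokens", type: "number", default: 2048 },
    { displayName: "Sampling Temperature", name: "temperature", type: "number", default: 0.4 },
  ];

  if (supportsThinkingBudget) {
    extraFields.push({
      displayName: "Thinking Budget",
      name: "thinkingBudget",
      type: "number",
      default: -1, // assumption: -1 meaning "let the model decide"
    });
  }

  return {
    displayName: "Options",
    name: "options",
    type: "collection",
    placeholder: "Add Option",
    default: {},
    options: extraFields,
  };
}
```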
@@ -1 +1 @@
- {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts"],"sourcesContent":["import type { SafetySetting } from '@google/generative-ai';\nimport { ChatGoogleGenerativeAI } from '@langchain/google-genai';\nimport { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tNodeError,\n\tINodeType,\n\tINodeTypeDescription,\n\tISupplyDataFunctions,\n\tSupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nfunction errorDescriptionMapper(error: NodeError) {\n\tif (error.description?.includes('properties: should be non-empty for OBJECT type')) {\n\t\treturn 'Google Gemini requires at least one <a href=\"https://docs.n8n.io/advanced-ai/examples/using-the-fromai-function/\" target=\"_blank\">dynamic parameter</a> when using tools';\n\t}\n\n\treturn error.description ?? 'Unknown error';\n}\nexport class LmChatGoogleGemini implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Gemini Chat Model',\n\n\t\tname: 'lmChatGoogleGemini',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Gemini',\n\t\tdefaults: {\n\t\t\tname: 'Google Gemini Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googlePalmApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials.host }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/v1beta/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'models',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: \"={{ !$responseItem.name.includes('embedding') }}\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tdescription: '={{$responseItem.description}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'models/gemini-2.5-flash',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googlePalmApi');\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 1024,\n\t\t\ttemperature: 0.7,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\tconst model = new ChatGoogleGenerativeAI({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tbaseUrl: credentials.host as string,\n\t\t\tmodel: modelName,\n\t\t\ttopK: options.topK,\n\t\t\ttopP: options.topP,\n\t\t\ttemperature: options.temperature,\n\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\tsafetySettings,\n\t\t\tcallbacks: [new N8nLlmTracing(this, { errorDescriptionMapper })],\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAuC;AACvC,0BAAoC;AASpC,0BAA6C;AAE7C,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAE9B,SAAS,uBAAuB,OAAkB;AACjD,MAAI,MAAM,aAAa,SAAS,iDAAiD,GAAG;AACnF,WAAO;AAAA,EACR;AAEA,SAAO,MAAM,eAAe;AAC7B;AACO,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,wBACP,aAAa;AAAA,sBACd;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,eAAe;AAE7D,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAC9D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,2CAAuB;AAAA,MACxC,QAAQ,YAAY;AAAA,MACpB,SAAS,YAAY;AAAA,MACrB,OAAO;AAAA,MACP,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,aAAa,QAAQ;AAAA,MACrB,iBAAiB,QAAQ;AAAA,MACzB;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,MAAM,EAAE,uBAAuB,CAAC,CAAC;AAAA,MAC/D,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
+ {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts"],"sourcesContent":["import type { SafetySetting } from '@google/generative-ai';\nimport { ChatGoogleGenerativeAI } from '@langchain/google-genai';\nimport { NodeConnectionTypes } from 'n8n-workflow';\nimport type {\n\tNodeError,\n\tINodeType,\n\tINodeTypeDescription,\n\tISupplyDataFunctions,\n\tSupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { getAdditionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nfunction errorDescriptionMapper(error: NodeError) {\n\tif (error.description?.includes('properties: should be non-empty for OBJECT type')) {\n\t\treturn 'Google Gemini requires at least one <a href=\"https://docs.n8n.io/advanced-ai/examples/using-the-fromai-function/\" target=\"_blank\">dynamic parameter</a> when using tools';\n\t}\n\n\treturn error.description ?? 'Unknown error';\n}\nexport class LmChatGoogleGemini implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Gemini Chat Model',\n\n\t\tname: 'lmChatGoogleGemini',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Gemini',\n\t\tdefaults: {\n\t\t\tname: 'Google Gemini Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googlePalmApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials.host }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/v1beta/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'models',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'filter',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tpass: \"={{ !$responseItem.name.includes('embedding') }}\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.name}}',\n\t\t\t\t\t\t\t\t\t\t\tdescription: '={{$responseItem.description}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'models/gemini-2.5-flash',\n\t\t\t},\n\t\t\t// thinking budget not supported in @langchain/google-genai\n\t\t\t// as it utilises the old google generative ai SDK\n\t\t\tgetAdditionalOptions({ supportsThinkingBudget: false }),\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googlePalmApi');\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 1024,\n\t\t\ttemperature: 0.7,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\tconst model = new ChatGoogleGenerativeAI({\n\t\t\tapiKey: credentials.apiKey as string,\n\t\t\tbaseUrl: credentials.host as string,\n\t\t\tmodel: modelName,\n\t\t\ttopK: options.topK,\n\t\t\ttopP: options.topP,\n\t\t\ttemperature: options.temperature,\n\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\tsafetySettings,\n\t\t\tcallbacks: [new N8nLlmTracing(this, { errorDescriptionMapper })],\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAuC;AACvC,0BAAoC;AASpC,0BAA6C;AAE7C,gCAAqC;AACrC,wCAA+C;AAC/C,2BAA8B;AAE9B,SAAS,uBAAuB,OAAkB;AACjD,MAAI,MAAM,aAAa,SAAS,iDAAiD,GAAG;AACnF,WAAO;AAAA,EACR;AAEA,SAAO,MAAM,eAAe;AAC7B;AACO,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,sBACP;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,wBACP,aAAa;AAAA,sBACd;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA;AAAA;AAAA,YAGA,gDAAqB,EAAE,wBAAwB,MAAM,CAAC;AAAA,MACvD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,eAAe;AAE7D,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAC9D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,2CAAuB;AAAA,MACxC,QAAQ,YAAY;AAAA,MACpB,SAAS,YAAY;AAAA,MACrB,OAAO;AAAA,MACP,MAAM,QAAQ;AAAA,MACd,MAAM,QAAQ;AAAA,MACd,aAAa,QAAQ;AAAA,MACrB,iBAAiB,QAAQ;AAAA,MACzB;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,MAAM,EAAE,uBAAuB,CAAC,CAAC;AAAA,MAC/D,qBAAiB,kEAA+B,IAAI;AAAA,IACrD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
@@ -95,9 +95,9 @@ class LmChatGoogleVertex {
  name: "modelName",
  type: "string",
  description: 'The model which will generate the completion. <a href="https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models">Learn more</a>.',
- default: "gemini-1.5-flash"
+ default: "gemini-2.5-flash"
  },
- import_additional_options.additionalOptions
+ (0, import_additional_options.getAdditionalOptions)({ supportsThinkingBudget: true })
  ]
  };
  this.methods = {
@@ -142,13 +142,24 @@ class LmChatGoogleVertex {
  topK: 40,
  topP: 0.9
  });
+ (0, import_n8n_workflow.validateNodeParameters)(
+   options,
+   {
+     maxOutputTokens: { type: "number", required: false },
+     temperature: { type: "number", required: false },
+     topK: { type: "number", required: false },
+     topP: { type: "number", required: false },
+     thinkingBudget: { type: "number", required: false }
+   },
+   this.getNode()
+ );
  const safetySettings = this.getNodeParameter(
    "options.safetySettings.values",
    itemIndex,
    null
  );
  try {
-   const model = new import_google_vertexai.ChatVertexAI({
+   const modelConfig = {
      authOptions: {
        projectId,
        credentials: {
@@ -174,7 +185,11 @@ class LmChatGoogleVertex {
    }
    throw error;
  })
- });
+ };
+ if (options.thinkingBudget !== void 0) {
+   modelConfig.thinkingBudget = options.thinkingBudget;
+ }
+ const model = new import_google_vertexai.ChatVertexAI(modelConfig);
  return {
    response: model
  };
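`LmChatGoogleVertex` now validates the numeric options with `validateNodeParameters` and builds the `ChatVertexAI` config in a local object, adding `thinkingBudget` only when the user actually supplied it so an unset option leaves the model's default thinking behaviour untouched. A reduced sketch of that conditional-assignment pattern; auth options, safety settings and callbacks are omitted, and the option values are placeholders:

```ts
import { ChatVertexAI } from "@langchain/google-vertexai";

// Minimal local shape for the fields used in this sketch; the real node also
// passes authOptions, safetySettings, callbacks and an onFailedAttempt handler.
interface VertexModelConfig {
  model: string;
  temperature: number;
  maxOutputTokens: number;
  thinkingBudget?: number;
}

// Placeholder option values, standing in for this.getNodeParameter('options', ...).
const options: { temperature: number; maxOutputTokens: number; thinkingBudget?: number } = {
  temperature: 0.4,
  maxOutputTokens: 2048,
};

// Build the config first, then add thinkingBudget only when it was provided.
const modelConfig: VertexModelConfig = {
  model: "gemini-2.5-flash",
  temperature: options.temperature,
  maxOutputTokens: options.maxOutputTokens,
};

if (options.thinkingBudget !== undefined) {
  modelConfig.thinkingBudget = options.thinkingBudget;
}

const model = new ChatVertexAI(modelConfig);
```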
@@ -1 +1 @@
- {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.ts"],"sourcesContent":["import type { SafetySetting } from '@google/generative-ai';\nimport { ProjectsClient } from '@google-cloud/resource-manager';\nimport { ChatVertexAI } from '@langchain/google-vertexai';\nimport { formatPrivateKey } from 'n8n-nodes-base/dist/utils/utilities';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n\ttype ILoadOptionsFunctions,\n\ttype JsonObject,\n\tNodeOperationError,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { makeErrorFromStatus } from './error-handling';\nimport { additionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatGoogleVertex implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Vertex Chat Model',\n\n\t\tname: 'lmChatGoogleVertex',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Vertex',\n\t\tdefaults: {\n\t\t\tname: 'Google Vertex Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googleApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Project ID',\n\t\t\t\tname: 'projectId',\n\t\t\t\ttype: 'resourceLocator',\n\t\t\t\tdefault: { mode: 'list', value: '' },\n\t\t\t\trequired: true,\n\t\t\t\tdescription: 'Select or enter your Google Cloud project ID',\n\t\t\t\tmodes: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'From List',\n\t\t\t\t\t\tname: 'list',\n\t\t\t\t\t\ttype: 'list',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tsearchListMethod: 'gcpProjectsList',\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'ID',\n\t\t\t\t\t\tname: 'id',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model Name',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'string',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.',\n\t\t\t\tdefault: 'gemini-1.5-flash',\n\t\t\t},\n\t\t\tadditionalOptions,\n\t\t],\n\t};\n\n\tmethods = {\n\t\tlistSearch: {\n\t\t\tasync gcpProjectsList(this: ILoadOptionsFunctions) {\n\t\t\t\tconst results: Array<{ name: string; value: string }> = [];\n\n\t\t\t\tconst credentials = await this.getCredentials('googleApi');\n\t\t\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\t\t\tconst email = (credentials.email as string).trim();\n\n\t\t\t\tconst client = new ProjectsClient({\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t});\n\n\t\t\t\tconst [projects] = await client.searchProjects();\n\n\t\t\t\tfor (const project of projects) {\n\t\t\t\t\tif (project.projectId) {\n\t\t\t\t\t\tresults.push({\n\t\t\t\t\t\t\tname: project.displayName ?? project.projectId,\n\t\t\t\t\t\t\tvalue: project.projectId,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn { results };\n\t\t\t},\n\t\t},\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googleApi');\n\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\tconst email = (credentials.email as string).trim();\n\t\tconst region = credentials.region as string;\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\n\t\tconst projectId = this.getNodeParameter('projectId', itemIndex, '', {\n\t\t\textractValue: true,\n\t\t}) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 2048,\n\t\t\ttemperature: 0.4,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t}) as {\n\t\t\tmaxOutputTokens: number;\n\t\t\ttemperature: number;\n\t\t\ttopK: number;\n\t\t\ttopP: number;\n\t\t};\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as SafetySetting[];\n\n\t\ttry {\n\t\t\tconst model = new ChatVertexAI({\n\t\t\t\tauthOptions: {\n\t\t\t\t\tprojectId,\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlocation: region,\n\t\t\t\tmodel: modelName,\n\t\t\t\ttopK: options.topK,\n\t\t\t\ttopP: options.topP,\n\t\t\t\ttemperature: options.temperature,\n\t\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\t\tsafetySettings,\n\t\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\t\t// Handle ChatVertexAI invocation errors to provide better error messages\n\t\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, (error: any) => {\n\t\t\t\t\t// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access\n\t\t\t\t\tconst customError = makeErrorFromStatus(Number(error?.response?.status), {\n\t\t\t\t\t\tmodelName,\n\t\t\t\t\t});\n\n\t\t\t\t\tif (customError) {\n\t\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error as JsonObject, customError);\n\t\t\t\t\t}\n\n\t\t\t\t\tthrow error;\n\t\t\t\t}),\n\t\t\t});\n\n\t\t\treturn {\n\t\t\t\tresponse: model,\n\t\t\t};\n\t\t} catch (e) {\n\t\t\t// Catch model name validation error from LangChain (https://github.com/langchain-ai/langchainjs/blob/ef201d0ee85ee4049078270a0cfd7a1767e624f8/libs/langchain-google-common/src/utils/common.ts#L124)\n\t\t\t// to show more helpful error message\n\t\t\tif (e?.message?.startsWith('Unable to verify model params')) {\n\t\t\t\tthrow new 
NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\t\tmessage: 'Unsupported model',\n\t\t\t\t\tdescription: \"Only models starting with 'gemini' are supported.\",\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Assume all other exceptions while creating a new ChatVertexAI instance are parameter validation errors\n\t\t\tthrow new NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\tmessage: 'Invalid options',\n\t\t\t\tdescription: e.message,\n\t\t\t});\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,8BAA+B;AAC/B,6BAA6B;AAC7B,uBAAiC;AACjC,0BASO;AAEP,0BAA6C;AAE7C,4BAAoC;AACpC,gCAAkC;AAClC,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS,EAAE,MAAM,QAAQ,OAAO,GAAG;AAAA,UACnC,UAAU;AAAA,UACV,aAAa;AAAA,UACb,OAAO;AAAA,YACN;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,kBAAkB;AAAA,cACnB;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,SAAS;AAAA,QACV;AAAA,QACA;AAAA,MACD;AAAA,IACD;AAEA,mBAAU;AAAA,MACT,YAAY;AAAA,QACX,MAAM,kBAA6C;AAClD,gBAAM,UAAkD,CAAC;AAEzD,gBAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,gBAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,gBAAM,QAAS,YAAY,MAAiB,KAAK;AAEjD,gBAAM,SAAS,IAAI,uCAAe;AAAA,YACjC,aAAa;AAAA,cACZ,cAAc;AAAA,cACd,aAAa;AAAA,YACd;AAAA,UACD,CAAC;AAED,gBAAM,CAAC,QAAQ,IAAI,MAAM,OAAO,eAAe;AAE/C,qBAAW,WAAW,UAAU;AAC/B,gBAAI,QAAQ,WAAW;AACtB,sBAAQ,KAAK;AAAA,gBACZ,MAAM,QAAQ,eAAe,QAAQ;AAAA,gBACrC,OAAO,QAAQ;AAAA,cAChB,CAAC;AAAA,YACF;AAAA,UACD;AAEA,iBAAO,EAAE,QAAQ;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,UAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,UAAM,QAAS,YAAY,MAAiB,KAAK;AACjD,UAAM,SAAS,YAAY;AAE3B,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAE9D,UAAM,YAAY,KAAK,iBAAiB,aAAa,WAAW,IAAI;AAAA,MACnE,cAAc;AAAA,IACf,CAAC;AAED,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAOD,UAAM,iBAAiB,KAAK;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,QAAI;AACH,YAAM,QAAQ,IAAI,oCAAa;AAAA,QAC9B,aAAa;AAAA,UACZ;AAAA,UACA,aAAa;AAAA,YACZ,cAAc;AAAA,YACd,aAAa;AAAA,UACd;AAAA,QACD;AAAA,QACA,UAAU;AAAA,QACV,OAAO;AAAA,QACP,MAAM,QAAQ;AAAA,QACd,MAAM,QAAQ;AAAA,QACd,aAAa,QAAQ;AAAA,QACrB,iBAAiB,QAAQ;AAAA,QACzB;AAAA,QACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA;AAAA,QAEnC,qBAAiB,kEAA+B,MAAM,CAAC,UAAe;AAErE,gBAAM,kBAAc,2CAAoB,OAAO,OAAO,UAAU,MAAM,GAAG;AAAA,YACxE;AAAA,UACD,CAAC;AAED,cAAI,aAAa;AAChB,kBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,OAAqB,WAAW;AAAA,UAC9E;AAEA,gBAAM;AAAA,QACP,CAAC;AAAA,MACF,CAAC;AAED,aAAO;AAAA,QACN,UAAU;AAAA,MACX;AAAA,IACD,SAAS,GAAG;AAGX,UAAI,GAAG,SAAS,WAAW,+BAA+B,GAAG;AAC5D,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,UAC7D,SAAS;AAAA,UACT,aAAa;AAAA,QACd,CAAC;AAAA,MACF;AAGA,YAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,QAC7D,SAAS;AAAA,QACT,aAAa,EAAE;AAAA,MAChB,CAAC;AAAA,IACF;AAAA,EACD;AACD;","na
mes":[]}
1
+ {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.ts"],"sourcesContent":["import { ProjectsClient } from '@google-cloud/resource-manager';\nimport type { GoogleAISafetySetting } from '@langchain/google-common';\nimport { ChatVertexAI, type ChatVertexAIInput } from '@langchain/google-vertexai';\nimport { formatPrivateKey } from 'n8n-nodes-base/dist/utils/utilities';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n\ttype ILoadOptionsFunctions,\n\ttype JsonObject,\n\tNodeOperationError,\n\tvalidateNodeParameters,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { makeErrorFromStatus } from './error-handling';\nimport { getAdditionalOptions } from '../gemini-common/additional-options';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChatGoogleVertex implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'Google Vertex Chat Model',\n\n\t\tname: 'lmChatGoogleVertex',\n\t\ticon: 'file:google.svg',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Chat Model Google Vertex',\n\t\tdefaults: {\n\t\t\tname: 'Google Vertex Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'googleApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Project ID',\n\t\t\t\tname: 'projectId',\n\t\t\t\ttype: 'resourceLocator',\n\t\t\t\tdefault: { mode: 'list', value: '' },\n\t\t\t\trequired: true,\n\t\t\t\tdescription: 'Select or enter your Google Cloud project ID',\n\t\t\t\tmodes: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'From List',\n\t\t\t\t\t\tname: 'list',\n\t\t\t\t\t\ttype: 'list',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tsearchListMethod: 'gcpProjectsList',\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'ID',\n\t\t\t\t\t\tname: 'id',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model Name',\n\t\t\t\tname: 'modelName',\n\t\t\t\ttype: 'string',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.',\n\t\t\t\tdefault: 'gemini-2.5-flash',\n\t\t\t},\n\t\t\tgetAdditionalOptions({ supportsThinkingBudget: true }),\n\t\t],\n\t};\n\n\tmethods = {\n\t\tlistSearch: {\n\t\t\tasync gcpProjectsList(this: ILoadOptionsFunctions) {\n\t\t\t\tconst results: Array<{ name: string; value: string }> = [];\n\n\t\t\t\tconst credentials = await this.getCredentials('googleApi');\n\t\t\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\t\t\tconst email = (credentials.email as string).trim();\n\n\t\t\t\tconst client = new ProjectsClient({\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t});\n\n\t\t\t\tconst [projects] = await client.searchProjects();\n\n\t\t\t\tfor (const project of projects) {\n\t\t\t\t\tif (project.projectId) {\n\t\t\t\t\t\tresults.push({\n\t\t\t\t\t\t\tname: project.displayName ?? project.projectId,\n\t\t\t\t\t\t\tvalue: project.projectId,\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn { results };\n\t\t\t},\n\t\t},\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials('googleApi');\n\t\tconst privateKey = formatPrivateKey(credentials.privateKey as string);\n\t\tconst email = (credentials.email as string).trim();\n\t\tconst region = credentials.region as string;\n\n\t\tconst modelName = this.getNodeParameter('modelName', itemIndex) as string;\n\n\t\tconst projectId = this.getNodeParameter('projectId', itemIndex, '', {\n\t\t\textractValue: true,\n\t\t}) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {\n\t\t\tmaxOutputTokens: 2048,\n\t\t\ttemperature: 0.4,\n\t\t\ttopK: 40,\n\t\t\ttopP: 0.9,\n\t\t});\n\n\t\t// Validate options parameter\n\t\tvalidateNodeParameters(\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tmaxOutputTokens: { type: 'number', required: false },\n\t\t\t\ttemperature: { type: 'number', required: false },\n\t\t\t\ttopK: { type: 'number', required: false },\n\t\t\t\ttopP: { type: 'number', required: false },\n\t\t\t\tthinkingBudget: { type: 'number', required: false },\n\t\t\t},\n\t\t\tthis.getNode(),\n\t\t);\n\n\t\tconst safetySettings = this.getNodeParameter(\n\t\t\t'options.safetySettings.values',\n\t\t\titemIndex,\n\t\t\tnull,\n\t\t) as GoogleAISafetySetting[];\n\n\t\ttry {\n\t\t\tconst modelConfig: ChatVertexAIInput = {\n\t\t\t\tauthOptions: {\n\t\t\t\t\tprojectId,\n\t\t\t\t\tcredentials: {\n\t\t\t\t\t\tclient_email: email,\n\t\t\t\t\t\tprivate_key: privateKey,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tlocation: region,\n\t\t\t\tmodel: modelName,\n\t\t\t\ttopK: options.topK,\n\t\t\t\ttopP: options.topP,\n\t\t\t\ttemperature: options.temperature,\n\t\t\t\tmaxOutputTokens: options.maxOutputTokens,\n\t\t\t\tsafetySettings,\n\t\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\t\t// Handle ChatVertexAI invocation errors to provide better error messages\n\t\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, (error: any) => {\n\t\t\t\t\t// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access\n\t\t\t\t\tconst customError = makeErrorFromStatus(Number(error?.response?.status), {\n\t\t\t\t\t\tmodelName,\n\t\t\t\t\t});\n\n\t\t\t\t\tif (customError) {\n\t\t\t\t\t\tthrow new NodeOperationError(this.getNode(), error as JsonObject, customError);\n\t\t\t\t\t}\n\n\t\t\t\t\tthrow error;\n\t\t\t\t}),\n\t\t\t};\n\n\t\t\t// Add thinkingBudget if specified\n\t\t\tif 
(options.thinkingBudget !== undefined) {\n\t\t\t\tmodelConfig.thinkingBudget = options.thinkingBudget;\n\t\t\t}\n\n\t\t\tconst model = new ChatVertexAI(modelConfig);\n\n\t\t\treturn {\n\t\t\t\tresponse: model,\n\t\t\t};\n\t\t} catch (e) {\n\t\t\t// Catch model name validation error from LangChain (https://github.com/langchain-ai/langchainjs/blob/ef201d0ee85ee4049078270a0cfd7a1767e624f8/libs/langchain-google-common/src/utils/common.ts#L124)\n\t\t\t// to show more helpful error message\n\t\t\tif (e?.message?.startsWith('Unable to verify model params')) {\n\t\t\t\tthrow new NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\t\tmessage: 'Unsupported model',\n\t\t\t\t\tdescription: \"Only models starting with 'gemini' are supported.\",\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Assume all other exceptions while creating a new ChatVertexAI instance are parameter validation errors\n\t\t\tthrow new NodeOperationError(this.getNode(), e as JsonObject, {\n\t\t\t\tmessage: 'Invalid options',\n\t\t\t\tdescription: e.message,\n\t\t\t});\n\t\t}\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,8BAA+B;AAE/B,6BAAqD;AACrD,uBAAiC;AACjC,0BAUO;AAEP,0BAA6C;AAE7C,4BAAoC;AACpC,gCAAqC;AACrC,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,mBAAwC;AAAA,EAA9C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS,EAAE,MAAM,QAAQ,OAAO,GAAG;AAAA,UACnC,UAAU;AAAA,UACV,aAAa;AAAA,UACb,OAAO;AAAA,YACN;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,kBAAkB;AAAA,cACnB;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,SAAS;AAAA,QACV;AAAA,YACA,gDAAqB,EAAE,wBAAwB,KAAK,CAAC;AAAA,MACtD;AAAA,IACD;AAEA,mBAAU;AAAA,MACT,YAAY;AAAA,QACX,MAAM,kBAA6C;AAClD,gBAAM,UAAkD,CAAC;AAEzD,gBAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,gBAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,gBAAM,QAAS,YAAY,MAAiB,KAAK;AAEjD,gBAAM,SAAS,IAAI,uCAAe;AAAA,YACjC,aAAa;AAAA,cACZ,cAAc;AAAA,cACd,aAAa;AAAA,YACd;AAAA,UACD,CAAC;AAED,gBAAM,CAAC,QAAQ,IAAI,MAAM,OAAO,eAAe;AAE/C,qBAAW,WAAW,UAAU;AAC/B,gBAAI,QAAQ,WAAW;AACtB,sBAAQ,KAAK;AAAA,gBACZ,MAAM,QAAQ,eAAe,QAAQ;AAAA,gBACrC,OAAO,QAAQ;AAAA,cAChB,CAAC;AAAA,YACF;AAAA,UACD;AAEA,iBAAO,EAAE,QAAQ;AAAA,QAClB;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,UAAM,iBAAa,mCAAiB,YAAY,UAAoB;AACpE,UAAM,QAAS,YAAY,MAAiB,KAAK;AACjD,UAAM,SAAS,YAAY;AAE3B,UAAM,YAAY,KAAK,iBAAiB,aAAa,SAAS;AAE9D,UAAM,YAAY,KAAK,iBAAiB,aAAa,WAAW,IAAI;AAAA,MACnE,cAAc;AAAA,IACf,CAAC;AAED,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW;AAAA,MAC3D,iBAAiB;AAAA,MACjB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,IACP,CAAC;AAGD;AAAA,MACC;AAAA,MACA;AAAA,QACC,iBAAiB,EAAE,MAAM,UAAU,UAAU,MAAM;AAAA,QACnD,aAAa,EAAE,MAAM,UAAU,UAAU,MAAM;AAAA,QAC/C,MAAM,EAAE,MAAM,UAAU,UAAU,MAAM;AAAA,QACxC,MAAM,EAAE,MAAM,UAAU,UAAU,MAAM;AAAA,QACxC,gBAAgB,EAAE,MAAM,UAAU,UAAU,MAAM;AAAA,MACnD;AAAA,MACA,KAAK,QAAQ;AAAA,IACd;AAEA,UAAM,iBAAiB,KAAK;AAAA,M
AC3B;AAAA,MACA;AAAA,MACA;AAAA,IACD;AAEA,QAAI;AACH,YAAM,cAAiC;AAAA,QACtC,aAAa;AAAA,UACZ;AAAA,UACA,aAAa;AAAA,YACZ,cAAc;AAAA,YACd,aAAa;AAAA,UACd;AAAA,QACD;AAAA,QACA,UAAU;AAAA,QACV,OAAO;AAAA,QACP,MAAM,QAAQ;AAAA,QACd,MAAM,QAAQ;AAAA,QACd,aAAa,QAAQ;AAAA,QACrB,iBAAiB,QAAQ;AAAA,QACzB;AAAA,QACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA;AAAA,QAEnC,qBAAiB,kEAA+B,MAAM,CAAC,UAAe;AAErE,gBAAM,kBAAc,2CAAoB,OAAO,OAAO,UAAU,MAAM,GAAG;AAAA,YACxE;AAAA,UACD,CAAC;AAED,cAAI,aAAa;AAChB,kBAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,OAAqB,WAAW;AAAA,UAC9E;AAEA,gBAAM;AAAA,QACP,CAAC;AAAA,MACF;AAGA,UAAI,QAAQ,mBAAmB,QAAW;AACzC,oBAAY,iBAAiB,QAAQ;AAAA,MACtC;AAEA,YAAM,QAAQ,IAAI,oCAAa,WAAW;AAE1C,aAAO;AAAA,QACN,UAAU;AAAA,MACX;AAAA,IACD,SAAS,GAAG;AAGX,UAAI,GAAG,SAAS,WAAW,+BAA+B,GAAG;AAC5D,cAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,UAC7D,SAAS;AAAA,UACT,aAAa;AAAA,QACd,CAAC;AAAA,MACF;AAGA,YAAM,IAAI,uCAAmB,KAAK,QAAQ,GAAG,GAAiB;AAAA,QAC7D,SAAS;AAAA,QACT,aAAa,EAAE;AAAA,MAChB,CAAC;AAAA,IACF;AAAA,EACD;AACD;","names":[]}
@@ -25,7 +25,7 @@ function makeErrorFromStatus(statusCode, context) {
25
25
  const errorMessages = {
26
26
  403: {
27
27
  message: "Unauthorized for this project",
28
- description: "Check your Google Cloud project ID, and that your credential has access to that project"
28
+ description: "Check your Google Cloud project ID, that your credential has access to that project and that billing is enabled"
29
29
  },
30
30
  404: {
31
31
  message: context?.modelName ? `No model found called '${context.modelName}'` : "No model found"
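For context, the only change to `error-handling.js` is the expanded 403 hint about billing. A short usage sketch, assuming the import path relative to the `LmChatGoogleVertex` folder as used in the node file; the inline result comments are illustrative:

```typescript
import { makeErrorFromStatus } from './error-handling';

const forbidden = makeErrorFromStatus(403);
// { message: 'Unauthorized for this project',
//   description: 'Check your Google Cloud project ID, that your credential has access to that project and that billing is enabled' }

const missingModel = makeErrorFromStatus(404, { modelName: 'gemini-2.5-flash' });
// { message: "No model found called 'gemini-2.5-flash'" }

const unmapped = makeErrorFromStatus(500);
// undefined at runtime — the node's onFailedAttempt handler then rethrows the original error
```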
@@ -1 +1 @@
1
- {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleVertex/error-handling.ts"],"sourcesContent":["export interface ErrorLike {\n\tmessage?: string;\n\tdescription?: string;\n}\n\nexport interface ErrorContext {\n\tmodelName?: string;\n}\n\nexport function makeErrorFromStatus(statusCode: number, context?: ErrorContext): ErrorLike {\n\tconst errorMessages: Record<number, ErrorLike> = {\n\t\t403: {\n\t\t\tmessage: 'Unauthorized for this project',\n\t\t\tdescription:\n\t\t\t\t'Check your Google Cloud project ID, and that your credential has access to that project',\n\t\t},\n\t\t404: {\n\t\t\tmessage: context?.modelName\n\t\t\t\t? `No model found called '${context.modelName}'`\n\t\t\t\t: 'No model found',\n\t\t},\n\t};\n\n\treturn errorMessages[statusCode];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AASO,SAAS,oBAAoB,YAAoB,SAAmC;AAC1F,QAAM,gBAA2C;AAAA,IAChD,KAAK;AAAA,MACJ,SAAS;AAAA,MACT,aACC;AAAA,IACF;AAAA,IACA,KAAK;AAAA,MACJ,SAAS,SAAS,YACf,0BAA0B,QAAQ,SAAS,MAC3C;AAAA,IACJ;AAAA,EACD;AAEA,SAAO,cAAc,UAAU;AAChC;","names":[]}
1
+ {"version":3,"sources":["../../../../nodes/llms/LmChatGoogleVertex/error-handling.ts"],"sourcesContent":["export interface ErrorLike {\n\tmessage?: string;\n\tdescription?: string;\n}\n\nexport interface ErrorContext {\n\tmodelName?: string;\n}\n\nexport function makeErrorFromStatus(statusCode: number, context?: ErrorContext): ErrorLike {\n\tconst errorMessages: Record<number, ErrorLike> = {\n\t\t403: {\n\t\t\tmessage: 'Unauthorized for this project',\n\t\t\tdescription:\n\t\t\t\t'Check your Google Cloud project ID, that your credential has access to that project and that billing is enabled',\n\t\t},\n\t\t404: {\n\t\t\tmessage: context?.modelName\n\t\t\t\t? `No model found called '${context.modelName}'`\n\t\t\t\t: 'No model found',\n\t\t},\n\t};\n\n\treturn errorMessages[statusCode];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AASO,SAAS,oBAAoB,YAAoB,SAAmC;AAC1F,QAAM,gBAA2C;AAAA,IAChD,KAAK;AAAA,MACJ,SAAS;AAAA,MACT,aACC;AAAA,IACF;AAAA,IACA,KAAK;AAAA,MACJ,SAAS,SAAS,YACf,0BAA0B,QAAQ,SAAS,MAC3C;AAAA,IACJ;AAAA,EACD;AAEA,SAAO,cAAc,UAAU;AAChC;","names":[]}
@@ -18,91 +18,109 @@ var __copyProps = (to, from, except, desc) => {
18
18
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
19
  var additional_options_exports = {};
20
20
  __export(additional_options_exports, {
21
- additionalOptions: () => additionalOptions
21
+ getAdditionalOptions: () => getAdditionalOptions
22
22
  });
23
23
  module.exports = __toCommonJS(additional_options_exports);
24
24
  var import_safety_options = require("./safety-options");
25
- const additionalOptions = {
26
- displayName: "Options",
27
- name: "options",
28
- placeholder: "Add Option",
29
- description: "Additional options to add",
30
- type: "collection",
31
- default: {},
32
- options: [
33
- {
34
- displayName: "Maximum Number of Tokens",
35
- name: "maxOutputTokens",
36
- default: 2048,
37
- description: "The maximum number of tokens to generate in the completion",
38
- type: "number"
39
- },
40
- {
41
- displayName: "Sampling Temperature",
42
- name: "temperature",
43
- default: 0.4,
44
- typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
45
- description: "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
46
- type: "number"
47
- },
48
- {
49
- displayName: "Top K",
50
- name: "topK",
51
- default: 32,
52
- typeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },
53
- description: 'Used to remove "long tail" low probability responses. Defaults to -1, which disables it.',
54
- type: "number"
55
- },
56
- {
57
- displayName: "Top P",
58
- name: "topP",
59
- default: 1,
60
- typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
61
- description: "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.",
62
- type: "number"
63
- },
64
- // Safety Settings
65
- {
66
- displayName: "Safety Settings",
67
- name: "safetySettings",
68
- type: "fixedCollection",
69
- typeOptions: { multipleValues: true },
70
- default: {
71
- values: {
72
- category: import_safety_options.harmCategories[0].name,
73
- threshold: import_safety_options.harmThresholds[0].name
74
- }
25
+ function getAdditionalOptions({
26
+ supportsThinkingBudget
27
+ }) {
28
+ const baseOptions = {
29
+ displayName: "Options",
30
+ name: "options",
31
+ placeholder: "Add Option",
32
+ description: "Additional options to add",
33
+ type: "collection",
34
+ default: {},
35
+ options: [
36
+ {
37
+ displayName: "Maximum Number of Tokens",
38
+ name: "maxOutputTokens",
39
+ default: 2048,
40
+ description: "The maximum number of tokens to generate in the completion",
41
+ type: "number"
75
42
  },
76
- placeholder: "Add Option",
77
- options: [
78
- {
79
- name: "values",
80
- displayName: "Values",
81
- values: [
82
- {
83
- displayName: "Safety Category",
84
- name: "category",
85
- type: "options",
86
- description: "The category of harmful content to block",
87
- default: "HARM_CATEGORY_UNSPECIFIED",
88
- options: import_safety_options.harmCategories
89
- },
90
- {
91
- displayName: "Safety Threshold",
92
- name: "threshold",
93
- type: "options",
94
- description: "The threshold of harmful content to block",
95
- default: "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
96
- options: import_safety_options.harmThresholds
97
- }
98
- ]
99
- }
100
- ]
101
- }
102
- ]
103
- };
43
+ {
44
+ displayName: "Sampling Temperature",
45
+ name: "temperature",
46
+ default: 0.4,
47
+ typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
48
+ description: "Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.",
49
+ type: "number"
50
+ },
51
+ {
52
+ displayName: "Top K",
53
+ name: "topK",
54
+ default: 32,
55
+ typeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },
56
+ description: 'Used to remove "long tail" low probability responses. Defaults to -1, which disables it.',
57
+ type: "number"
58
+ },
59
+ {
60
+ displayName: "Top P",
61
+ name: "topP",
62
+ default: 1,
63
+ typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
64
+ description: "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.",
65
+ type: "number"
66
+ },
67
+ // Safety Settings
68
+ {
69
+ displayName: "Safety Settings",
70
+ name: "safetySettings",
71
+ type: "fixedCollection",
72
+ typeOptions: { multipleValues: true },
73
+ default: {
74
+ values: {
75
+ category: import_safety_options.harmCategories[0].name,
76
+ threshold: import_safety_options.harmThresholds[0].name
77
+ }
78
+ },
79
+ placeholder: "Add Option",
80
+ options: [
81
+ {
82
+ name: "values",
83
+ displayName: "Values",
84
+ values: [
85
+ {
86
+ displayName: "Safety Category",
87
+ name: "category",
88
+ type: "options",
89
+ description: "The category of harmful content to block",
90
+ default: "HARM_CATEGORY_UNSPECIFIED",
91
+ options: import_safety_options.harmCategories
92
+ },
93
+ {
94
+ displayName: "Safety Threshold",
95
+ name: "threshold",
96
+ type: "options",
97
+ description: "The threshold of harmful content to block",
98
+ default: "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
99
+ options: import_safety_options.harmThresholds
100
+ }
101
+ ]
102
+ }
103
+ ]
104
+ }
105
+ ]
106
+ };
107
+ if (supportsThinkingBudget) {
108
+ baseOptions.options?.push({
109
+ displayName: "Thinking Budget",
110
+ name: "thinkingBudget",
111
+ default: void 0,
112
+ description: "Controls reasoning tokens for thinking models. Set to 0 to disable automatic thinking. Set to -1 for dynamic thinking. Leave empty for auto mode.",
113
+ type: "number",
114
+ typeOptions: {
115
+ minValue: -1,
116
+ numberPrecision: 0
117
+ }
118
+ });
119
+ }
120
+ return baseOptions;
121
+ }
104
122
  // Annotate the CommonJS export names for ESM import in node:
105
123
  0 && (module.exports = {
106
- additionalOptions
124
+ getAdditionalOptions
107
125
  });
108
126
  //# sourceMappingURL=additional-options.js.map
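The `additionalOptions` constant is now a factory, so each Gemini/Vertex chat node can opt in to the Thinking Budget field while other callers keep the previous options collection. A hedged usage sketch follows; the exported `getAdditionalOptions` function is real, but the import path and the `some(...)` checks are illustrative:

```typescript
import { getAdditionalOptions } from './additional-options';

// Nodes that run on the newer Google GenAI SDK opt in to the extra field:
const withThinking = getAdditionalOptions({ supportsThinkingBudget: true });
const hasThinkingField = (withThinking.options ?? []).some((opt) => opt.name === 'thinkingBudget');
// hasThinkingField === true; the collection still contains maxOutputTokens, temperature, topK, topP, safetySettings

// Callers that pass false get the same options collection as before:
const legacy = getAdditionalOptions({ supportsThinkingBudget: false });
const legacyHasThinking = (legacy.options ?? []).some((opt) => opt.name === 'thinkingBudget');
// legacyHasThinking === false
```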
@@ -1 +1 @@
1
- {"version":3,"sources":["../../../../nodes/llms/gemini-common/additional-options.ts"],"sourcesContent":["import type { HarmBlockThreshold, HarmCategory } from '@google/generative-ai';\nimport type { INodeProperties } from 'n8n-workflow';\n\nimport { harmCategories, harmThresholds } from './safety-options';\n\nexport const additionalOptions: INodeProperties = {\n\tdisplayName: 'Options',\n\tname: 'options',\n\tplaceholder: 'Add Option',\n\tdescription: 'Additional options to add',\n\ttype: 'collection',\n\tdefault: {},\n\toptions: [\n\t\t{\n\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\tname: 'maxOutputTokens',\n\t\t\tdefault: 2048,\n\t\t\tdescription: 'The maximum number of tokens to generate in the completion',\n\t\t\ttype: 'number',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\tname: 'temperature',\n\t\t\tdefault: 0.4,\n\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\tdescription:\n\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\ttype: 'number',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Top K',\n\t\t\tname: 'topK',\n\t\t\tdefault: 32,\n\t\t\ttypeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },\n\t\t\tdescription:\n\t\t\t\t'Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.',\n\t\t\ttype: 'number',\n\t\t},\n\t\t{\n\t\t\tdisplayName: 'Top P',\n\t\t\tname: 'topP',\n\t\t\tdefault: 1,\n\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\tdescription:\n\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',\n\t\t\ttype: 'number',\n\t\t},\n\n\t\t// Safety Settings\n\t\t{\n\t\t\tdisplayName: 'Safety Settings',\n\t\t\tname: 'safetySettings',\n\t\t\ttype: 'fixedCollection',\n\t\t\ttypeOptions: { multipleValues: true },\n\t\t\tdefault: {\n\t\t\t\tvalues: {\n\t\t\t\t\tcategory: harmCategories[0].name as HarmCategory,\n\t\t\t\t\tthreshold: harmThresholds[0].name as HarmBlockThreshold,\n\t\t\t\t},\n\t\t\t},\n\t\t\tplaceholder: 'Add Option',\n\t\t\toptions: [\n\t\t\t\t{\n\t\t\t\t\tname: 'values',\n\t\t\t\t\tdisplayName: 'Values',\n\t\t\t\t\tvalues: [\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tdisplayName: 'Safety Category',\n\t\t\t\t\t\t\tname: 'category',\n\t\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\t\tdescription: 'The category of harmful content to block',\n\t\t\t\t\t\t\tdefault: 'HARM_CATEGORY_UNSPECIFIED',\n\t\t\t\t\t\t\toptions: harmCategories,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tdisplayName: 'Safety Threshold',\n\t\t\t\t\t\t\tname: 'threshold',\n\t\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\t\tdescription: 'The threshold of harmful content to block',\n\t\t\t\t\t\t\tdefault: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',\n\t\t\t\t\t\t\toptions: 
harmThresholds,\n\t\t\t\t\t\t},\n\t\t\t\t\t],\n\t\t\t\t},\n\t\t\t],\n\t\t},\n\t],\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAGA,4BAA+C;AAExC,MAAM,oBAAqC;AAAA,EACjD,aAAa;AAAA,EACb,MAAM;AAAA,EACN,aAAa;AAAA,EACb,aAAa;AAAA,EACb,MAAM;AAAA,EACN,SAAS,CAAC;AAAA,EACV,SAAS;AAAA,IACR;AAAA,MACC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,SAAS;AAAA,MACT,aAAa;AAAA,MACb,MAAM;AAAA,IACP;AAAA,IACA;AAAA,MACC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,SAAS;AAAA,MACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,MAC5D,aACC;AAAA,MACD,MAAM;AAAA,IACP;AAAA,IACA;AAAA,MACC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,SAAS;AAAA,MACT,aAAa,EAAE,UAAU,IAAI,UAAU,IAAI,iBAAiB,EAAE;AAAA,MAC9D,aACC;AAAA,MACD,MAAM;AAAA,IACP;AAAA,IACA;AAAA,MACC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,SAAS;AAAA,MACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,MAC5D,aACC;AAAA,MACD,MAAM;AAAA,IACP;AAAA;AAAA,IAGA;AAAA,MACC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,aAAa,EAAE,gBAAgB,KAAK;AAAA,MACpC,SAAS;AAAA,QACR,QAAQ;AAAA,UACP,UAAU,qCAAe,CAAC,EAAE;AAAA,UAC5B,WAAW,qCAAe,CAAC,EAAE;AAAA,QAC9B;AAAA,MACD;AAAA,MACA,aAAa;AAAA,MACb,SAAS;AAAA,QACR;AAAA,UACC,MAAM;AAAA,UACN,aAAa;AAAA,UACb,QAAQ;AAAA,YACP;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,cACb,SAAS;AAAA,cACT,SAAS;AAAA,YACV;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,cACb,SAAS;AAAA,cACT,SAAS;AAAA,YACV;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AACD;","names":[]}
1
+ {"version":3,"sources":["../../../../nodes/llms/gemini-common/additional-options.ts"],"sourcesContent":["import type { HarmBlockThreshold, HarmCategory } from '@google/genai';\nimport type { INodeProperties } from 'n8n-workflow';\n\nimport { harmCategories, harmThresholds } from './safety-options';\n\nexport function getAdditionalOptions({\n\tsupportsThinkingBudget,\n}: { supportsThinkingBudget: boolean }) {\n\tconst baseOptions: INodeProperties = {\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\tplaceholder: 'Add Option',\n\t\tdescription: 'Additional options to add',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\tname: 'maxOutputTokens',\n\t\t\t\tdefault: 2048,\n\t\t\t\tdescription: 'The maximum number of tokens to generate in the completion',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\tname: 'temperature',\n\t\t\t\tdefault: 0.4,\n\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Top K',\n\t\t\t\tname: 'topK',\n\t\t\t\tdefault: 32,\n\t\t\t\ttypeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Top P',\n\t\t\t\tname: 'topP',\n\t\t\t\tdefault: 1,\n\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\tdescription:\n\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\ttype: 'number',\n\t\t\t},\n\t\t\t// Safety Settings\n\t\t\t{\n\t\t\t\tdisplayName: 'Safety Settings',\n\t\t\t\tname: 'safetySettings',\n\t\t\t\ttype: 'fixedCollection',\n\t\t\t\ttypeOptions: { multipleValues: true },\n\t\t\t\tdefault: {\n\t\t\t\t\tvalues: {\n\t\t\t\t\t\tcategory: harmCategories[0].name as HarmCategory,\n\t\t\t\t\t\tthreshold: harmThresholds[0].name as HarmBlockThreshold,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'values',\n\t\t\t\t\t\tdisplayName: 'Values',\n\t\t\t\t\t\tvalues: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tdisplayName: 'Safety Category',\n\t\t\t\t\t\t\t\tname: 'category',\n\t\t\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\t\t\tdescription: 'The category of harmful content to block',\n\t\t\t\t\t\t\t\tdefault: 'HARM_CATEGORY_UNSPECIFIED',\n\t\t\t\t\t\t\t\toptions: harmCategories,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tdisplayName: 'Safety Threshold',\n\t\t\t\t\t\t\t\tname: 'threshold',\n\t\t\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\t\t\tdescription: 'The threshold of harmful content to block',\n\t\t\t\t\t\t\t\tdefault: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',\n\t\t\t\t\t\t\t\toptions: harmThresholds,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\t// only supported in the new google genai SDK\n\tif (supportsThinkingBudget) {\n\t\tbaseOptions.options?.push({\n\t\t\tdisplayName: 'Thinking Budget',\n\t\t\tname: 'thinkingBudget',\n\t\t\tdefault: undefined,\n\t\t\tdescription:\n\t\t\t\t'Controls reasoning tokens for thinking models. Set to 0 to disable automatic thinking. Set to -1 for dynamic thinking. Leave empty for auto mode.',\n\t\t\ttype: 'number',\n\t\t\ttypeOptions: {\n\t\t\t\tminValue: -1,\n\t\t\t\tnumberPrecision: 0,\n\t\t\t},\n\t\t});\n\t}\n\treturn baseOptions;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAGA,4BAA+C;AAExC,SAAS,qBAAqB;AAAA,EACpC;AACD,GAAwC;AACvC,QAAM,cAA+B;AAAA,IACpC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aAAa;AAAA,IACb,aAAa;AAAA,IACb,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,QAC5D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,IAAI,UAAU,IAAI,iBAAiB,EAAE;AAAA,QAC9D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,QAC5D,aACC;AAAA,QACD,MAAM;AAAA,MACP;AAAA;AAAA,MAEA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,aAAa,EAAE,gBAAgB,KAAK;AAAA,QACpC,SAAS;AAAA,UACR,QAAQ;AAAA,YACP,UAAU,qCAAe,CAAC,EAAE;AAAA,YAC5B,WAAW,qCAAe,CAAC,EAAE;AAAA,UAC9B;AAAA,QACD;AAAA,QACA,aAAa;AAAA,QACb,SAAS;AAAA,UACR;AAAA,YACC,MAAM;AAAA,YACN,aAAa;AAAA,YACb,QAAQ;AAAA,cACP;AAAA,gBACC,aAAa;AAAA,gBACb,MAAM;AAAA,gBACN,MAAM;AAAA,gBACN,aAAa;AAAA,gBACb,SAAS;AAAA,gBACT,SAAS;AAAA,cACV;AAAA,cACA;AAAA,gBACC,aAAa;AAAA,gBACb,MAAM;AAAA,gBACN,MAAM;AAAA,gBACN,aAAa;AAAA,gBACb,SAAS;AAAA,gBACT,SAAS;AAAA,cACV;AAAA,YACD;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA,EACD;AAEA,MAAI,wBAAwB;AAC3B,gBAAY,SAAS,KAAK;AAAA,MACzB,aAAa;AAAA,MACb,MAAM;AAAA,MACN,SAAS;AAAA,MACT,aACC;AAAA,MACD,MAAM;AAAA,MACN,aAAa;AAAA,QACZ,UAAU;AAAA,QACV,iBAAiB;AAAA,MAClB;AAAA,IACD,CAAC;AAAA,EACF;AACA,SAAO;AACR;","names":[]}
@@ -1 +1 @@
1
- {"version":3,"sources":["../../../../../../nodes/vendors/GoogleGemini/actions/audio/transcribe.operation.ts"],"sourcesContent":["import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\n\nimport type { Content, GenerateContentResponse } from '../../helpers/interfaces';\nimport { downloadFile, uploadFile } from '../../helpers/utils';\nimport { apiRequest } from '../../transport';\nimport { modelRLC } from '../descriptions';\n\nconst properties: INodeProperties[] = [\n\tmodelRLC('audioModelSearch'),\n\t{\n\t\tdisplayName: 'Input Type',\n\t\tname: 'inputType',\n\t\ttype: 'options',\n\t\tdefault: 'url',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tname: 'Audio URL(s)',\n\t\t\t\tvalue: 'url',\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: 'Binary File(s)',\n\t\t\t\tvalue: 'binary',\n\t\t\t},\n\t\t],\n\t},\n\t{\n\t\tdisplayName: 'URL(s)',\n\t\tname: 'audioUrls',\n\t\ttype: 'string',\n\t\tplaceholder: 'e.g. https://example.com/audio.mp3',\n\t\tdescription:\n\t\t\t'URL(s) of the audio(s) to transcribe, multiple URLs can be added separated by comma',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tinputType: ['url'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Input Data Field Name(s)',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\tplaceholder: 'e.g. data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tdescription:\n\t\t\t'Name of the binary field(s) which contains the audio(s), seperate multiple field names with commas',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tinputType: ['binary'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Simplify Output',\n\t\tname: 'simplify',\n\t\ttype: 'boolean',\n\t\tdefault: true,\n\t\tdescription: 'Whether to simplify the response or not',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Start Time',\n\t\t\t\tname: 'startTime',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tdescription: 'The start time of the audio in MM:SS or HH:MM:SS format',\n\t\t\t\tplaceholder: 'e.g. 00:15',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'End Time',\n\t\t\t\tname: 'endTime',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tdescription: 'The end time of the audio in MM:SS or HH:MM:SS format',\n\t\t\t\tplaceholder: 'e.g. 
02:15',\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['transcribe'],\n\t\tresource: ['audio'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;\n\tconst inputType = this.getNodeParameter('inputType', i, 'url') as string;\n\tconst simplify = this.getNodeParameter('simplify', i, true) as boolean;\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tlet contents: Content[];\n\tif (inputType === 'url') {\n\t\tconst urls = this.getNodeParameter('audioUrls', i, '') as string;\n\t\tconst filesDataPromises = urls\n\t\t\t.split(',')\n\t\t\t.map((url) => url.trim())\n\t\t\t.filter((url) => url)\n\t\t\t.map(async (url) => {\n\t\t\t\tif (url.startsWith('https://generativelanguage.googleapis.com')) {\n\t\t\t\t\tconst { mimeType } = (await apiRequest.call(this, 'GET', '', {\n\t\t\t\t\t\toption: { url },\n\t\t\t\t\t})) as { mimeType: string };\n\t\t\t\t\treturn { fileUri: url, mimeType };\n\t\t\t\t} else {\n\t\t\t\t\tconst { fileContent, mimeType } = await downloadFile.call(this, url, 'audio/mpeg');\n\t\t\t\t\treturn await uploadFile.call(this, fileContent, mimeType);\n\t\t\t\t}\n\t\t\t});\n\n\t\tconst filesData = await Promise.all(filesDataPromises);\n\t\tcontents = [\n\t\t\t{\n\t\t\t\trole: 'user',\n\t\t\t\tparts: filesData.map((fileData) => ({\n\t\t\t\t\tfileData,\n\t\t\t\t})),\n\t\t\t},\n\t\t];\n\t} else {\n\t\tconst binaryPropertyNames = this.getNodeParameter('binaryPropertyName', i, 'data');\n\t\tconst promises = binaryPropertyNames\n\t\t\t.split(',')\n\t\t\t.map((binaryPropertyName) => binaryPropertyName.trim())\n\t\t\t.filter((binaryPropertyName) => binaryPropertyName)\n\t\t\t.map(async (binaryPropertyName) => {\n\t\t\t\tconst binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);\n\t\t\t\tconst buffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);\n\t\t\t\treturn await uploadFile.call(this, buffer, binaryData.mimeType);\n\t\t\t});\n\n\t\tconst filesData = await Promise.all(promises);\n\t\tcontents = [\n\t\t\t{\n\t\t\t\trole: 'user',\n\t\t\t\tparts: filesData.map((fileData) => ({\n\t\t\t\t\tfileData,\n\t\t\t\t})),\n\t\t\t},\n\t\t];\n\t}\n\n\tconst text = `Generate a transcript of the speech${\n\t\toptions.startTime ? ` from ${options.startTime as string}` : ''\n\t}${options.endTime ? 
` to ${options.endTime as string}` : ''}`;\n\tcontents[0].parts.push({ text });\n\n\tconst body = {\n\t\tcontents,\n\t};\n\n\tconst response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {\n\t\tbody,\n\t})) as GenerateContentResponse;\n\n\tif (simplify) {\n\t\treturn response.candidates.map((candidate) => ({\n\t\t\tjson: candidate,\n\t\t\tpairedItem: { item: i },\n\t\t}));\n\t}\n\n\treturn [\n\t\t{\n\t\t\tjson: { ...response },\n\t\t\tpairedItem: { item: i },\n\t\t},\n\t];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAqC;AAGrC,mBAAyC;AACzC,uBAA2B;AAC3B,0BAAyB;AAEzB,MAAM,aAAgC;AAAA,MACrC,8BAAS,kBAAkB;AAAA,EAC3B;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,SAAS;AAAA,MACR;AAAA,QACC,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA;AAAA,QACC,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,IACb,aACC;AAAA,IACD,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,WAAW,CAAC,KAAK;AAAA,MAClB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aACC;AAAA,IACD,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,WAAW,CAAC,QAAQ;AAAA,MACrB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,EACd;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,YAAY;AAAA,IACxB,UAAU,CAAC,OAAO;AAAA,EACnB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,QAAQ,KAAK,iBAAiB,WAAW,GAAG,IAAI,EAAE,cAAc,KAAK,CAAC;AAC5E,QAAM,YAAY,KAAK,iBAAiB,aAAa,GAAG,KAAK;AAC7D,QAAM,WAAW,KAAK,iBAAiB,YAAY,GAAG,IAAI;AAC1D,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AAEtD,MAAI;AACJ,MAAI,cAAc,OAAO;AACxB,UAAM,OAAO,KAAK,iBAAiB,aAAa,GAAG,EAAE;AACrD,UAAM,oBAAoB,KACxB,MAAM,GAAG,EACT,IAAI,CAAC,QAAQ,IAAI,KAAK,CAAC,EACvB,OAAO,CAAC,QAAQ,GAAG,EACnB,IAAI,OAAO,QAAQ;AACnB,UAAI,IAAI,WAAW,2CAA2C,GAAG;AAChE,cAAM,EAAE,SAAS,IAAK,MAAM,4BAAW,KAAK,MAAM,OAAO,IAAI;AAAA,UAC5D,QAAQ,EAAE,IAAI;AAAA,QACf,CAAC;AACD,eAAO,EAAE,SAAS,KAAK,SAAS;AAAA,MACjC,OAAO;AACN,cAAM,EAAE,aAAa,SAAS,IAAI,MAAM,0BAAa,KAAK,MAAM,KAAK,YAAY;AACjF,eAAO,MAAM,wBAAW,KAAK,MAAM,aAAa,QAAQ;AAAA,MACzD;AAAA,IACD,CAAC;AAEF,UAAM,YAAY,MAAM,QAAQ,IAAI,iBAAiB;AACrD,eAAW;AAAA,MACV;AAAA,QACC,MAAM;AAAA,QACN,OAAO,UAAU,IAAI,CAAC,cAAc;AAAA,UACnC;AAAA,QACD,EAAE;AAAA,MACH;AAAA,IACD;AAAA,EACD,OAAO;AACN,UAAM,sBAAsB,KAAK,iBAAiB,sBAAsB,GAAG,MAAM;AACjF,UAAM,WAAW,oBACf,MAAM,GAAG,EACT,IAAI,CAAC,uBAAuB,mBAAmB,KAAK,CAAC,EACrD,OAAO,CAAC,uBAAuB,kBAAkB,EACjD,IAAI,OAAO,uBAAuB;AAClC,YAAM,aAAa,KAAK,QAAQ,iBAAiB,GAAG,kBAAkB;AACtE,YAAM,SAAS,MAAM,KAAK,QAAQ,oBAAoB,GAAG,kBAAkB;AAC3E,aAAO,MAAM,wBAAW,KAAK,MAAM,QAAQ,WAAW,QAAQ;AAAA,IAC/D,CAAC;AAEF,UAAM,YAAY,MAAM,QAAQ,IAAI,QAAQ;AAC5C,eAAW;AAAA,MACV;AAAA,QACC,MAAM;AAAA,QACN,OAAO,UAAU,IAAI,CAAC,cAAc;AAAA,UACnC;AAAA,QACD,EAAE;AAAA,MACH;AAAA,IACD;AAAA,EACD;AAEA,QAAM,OAAO,sCACZ,QAAQ,YAAY,SAAS,QAAQ,SAAmB,KAAK,EAC9D,GAAG,QAAQ,UAAU,OAAO,QAAQ,OAAiB,KAAK,EAAE;AAC5D,WAAS,CAAC,EAAE,MAAM,KAAK,EAAE,KAAK,CAAC;AAE/B,QAAM,OAAO;AAAA,IACZ;AAAA,EACD;AAEA,QAAM,WAAY,MAAM,4BAAW,KAAK,MAAM,QAAQ,WAAW,KAAK,oBAAoB;AAAA,IACzF;AAAA,EACD,CAAC;AAED,MAAI,UAAU;AACb,WAAO,SAAS,WAAW,IAAI,CAAC,eAAe;AAAA,MAC9C,MAAM;AAAA,MACN,YAAY,EAAE,
MAAM,EAAE;AAAA,IACvB,EAAE;AAAA,EACH;AAEA,SAAO;AAAA,IACN;AAAA,MACC,MAAM,EAAE,GAAG,SAAS;AAAA,MACpB,YAAY,EAAE,MAAM,EAAE;AAAA,IACvB;AAAA,EACD;AACD;","names":[]}
1
+ {"version":3,"sources":["../../../../../../nodes/vendors/GoogleGemini/actions/audio/transcribe.operation.ts"],"sourcesContent":["import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';\nimport { updateDisplayOptions } from 'n8n-workflow';\n\nimport type {\n\tContent,\n\tGenerateContentRequest,\n\tGenerateContentResponse,\n} from '../../helpers/interfaces';\nimport { downloadFile, uploadFile } from '../../helpers/utils';\nimport { apiRequest } from '../../transport';\nimport { modelRLC } from '../descriptions';\n\nconst properties: INodeProperties[] = [\n\tmodelRLC('audioModelSearch'),\n\t{\n\t\tdisplayName: 'Input Type',\n\t\tname: 'inputType',\n\t\ttype: 'options',\n\t\tdefault: 'url',\n\t\toptions: [\n\t\t\t{\n\t\t\t\tname: 'Audio URL(s)',\n\t\t\t\tvalue: 'url',\n\t\t\t},\n\t\t\t{\n\t\t\t\tname: 'Binary File(s)',\n\t\t\t\tvalue: 'binary',\n\t\t\t},\n\t\t],\n\t},\n\t{\n\t\tdisplayName: 'URL(s)',\n\t\tname: 'audioUrls',\n\t\ttype: 'string',\n\t\tplaceholder: 'e.g. https://example.com/audio.mp3',\n\t\tdescription:\n\t\t\t'URL(s) of the audio(s) to transcribe, multiple URLs can be added separated by comma',\n\t\tdefault: '',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tinputType: ['url'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Input Data Field Name(s)',\n\t\tname: 'binaryPropertyName',\n\t\ttype: 'string',\n\t\tdefault: 'data',\n\t\tplaceholder: 'e.g. data',\n\t\thint: 'The name of the input field containing the binary file data to be processed',\n\t\tdescription:\n\t\t\t'Name of the binary field(s) which contains the audio(s), seperate multiple field names with commas',\n\t\tdisplayOptions: {\n\t\t\tshow: {\n\t\t\t\tinputType: ['binary'],\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tdisplayName: 'Simplify Output',\n\t\tname: 'simplify',\n\t\ttype: 'boolean',\n\t\tdefault: true,\n\t\tdescription: 'Whether to simplify the response or not',\n\t},\n\t{\n\t\tdisplayName: 'Options',\n\t\tname: 'options',\n\t\ttype: 'collection',\n\t\tdefault: {},\n\t\toptions: [\n\t\t\t{\n\t\t\t\tdisplayName: 'Start Time',\n\t\t\t\tname: 'startTime',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tdescription: 'The start time of the audio in MM:SS or HH:MM:SS format',\n\t\t\t\tplaceholder: 'e.g. 00:15',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'End Time',\n\t\t\t\tname: 'endTime',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tdescription: 'The end time of the audio in MM:SS or HH:MM:SS format',\n\t\t\t\tplaceholder: 'e.g. 
02:15',\n\t\t\t},\n\t\t],\n\t},\n];\n\nconst displayOptions = {\n\tshow: {\n\t\toperation: ['transcribe'],\n\t\tresource: ['audio'],\n\t},\n};\n\nexport const description = updateDisplayOptions(displayOptions, properties);\n\nexport async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {\n\tconst model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;\n\tconst inputType = this.getNodeParameter('inputType', i, 'url') as string;\n\tconst simplify = this.getNodeParameter('simplify', i, true) as boolean;\n\tconst options = this.getNodeParameter('options', i, {});\n\n\tlet contents: Content[];\n\tif (inputType === 'url') {\n\t\tconst urls = this.getNodeParameter('audioUrls', i, '') as string;\n\t\tconst filesDataPromises = urls\n\t\t\t.split(',')\n\t\t\t.map((url) => url.trim())\n\t\t\t.filter((url) => url)\n\t\t\t.map(async (url) => {\n\t\t\t\tif (url.startsWith('https://generativelanguage.googleapis.com')) {\n\t\t\t\t\tconst { mimeType } = (await apiRequest.call(this, 'GET', '', {\n\t\t\t\t\t\toption: { url },\n\t\t\t\t\t})) as { mimeType: string };\n\t\t\t\t\treturn { fileUri: url, mimeType };\n\t\t\t\t} else {\n\t\t\t\t\tconst { fileContent, mimeType } = await downloadFile.call(this, url, 'audio/mpeg');\n\t\t\t\t\treturn await uploadFile.call(this, fileContent, mimeType);\n\t\t\t\t}\n\t\t\t});\n\n\t\tconst filesData = await Promise.all(filesDataPromises);\n\t\tcontents = [\n\t\t\t{\n\t\t\t\trole: 'user',\n\t\t\t\tparts: filesData.map((fileData) => ({\n\t\t\t\t\tfileData,\n\t\t\t\t})),\n\t\t\t},\n\t\t];\n\t} else {\n\t\tconst binaryPropertyNames = this.getNodeParameter('binaryPropertyName', i, 'data');\n\t\tconst promises = binaryPropertyNames\n\t\t\t.split(',')\n\t\t\t.map((binaryPropertyName) => binaryPropertyName.trim())\n\t\t\t.filter((binaryPropertyName) => binaryPropertyName)\n\t\t\t.map(async (binaryPropertyName) => {\n\t\t\t\tconst binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);\n\t\t\t\tconst buffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);\n\t\t\t\treturn await uploadFile.call(this, buffer, binaryData.mimeType);\n\t\t\t});\n\n\t\tconst filesData = await Promise.all(promises);\n\t\tcontents = [\n\t\t\t{\n\t\t\t\trole: 'user',\n\t\t\t\tparts: filesData.map((fileData) => ({\n\t\t\t\t\tfileData,\n\t\t\t\t})),\n\t\t\t},\n\t\t];\n\t}\n\n\tconst text = `Generate a transcript of the speech${\n\t\toptions.startTime ? ` from ${options.startTime as string}` : ''\n\t}${options.endTime ? 
` to ${options.endTime as string}` : ''}`;\n\tcontents[0].parts.push({ text });\n\n\tconst body: GenerateContentRequest = {\n\t\tcontents,\n\t};\n\n\tconst response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {\n\t\tbody,\n\t})) as GenerateContentResponse;\n\n\tif (simplify) {\n\t\treturn response.candidates.map((candidate) => ({\n\t\t\tjson: candidate,\n\t\t\tpairedItem: { item: i },\n\t\t}));\n\t}\n\n\treturn [\n\t\t{\n\t\t\tjson: { ...response },\n\t\t\tpairedItem: { item: i },\n\t\t},\n\t];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,0BAAqC;AAOrC,mBAAyC;AACzC,uBAA2B;AAC3B,0BAAyB;AAEzB,MAAM,aAAgC;AAAA,MACrC,8BAAS,kBAAkB;AAAA,EAC3B;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,SAAS;AAAA,MACR;AAAA,QACC,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,MACA;AAAA,QACC,MAAM;AAAA,QACN,OAAO;AAAA,MACR;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,aAAa;AAAA,IACb,aACC;AAAA,IACD,SAAS;AAAA,IACT,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,WAAW,CAAC,KAAK;AAAA,MAClB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,IACb,MAAM;AAAA,IACN,aACC;AAAA,IACD,gBAAgB;AAAA,MACf,MAAM;AAAA,QACL,WAAW,CAAC,QAAQ;AAAA,MACrB;AAAA,IACD;AAAA,EACD;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS;AAAA,IACT,aAAa;AAAA,EACd;AAAA,EACA;AAAA,IACC,aAAa;AAAA,IACb,MAAM;AAAA,IACN,MAAM;AAAA,IACN,SAAS,CAAC;AAAA,IACV,SAAS;AAAA,MACR;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,MACA;AAAA,QACC,aAAa;AAAA,QACb,MAAM;AAAA,QACN,MAAM;AAAA,QACN,SAAS;AAAA,QACT,aAAa;AAAA,QACb,aAAa;AAAA,MACd;AAAA,IACD;AAAA,EACD;AACD;AAEA,MAAM,iBAAiB;AAAA,EACtB,MAAM;AAAA,IACL,WAAW,CAAC,YAAY;AAAA,IACxB,UAAU,CAAC,OAAO;AAAA,EACnB;AACD;AAEO,MAAM,kBAAc,0CAAqB,gBAAgB,UAAU;AAE1E,eAAsB,QAAiC,GAA0C;AAChG,QAAM,QAAQ,KAAK,iBAAiB,WAAW,GAAG,IAAI,EAAE,cAAc,KAAK,CAAC;AAC5E,QAAM,YAAY,KAAK,iBAAiB,aAAa,GAAG,KAAK;AAC7D,QAAM,WAAW,KAAK,iBAAiB,YAAY,GAAG,IAAI;AAC1D,QAAM,UAAU,KAAK,iBAAiB,WAAW,GAAG,CAAC,CAAC;AAEtD,MAAI;AACJ,MAAI,cAAc,OAAO;AACxB,UAAM,OAAO,KAAK,iBAAiB,aAAa,GAAG,EAAE;AACrD,UAAM,oBAAoB,KACxB,MAAM,GAAG,EACT,IAAI,CAAC,QAAQ,IAAI,KAAK,CAAC,EACvB,OAAO,CAAC,QAAQ,GAAG,EACnB,IAAI,OAAO,QAAQ;AACnB,UAAI,IAAI,WAAW,2CAA2C,GAAG;AAChE,cAAM,EAAE,SAAS,IAAK,MAAM,4BAAW,KAAK,MAAM,OAAO,IAAI;AAAA,UAC5D,QAAQ,EAAE,IAAI;AAAA,QACf,CAAC;AACD,eAAO,EAAE,SAAS,KAAK,SAAS;AAAA,MACjC,OAAO;AACN,cAAM,EAAE,aAAa,SAAS,IAAI,MAAM,0BAAa,KAAK,MAAM,KAAK,YAAY;AACjF,eAAO,MAAM,wBAAW,KAAK,MAAM,aAAa,QAAQ;AAAA,MACzD;AAAA,IACD,CAAC;AAEF,UAAM,YAAY,MAAM,QAAQ,IAAI,iBAAiB;AACrD,eAAW;AAAA,MACV;AAAA,QACC,MAAM;AAAA,QACN,OAAO,UAAU,IAAI,CAAC,cAAc;AAAA,UACnC;AAAA,QACD,EAAE;AAAA,MACH;AAAA,IACD;AAAA,EACD,OAAO;AACN,UAAM,sBAAsB,KAAK,iBAAiB,sBAAsB,GAAG,MAAM;AACjF,UAAM,WAAW,oBACf,MAAM,GAAG,EACT,IAAI,CAAC,uBAAuB,mBAAmB,KAAK,CAAC,EACrD,OAAO,CAAC,uBAAuB,kBAAkB,EACjD,IAAI,OAAO,uBAAuB;AAClC,YAAM,aAAa,KAAK,QAAQ,iBAAiB,GAAG,kBAAkB;AACtE,YAAM,SAAS,MAAM,KAAK,QAAQ,oBAAoB,GAAG,kBAAkB;AAC3E,aAAO,MAAM,wBAAW,KAAK,MAAM,QAAQ,WAAW,QAAQ;AAAA,IAC/D,CAAC;AAEF,UAAM,YAAY,MAAM,QAAQ,IAAI,QAAQ;AAC5C,eAAW;AAAA,MACV;AAAA,QACC,MAAM;AAAA,QACN,OAAO,UAAU,IAAI,CAAC,cAAc;AAAA,UACnC;AAAA,QACD,EAAE;AAAA,MACH;AAAA,IACD;AAAA,EACD;AAEA,QAAM,OAAO,sCACZ,QAAQ,YAAY,SAAS,QAAQ,SAAmB,KAAK,EAC9D,GAAG,QAAQ,UAAU,OAAO,QAAQ,OAAiB,KAAK,EAAE;AAC5D,WAAS,CAAC,EAAE,MAAM,KAAK,EAAE,KAAK,CAAC;AAE/B,QAAM,OAA+B;AAAA,IACpC;AAAA,EACD;AAEA,QAAM,WAAY,MAAM,4BAAW,KAAK,MAAM,QAAQ,WAAW,KAAK,oBAAoB;AAAA,IACzF;AAAA,EACD,CAAC;AAED,MAAI,UAAU;AACb,WAAO,SAAS,WAAW,IAAI,CAAC,eAAe;AAAA,MAC9C
,MAAM;AAAA,MACN,YAAY,EAAE,MAAM,EAAE;AAAA,IACvB,EAAE;AAAA,EACH;AAEA,SAAO;AAAA,IACN;AAAA,MACC,MAAM,EAAE,GAAG,SAAS;AAAA,MACpB,YAAY,EAAE,MAAM,EAAE;AAAA,IACvB;AAAA,EACD;AACD;","names":[]}
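The transcribe operation's only functional change is typing the request body as `GenerateContentRequest`; the prompt assembly around it is unchanged. A small sketch of that assembly, with `buildTranscriptPrompt` as a purely illustrative helper (not part of the package):

```typescript
// Mirrors the template literal in transcribe.operation.ts shown in the source map above.
function buildTranscriptPrompt(options: { startTime?: string; endTime?: string }): string {
	return `Generate a transcript of the speech${
		options.startTime ? ` from ${options.startTime}` : ''
	}${options.endTime ? ` to ${options.endTime}` : ''}`;
}

buildTranscriptPrompt({});
// "Generate a transcript of the speech"
buildTranscriptPrompt({ startTime: '00:15', endTime: '02:15' });
// "Generate a transcript of the speech from 00:15 to 02:15"
```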
@@ -79,36 +79,19 @@ const displayOptions = {
79
79
  const description = (0, import_n8n_workflow.updateDisplayOptions)(displayOptions, properties);
80
80
  async function execute(i) {
81
81
  const inputType = this.getNodeParameter("inputType", i, "url");
82
+ let fileUrl;
82
83
  if (inputType === "url") {
83
- const fileUrl = this.getNodeParameter("fileUrl", i, "");
84
- const { fileContent, mimeType } = await import_utils.downloadFile.call(
85
- this,
86
- fileUrl,
87
- "application/octet-stream"
88
- );
89
- const response = await import_utils.uploadFile.call(this, fileContent, mimeType);
90
- return [
91
- {
92
- json: response,
93
- pairedItem: {
94
- item: i
95
- }
96
- }
97
- ];
98
- } else {
99
- const binaryPropertyName = this.getNodeParameter("binaryPropertyName", i, "data");
100
- const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
101
- const buffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);
102
- const response = await import_utils.uploadFile.call(this, buffer, binaryData.mimeType);
103
- return [
104
- {
105
- json: response,
106
- pairedItem: {
107
- item: i
108
- }
109
- }
110
- ];
84
+ fileUrl = this.getNodeParameter("fileUrl", i, "");
111
85
  }
86
+ const response = await import_utils.transferFile.call(this, i, fileUrl, "application/octet-stream");
87
+ return [
88
+ {
89
+ json: response,
90
+ pairedItem: {
91
+ item: i
92
+ }
93
+ }
94
+ ];
112
95
  }
113
96
  // Annotate the CommonJS export names for ESM import in node:
114
97
  0 && (module.exports = {