@atom8n/n8n-nodes-langchain 2.5.6 → 2.5.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/known/nodes.json +8 -0
- package/dist/nodes/llms/LmChatCodexCli/LmChatCodexCli.node.js +456 -0
- package/dist/nodes/llms/LmChatCodexCli/LmChatCodexCli.node.js.map +1 -0
- package/dist/nodes/llms/LmChatCodexCli/codexCli.svg +1 -0
- package/dist/nodes/llms/LmChatCursorAgent/LmChatCursorAgent.node.js +46 -6
- package/dist/nodes/llms/LmChatCursorAgent/LmChatCursorAgent.node.js.map +1 -1
- package/dist/nodes/llms/LmChatOpenCodeCli/LmChatOpenCodeCli.node.js +443 -0
- package/dist/nodes/llms/LmChatOpenCodeCli/LmChatOpenCodeCli.node.js.map +1 -0
- package/dist/nodes/llms/LmChatOpenCodeCli/openCodeCli.svg +1 -0
- package/dist/types/nodes.json +3 -1
- package/package.json +13 -11
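The new CLI-backed nodes in this release wrap a local binary as a LangChain chat model; the OpenCode variant (per the embedded source in the .js.map below) spawns `opencode run --format json`, collects stdout, and assembles the assistant reply from JSON-line events. The sketch below is illustrative only, assuming that invocation and the `{"type":"text","part":{"text":...}}` event shape documented in that source; `runCliPrompt` is a hypothetical helper, not part of the package's API.

```typescript
// Minimal sketch (assumptions as noted above): run a CLI prompt and parse JSON-line events.
import { spawn } from 'child_process';

function runCliPrompt(binaryPath: string, prompt: string): Promise<string> {
	// e.g. `opencode run --format json "<prompt>"`, per the embedded source below
	const args = ['run', '--format', 'json', prompt];

	return new Promise((resolve, reject) => {
		const child = spawn(binaryPath, args);

		let stdout = '';
		let stderr = '';
		child.stdout.on('data', (chunk: Buffer) => (stdout += chunk.toString()));
		child.stderr.on('data', (chunk: Buffer) => (stderr += chunk.toString()));

		child.on('error', reject);
		child.on('close', (code) => {
			const parts: string[] = [];
			for (const line of stdout.split('\n')) {
				if (!line.trim()) continue;
				try {
					// "text" events carry the assistant reply in part.text
					const event = JSON.parse(line);
					if (event.type === 'text' && typeof event.part?.text === 'string') {
						parts.push(event.part.text);
					}
				} catch {
					// Skip non-JSON lines (banners, progress output)
				}
			}
			if (parts.length > 0) resolve(parts.join(''));
			else reject(new Error(stderr.trim() || `CLI exited with code ${code ?? 'null'}`));
		});

		// The prompt is passed as a positional argument, so stdin can close immediately
		child.stdin.end();
	});
}
```

The shipped node adds tool-call extraction, error events, and working-directory validation on top of this; see the full source embedded in the map file below.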
package/dist/nodes/llms/LmChatOpenCodeCli/LmChatOpenCodeCli.node.js.map
ADDED
@@ -0,0 +1 @@
+
{"version":3,"sources":["../../../../nodes/llms/LmChatOpenCodeCli/LmChatOpenCodeCli.node.ts"],"sourcesContent":["import { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';\nimport type { ChatResult } from '@langchain/core/outputs';\nimport type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';\nimport type { BindToolsInput } from '@langchain/core/language_models/chat_models';\nimport {\n\tApplicationError,\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport { N8nLlmTracing } from '../N8nLlmTracing';\nimport { spawn } from 'child_process';\nimport { existsSync, statSync } from 'fs';\n\ninterface OpenCodeCliFields {\n\tmodel: string;\n\tbinaryPath: string;\n\tworkingDirectory: string;\n}\n\ninterface ParsedToolCall {\n\tid: string;\n\tname: string;\n\targs: Record<string, unknown>;\n}\n\ninterface ParsedJsonEventResult {\n\tassistantText: string;\n\terrorMessage: string;\n}\n\nconst TOOL_CALL_SYSTEM_PROMPT = `You have access to the following tools. When you need to call a tool, respond ONLY with a JSON block in this exact format (no other text before or after):\n\n\\`\\`\\`tool_calls\n[{\"id\": \"call_1\", \"name\": \"tool_name\", \"args\": {\"param\": \"value\"}}]\n\\`\\`\\`\n\nWhen you do NOT need to call a tool, respond normally with text. Never mix tool calls and text in the same response.\n\nAvailable tools:\n`;\n\n/**\n * Custom LangChain chat model that wraps the OpenCode CLI binary.\n * Uses `opencode run --format json` for non-interactive execution.\n * Supports tool calling by injecting tool schemas into the prompt\n * and parsing structured JSON responses for tool calls.\n *\n * OpenCode CLI JSON output event types (verified empirically):\n * - {\"type\":\"step_start\", \"part\":{\"type\":\"step-start\",...}}\n * - {\"type\":\"text\", \"part\":{\"type\":\"text\",\"text\":\"...the response...\",...}}\n * - {\"type\":\"step_finish\", \"part\":{\"type\":\"step-finish\",\"reason\":\"stop\",\"cost\":...,\"tokens\":{...}}}\n */\nclass ChatOpenCodeCLI extends BaseChatModel {\n\tmodel: string;\n\n\tbinaryPath: string;\n\n\tworkingDirectory: string;\n\n\tboundTools: BindToolsInput[] = [];\n\n\tconstructor(fields: OpenCodeCliFields) {\n\t\tsuper({});\n\t\tthis.model = fields.model;\n\t\tthis.binaryPath = fields.binaryPath;\n\t\tthis.workingDirectory = fields.workingDirectory;\n\t}\n\n\t_llmType(): string {\n\t\treturn 'opencode-cli';\n\t}\n\n\toverride bindTools(tools: BindToolsInput[], kwargs?: Partial<this['ParsedCallOptions']>) {\n\t\tconsole.log('[LmChatOpenCodeCli] bindTools called, tool count:', tools.length);\n\t\tconst clone = new ChatOpenCodeCLI({\n\t\t\tmodel: this.model,\n\t\t\tbinaryPath: this.binaryPath,\n\t\t\tworkingDirectory: this.workingDirectory,\n\t\t});\n\t\tclone.boundTools = tools;\n\t\tclone.callbacks = this.callbacks;\n\t\tif (kwargs) {\n\t\t\treturn (\n\t\t\t\tclone as unknown as {\n\t\t\t\t\tbind: (kwargs: Record<string, unknown>) => ChatOpenCodeCLI;\n\t\t\t\t}\n\t\t\t).bind(kwargs as Record<string, unknown>);\n\t\t}\n\t\treturn clone;\n\t}\n\n\tasync _generate(\n\t\tmessages: BaseMessage[],\n\t\t_options: this['ParsedCallOptions'],\n\t\t_runManager?: CallbackManagerForLLMRun,\n\t): Promise<ChatResult> 
{\n\t\tconsole.log('[LmChatOpenCodeCli] _generate called', {\n\t\t\tmessageCount: messages.length,\n\t\t\tboundToolCount: this.boundTools.length,\n\t\t\tmodel: this.model,\n\t\t});\n\n\t\t// If tools are bound, inject tool schemas into a system message\n\t\tconst processedMessages = [...messages];\n\t\tif (this.boundTools.length > 0) {\n\t\t\tconst toolDescriptions = this.boundTools\n\t\t\t\t.map((tool) => {\n\t\t\t\t\tconst t = tool as Record<string, unknown>;\n\t\t\t\t\tconst name = (t.name as string) ?? '';\n\t\t\t\t\tconst description = (t.description as string) ?? '';\n\t\t\t\t\tconst schema = t.parameters ?? t.schema ?? {};\n\t\t\t\t\treturn `- ${name}: ${description}\\n Parameters: ${JSON.stringify(schema)}`;\n\t\t\t\t})\n\t\t\t\t.join('\\n\\n');\n\n\t\t\tconst systemPrompt = TOOL_CALL_SYSTEM_PROMPT + toolDescriptions;\n\t\t\tprocessedMessages.unshift(new SystemMessage(systemPrompt));\n\t\t\tconsole.log(\n\t\t\t\t'[LmChatOpenCodeCli] injected tool system prompt, tool count:',\n\t\t\t\tthis.boundTools.length,\n\t\t\t);\n\t\t}\n\n\t\t// Build prompt from messages\n\t\tconst prompt = processedMessages\n\t\t\t.map((m) => {\n\t\t\t\tconst content = typeof m.content === 'string' ? m.content : JSON.stringify(m.content);\n\t\t\t\tif (m instanceof SystemMessage) return `[system]: ${content}`;\n\t\t\t\tif (m instanceof HumanMessage) return `[user]: ${content}`;\n\t\t\t\tif (m instanceof AIMessage) return `[assistant]: ${content}`;\n\t\t\t\treturn `[${m._getType()}]: ${content}`;\n\t\t\t})\n\t\t\t.join('\\n\\n');\n\n\t\tconsole.log('[LmChatOpenCodeCli] prompt built, length:', prompt.length);\n\n\t\t// Execute opencode CLI\n\t\tconst rawResponse = await this.executeOpenCodeCli(prompt);\n\n\t\tconsole.log('[LmChatOpenCodeCli] raw response received, length:', rawResponse.length);\n\n\t\t// Check for tool calls in response\n\t\tif (this.boundTools.length > 0) {\n\t\t\tconst toolCalls = this.extractToolCalls(rawResponse);\n\t\t\tif (toolCalls.length > 0) {\n\t\t\t\tconsole.log('[LmChatOpenCodeCli] extracted tool calls:', toolCalls.length);\n\t\t\t\tconst aiMessage = new AIMessage({\n\t\t\t\t\tcontent: '',\n\t\t\t\t\ttool_calls: toolCalls.map((tc) => ({\n\t\t\t\t\t\tid: tc.id,\n\t\t\t\t\t\tname: tc.name,\n\t\t\t\t\t\targs: tc.args,\n\t\t\t\t\t\ttype: 'tool_call' as const,\n\t\t\t\t\t})),\n\t\t\t\t});\n\n\t\t\t\treturn {\n\t\t\t\t\tgenerations: [{ message: aiMessage, text: '' }],\n\t\t\t\t};\n\t\t\t}\n\t\t}\n\n\t\t// Normal text response\n\t\tconsole.log('[LmChatOpenCodeCli] returning text response');\n\t\tconst aiMessage = new AIMessage({ content: rawResponse });\n\t\treturn {\n\t\t\tgenerations: [{ message: aiMessage, text: rawResponse }],\n\t\t};\n\t}\n\n\tprivate extractToolCalls(text: string): ParsedToolCall[] {\n\t\t// Look for tool_calls JSON block\n\t\tconst toolCallRegex = /```tool_calls\\s*\\n([\\s\\S]*?)\\n```/;\n\t\tconst match = toolCallRegex.exec(text);\n\t\tif (!match) return [];\n\n\t\ttry {\n\t\t\tconst parsed = JSON.parse(match[1]) as Array<{\n\t\t\t\tid?: string;\n\t\t\t\tname: string;\n\t\t\t\targs: Record<string, unknown>;\n\t\t\t}>;\n\t\t\tif (!Array.isArray(parsed)) return [];\n\n\t\t\tconsole.log('[LmChatOpenCodeCli] parsed tool calls from response:', parsed.length);\n\t\t\treturn parsed.map((tc, i) => ({\n\t\t\t\tid: tc.id ?? `call_${i}`,\n\t\t\t\tname: tc.name,\n\t\t\t\targs: tc.args ?? 
{},\n\t\t\t}));\n\t\t} catch {\n\t\t\tconsole.log('[LmChatOpenCodeCli] failed to parse tool calls JSON block');\n\t\t\treturn [];\n\t\t}\n\t}\n\n\tprivate async executeOpenCodeCli(prompt: string): Promise<string> {\n\t\t// Build args: opencode run --format json [--model <provider/model>] <prompt>\n\t\t// The prompt is passed as a positional argument to `opencode run`\n\t\t// We use --format json to get structured JSONL output\n\t\tconst args = ['run', '--format', 'json'];\n\n\t\tif (this.model && this.model !== 'auto') {\n\t\t\targs.push('--model', this.model);\n\t\t}\n\n\t\t// The prompt is passed via stdin-like mechanism — actually as positional arg\n\t\t// But since prompts can be very long, we pass it as a positional argument\n\t\targs.push(prompt);\n\n\t\tconst cwd = this.workingDirectory?.trim() || undefined;\n\n\t\tconsole.log('[LmChatOpenCodeCli] spawning opencode run', {\n\t\t\tbinaryPath: this.binaryPath,\n\t\t\targs: args.map((a, i) => (i === args.length - 1 ? `<prompt len=${a.length}>` : a)),\n\t\t\tmodel: this.model,\n\t\t\tcwd,\n\t\t});\n\n\t\treturn await new Promise<string>((resolve, reject) => {\n\t\t\tconst child = spawn(this.binaryPath, args, {\n\t\t\t\tcwd,\n\t\t\t\tstdio: ['pipe', 'pipe', 'pipe'],\n\t\t\t\tenv: { ...process.env },\n\t\t\t});\n\n\t\t\tlet stdout = '';\n\t\t\tlet stderr = '';\n\n\t\t\tchild.stdout.on('data', (data: Buffer) => {\n\t\t\t\tstdout += data.toString();\n\t\t\t});\n\n\t\t\tchild.stderr.on('data', (data: Buffer) => {\n\t\t\t\tstderr += data.toString();\n\t\t\t});\n\n\t\t\tchild.on('error', (err: Error) => {\n\t\t\t\tconsole.error('[LmChatOpenCodeCli] spawn error:', err.message);\n\t\t\t\treject(\n\t\t\t\t\tnew Error(\n\t\t\t\t\t\t`Failed to spawn opencode: ${err.message}. Make sure OpenCode CLI is installed (brew install opencode-ai/tap/opencode or curl -fsSL https://opencode.ai/install | bash) and accessible. Working directory: ${cwd ?? '<default>'}`,\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t});\n\n\t\t\tchild.on('close', (code: number | null) => {\n\t\t\t\tconsole.log('[LmChatOpenCodeCli] opencode run exited', {\n\t\t\t\t\tcode,\n\t\t\t\t\tstdoutLength: stdout.length,\n\t\t\t\t\tstderrLength: stderr.length,\n\t\t\t\t});\n\n\t\t\t\t// Parse the JSON event output — even on non-zero exit, stdout may\n\t\t\t\t// contain useful events (e.g. 
error messages from the provider)\n\t\t\t\tconst parseResult = this.parseJsonEventOutput(stdout);\n\n\t\t\t\tif (parseResult.assistantText) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t'[LmChatOpenCodeCli] parsed assistant content, length:',\n\t\t\t\t\t\tparseResult.assistantText.length,\n\t\t\t\t\t);\n\t\t\t\t\tresolve(parseResult.assistantText);\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t// No assistant response — build a meaningful error from available info\n\t\t\t\tif (parseResult.errorMessage) {\n\t\t\t\t\tconsole.error('[LmChatOpenCodeCli] opencode returned error:', parseResult.errorMessage);\n\t\t\t\t\treject(new Error(`OpenCode CLI error: ${parseResult.errorMessage}`));\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tif (code !== 0) {\n\t\t\t\t\tconst stderrMsg = stderr.trim();\n\t\t\t\t\tconst errorMsg = stderrMsg || `opencode run exited with code ${code}`;\n\t\t\t\t\tconsole.error('[LmChatOpenCodeCli] opencode run failed with code', code, ':', errorMsg);\n\t\t\t\t\treject(new Error(errorMsg));\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tconsole.error(\n\t\t\t\t\t'[LmChatOpenCodeCli] no assistant response parsed from output, stdout preview:',\n\t\t\t\t\tstdout.substring(0, 500),\n\t\t\t\t);\n\t\t\t\treject(new Error('No assistant response received from opencode run'));\n\t\t\t});\n\n\t\t\t// Close stdin immediately — opencode run takes the prompt as a positional arg\n\t\t\tif (child.stdin) {\n\t\t\t\tchild.stdin.end();\n\t\t\t}\n\t\t});\n\t}\n\n\t/**\n\t * Parse JSON event output from `opencode run --format json`.\n\t *\n\t * Actual event types from opencode run --format json (verified empirically):\n\t * - {\"type\":\"step_start\",\"part\":{\"type\":\"step-start\",...}}\n\t * - {\"type\":\"text\",\"part\":{\"type\":\"text\",\"text\":\"...the response...\",...}}\n\t * - {\"type\":\"step_finish\",\"part\":{\"type\":\"step-finish\",\"reason\":\"stop\",\"cost\":...,\"tokens\":{...}}}\n\t *\n\t * Returns both assistant text and any error messages found.\n\t */\n\tprivate parseJsonEventOutput(output: string): ParsedJsonEventResult {\n\t\tconst lines = output.split('\\n').filter((line) => line.trim());\n\t\tconst assistantParts: string[] = [];\n\t\tconst errorParts: string[] = [];\n\n\t\tconsole.log('[LmChatOpenCodeCli] parsing JSON event output, line count:', lines.length);\n\n\t\tfor (const line of lines) {\n\t\t\ttry {\n\t\t\t\tconst parsed = JSON.parse(line) as Record<string, unknown>;\n\t\t\t\tconst eventType = parsed.type as string | undefined;\n\n\t\t\t\tconsole.log(\n\t\t\t\t\t'[LmChatOpenCodeCli] JSON event:',\n\t\t\t\t\teventType,\n\t\t\t\t\t'| keys:',\n\t\t\t\t\tObject.keys(parsed).join(','),\n\t\t\t\t);\n\n\t\t\t\t// \"text\" event — contains assistant response text in part.text\n\t\t\t\tif (eventType === 'text') {\n\t\t\t\t\tconst part = parsed.part as Record<string, unknown> | undefined;\n\t\t\t\t\tif (part?.type === 'text' && typeof part.text === 'string') {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t'[LmChatOpenCodeCli] found text event, text length:',\n\t\t\t\t\t\t\t(part.text as string).length,\n\t\t\t\t\t\t);\n\t\t\t\t\t\tassistantParts.push(part.text as string);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// \"message\" event — fallback for alternative output formats\n\t\t\t\tif (eventType === 'message') {\n\t\t\t\t\tconst part = parsed.part as Record<string, unknown> | undefined;\n\t\t\t\t\tif (part && typeof part.text === 'string') {\n\t\t\t\t\t\tassistantParts.push(part.text as string);\n\t\t\t\t\t}\n\t\t\t\t\t// Also handle role-based messages\n\t\t\t\t\tif (parsed.role === 'assistant' && 
typeof parsed.content === 'string') {\n\t\t\t\t\t\tassistantParts.push(parsed.content as string);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// \"assistant\" event — compatibility fallback\n\t\t\t\tif (eventType === 'assistant') {\n\t\t\t\t\tconst message = parsed.message as Record<string, unknown> | undefined;\n\t\t\t\t\tif (message?.content) {\n\t\t\t\t\t\tif (Array.isArray(message.content)) {\n\t\t\t\t\t\t\tfor (const c of message.content as Array<Record<string, unknown>>) {\n\t\t\t\t\t\t\t\tif (c.type === 'text' && typeof c.text === 'string') {\n\t\t\t\t\t\t\t\t\tassistantParts.push(c.text as string);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if (typeof message.content === 'string') {\n\t\t\t\t\t\t\tassistantParts.push(message.content);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Error events\n\t\t\t\tif (eventType === 'error') {\n\t\t\t\t\tconst errMsg =\n\t\t\t\t\t\ttypeof parsed.message === 'string'\n\t\t\t\t\t\t\t? parsed.message\n\t\t\t\t\t\t\t: typeof parsed.error === 'string'\n\t\t\t\t\t\t\t\t? parsed.error\n\t\t\t\t\t\t\t\t: undefined;\n\t\t\t\t\tif (errMsg) {\n\t\t\t\t\t\tconsole.error('[LmChatOpenCodeCli] error event received:', errMsg);\n\t\t\t\t\t\terrorParts.push(errMsg);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// step_finish with error info\n\t\t\t\tif (eventType === 'step_finish') {\n\t\t\t\t\tconst part = parsed.part as Record<string, unknown> | undefined;\n\t\t\t\t\tif (part) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t'[LmChatOpenCodeCli] step_finish event, reason:',\n\t\t\t\t\t\t\tpart.reason,\n\t\t\t\t\t\t\t'cost:',\n\t\t\t\t\t\t\tpart.cost,\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (part.reason === 'error' && typeof part.error === 'string') {\n\t\t\t\t\t\t\terrorParts.push(part.error as string);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} catch {\n\t\t\t\t// Skip non-JSON lines (e.g. progress output, banners)\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\tassistantText: assistantParts.join(''),\n\t\t\terrorMessage: errorParts.join('; '),\n\t\t};\n\t}\n}\n\nexport class LmChatOpenCodeCli implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'OpenCode CLI Chat Model',\n\n\t\tname: 'lmChatOpenCodeCli',\n\t\ticon: 'file:openCodeCli.svg',\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription:\n\t\t\t'Chat model powered by the OpenCode CLI. Requires opencode to be installed locally (brew install opencode-ai/tap/opencode or curl -fsSL https://opencode.ai/install | bash).',\n\t\tdefaults: {\n\t\t\tname: 'OpenCode CLI Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model to use via opencode CLI. Format: provider/model (e.g. anthropic/claude-sonnet-4-20250514). 
Select \"Auto\" to use the default model configured in opencode.',\n\t\t\t\t// eslint-disable-next-line n8n-nodes-base/node-param-options-type-unsorted-items\n\t\t\t\toptions: [\n\t\t\t\t\t{ name: 'Auto (Default)', value: 'auto' },\n\t\t\t\t\t// OpenCode built-in models\n\t\t\t\t\t{ name: 'OpenCode Big Pickle', value: 'opencode/big-pickle' },\n\t\t\t\t\t{ name: 'OpenCode GPT-5 Nano', value: 'opencode/gpt-5-nano' },\n\t\t\t\t\t{ name: 'OpenCode Hy3 Preview Free', value: 'opencode/hy3-preview-free' },\n\t\t\t\t\t// Anthropic models\n\t\t\t\t\t{ name: 'Claude Sonnet 4 (Anthropic)', value: 'anthropic/claude-sonnet-4-20250514' },\n\t\t\t\t\t{ name: 'Claude Opus 4 (Anthropic)', value: 'anthropic/claude-opus-4-20250918' },\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Claude 3.5 Sonnet (Anthropic)',\n\t\t\t\t\t\tvalue: 'anthropic/claude-3-5-sonnet-20241022',\n\t\t\t\t\t},\n\t\t\t\t\t// OpenAI models\n\t\t\t\t\t{ name: 'GPT-4o (OpenAI)', value: 'openai/gpt-4o' },\n\t\t\t\t\t{ name: 'GPT-4o Mini (OpenAI)', value: 'openai/gpt-4o-mini' },\n\t\t\t\t\t{ name: 'o3 (OpenAI)', value: 'openai/o3' },\n\t\t\t\t\t{ name: 'o3 Mini (OpenAI)', value: 'openai/o3-mini' },\n\t\t\t\t\t// Google models\n\t\t\t\t\t{ name: 'Gemini 2.5 Pro (Google)', value: 'google/gemini-2.5-pro' },\n\t\t\t\t\t{ name: 'Gemini 2.5 Flash (Google)', value: 'google/gemini-2.5-flash' },\n\t\t\t\t\t{ name: 'Gemini 2.0 Flash (Google)', value: 'google/gemini-2.0-flash' },\n\t\t\t\t\t// xAI models\n\t\t\t\t\t{ name: 'Grok 3 (xAI)', value: 'xai/grok-3' },\n\t\t\t\t\t{ name: 'Grok 3 Mini (xAI)', value: 'xai/grok-3-mini' },\n\t\t\t\t\t// DeepSeek models\n\t\t\t\t\t{ name: 'DeepSeek Chat (DeepSeek)', value: 'deepseek/deepseek-chat' },\n\t\t\t\t\t{ name: 'DeepSeek Reasoner (DeepSeek)', value: 'deepseek/deepseek-reasoner' },\n\t\t\t\t],\n\t\t\t\tdefault: 'auto',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to configure',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Binary Path',\n\t\t\t\t\t\tname: 'binaryPath',\n\t\t\t\t\t\tdefault: 'opencode',\n\t\t\t\t\t\tdescription: 'Path to the opencode binary. Defaults to \"opencode\" (must be in PATH).',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Working Directory',\n\t\t\t\t\t\tname: 'workingDirectory',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Working directory for the opencode process. Leave empty to use the default.',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst binaryPath = this.getNodeParameter('options.binaryPath', itemIndex, 'opencode') as string;\n\t\tconst rawWorkingDirectory = this.getNodeParameter('options.workingDirectory', itemIndex, '', {\n\t\t\trawExpressions: true,\n\t\t}) as string | undefined;\n\t\tconst workingDirectory = this.getNodeParameter('options.workingDirectory', itemIndex, '') as\n\t\t\t| string\n\t\t\t| undefined;\n\t\tconst normalizedWorkingDirectory = (workingDirectory ?? '').trim();\n\t\tconst rawWorkingDirectoryValue = rawWorkingDirectory ?? 
'';\n\t\tconst isWorkingDirectoryExpression =\n\t\t\trawWorkingDirectoryValue.startsWith('=') ||\n\t\t\trawWorkingDirectoryValue.includes('{{') ||\n\t\t\trawWorkingDirectoryValue.includes('$workspace');\n\n\t\tconsole.log('[LmChatOpenCodeCli] resolved OpenCode CLI options', {\n\t\t\titemIndex,\n\t\t\tmodelName,\n\t\t\tbinaryPath,\n\t\t\trawWorkingDirectory,\n\t\t\tworkingDirectory: normalizedWorkingDirectory,\n\t\t});\n\n\t\tif (isWorkingDirectoryExpression && !normalizedWorkingDirectory) {\n\t\t\tthrow new ApplicationError(\n\t\t\t\t`OpenCode CLI working directory expression resolved to an empty value: ${rawWorkingDirectoryValue}`,\n\t\t\t);\n\t\t}\n\n\t\tif (\n\t\t\tnormalizedWorkingDirectory.includes('{{') ||\n\t\t\tnormalizedWorkingDirectory.includes('$workspace')\n\t\t) {\n\t\t\tthrow new ApplicationError(\n\t\t\t\t`OpenCode CLI working directory was not resolved before execution: ${normalizedWorkingDirectory}`,\n\t\t\t);\n\t\t}\n\n\t\tif (\n\t\t\tnormalizedWorkingDirectory &&\n\t\t\t(!existsSync(normalizedWorkingDirectory) ||\n\t\t\t\t!statSync(normalizedWorkingDirectory).isDirectory())\n\t\t) {\n\t\t\tthrow new ApplicationError(\n\t\t\t\t`OpenCode CLI working directory does not exist or is not a directory: ${normalizedWorkingDirectory}`,\n\t\t\t);\n\t\t}\n\n\t\tconsole.log('[LmChatOpenCodeCli] creating ChatOpenCodeCLI instance', {\n\t\t\tmodel: modelName,\n\t\t\tbinaryPath,\n\t\t\tworkingDirectory: normalizedWorkingDirectory,\n\t\t});\n\n\t\tconst model = new ChatOpenCodeCLI({\n\t\t\tmodel: modelName,\n\t\t\tbinaryPath,\n\t\t\tworkingDirectory: normalizedWorkingDirectory,\n\t\t});\n\n\t\tmodel.callbacks = [new N8nLlmTracing(this)];\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,yBAA8B;AAE9B,sBAAuD;AAIvD,0BAOO;AAEP,0BAA6C;AAE7C,2BAA8B;AAC9B,2BAAsB;AACtB,gBAAqC;AAmBrC,MAAM,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAsBhC,MAAM,wBAAwB,iCAAc;AAAA,EAS3C,YAAY,QAA2B;AACtC,UAAM,CAAC,CAAC;AAHT,sBAA+B,CAAC;AAI/B,SAAK,QAAQ,OAAO;AACpB,SAAK,aAAa,OAAO;AACzB,SAAK,mBAAmB,OAAO;AAAA,EAChC;AAAA,EAEA,WAAmB;AAClB,WAAO;AAAA,EACR;AAAA,EAES,UAAU,OAAyB,QAA6C;AACxF,YAAQ,IAAI,qDAAqD,MAAM,MAAM;AAC7E,UAAM,QAAQ,IAAI,gBAAgB;AAAA,MACjC,OAAO,KAAK;AAAA,MACZ,YAAY,KAAK;AAAA,MACjB,kBAAkB,KAAK;AAAA,IACxB,CAAC;AACD,UAAM,aAAa;AACnB,UAAM,YAAY,KAAK;AACvB,QAAI,QAAQ;AACX,aACC,MAGC,KAAK,MAAiC;AAAA,IACzC;AACA,WAAO;AAAA,EACR;AAAA,EAEA,MAAM,UACL,UACA,UACA,aACsB;AACtB,YAAQ,IAAI,wCAAwC;AAAA,MACnD,cAAc,SAAS;AAAA,MACvB,gBAAgB,KAAK,WAAW;AAAA,MAChC,OAAO,KAAK;AAAA,IACb,CAAC;AAGD,UAAM,oBAAoB,CAAC,GAAG,QAAQ;AACtC,QAAI,KAAK,WAAW,SAAS,GAAG;AAC/B,YAAM,mBAAmB,KAAK,WAC5B,IAAI,CAAC,SAAS;AACd,cAAM,IAAI;AACV,cAAM,OAAQ,EAAE,QAAmB;AACnC,cAAM,cAAe,EAAE,eAA0B;AACjD,cAAM,SAAS,EAAE,cAAc,EAAE,UAAU,CAAC;AAC5C,eAAO,KAAK,IAAI,KAAK,WAAW;AAAA,gBAAmB,KAAK,UAAU,MAAM,CAAC;AAAA,MAC1E,CAAC,EACA,KAAK,MAAM;AAEb,YAAM,eAAe,0BAA0B;AAC/C,wBAAkB,QAAQ,IAAI,8BAAc,YAAY,CAAC;AACzD,cAAQ;AAAA,QACP;AAAA,QACA,KAAK,WAAW;AAAA,MACjB;AAAA,IACD;AAGA,UAAM,SAAS,kBACb,IAAI,CAAC,MAAM;AACX,YAAM,UAAU,OAAO,EAAE,YAAY,WAAW,EAAE,UAAU,KAAK,UAAU,EAAE,OAAO;AACpF,UAAI,aAAa,8BAAe,QAAO,aAAa,OAAO;AAC3D,UAAI,aAAa,6BAAc,QAAO,WAAW,OAAO;AACxD,UAAI,aAAa,0BAAW,QAAO,gBAAgB,OAAO;AAC1D,aAAO,IAAI,EAAE,SAAS,CAAC,MAAM,OAAO;AAAA,IACrC,CAAC,EACA,KAAK,MAAM;AAEb,YAAQ,IAAI,6CAA6C,OAAO,MAAM;AAGtE,UAAM,cAAc,MAAM,KAAK,mBAAmB,MAAM;AAExD,YAAQ,IAAI,sDAAsD,YAAY,MAAM;AAGpF,QAAI,KAAK,WAAW,SAAS,GAAG;AAC/B,YAAM,YAAY,KAAK,iBAAiB,WAAW;AACnD,UAAI,UAAU,SAAS,GAAG;AACzB,gBAAQ,IAAI,6CAA6C,UAAU,MAAM;AACzE,cAAMA,aAAY,IAAI,0BAAU;AAAA,UAC/B,SAAS;AAAA,UACT,YA
AY,UAAU,IAAI,CAAC,QAAQ;AAAA,YAClC,IAAI,GAAG;AAAA,YACP,MAAM,GAAG;AAAA,YACT,MAAM,GAAG;AAAA,YACT,MAAM;AAAA,UACP,EAAE;AAAA,QACH,CAAC;AAED,eAAO;AAAA,UACN,aAAa,CAAC,EAAE,SAASA,YAAW,MAAM,GAAG,CAAC;AAAA,QAC/C;AAAA,MACD;AAAA,IACD;AAGA,YAAQ,IAAI,6CAA6C;AACzD,UAAM,YAAY,IAAI,0BAAU,EAAE,SAAS,YAAY,CAAC;AACxD,WAAO;AAAA,MACN,aAAa,CAAC,EAAE,SAAS,WAAW,MAAM,YAAY,CAAC;AAAA,IACxD;AAAA,EACD;AAAA,EAEQ,iBAAiB,MAAgC;AAExD,UAAM,gBAAgB;AACtB,UAAM,QAAQ,cAAc,KAAK,IAAI;AACrC,QAAI,CAAC,MAAO,QAAO,CAAC;AAEpB,QAAI;AACH,YAAM,SAAS,KAAK,MAAM,MAAM,CAAC,CAAC;AAKlC,UAAI,CAAC,MAAM,QAAQ,MAAM,EAAG,QAAO,CAAC;AAEpC,cAAQ,IAAI,wDAAwD,OAAO,MAAM;AACjF,aAAO,OAAO,IAAI,CAAC,IAAI,OAAO;AAAA,QAC7B,IAAI,GAAG,MAAM,QAAQ,CAAC;AAAA,QACtB,MAAM,GAAG;AAAA,QACT,MAAM,GAAG,QAAQ,CAAC;AAAA,MACnB,EAAE;AAAA,IACH,QAAQ;AACP,cAAQ,IAAI,2DAA2D;AACvE,aAAO,CAAC;AAAA,IACT;AAAA,EACD;AAAA,EAEA,MAAc,mBAAmB,QAAiC;AAIjE,UAAM,OAAO,CAAC,OAAO,YAAY,MAAM;AAEvC,QAAI,KAAK,SAAS,KAAK,UAAU,QAAQ;AACxC,WAAK,KAAK,WAAW,KAAK,KAAK;AAAA,IAChC;AAIA,SAAK,KAAK,MAAM;AAEhB,UAAM,MAAM,KAAK,kBAAkB,KAAK,KAAK;AAE7C,YAAQ,IAAI,6CAA6C;AAAA,MACxD,YAAY,KAAK;AAAA,MACjB,MAAM,KAAK,IAAI,CAAC,GAAG,MAAO,MAAM,KAAK,SAAS,IAAI,eAAe,EAAE,MAAM,MAAM,CAAE;AAAA,MACjF,OAAO,KAAK;AAAA,MACZ;AAAA,IACD,CAAC;AAED,WAAO,MAAM,IAAI,QAAgB,CAAC,SAAS,WAAW;AACrD,YAAM,YAAQ,4BAAM,KAAK,YAAY,MAAM;AAAA,QAC1C;AAAA,QACA,OAAO,CAAC,QAAQ,QAAQ,MAAM;AAAA,QAC9B,KAAK,EAAE,GAAG,QAAQ,IAAI;AAAA,MACvB,CAAC;AAED,UAAI,SAAS;AACb,UAAI,SAAS;AAEb,YAAM,OAAO,GAAG,QAAQ,CAAC,SAAiB;AACzC,kBAAU,KAAK,SAAS;AAAA,MACzB,CAAC;AAED,YAAM,OAAO,GAAG,QAAQ,CAAC,SAAiB;AACzC,kBAAU,KAAK,SAAS;AAAA,MACzB,CAAC;AAED,YAAM,GAAG,SAAS,CAAC,QAAe;AACjC,gBAAQ,MAAM,oCAAoC,IAAI,OAAO;AAC7D;AAAA,UACC,IAAI;AAAA,YACH,6BAA6B,IAAI,OAAO,qKAAqK,OAAO,WAAW;AAAA,UAChO;AAAA,QACD;AAAA,MACD,CAAC;AAED,YAAM,GAAG,SAAS,CAAC,SAAwB;AAC1C,gBAAQ,IAAI,2CAA2C;AAAA,UACtD;AAAA,UACA,cAAc,OAAO;AAAA,UACrB,cAAc,OAAO;AAAA,QACtB,CAAC;AAID,cAAM,cAAc,KAAK,qBAAqB,MAAM;AAEpD,YAAI,YAAY,eAAe;AAC9B,kBAAQ;AAAA,YACP;AAAA,YACA,YAAY,cAAc;AAAA,UAC3B;AACA,kBAAQ,YAAY,aAAa;AACjC;AAAA,QACD;AAGA,YAAI,YAAY,cAAc;AAC7B,kBAAQ,MAAM,gDAAgD,YAAY,YAAY;AACtF,iBAAO,IAAI,MAAM,uBAAuB,YAAY,YAAY,EAAE,CAAC;AACnE;AAAA,QACD;AAEA,YAAI,SAAS,GAAG;AACf,gBAAM,YAAY,OAAO,KAAK;AAC9B,gBAAM,WAAW,aAAa,iCAAiC,IAAI;AACnE,kBAAQ,MAAM,qDAAqD,MAAM,KAAK,QAAQ;AACtF,iBAAO,IAAI,MAAM,QAAQ,CAAC;AAC1B;AAAA,QACD;AAEA,gBAAQ;AAAA,UACP;AAAA,UACA,OAAO,UAAU,GAAG,GAAG;AAAA,QACxB;AACA,eAAO,IAAI,MAAM,kDAAkD,CAAC;AAAA,MACrE,CAAC;AAGD,UAAI,MAAM,OAAO;AAChB,cAAM,MAAM,IAAI;AAAA,MACjB;AAAA,IACD,CAAC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYQ,qBAAqB,QAAuC;AACnE,UAAM,QAAQ,OAAO,MAAM,IAAI,EAAE,OAAO,CAAC,SAAS,KAAK,KAAK,CAAC;AAC7D,UAAM,iBAA2B,CAAC;AAClC,UAAM,aAAuB,CAAC;AAE9B,YAAQ,IAAI,8DAA8D,MAAM,MAAM;AAEtF,eAAW,QAAQ,OAAO;AACzB,UAAI;AACH,cAAM,SAAS,KAAK,MAAM,IAAI;AAC9B,cAAM,YAAY,OAAO;AAEzB,gBAAQ;AAAA,UACP;AAAA,UACA;AAAA,UACA;AAAA,UACA,OAAO,KAAK,MAAM,EAAE,KAAK,GAAG;AAAA,QAC7B;AAGA,YAAI,cAAc,QAAQ;AACzB,gBAAM,OAAO,OAAO;AACpB,cAAI,MAAM,SAAS,UAAU,OAAO,KAAK,SAAS,UAAU;AAC3D,oBAAQ;AAAA,cACP;AAAA,cACC,KAAK,KAAgB;AAAA,YACvB;AACA,2BAAe,KAAK,KAAK,IAAc;AAAA,UACxC;AAAA,QACD;AAGA,YAAI,cAAc,WAAW;AAC5B,gBAAM,OAAO,OAAO;AACpB,cAAI,QAAQ,OAAO,KAAK,SAAS,UAAU;AAC1C,2BAAe,KAAK,KAAK,IAAc;AAAA,UACxC;AAEA,cAAI,OAAO,SAAS,eAAe,OAAO,OAAO,YAAY,UAAU;AACtE,2BAAe,KAAK,OAAO,OAAiB;AAAA,UAC7C;AAAA,QACD;AAGA,YAAI,cAAc,aAAa;AAC9B,gBAAM,UAAU,OAAO;AACvB,cAAI,SAAS,SAAS;AACrB,gBAAI,MAAM,QAAQ,QAAQ,OAAO,GAAG;AACnC,yBAAW,KAAK,QAAQ,SAA2C;AAClE,oBAAI,EAAE,SAAS,UAAU,OAAO,EAAE,SAAS,UAAU;AACpD,iCAAe,KAAK,EAAE,IAAc;AAAA,gBACrC;AAAA,cACD;AAAA,YACD,WAAW,OAAO,QAAQ,YAAY,UAAU;AAC/C,6BAAe,KAAK,QAAQ,OAAO;AAAA,YACpC;AAAA,UACD;AAAA,
QACD;AAGA,YAAI,cAAc,SAAS;AAC1B,gBAAM,SACL,OAAO,OAAO,YAAY,WACvB,OAAO,UACP,OAAO,OAAO,UAAU,WACvB,OAAO,QACP;AACL,cAAI,QAAQ;AACX,oBAAQ,MAAM,6CAA6C,MAAM;AACjE,uBAAW,KAAK,MAAM;AAAA,UACvB;AAAA,QACD;AAGA,YAAI,cAAc,eAAe;AAChC,gBAAM,OAAO,OAAO;AACpB,cAAI,MAAM;AACT,oBAAQ;AAAA,cACP;AAAA,cACA,KAAK;AAAA,cACL;AAAA,cACA,KAAK;AAAA,YACN;AACA,gBAAI,KAAK,WAAW,WAAW,OAAO,KAAK,UAAU,UAAU;AAC9D,yBAAW,KAAK,KAAK,KAAe;AAAA,YACrC;AAAA,UACD;AAAA,QACD;AAAA,MACD,QAAQ;AAAA,MAER;AAAA,IACD;AAEA,WAAO;AAAA,MACN,eAAe,eAAe,KAAK,EAAE;AAAA,MACrC,cAAc,WAAW,KAAK,IAAI;AAAA,IACnC;AAAA,EACD;AACD;AAEO,MAAM,kBAAuC;AAAA,EAA7C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MAEb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aACC;AAAA,MACD,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW,CAAC;AAAA,MACb;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA;AAAA,UAED,SAAS;AAAA,YACR,EAAE,MAAM,kBAAkB,OAAO,OAAO;AAAA;AAAA,YAExC,EAAE,MAAM,uBAAuB,OAAO,sBAAsB;AAAA,YAC5D,EAAE,MAAM,uBAAuB,OAAO,sBAAsB;AAAA,YAC5D,EAAE,MAAM,6BAA6B,OAAO,4BAA4B;AAAA;AAAA,YAExE,EAAE,MAAM,+BAA+B,OAAO,qCAAqC;AAAA,YACnF,EAAE,MAAM,6BAA6B,OAAO,mCAAmC;AAAA,YAC/E;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,YACR;AAAA;AAAA,YAEA,EAAE,MAAM,mBAAmB,OAAO,gBAAgB;AAAA,YAClD,EAAE,MAAM,wBAAwB,OAAO,qBAAqB;AAAA,YAC5D,EAAE,MAAM,eAAe,OAAO,YAAY;AAAA,YAC1C,EAAE,MAAM,oBAAoB,OAAO,iBAAiB;AAAA;AAAA,YAEpD,EAAE,MAAM,2BAA2B,OAAO,wBAAwB;AAAA,YAClE,EAAE,MAAM,6BAA6B,OAAO,0BAA0B;AAAA,YACtE,EAAE,MAAM,6BAA6B,OAAO,0BAA0B;AAAA;AAAA,YAEtE,EAAE,MAAM,gBAAgB,OAAO,aAAa;AAAA,YAC5C,EAAE,MAAM,qBAAqB,OAAO,kBAAkB;AAAA;AAAA,YAEtD,EAAE,MAAM,4BAA4B,OAAO,yBAAyB;AAAA,YACpE,EAAE,MAAM,gCAAgC,OAAO,6BAA6B;AAAA,UAC7E;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,aAAa,KAAK,iBAAiB,sBAAsB,WAAW,UAAU;AACpF,UAAM,sBAAsB,KAAK,iBAAiB,4BAA4B,WAAW,IAAI;AAAA,MAC5F,gBAAgB;AAAA,IACjB,CAAC;AACD,UAAM,mBAAmB,KAAK,iBAAiB,4BAA4B,WAAW,EAAE;AAGxF,UAAM,8BAA8B,oBAAoB,IAAI,KAAK;AACjE,UAAM,2BAA2B,uBAAuB;AACxD,UAAM,+BACL,yBAAyB,WAAW,GAAG,KACvC,yBAAyB,SAAS,IAAI,KACtC,yBAAyB,SAAS,YAAY;AAE/C,YAAQ,IAAI,qDAAqD;AAAA,MAChE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,kBAAkB;AAAA,IACnB,CAAC;AAED,QAAI,gCAAgC,CAAC,4BAA4B;AAChE,YAAM,IAAI;AAAA,QACT,yEAAyE,wBAAwB;AAAA,MAClG;AAAA,IACD;AAEA,QACC,2BAA2B,SAAS,IAAI,KACxC,2BAA2B,SAAS,YAAY,GAC/C;AACD,YAAM,IAAI;AAAA,QACT,qEAAqE,0BAA0B;AAAA,MAChG;AAAA,IACD;AAEA,QACC,+BACC,KAAC,sBAAW,0BAA0B,KACtC,KAAC,oBAAS,0BAA0B,EAAE,YAAY,IAClD;AACD,YAAM,IAAI;AAAA,QACT,wEAAwE,0BAA0B;AAAA,MACnG;AAAA,IACD;AAEA,YAAQ,IAAI,yDAAyD;AAAA,MACpE,OAAO;AAAA,MACP;AAAA,MACA,kBAAkB;AAAA,IACnB,CAAC;AAED,UAAM,QAAQ,IAAI,gBAAgB;AAAA,MACjC,OAAO;AAAA,MACP;AAAA,MACA,kBAAkB;AAAA,IACnB,CAAC;AAED,UAAM,YAAY,CAAC,IAAI,mCAAc,IAAI,CAAC;AAE1C,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":["aiMessage"]}
package/dist/nodes/llms/LmChatOpenCodeCli/openCodeCli.svg
ADDED
@@ -0,0 +1 @@
+
<svg fill="none" height="512" viewBox="0 0 512 512" width="512" xmlns="http://www.w3.org/2000/svg"><rect width="512" height="512" rx="80" fill="#0f172a"/><rect x="4" y="4" width="504" height="504" rx="76" stroke="#38bdf8" stroke-opacity=".2" stroke-width="8"/><path d="M160 176l-56 80 56 80" stroke="#38bdf8" stroke-width="28" stroke-linecap="round" stroke-linejoin="round"/><path d="M352 176l56 80-56 80" stroke="#38bdf8" stroke-width="28" stroke-linecap="round" stroke-linejoin="round"/><path d="M288 148l-64 216" stroke="#7dd3fc" stroke-width="24" stroke-linecap="round"/><circle cx="256" cy="256" r="120" stroke="#38bdf8" stroke-width="8" stroke-dasharray="16 12" opacity=".35"/></svg>
package/dist/types/nodes.json
CHANGED
@@ -36,7 +36,9 @@
{"displayName":"Azure OpenAI Chat Model","name":"lmChatAzureOpenAi","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"Azure OpenAI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"azureOpenAiApi","required":true,"displayOptions":{"show":{"authentication":["azureOpenAiApi"]}}},{"name":"azureEntraCognitiveServicesOAuth2Api","required":true,"displayOptions":{"show":{"authentication":["azureEntraCognitiveServicesOAuth2Api"]}}}],"properties":[{"displayName":"Authentication","name":"authentication","type":"options","default":"azureOpenAiApi","options":[{"name":"API Key","value":"azureOpenAiApi"},{"name":"Azure Entra ID (OAuth2)","value":"azureEntraCognitiveServicesOAuth2Api"}]},{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model (Deployment) Name","name":"model","type":"string","description":"The name of the model(deployment) to use (e.g., gpt-4, gpt-35-turbo)","required":true,"default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768). Use -1 for default.","type":"number","typeOptions":{"maxValue":128000}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. 
As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout (Ms)","name":"timeout","default":60000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt on failure","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatAzureOpenAi/azure.svg"},
{"displayName":"AWS Bedrock Chat Model","name":"lmChatAwsBedrock","group":["transform"],"version":[1,1.1],"description":"Language Model AWS Bedrock","defaults":{"name":"AWS Bedrock Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatawsbedrock/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"aws","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"=https://bedrock.{{$credentials?.region ?? \"eu-central-1\"}}.amazonaws.com"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model Source","name":"modelSource","type":"options","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.1}}]}},"options":[{"name":"On-Demand Models","value":"onDemand","description":"Standard foundation models with on-demand pricing"},{"name":"Inference Profiles","value":"inferenceProfile","description":"Cross-region inference profiles (required for models like Claude Sonnet 4 and others)"}],"default":"onDemand","description":"Choose between on-demand foundation models or inference profiles"},{"displayName":"Model","name":"model","type":"options","allowArbitraryValues":true,"description":"The model which will generate the completion. <a href=\"https://docs.aws.amazon.com/bedrock/latest/userguide/foundation-models.html\">Learn more</a>.","displayOptions":{"hide":{"modelSource":["inferenceProfile"]}},"typeOptions":{"loadOptionsDependsOn":["modelSource"],"loadOptions":{"routing":{"request":{"method":"GET","url":"/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"modelSummaries"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.modelName}}","description":"={{$responseItem.modelArn}}","value":"={{$responseItem.modelId}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":""},{"displayName":"Model","name":"model","type":"options","allowArbitraryValues":true,"description":"The inference profile which will generate the completion. 
<a href=\"https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-use.html\">Learn more</a>.","displayOptions":{"show":{"modelSource":["inferenceProfile"]}},"typeOptions":{"loadOptionsDependsOn":["modelSource"],"loadOptions":{"routing":{"request":{"method":"GET","url":"/inference-profiles?maxResults=1000"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"inferenceProfileSummaries"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.inferenceProfileName}}","description":"={{$responseItem.description || $responseItem.inferenceProfileArn}}","value":"={{$responseItem.inferenceProfileId}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokensToSample","default":2000,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatAwsBedrock/bedrock.svg"},
{"displayName":"Cohere Chat Model","name":"lmChatCohere","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain","defaults":{"name":"Cohere Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatcohere/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"cohereApi","required":true}],"requestDefaults":{"baseURL":"={{$credentials?.url}}","headers":{"accept":"application/json","authorization":"=Bearer {{$credentials?.apiKey}}"}},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.cohere.com/docs/models\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1/models?page_size=100&endpoint=chat"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"default":"command-a-03-2025"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatCohere/cohere.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatCohere/cohere.dark.svg"}},
-
{"displayName":"Cursor Agent CLI Chat Model","name":"lmChatCursorAgent","group":["transform"],"version":[1],"description":"Chat model powered by the Cursor Agent CLI. Requires cursor-agent to be installed locally.","defaults":{"name":"Cursor Agent CLI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model to use via cursor-agent CLI
+
{"displayName":"Cursor Agent CLI Chat Model","name":"lmChatCursorAgent","group":["transform"],"version":[1],"description":"Chat model powered by the Cursor Agent CLI. Requires cursor-agent to be installed locally.","defaults":{"name":"Cursor Agent CLI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model to use via cursor-agent CLI","options":[{"name":"Auto","value":"auto"},{"name":"Composer 2 Fast","value":"composer-2-fast"},{"name":"Composer 2","value":"composer-2"},{"name":"Composer 1.5","value":"composer-1.5"},{"name":"Claude 4.6 Opus High","value":"claude-4.6-opus-high"},{"name":"Claude 4.6 Opus High Thinking","value":"claude-4.6-opus-high-thinking"},{"name":"Claude 4.6 Opus Max","value":"claude-4.6-opus-max"},{"name":"Claude 4.6 Opus Max Thinking","value":"claude-4.6-opus-max-thinking"},{"name":"Claude 4.6 Sonnet Medium","value":"claude-4.6-sonnet-medium"},{"name":"Claude 4.6 Sonnet Medium Thinking","value":"claude-4.6-sonnet-medium-thinking"},{"name":"Claude 4.5 Opus High","value":"claude-4.5-opus-high"},{"name":"Claude 4.5 Opus High Thinking","value":"claude-4.5-opus-high-thinking"},{"name":"Claude 4.5 Sonnet","value":"claude-4.5-sonnet"},{"name":"Claude 4.5 Sonnet Thinking","value":"claude-4.5-sonnet-thinking"},{"name":"Claude 4 Sonnet","value":"claude-4-sonnet"},{"name":"Claude 4 Sonnet 1M","value":"claude-4-sonnet-1m"},{"name":"Claude 4 Sonnet Thinking","value":"claude-4-sonnet-thinking"},{"name":"Claude 4 Sonnet 1M Thinking","value":"claude-4-sonnet-1m-thinking"},{"name":"Gemini 3.1 Pro","value":"gemini-3.1-pro"},{"name":"Gemini 3 Flash","value":"gemini-3-flash"},{"name":"GPT-5.4 Low","value":"gpt-5.4-low"},{"name":"GPT-5.4 Medium","value":"gpt-5.4-medium"},{"name":"GPT-5.4 Medium Fast","value":"gpt-5.4-medium-fast"},{"name":"GPT-5.4 High","value":"gpt-5.4-high"},{"name":"GPT-5.4 High Fast","value":"gpt-5.4-high-fast"},{"name":"GPT-5.4 XHigh","value":"gpt-5.4-xhigh"},{"name":"GPT-5.4 XHigh Fast","value":"gpt-5.4-xhigh-fast"},{"name":"GPT-5.4 Mini None","value":"gpt-5.4-mini-none"},{"name":"GPT-5.4 Mini Low","value":"gpt-5.4-mini-low"},{"name":"GPT-5.4 Mini Medium","value":"gpt-5.4-mini-medium"},{"name":"GPT-5.4 Mini High","value":"gpt-5.4-mini-high"},{"name":"GPT-5.4 Mini XHigh","value":"gpt-5.4-mini-xhigh"},{"name":"GPT-5.4 Nano None","value":"gpt-5.4-nano-none"},{"name":"GPT-5.4 Nano Low","value":"gpt-5.4-nano-low"},{"name":"GPT-5.4 Nano Medium","value":"gpt-5.4-nano-medium"},{"name":"GPT-5.4 Nano High","value":"gpt-5.4-nano-high"},{"name":"GPT-5.4 Nano XHigh","value":"gpt-5.4-nano-xhigh"},{"name":"GPT-5.3 Codex Low","value":"gpt-5.3-codex-low"},{"name":"GPT-5.3 Codex Low Fast","value":"gpt-5.3-codex-low-fast"},{"name":"GPT-5.3 Codex","value":"gpt-5.3-codex"},{"name":"GPT-5.3 Codex Fast","value":"gpt-5.3-codex-fast"},{"name":"GPT-5.3 Codex High","value":"gpt-5.3-codex-high"},{"name":"GPT-5.3 Codex High Fast","value":"gpt-5.3-codex-high-fast"},{"name":"GPT-5.3 Codex XHigh","value":"gpt-5.3-codex-xhigh"},{"name":"GPT-5.3 Codex XHigh 
Fast","value":"gpt-5.3-codex-xhigh-fast"},{"name":"GPT-5.3 Codex Spark Preview Low","value":"gpt-5.3-codex-spark-preview-low"},{"name":"GPT-5.3 Codex Spark Preview","value":"gpt-5.3-codex-spark-preview"},{"name":"GPT-5.3 Codex Spark Preview High","value":"gpt-5.3-codex-spark-preview-high"},{"name":"GPT-5.3 Codex Spark Preview XHigh","value":"gpt-5.3-codex-spark-preview-xhigh"},{"name":"GPT-5.2 Low","value":"gpt-5.2-low"},{"name":"GPT-5.2 Low Fast","value":"gpt-5.2-low-fast"},{"name":"GPT-5.2","value":"gpt-5.2"},{"name":"GPT-5.2 Fast","value":"gpt-5.2-fast"},{"name":"GPT-5.2 High","value":"gpt-5.2-high"},{"name":"GPT-5.2 High Fast","value":"gpt-5.2-high-fast"},{"name":"GPT-5.2 XHigh","value":"gpt-5.2-xhigh"},{"name":"GPT-5.2 XHigh Fast","value":"gpt-5.2-xhigh-fast"},{"name":"GPT-5.2 Codex Low","value":"gpt-5.2-codex-low"},{"name":"GPT-5.2 Codex Low Fast","value":"gpt-5.2-codex-low-fast"},{"name":"GPT-5.2 Codex","value":"gpt-5.2-codex"},{"name":"GPT-5.2 Codex Fast","value":"gpt-5.2-codex-fast"},{"name":"GPT-5.2 Codex High","value":"gpt-5.2-codex-high"},{"name":"GPT-5.2 Codex High Fast","value":"gpt-5.2-codex-high-fast"},{"name":"GPT-5.2 Codex XHigh","value":"gpt-5.2-codex-xhigh"},{"name":"GPT-5.2 Codex XHigh Fast","value":"gpt-5.2-codex-xhigh-fast"},{"name":"GPT-5.1 Low","value":"gpt-5.1-low"},{"name":"GPT-5.1","value":"gpt-5.1"},{"name":"GPT-5.1 High","value":"gpt-5.1-high"},{"name":"GPT-5.1 Codex Max Low","value":"gpt-5.1-codex-max-low"},{"name":"GPT-5.1 Codex Max Low Fast","value":"gpt-5.1-codex-max-low-fast"},{"name":"GPT-5.1 Codex Max Medium","value":"gpt-5.1-codex-max-medium"},{"name":"GPT-5.1 Codex Max Medium Fast","value":"gpt-5.1-codex-max-medium-fast"},{"name":"GPT-5.1 Codex Max High","value":"gpt-5.1-codex-max-high"},{"name":"GPT-5.1 Codex Max High Fast","value":"gpt-5.1-codex-max-high-fast"},{"name":"GPT-5.1 Codex Max XHigh","value":"gpt-5.1-codex-max-xhigh"},{"name":"GPT-5.1 Codex Max XHigh Fast","value":"gpt-5.1-codex-max-xhigh-fast"},{"name":"GPT-5.1 Codex Mini Low","value":"gpt-5.1-codex-mini-low"},{"name":"GPT-5.1 Codex Mini","value":"gpt-5.1-codex-mini"},{"name":"GPT-5.1 Codex Mini High","value":"gpt-5.1-codex-mini-high"},{"name":"GPT-5 Mini","value":"gpt-5-mini"},{"name":"Grok 4 20","value":"grok-4-20"},{"name":"Grok 4 20 Thinking","value":"grok-4-20-thinking"},{"name":"Kimi K2.5","value":"kimi-k2.5"}],"default":"auto"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to configure","type":"collection","default":{},"options":[{"displayName":"Binary Path","name":"binaryPath","default":"cursor-agent","description":"Path to the cursor-agent binary. Defaults to \"cursor-agent\" (must be in PATH).","type":"string"},{"displayName":"Working Directory","name":"workingDirectory","default":"","description":"Working directory for the cursor-agent process. Leave empty to use the default.","type":"string"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatCursorAgent/cursorAgent.svg"},
+
{"displayName":"Codex CLI Chat Model","name":"lmChatCodexCli","group":["transform"],"version":[1],"description":"Chat model powered by the OpenAI Codex CLI. Requires codex to be installed locally (npm install -g @openai/codex).","defaults":{"name":"Codex CLI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model to use via codex CLI","options":[{"name":"Auto (Default)","value":"auto"},{"name":"GPT-5.5","value":"gpt-5.5"},{"name":"GPT-5.5 Fast","value":"gpt-5.5-fast"},{"name":"GPT-5.4","value":"gpt-5.4"},{"name":"GPT-5.4 Fast","value":"gpt-5.4-fast"},{"name":"GPT-5.4 Mini","value":"gpt-5.4-mini"},{"name":"GPT-5.3 Codex","value":"gpt-5.3-codex"},{"name":"GPT-5.3 Codex Spark","value":"gpt-5.3-codex-spark"},{"name":"GPT-5.2","value":"gpt-5.2"}],"default":"auto"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to configure","type":"collection","default":{},"options":[{"displayName":"Binary Path","name":"binaryPath","default":"codex","description":"Path to the codex binary. Defaults to \"codex\" (must be in PATH).","type":"string"},{"displayName":"Working Directory","name":"workingDirectory","default":"","description":"Working directory for the codex process. Leave empty to use the default.","type":"string"},{"displayName":"Sandbox Mode","name":"sandboxMode","type":"options","default":"read-only","description":"Sandbox policy for executing model-generated shell commands","options":[{"name":"Read Only","value":"read-only","description":"Only allow read operations (safest)"},{"name":"Workspace Write","value":"workspace-write","description":"Allow writes within the workspace directory"},{"name":"Full Access (Dangerous)","value":"danger-full-access","description":"Full filesystem access — use with extreme caution"}]}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatCodexCli/codexCli.svg"},
+
{"displayName":"OpenCode CLI Chat Model","name":"lmChatOpenCodeCli","group":["transform"],"version":[1],"description":"Chat model powered by the OpenCode CLI. Requires opencode to be installed locally (brew install opencode-ai/tap/opencode or curl -fsSL https://opencode.ai/install | bash).","defaults":{"name":"OpenCode CLI Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model to use via opencode CLI. Format: provider/model (e.g. anthropic/claude-sonnet-4-20250514). Select \"Auto\" to use the default model configured in opencode.","options":[{"name":"Auto (Default)","value":"auto"},{"name":"OpenCode Big Pickle","value":"opencode/big-pickle"},{"name":"OpenCode GPT-5 Nano","value":"opencode/gpt-5-nano"},{"name":"OpenCode Hy3 Preview Free","value":"opencode/hy3-preview-free"},{"name":"Claude Sonnet 4 (Anthropic)","value":"anthropic/claude-sonnet-4-20250514"},{"name":"Claude Opus 4 (Anthropic)","value":"anthropic/claude-opus-4-20250918"},{"name":"Claude 3.5 Sonnet (Anthropic)","value":"anthropic/claude-3-5-sonnet-20241022"},{"name":"GPT-4o (OpenAI)","value":"openai/gpt-4o"},{"name":"GPT-4o Mini (OpenAI)","value":"openai/gpt-4o-mini"},{"name":"o3 (OpenAI)","value":"openai/o3"},{"name":"o3 Mini (OpenAI)","value":"openai/o3-mini"},{"name":"Gemini 2.5 Pro (Google)","value":"google/gemini-2.5-pro"},{"name":"Gemini 2.5 Flash (Google)","value":"google/gemini-2.5-flash"},{"name":"Gemini 2.0 Flash (Google)","value":"google/gemini-2.0-flash"},{"name":"Grok 3 (xAI)","value":"xai/grok-3"},{"name":"Grok 3 Mini (xAI)","value":"xai/grok-3-mini"},{"name":"DeepSeek Chat (DeepSeek)","value":"deepseek/deepseek-chat"},{"name":"DeepSeek Reasoner (DeepSeek)","value":"deepseek/deepseek-reasoner"}],"default":"auto"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to configure","type":"collection","default":{},"options":[{"displayName":"Binary Path","name":"binaryPath","default":"opencode","description":"Path to the opencode binary. Defaults to \"opencode\" (must be in PATH).","type":"string"},{"displayName":"Working Directory","name":"workingDirectory","default":"","description":"Working directory for the opencode process. Leave empty to use the default.","type":"string"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatOpenCodeCli/openCodeCli.svg"},
{"displayName":"DeepSeek Chat Model","name":"lmChatDeepSeek","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain","defaults":{"name":"DeepSeek Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatdeepseek/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"deepSeekApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials?.url }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://api-docs.deepseek.com/quick_start/pricing\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"deepseek-chat"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. 
As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":360000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatDeepSeek/deepseek.svg"},
{"displayName":"Google Gemini Chat Model","name":"lmChatGoogleGemini","group":["transform"],"version":1,"description":"Chat Model Google Gemini","defaults":{"name":"Google Gemini Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googlePalmApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.host }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"modelName","type":"options","description":"The model which will generate the completion. <a href=\"https://developers.generativeai.google/api/rest/generativelanguage/models/list\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/v1beta/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"filter","properties":{"pass":"={{ !$responseItem.name.includes('embedding') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}","description":"={{$responseItem.description}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"models/gemini-2.5-flash"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":2048,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.4,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":32,"typeOptions":{"maxValue":40,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Safety Settings","name":"safetySettings","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{"values":{"category":"HARM_CATEGORY_HARASSMENT","threshold":"HARM_BLOCK_THRESHOLD_UNSPECIFIED"}},"placeholder":"Add Option","options":[{"name":"values","displayName":"Values","values":[{"displayName":"Safety Category","name":"category","type":"options","description":"The category of harmful content to block","default":"HARM_CATEGORY_UNSPECIFIED","options":[{"value":"HARM_CATEGORY_HARASSMENT","name":"HARM_CATEGORY_HARASSMENT","description":"Harassment content"},{"value":"HARM_CATEGORY_HATE_SPEECH","name":"HARM_CATEGORY_HATE_SPEECH","description":"Hate speech and content"},{"value":"HARM_CATEGORY_SEXUALLY_EXPLICIT","name":"HARM_CATEGORY_SEXUALLY_EXPLICIT","description":"Sexually explicit content"},{"value":"HARM_CATEGORY_DANGEROUS_CONTENT","name":"HARM_CATEGORY_DANGEROUS_CONTENT","description":"Dangerous content"}]},{"displayName":"Safety Threshold","name":"threshold","type":"options","description":"The threshold of harmful content to block","default":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","options":[{"value":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","name":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","description":"Threshold is unspecified"},{"value":"BLOCK_LOW_AND_ABOVE","name":"BLOCK_LOW_AND_ABOVE","description":"Content with NEGLIGIBLE will be allowed"},{"value":"BLOCK_MEDIUM_AND_ABOVE","name":"BLOCK_MEDIUM_AND_ABOVE","description":"Content with NEGLIGIBLE and LOW will be allowed"},{"value":"BLOCK_ONLY_HIGH","name":"BLOCK_ONLY_HIGH","description":"Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed"},{"value":"BLOCK_NONE","name":"BLOCK_NONE","description":"All content will be allowed"}]}]}]}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGoogleGemini/google.svg"},
{"displayName":"Google Vertex Chat Model","name":"lmChatGoogleVertex","group":["transform"],"version":1,"description":"Chat Model Google Vertex","defaults":{"name":"Google Vertex Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglevertex/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"googleApi","required":true}],"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Project ID","name":"projectId","type":"resourceLocator","default":{"mode":"list","value":""},"required":true,"description":"Select or enter your Google Cloud project ID","modes":[{"displayName":"From List","name":"list","type":"list","typeOptions":{"searchListMethod":"gcpProjectsList"}},{"displayName":"ID","name":"id","type":"string"}]},{"displayName":"Model Name","name":"modelName","type":"string","description":"The model which will generate the completion. <a href=\"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models\">Learn more</a>.","default":"gemini-2.5-flash"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxOutputTokens","default":2048,"description":"The maximum number of tokens to generate in the completion","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.4,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Top K","name":"topK","default":32,"typeOptions":{"maxValue":40,"minValue":-1,"numberPrecision":1},"description":"Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Safety Settings","name":"safetySettings","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{"values":{"category":"HARM_CATEGORY_HARASSMENT","threshold":"HARM_BLOCK_THRESHOLD_UNSPECIFIED"}},"placeholder":"Add Option","options":[{"name":"values","displayName":"Values","values":[{"displayName":"Safety Category","name":"category","type":"options","description":"The category of harmful content to block","default":"HARM_CATEGORY_UNSPECIFIED","options":[{"value":"HARM_CATEGORY_HARASSMENT","name":"HARM_CATEGORY_HARASSMENT","description":"Harassment content"},{"value":"HARM_CATEGORY_HATE_SPEECH","name":"HARM_CATEGORY_HATE_SPEECH","description":"Hate speech and content"},{"value":"HARM_CATEGORY_SEXUALLY_EXPLICIT","name":"HARM_CATEGORY_SEXUALLY_EXPLICIT","description":"Sexually explicit content"},{"value":"HARM_CATEGORY_DANGEROUS_CONTENT","name":"HARM_CATEGORY_DANGEROUS_CONTENT","description":"Dangerous content"}]},{"displayName":"Safety Threshold","name":"threshold","type":"options","description":"The threshold of harmful content to block","default":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","options":[{"value":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","name":"HARM_BLOCK_THRESHOLD_UNSPECIFIED","description":"Threshold is unspecified"},{"value":"BLOCK_LOW_AND_ABOVE","name":"BLOCK_LOW_AND_ABOVE","description":"Content with NEGLIGIBLE will be allowed"},{"value":"BLOCK_MEDIUM_AND_ABOVE","name":"BLOCK_MEDIUM_AND_ABOVE","description":"Content with NEGLIGIBLE and LOW will be allowed"},{"value":"BLOCK_ONLY_HIGH","name":"BLOCK_ONLY_HIGH","description":"Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed"},{"value":"BLOCK_NONE","name":"BLOCK_NONE","description":"All content will be allowed"}]}]}]},{"displayName":"Thinking Budget","name":"thinkingBudget","description":"Controls reasoning tokens for thinking models. Set to 0 to disable automatic thinking. Set to -1 for dynamic thinking. Leave empty for auto mode.","type":"number","typeOptions":{"minValue":-1,"numberPrecision":0}}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatGoogleVertex/google.svg"},
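
The Safety Settings option shared by the two Google entries above is an n8n fixedCollection with multipleValues enabled, so at runtime it resolves to an object whose values key holds an array of {category, threshold} pairs. Below is a small sketch of flattening that shape into the list form Google's safety-settings field expects; the helper name toSafetySettings is illustrative and is not defined anywhere in this package.

```typescript
// Shape an n8n fixedCollection with multipleValues: true typically resolves to.
interface SafetySettingsCollection {
	values?: Array<{ category: string; threshold: string }>;
}

// Hypothetical helper: flatten the collection into the array of
// { category, threshold } objects used for Gemini safety settings.
function toSafetySettings(collection: SafetySettingsCollection | undefined) {
	return (collection?.values ?? []).map(({ category, threshold }) => ({ category, threshold }));
}

// Example: block harassment content rated MEDIUM or above.
const safetySettings = toSafetySettings({
	values: [{ category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_MEDIUM_AND_ABOVE' }],
});
```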
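Before the package.json changes below, one orientation note on the new OpenCode CLI Chat Model entry at the top of this block: it exposes only three inputs, a model in provider/model form, an optional binary path, and a working directory. The following is a minimal sketch, under stated assumptions, of how those values could be turned into a child-process call; the run subcommand and --model flag are guesses made for the example and are not taken from the compiled node.

```typescript
import { spawn } from 'child_process';

interface OpenCodeCliOptions {
	model: string; // "auto" or "provider/model", e.g. "anthropic/claude-sonnet-4-20250514"
	binaryPath?: string; // defaults to "opencode", which must be on PATH (see the node options above)
	workingDirectory?: string; // empty means "use the process default"
}

// Hypothetical helper, not the node's actual implementation: run one prompt
// through the opencode binary and resolve with whatever it prints to stdout.
// The "run" subcommand and "--model" flag are assumptions for this sketch.
function runOpenCode(prompt: string, opts: OpenCodeCliOptions): Promise<string> {
	const args = ['run', prompt];
	if (opts.model && opts.model !== 'auto') {
		args.push('--model', opts.model); // "auto" defers to the model configured in opencode itself
	}
	return new Promise((resolve, reject) => {
		const child = spawn(opts.binaryPath ?? 'opencode', args, {
			cwd: opts.workingDirectory || undefined,
		});
		let stdout = '';
		let stderr = '';
		child.stdout.on('data', (chunk) => { stdout += chunk; });
		child.stderr.on('data', (chunk) => { stderr += chunk; });
		child.on('error', reject); // e.g. binary not found on PATH
		child.on('close', (code) => {
			if (code === 0) resolve(stdout);
			else reject(new Error(stderr || `opencode exited with code ${code}`));
		});
	});
}
```

If the binary is missing from PATH, the error event fires instead of close, which is why both are handled in the sketch.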
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@atom8n/n8n-nodes-langchain",
-"version": "2.5.
+"version": "2.5.7",
 "description": "",
 "main": "index.js",
 "scripts": {
@@ -87,6 +87,8 @@
 "dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js",
 "dist/nodes/llms/LmChatCohere/LmChatCohere.node.js",
 "dist/nodes/llms/LmChatCursorAgent/LmChatCursorAgent.node.js",
+"dist/nodes/llms/LmChatCodexCli/LmChatCodexCli.node.js",
+"dist/nodes/llms/LmChatOpenCodeCli/LmChatOpenCodeCli.node.js",
 "dist/nodes/llms/LmChatDeepSeek/LmChatDeepSeek.node.js",
 "dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js",
 "dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js",
@@ -166,7 +168,7 @@
 "nodeTypePrefix": "@n8n/n8n-nodes-langchain"
 },
 "devDependencies": {
-"@n8n/eslint-plugin-community-nodes": "npm:@atom8n/eslint-plugin-community-nodes@0.10.
+"@n8n/eslint-plugin-community-nodes": "npm:@atom8n/eslint-plugin-community-nodes@0.10.7",
 "@types/basic-auth": "^1.1.3",
 "@types/cheerio": "^0.22.15",
 "@types/html-to-text": "^9.0.1",
@@ -177,7 +179,7 @@
 "@types/temp": "^0.9.1",
 "fast-glob": "3.2.12",
 "jest-mock-extended": "^3.0.4",
-"n8n-core": "npm:@atom8n/n8n-core@2.5.
+"n8n-core": "npm:@atom8n/n8n-core@2.5.7",
 "tsup": "^8.5.0"
 },
 "dependencies": {
@@ -209,13 +211,13 @@
 "@langchain/weaviate": "1.0.1",
 "@modelcontextprotocol/sdk": "1.24.0",
 "@mozilla/readability": "0.6.0",
-"@n8n/client-oauth2": "npm:@atom8n/client-oauth2@1.3.
-"@n8n/config": "npm:@atom8n/config@2.4.
-"@n8n/di": "npm:@atom8n/di@0.13.
-"@n8n/errors": "npm:@atom8n/errors@0.8.
-"@n8n/json-schema-to-zod": "npm:@atom8n/json-schema-to-zod@1.9.
+"@n8n/client-oauth2": "npm:@atom8n/client-oauth2@1.3.7",
+"@n8n/config": "npm:@atom8n/config@2.4.7",
+"@n8n/di": "npm:@atom8n/di@0.13.7",
+"@n8n/errors": "npm:@atom8n/errors@0.8.7",
+"@n8n/json-schema-to-zod": "npm:@atom8n/json-schema-to-zod@1.9.7",
 "@n8n/typeorm": "0.3.20-15",
-"@n8n/typescript-config": "npm:@atom8n/typescript-config@1.6.
+"@n8n/typescript-config": "npm:@atom8n/typescript-config@1.6.7",
 "@n8n/vm2": "3.9.25",
 "@pinecone-database/pinecone": "^5.0.2",
 "@qdrant/js-client-rest": "^1.16.2",
@@ -240,8 +242,8 @@
 "mammoth": "1.11.0",
 "mime-types": "3.0.1",
 "mongodb": "^6.17.0",
-"n8n-nodes-base": "npm:@atom8n/n8n-nodes-base@2.5.
-"n8n-workflow": "npm:@atom8n/n8n-workflow@2.5.
+"n8n-nodes-base": "npm:@atom8n/n8n-nodes-base@2.5.7",
+"n8n-workflow": "npm:@atom8n/n8n-workflow@2.5.7",
 "openai": "^6.9.0",
 "pdf-parse": "1.1.1",
 "pg": "8.12.0",
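
One detail worth noting in the dependency bumps above: every @n8n/* and n8n-* entry uses npm's alias syntax ("npm:<actual-package>@<version>"), so source files keep importing the upstream package names while npm installs the @atom8n forks under those names. A minimal illustration of the consumer-side effect follows; the specific import is just an example specifier, not a claim about which symbols this package uses.

```typescript
// package.json (see the diff above):
//   "n8n-workflow": "npm:@atom8n/n8n-workflow@2.5.7"
//
// Imports keep using the upstream specifier; Node resolves it to the aliased
// fork installed at node_modules/n8n-workflow.
import type { INodeType } from 'n8n-workflow'; // actually served by @atom8n/n8n-workflow@2.5.7

export type AliasedNodeType = INodeType; // trivial re-export so the example compiles standalone
```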