@atom8n/n8n-nodes-langchain 2.5.7 → 2.5.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/known/nodes.json +24 -0
- package/dist/methods/defined.json +3 -0
- package/dist/methods/referenced.json +3 -0
- package/dist/nodes/agents/OpenClawAgent/OpenClawAgent.node.js +62 -0
- package/dist/nodes/agents/OpenClawAgent/OpenClawAgent.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/V1/OpenClawAgentV1.node.js +821 -0
- package/dist/nodes/agents/OpenClawAgent/V1/OpenClawAgentV1.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/V2/OpenClawAgentV2.node.js +2059 -0
- package/dist/nodes/agents/OpenClawAgent/V2/OpenClawAgentV2.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/channels/TelegramChannel/TelegramChannel.node.js +329 -0
- package/dist/nodes/agents/OpenClawAgent/channels/TelegramChannel/TelegramChannel.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/channels/TelegramChannel/telegram-channel.svg +4 -0
- package/dist/nodes/agents/OpenClawAgent/channels/WhatsAppChannel/WhatsAppChannel.node.js +108 -0
- package/dist/nodes/agents/OpenClawAgent/channels/WhatsAppChannel/WhatsAppChannel.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/channels/WhatsAppChannel/whatsapp-channel.svg +3 -0
- package/dist/nodes/agents/OpenClawAgent/mcpServers/OpenClawMcpServer/OpenClawMcpServer.node.js +228 -0
- package/dist/nodes/agents/OpenClawAgent/mcpServers/OpenClawMcpServer/OpenClawMcpServer.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/mcpServers/OpenClawMcpServer/openclaw-mcp-server.svg +9 -0
- package/dist/nodes/agents/OpenClawAgent/models/OpenCodeFreeModel/OpenCodeFreeModel.node.js +97 -0
- package/dist/nodes/agents/OpenClawAgent/models/OpenCodeFreeModel/OpenCodeFreeModel.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/models/OpenCodeFreeModel/opencode-free-model.svg +1 -0
- package/dist/nodes/agents/OpenClawAgent/openclaw.svg +8 -0
- package/dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/OpenClawPlugin.node.js +261 -0
- package/dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/OpenClawPlugin.node.js.map +1 -0
- package/dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/openclaw-plugin.svg +3 -0
- package/dist/nodes/llms/LmChat9Router/LmChat9Router.node.js +40 -3
- package/dist/nodes/llms/LmChat9Router/LmChat9Router.node.js.map +1 -1
- package/dist/types/nodes.json +8 -1
- package/package.json +17 -11
package/dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/OpenClawPlugin.node.js.map
ADDED

@@ -0,0 +1 @@
+
{"version":3,"sources":["../../../../../../nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/OpenClawPlugin.node.ts"],"sourcesContent":["import { existsSync, readFileSync } from 'fs';\nimport { join } from 'path';\n\nimport {\n\tNodeConnectionTypes,\n\tNodeOperationError,\n\ttype IDataObject,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport type { PluginConfig } from '../../V2/OpenClawAgentV2.node';\n\nconst PLUGIN_MANIFEST_FILENAME = 'openclaw.plugin.json';\n\n/**\n * Attempt to read and parse an openclaw.plugin.json manifest from a directory.\n * Returns the parsed manifest object or undefined if not found / invalid.\n */\nfunction loadLocalPluginManifest(dirPath: string): PluginConfig['pluginManifest'] | undefined {\n\tconst manifestPath = join(dirPath, PLUGIN_MANIFEST_FILENAME);\n\tconsole.log('[OpenClawPlugin] Scanning for local manifest', {\n\t\tdirPath,\n\t\tmanifestPath,\n\t\texists: existsSync(manifestPath),\n\t});\n\n\tif (!existsSync(manifestPath)) {\n\t\tconsole.log('[OpenClawPlugin] No openclaw.plugin.json found at path', {\n\t\t\tdirPath,\n\t\t\tmanifestPath,\n\t\t});\n\t\treturn undefined;\n\t}\n\n\ttry {\n\t\tconst raw = readFileSync(manifestPath, 'utf8').trim();\n\t\tif (!raw) {\n\t\t\tconsole.log('[OpenClawPlugin] Manifest file is empty', { manifestPath });\n\t\t\treturn undefined;\n\t\t}\n\n\t\tconst parsed = JSON.parse(raw) as IDataObject;\n\t\tif (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) {\n\t\t\tconsole.log('[OpenClawPlugin] Manifest is not a valid JSON object', {\n\t\t\t\tmanifestPath,\n\t\t\t\ttype: typeof parsed,\n\t\t\t});\n\t\t\treturn undefined;\n\t\t}\n\n\t\tconst manifest: PluginConfig['pluginManifest'] = {\n\t\t\tid: typeof parsed.id === 'string' ? parsed.id : undefined,\n\t\t\tname: typeof parsed.name === 'string' ? parsed.name : undefined,\n\t\t\tdescription: typeof parsed.description === 'string' ? parsed.description : undefined,\n\t\t\tversion: typeof parsed.version === 'string' ? parsed.version : undefined,\n\t\t\tproviders: Array.isArray(parsed.providers)\n\t\t\t\t? (parsed.providers as unknown[]).filter((p): p is string => typeof p === 'string')\n\t\t\t\t: undefined,\n\t\t\tchannels: Array.isArray(parsed.channels)\n\t\t\t\t? (parsed.channels as unknown[]).filter((c): c is string => typeof c === 'string')\n\t\t\t\t: undefined,\n\t\t};\n\n\t\tconsole.log('[OpenClawPlugin] Successfully loaded local manifest', {\n\t\t\tmanifestPath,\n\t\t\tid: manifest.id,\n\t\t\tname: manifest.name,\n\t\t\tversion: manifest.version,\n\t\t\tproviderCount: manifest.providers?.length ?? 0,\n\t\t\tchannelCount: manifest.channels?.length ?? 0,\n\t\t});\n\n\t\treturn manifest;\n\t} catch (error) {\n\t\tconsole.log('[OpenClawPlugin] Failed to parse manifest file', {\n\t\t\tmanifestPath,\n\t\t\terror: error instanceof Error ? error.message : String(error),\n\t\t});\n\t\treturn undefined;\n\t}\n}\n\n/**\n * OpenClaw Plugin sub-node for OpenClaw Agent.\n *\n * This node provides plugin configuration to the OpenClaw AI Agent\n * via the AiTool connection. It supports two plugin sources:\n *\n * - **Local**: scans a directory path for `openclaw.plugin.json` and\n * loads manifest info (id, name, providers, channels, etc.).\n * Uses `$workspace.__dirPath` by default to scan the workflow directory.\n *\n * - **Cloud**: references a plugin from ClawHub by package name\n * (e.g. 
\"openai\" or \"@scope/pkg\") and optional version.\n */\nexport class OpenClawPlugin implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'OpenClaw Plugin',\n\t\tname: 'openClawPlugin',\n\t\ticon: 'file:openclaw-plugin.svg',\n\t\ticonColor: 'purple',\n\t\tgroup: ['transform'],\n\t\tversion: 1,\n\t\tdescription: 'Provides plugin configuration to an OpenClaw AI Agent (local or ClawHub)',\n\t\tdefaults: {\n\t\t\tname: 'OpenClaw Plugin',\n\t\t},\n\t\tcodex: {\n\t\t\talias: ['OpenClaw', 'Plugin', 'Extension', 'ClawHub'],\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Other'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.openclaw.ai/plugins',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tinputs: [],\n\t\toutputs: [NodeConnectionTypes.AiTool],\n\t\toutputNames: ['Plugin'],\n\t\t// No credentials needed — local plugins are filesystem-based, cloud uses public ClawHub\n\t\tproperties: [\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'Connect this node to an OpenClaw AI Agent to provide plugin configuration. Local plugins are scanned from a directory; Cloud plugins are loaded from ClawHub.',\n\t\t\t\tname: 'pluginNotice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Plugin Source',\n\t\t\t\tname: 'pluginSource',\n\t\t\t\ttype: 'options',\n\t\t\t\tdefault: 'local',\n\t\t\t\tnoDataExpression: true,\n\t\t\t\tdescription: 'Where to load the plugin from',\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Local',\n\t\t\t\t\t\tvalue: 'local',\n\t\t\t\t\t\tdescription: 'Scan a directory for openclaw.plugin.json and load plugin info from it',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Cloud (ClawHub)',\n\t\t\t\t\t\tvalue: 'cloud',\n\t\t\t\t\t\tdescription: 'Load a plugin from the ClawHub marketplace',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t// ── Local source fields ──\n\t\t\t{\n\t\t\t\tdisplayName: 'Plugin Directory',\n\t\t\t\tname: 'pluginDirectory',\n\t\t\t\ttype: 'string',\n\t\t\t\trequired: true,\n\t\t\t\tdefault: '={{ $workspace.__dirPath }}',\n\t\t\t\tdescription:\n\t\t\t\t\t'Directory path to scan for openclaw.plugin.json. Supports expressions like {{ $workspace.__dirPath }}.',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\tpluginSource: ['local'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// ── Cloud source fields ──\n\t\t\t{\n\t\t\t\tdisplayName: 'Plugin ID',\n\t\t\t\tname: 'pluginId',\n\t\t\t\ttype: 'string',\n\t\t\t\trequired: true,\n\t\t\t\tdefault: '',\n\t\t\t\tplaceholder: 'e.g. openai, @scope/my-plugin',\n\t\t\t\tdescription: 'ClawHub plugin package name',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\tpluginSource: ['cloud'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Version',\n\t\t\t\tname: 'pluginVersion',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t\tplaceholder: 'latest',\n\t\t\t\tdescription: 'ClawHub plugin version. Leave empty to use the latest available version.',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\tpluginSource: ['cloud'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst node = this.getNode();\n\t\tconsole.log('[OpenClawPlugin] supplyData ENTRY', {\n\t\t\tnodeName: node.name,\n\t\t\tnodeId: node.id,\n\t\t\tnodeType: node.type,\n\t\t\titemIndex,\n\t\t\tparameterNames: Object.keys(node.parameters ?? 
{}),\n\t\t\trawParameters: JSON.stringify(node.parameters ?? {}).slice(0, 500),\n\t\t});\n\n\t\tconst pluginSource = this.getNodeParameter('pluginSource', itemIndex, 'local') as\n\t\t\t| 'local'\n\t\t\t| 'cloud';\n\n\t\tconsole.log('[OpenClawPlugin] supplyData called', {\n\t\t\titemIndex,\n\t\t\tpluginSource,\n\t\t});\n\n\t\tif (pluginSource === 'local') {\n\t\t\tconst rawPluginDirectory = this.getNodeParameter('pluginDirectory', itemIndex, '') as string;\n\t\t\tconst pluginDirectory = rawPluginDirectory.trim();\n\n\t\t\tconsole.log('[OpenClawPlugin] Local source: scanning directory', {\n\t\t\t\titemIndex,\n\t\t\t\trawPluginDirectory,\n\t\t\t\tpluginDirectory,\n\t\t\t\tisEmpty: !pluginDirectory,\n\t\t\t\tlength: pluginDirectory.length,\n\t\t\t});\n\n\t\t\tif (!pluginDirectory) {\n\t\t\t\tthrow new NodeOperationError(\n\t\t\t\t\tthis.getNode(),\n\t\t\t\t\t'Plugin Directory must not be empty for local plugin source',\n\t\t\t\t\t{ itemIndex },\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Scan the directory for openclaw.plugin.json\n\t\t\tconst pluginManifest = loadLocalPluginManifest(pluginDirectory);\n\n\t\t\tconst pluginConfig: PluginConfig = {\n\t\t\t\tpluginSource: 'local',\n\t\t\t\tpluginPath: pluginDirectory,\n\t\t\t\tpluginManifest,\n\t\t\t};\n\n\t\t\tconsole.log('[OpenClawPlugin] Returning local plugin config', {\n\t\t\t\tpluginPath: pluginConfig.pluginPath,\n\t\t\t\thasManifest: !!pluginManifest,\n\t\t\t\tmanifestId: pluginManifest?.id,\n\t\t\t\tmanifestName: pluginManifest?.name,\n\t\t\t\tmanifestVersion: pluginManifest?.version,\n\t\t\t});\n\n\t\t\treturn { response: pluginConfig };\n\t\t}\n\n\t\t// Cloud source\n\t\tconst pluginId = (this.getNodeParameter('pluginId', itemIndex, '') as string).trim();\n\t\tconst pluginVersion =\n\t\t\t(this.getNodeParameter('pluginVersion', itemIndex, '') as string).trim() || undefined;\n\n\t\tconsole.log('[OpenClawPlugin] Cloud source: ClawHub plugin', {\n\t\t\titemIndex,\n\t\t\tpluginId,\n\t\t\tpluginVersion: pluginVersion ?? '(latest)',\n\t\t});\n\n\t\tif (!pluginId) {\n\t\t\tthrow new NodeOperationError(\n\t\t\t\tthis.getNode(),\n\t\t\t\t'Plugin ID must not be empty for cloud plugin source',\n\t\t\t\t{ itemIndex },\n\t\t\t);\n\t\t}\n\n\t\tconst pluginConfig: PluginConfig = {\n\t\t\tpluginSource: 'cloud',\n\t\t\tpluginId,\n\t\t\tpluginVersion,\n\t\t};\n\n\t\tconsole.log('[OpenClawPlugin] Returning cloud plugin config', {\n\t\t\tpluginId: pluginConfig.pluginId,\n\t\t\tpluginVersion: pluginConfig.pluginVersion ?? 
'(latest)',\n\t\t});\n\n\t\treturn { response: pluginConfig };\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAAyC;AACzC,kBAAqB;AAErB,0BAQO;AAIP,MAAM,2BAA2B;AAMjC,SAAS,wBAAwB,SAA6D;AAC7F,QAAM,mBAAe,kBAAK,SAAS,wBAAwB;AAC3D,UAAQ,IAAI,gDAAgD;AAAA,IAC3D;AAAA,IACA;AAAA,IACA,YAAQ,sBAAW,YAAY;AAAA,EAChC,CAAC;AAED,MAAI,KAAC,sBAAW,YAAY,GAAG;AAC9B,YAAQ,IAAI,0DAA0D;AAAA,MACrE;AAAA,MACA;AAAA,IACD,CAAC;AACD,WAAO;AAAA,EACR;AAEA,MAAI;AACH,UAAM,UAAM,wBAAa,cAAc,MAAM,EAAE,KAAK;AACpD,QAAI,CAAC,KAAK;AACT,cAAQ,IAAI,2CAA2C,EAAE,aAAa,CAAC;AACvE,aAAO;AAAA,IACR;AAEA,UAAM,SAAS,KAAK,MAAM,GAAG;AAC7B,QAAI,OAAO,WAAW,YAAY,WAAW,QAAQ,MAAM,QAAQ,MAAM,GAAG;AAC3E,cAAQ,IAAI,wDAAwD;AAAA,QACnE;AAAA,QACA,MAAM,OAAO;AAAA,MACd,CAAC;AACD,aAAO;AAAA,IACR;AAEA,UAAM,WAA2C;AAAA,MAChD,IAAI,OAAO,OAAO,OAAO,WAAW,OAAO,KAAK;AAAA,MAChD,MAAM,OAAO,OAAO,SAAS,WAAW,OAAO,OAAO;AAAA,MACtD,aAAa,OAAO,OAAO,gBAAgB,WAAW,OAAO,cAAc;AAAA,MAC3E,SAAS,OAAO,OAAO,YAAY,WAAW,OAAO,UAAU;AAAA,MAC/D,WAAW,MAAM,QAAQ,OAAO,SAAS,IACrC,OAAO,UAAwB,OAAO,CAAC,MAAmB,OAAO,MAAM,QAAQ,IAChF;AAAA,MACH,UAAU,MAAM,QAAQ,OAAO,QAAQ,IACnC,OAAO,SAAuB,OAAO,CAAC,MAAmB,OAAO,MAAM,QAAQ,IAC/E;AAAA,IACJ;AAEA,YAAQ,IAAI,uDAAuD;AAAA,MAClE;AAAA,MACA,IAAI,SAAS;AAAA,MACb,MAAM,SAAS;AAAA,MACf,SAAS,SAAS;AAAA,MAClB,eAAe,SAAS,WAAW,UAAU;AAAA,MAC7C,cAAc,SAAS,UAAU,UAAU;AAAA,IAC5C,CAAC;AAED,WAAO;AAAA,EACR,SAAS,OAAO;AACf,YAAQ,IAAI,kDAAkD;AAAA,MAC7D;AAAA,MACA,OAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,IAC7D,CAAC;AACD,WAAO;AAAA,EACR;AACD;AAeO,MAAM,eAAoC;AAAA,EAA1C;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,WAAW;AAAA,MACX,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS;AAAA,MACT,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,OAAO,CAAC,YAAY,UAAU,aAAa,SAAS;AAAA,QACpD,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,OAAO;AAAA,QACb;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,QAAQ,CAAC;AAAA,MACT,SAAS,CAAC,wCAAoB,MAAM;AAAA,MACpC,aAAa,CAAC,QAAQ;AAAA;AAAA,MAEtB,YAAY;AAAA,QACX;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,kBAAkB;AAAA,UAClB,aAAa;AAAA,UACb,SAAS;AAAA,YACR;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,cACP,aAAa;AAAA,YACd;AAAA,YACA;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,cACP,aAAa;AAAA,YACd;AAAA,UACD;AAAA,QACD;AAAA;AAAA,QAEA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,UAAU;AAAA,UACV,SAAS;AAAA,UACT,aACC;AAAA,UACD,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,cAAc,CAAC,OAAO;AAAA,YACvB;AAAA,UACD;AAAA,QACD;AAAA;AAAA,QAEA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,UAAU;AAAA,UACV,SAAS;AAAA,UACT,aAAa;AAAA,UACb,aAAa;AAAA,UACb,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,cAAc,CAAC,OAAO;AAAA,YACvB;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,aAAa;AAAA,UACb,aAAa;AAAA,UACb,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,cAAc,CAAC,OAAO;AAAA,YACvB;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,OAAO,KAAK,QAAQ;AAC1B,YAAQ,IAAI,qCAAqC;AAAA,MAChD,UAAU,KAAK;AAAA,MACf,QAAQ,KAAK;AAAA,MACb,UAAU,KAAK;AAAA,MACf;AAAA,MACA,gBAAgB,OAAO,KAAK,KAAK,cAAc,CAAC,CAAC;AAAA,MACjD,eAAe,KAAK,UAAU,KAAK,cAAc,CAAC,CAAC,EAAE,MAAM,GAAG,GAAG;AAAA,IAClE,CAAC;AAED,UAAM,eAAe,KAAK,iBAAiB,gBAAgB,WAAW,OAAO;AAI7E,YAAQ,IAAI,sCAAsC;AAAA,MACjD;AAAA,MACA;AAAA,IACD,CAAC;AAED,QAAI,iBAAiB,SAAS;AAC7B,YAAM,qBAAqB,KAAK,iBAAiB,mBAAmB,WAAW,EAAE;AACjF,YAAM,kBAAkB,mBAAmB,KAAK;AAEhD,cAAQ,IAAI,qDAAqD;AAAA,QAChE;AAAA,QACA;AAAA,QACA;AAAA,QACA,SAAS,CAAC;AAAA,QACV,QAAQ,gBAAgB;AAAA,MACzB,CAAC;AAED,UAAI,CAAC,iBAAiB
;AACrB,cAAM,IAAI;AAAA,UACT,KAAK,QAAQ;AAAA,UACb;AAAA,UACA,EAAE,UAAU;AAAA,QACb;AAAA,MACD;AAGA,YAAM,iBAAiB,wBAAwB,eAAe;AAE9D,YAAMA,gBAA6B;AAAA,QAClC,cAAc;AAAA,QACd,YAAY;AAAA,QACZ;AAAA,MACD;AAEA,cAAQ,IAAI,kDAAkD;AAAA,QAC7D,YAAYA,cAAa;AAAA,QACzB,aAAa,CAAC,CAAC;AAAA,QACf,YAAY,gBAAgB;AAAA,QAC5B,cAAc,gBAAgB;AAAA,QAC9B,iBAAiB,gBAAgB;AAAA,MAClC,CAAC;AAED,aAAO,EAAE,UAAUA,cAAa;AAAA,IACjC;AAGA,UAAM,WAAY,KAAK,iBAAiB,YAAY,WAAW,EAAE,EAAa,KAAK;AACnF,UAAM,gBACJ,KAAK,iBAAiB,iBAAiB,WAAW,EAAE,EAAa,KAAK,KAAK;AAE7E,YAAQ,IAAI,iDAAiD;AAAA,MAC5D;AAAA,MACA;AAAA,MACA,eAAe,iBAAiB;AAAA,IACjC,CAAC;AAED,QAAI,CAAC,UAAU;AACd,YAAM,IAAI;AAAA,QACT,KAAK,QAAQ;AAAA,QACb;AAAA,QACA,EAAE,UAAU;AAAA,MACb;AAAA,IACD;AAEA,UAAM,eAA6B;AAAA,MAClC,cAAc;AAAA,MACd;AAAA,MACA;AAAA,IACD;AAEA,YAAQ,IAAI,kDAAkD;AAAA,MAC7D,UAAU,aAAa;AAAA,MACvB,eAAe,aAAa,iBAAiB;AAAA,IAC9C,CAAC;AAED,WAAO,EAAE,UAAU,aAAa;AAAA,EACjC;AACD;","names":["pluginConfig"]}
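
For orientation, the TypeScript embedded in this source map shows that loadLocalPluginManifest keeps only string-typed id, name, description, and version fields plus string arrays for providers and channels, discarding everything else. A sketch of an openclaw.plugin.json that would pass that filtering (all values here are illustrative, not taken from the package):

// Shape accepted by loadLocalPluginManifest; every field is optional, and
// non-string entries in providers/channels are silently filtered out.
const exampleManifest = {
  id: 'my-plugin',
  name: 'My Plugin',
  description: 'Example manifest for illustration',
  version: '1.0.0',
  providers: ['openai'],
  channels: ['telegram'],
};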

package/dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/openclaw-plugin.svg
ADDED

@@ -0,0 +1,3 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
+<path d="M14.7 6.3a1 1 0 0 0 0 1.4l1.6 1.6a1 1 0 0 0 1.4 0l3.77-3.77a6 6 0 0 1-7.94 7.94l-6.91 6.91a2.12 2.12 0 0 1-3-3l6.91-6.91a6 6 0 0 1 7.94-7.94l-3.76 3.76z"/>
+</svg>

package/dist/nodes/llms/LmChat9Router/LmChat9Router.node.js
CHANGED

@@ -28,6 +28,22 @@ var import_sharedFields = require("../../../utils/sharedFields");
 var import_error_handling = require("../../vendors/OpenAi/helpers/error-handling");
 var import_n8nLlmFailedAttemptHandler = require("../n8nLlmFailedAttemptHandler");
 var import_N8nLlmTracing = require("../N8nLlmTracing");
+const NINE_ROUTER_OPENCLAW_PROVIDER = "9router";
+const NINE_ROUTER_OPENCLAW_MODEL_SOURCE = "9router";
+const NINE_ROUTER_OPENCLAW_API = "openai-completions";
+const NINE_ROUTER_DEFAULT_BASE_URL = "http://localhost:20128/api/v1";
+function normalizeOptionalString(value) {
+  if (typeof value !== "string") {
+    return void 0;
+  }
+  const trimmed = value.trim();
+  return trimmed || void 0;
+}
+function toOpenClawNineRouterModelId(modelName) {
+  const normalizedModelName = normalizeOptionalString(modelName) ?? "auto";
+  const providerPrefix = `${NINE_ROUTER_OPENCLAW_PROVIDER}/`;
+  return normalizedModelName.toLowerCase().startsWith(providerPrefix) ? normalizedModelName : `${NINE_ROUTER_OPENCLAW_PROVIDER}/${normalizedModelName}`;
+}
 class LmChat9Router {
   constructor() {
     this.description = {
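
As a sanity check on the two helpers added above, here is a standalone TypeScript sketch of their behavior; the expected results follow from the trim-and-prefix logic in the diff, not from the package's own tests:

const PROVIDER = '9router';

function normalizeOptionalString(value: unknown): string | undefined {
  if (typeof value !== 'string') return undefined;
  const trimmed = value.trim();
  return trimmed || undefined; // whitespace-only input collapses to undefined
}

function toOpenClawNineRouterModelId(modelName: string): string {
  const name = normalizeOptionalString(modelName) ?? 'auto';
  const prefix = `${PROVIDER}/`;
  // Prefix check is case-insensitive, but the original casing is returned untouched.
  return name.toLowerCase().startsWith(prefix) ? name : `${PROVIDER}/${name}`;
}

toOpenClawNineRouterModelId('openai/gpt-4o'); // => '9router/openai/gpt-4o'
toOpenClawNineRouterModelId('9Router/auto');  // => '9Router/auto' (already prefixed)
toOpenClawNineRouterModelId('   ');           // => '9router/auto' (blank falls back)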

@@ -84,7 +100,7 @@ class LmChat9Router {
         displayName: "Model",
         name: "model",
         type: "options",
-        description: 'The model which will generate the completion. <a href="http://localhost:20128">Learn more</a>.',
+        description: 'The model which will generate the completion. <a href="https://github.com/9router/9router">Learn more</a>.',
         typeOptions: {
           loadOptions: {
             routing: {

@@ -216,11 +232,13 @@
   async supplyData(itemIndex) {
     const credentials = await this.getCredentials("nineRouterApi");
     const modelName = this.getNodeParameter("model", itemIndex);
+    const baseURL = normalizeOptionalString(credentials.url) ?? NINE_ROUTER_DEFAULT_BASE_URL;
+    const hasApiKey = normalizeOptionalString(credentials.apiKey) !== void 0;
     const options = this.getNodeParameter("options", itemIndex, {});
     const configuration = {
-      baseURL: credentials.url,
+      baseURL,
       fetchOptions: {
-        dispatcher: (0, import_httpProxyAgent.getProxyAgent)(credentials.url)
+        dispatcher: (0, import_httpProxyAgent.getProxyAgent)(baseURL)
       }
     };
     const model = new import_openai.ChatOpenAI({
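
Two details of the credential handling above are easy to miss: a missing or whitespace-only credential URL silently falls back to the hard-coded local default, and only a boolean hasApiKey flag is derived from the key, so the key itself is never copied into the metadata or the logs. A minimal sketch of the fallback, assuming the credential fields are optional strings as in OpenAICompatibleCredential:

const NINE_ROUTER_DEFAULT_BASE_URL = 'http://localhost:20128/api/v1';

// Same normalization as the compiled code: non-strings and blank strings become undefined.
const normalize = (value: unknown): string | undefined =>
  typeof value === 'string' && value.trim() ? value.trim() : undefined;

function resolveBaseUrl(credentialUrl: unknown): string {
  return normalize(credentialUrl) ?? NINE_ROUTER_DEFAULT_BASE_URL;
}

resolveBaseUrl(undefined);              // => 'http://localhost:20128/api/v1'
resolveBaseUrl('   ');                  // => the default again (whitespace-only)
resolveBaseUrl('https://router.local'); // => 'https://router.local' (hypothetical URL)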

@@ -236,6 +254,25 @@
       } : void 0,
       onFailedAttempt: (0, import_n8nLlmFailedAttemptHandler.makeN8nLlmFailedAttemptHandler)(this, import_error_handling.openAiFailedAttemptHandler)
     });
+    const openClawModelConfig = {
+      modelId: toOpenClawNineRouterModelId(modelName),
+      modelSource: NINE_ROUTER_OPENCLAW_MODEL_SOURCE,
+      extra: {
+        baseUrl: baseURL,
+        api: NINE_ROUTER_OPENCLAW_API,
+        hasApiKey
+      }
+    };
+    Object.assign(model, openClawModelConfig);
+    console.log("[LmChat9Router] returning model with OpenClaw metadata", {
+      itemIndex,
+      modelName,
+      openClawModelId: openClawModelConfig.modelId,
+      modelSource: openClawModelConfig.modelSource,
+      baseUrl: baseURL,
+      api: NINE_ROUTER_OPENCLAW_API,
+      hasApiKey
+    });
     return {
       response: model
     };
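
Note that Object.assign(model, openClawModelConfig) piggybacks the OpenClaw metadata onto the ChatOpenAI instance itself rather than returning it separately; the new source map below shows the shape comes from the ModelConfig type imported from OpenClawAgentV2.node. The consumer side is not part of this diff, but a downstream agent would presumably duck-type the fields back off the supplied model, roughly like this (the consumer code is an assumption; only the attached shape is from the diff):

interface ModelConfig {
  modelId: string;
  modelSource: string;
  extra: { baseUrl: string; api: string; hasApiKey: boolean };
}

// Hypothetical consumer: check whether a supplied chat model carries the
// piggybacked OpenClaw fields before treating it as an OpenClaw-routed model.
function readOpenClawConfig(model: object): ModelConfig | undefined {
  const candidate = model as Partial<ModelConfig>;
  if (typeof candidate.modelId === 'string' && typeof candidate.modelSource === 'string') {
    return candidate as ModelConfig;
  }
  return undefined; // plain chat model without OpenClaw metadata
}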

package/dist/nodes/llms/LmChat9Router/LmChat9Router.node.js.map
CHANGED

@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/llms/LmChat9Router/LmChat9Router.node.ts"],"sourcesContent":["import { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\n\nexport class LmChat9Router implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: '9Router Chat Model',\n\t\tname: 'lmChat9Router',\n\t\ticon: { light: 'file:9router.svg', dark: 'file:9router.dark.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: '9Router Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://github.com/9router/9router',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'nineRouterApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"http://localhost:20128\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'openai/gpt-4o',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. 
We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('nineRouterApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL: credentials.url,\n\t\t\tfetchOptions: {\n\t\t\t\tdispatcher: getProxyAgent(credentials.url),\n\t\t\t},\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\tapiKey: credentials.apiKey || 'no-key',\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? {\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAA+C;AAC/C,0BAMO;AAEP,4BAA8B;AAC9B,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAEvB,MAAM,cAAmC;AAAA,EAAzC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,oBAAoB,MAAM,wBAAwB;AAAA,MACjE,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA
,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,eAAe;AAEzF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAE1D,UAAM,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC,SAAS,YAAY;AAAA,MACrB,cAAc;AAAA,QACb,gBAAY,qCAAc,YAAY,GAAG;AAAA,MAC1C;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,QAAQ,YAAY,UAAU;AAAA,MAC9B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
+
{"version":3,"sources":["../../../../nodes/llms/LmChat9Router/LmChat9Router.node.ts"],"sourcesContent":["import { ChatOpenAI, type ClientOptions } from '@langchain/openai';\nimport {\n\ttype IDataObject,\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n} from 'n8n-workflow';\n\nimport { getProxyAgent } from '@utils/httpProxyAgent';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\n\nimport type { OpenAICompatibleCredential } from '../../../types/types';\nimport { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';\nimport { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';\nimport { N8nLlmTracing } from '../N8nLlmTracing';\nimport type { ModelConfig } from '../../agents/OpenClawAgent/V2/OpenClawAgentV2.node';\n\nconst NINE_ROUTER_OPENCLAW_PROVIDER = '9router';\nconst NINE_ROUTER_OPENCLAW_MODEL_SOURCE = '9router';\nconst NINE_ROUTER_OPENCLAW_API = 'openai-completions';\nconst NINE_ROUTER_DEFAULT_BASE_URL = 'http://localhost:20128/api/v1';\n\nfunction normalizeOptionalString(value: unknown): string | undefined {\n\tif (typeof value !== 'string') {\n\t\treturn undefined;\n\t}\n\tconst trimmed = value.trim();\n\treturn trimmed || undefined;\n}\n\nfunction toOpenClawNineRouterModelId(modelName: string): string {\n\tconst normalizedModelName = normalizeOptionalString(modelName) ?? 'auto';\n\tconst providerPrefix = `${NINE_ROUTER_OPENCLAW_PROVIDER}/`;\n\treturn normalizedModelName.toLowerCase().startsWith(providerPrefix)\n\t\t? normalizedModelName\n\t\t: `${NINE_ROUTER_OPENCLAW_PROVIDER}/${normalizedModelName}`;\n}\n\nexport class LmChat9Router implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: '9Router Chat Model',\n\t\tname: 'lmChat9Router',\n\t\ticon: { light: 'file:9router.svg', dark: 'file:9router.dark.svg' },\n\t\tgroup: ['transform'],\n\t\tversion: [1],\n\t\tdescription: 'For advanced usage with an AI chain',\n\t\tdefaults: {\n\t\t\tname: '9Router Chat Model',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Language Models', 'Root Nodes'],\n\t\t\t\t'Language Models': ['Chat Models (Recommended)'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://github.com/9router/9router',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\n\t\tinputs: [],\n\n\t\toutputs: [NodeConnectionTypes.AiLanguageModel],\n\t\toutputNames: ['Model'],\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'nineRouterApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\t\trequestDefaults: {\n\t\t\tignoreHttpStatusErrors: true,\n\t\t\tbaseURL: '={{ $credentials?.url }}',\n\t\t},\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),\n\t\t\t{\n\t\t\t\tdisplayName:\n\t\t\t\t\t'If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',\n\t\t\t\tname: 'notice',\n\t\t\t\ttype: 'notice',\n\t\t\t\tdefault: '',\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'/options.responseFormat': ['json_object'],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Model',\n\t\t\t\tname: 'model',\n\t\t\t\ttype: 'options',\n\t\t\t\tdescription:\n\t\t\t\t\t'The model which will generate the completion. 
<a href=\"https://github.com/9router/9router\">Learn more</a>.',\n\t\t\t\ttypeOptions: {\n\t\t\t\t\tloadOptions: {\n\t\t\t\t\t\trouting: {\n\t\t\t\t\t\t\trequest: {\n\t\t\t\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\t\t\t\turl: '/models',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\toutput: {\n\t\t\t\t\t\t\t\tpostReceive: [\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'rootProperty',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tproperty: 'data',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'setKeyValue',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tname: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t\tvalue: '={{$responseItem.id}}',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\ttype: 'sort',\n\t\t\t\t\t\t\t\t\t\tproperties: {\n\t\t\t\t\t\t\t\t\t\t\tkey: 'name',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trouting: {\n\t\t\t\t\tsend: {\n\t\t\t\t\t\ttype: 'body',\n\t\t\t\t\t\tproperty: 'model',\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdefault: 'openai/gpt-4o',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'options',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdescription: 'Additional options to add',\n\t\t\t\ttype: 'collection',\n\t\t\t\tdefault: {},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Frequency Penalty',\n\t\t\t\t\t\tname: 'frequencyPenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Maximum Number of Tokens',\n\t\t\t\t\t\tname: 'maxTokens',\n\t\t\t\t\t\tdefault: -1,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'The maximum number of tokens to generate in the completion. 
Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t\ttypeOptions: {\n\t\t\t\t\t\t\tmaxValue: 32768,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Response Format',\n\t\t\t\t\t\tname: 'responseFormat',\n\t\t\t\t\t\tdefault: 'text',\n\t\t\t\t\t\ttype: 'options',\n\t\t\t\t\t\toptions: [\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'Text',\n\t\t\t\t\t\t\t\tvalue: 'text',\n\t\t\t\t\t\t\t\tdescription: 'Regular text response',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tname: 'JSON',\n\t\t\t\t\t\t\t\tvalue: 'json_object',\n\t\t\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\t\t'Enables JSON mode, which should guarantee the message the model generates is valid JSON',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t],\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Presence Penalty',\n\t\t\t\t\t\tname: 'presencePenalty',\n\t\t\t\t\t\tdefault: 0,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics\",\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Sampling Temperature',\n\t\t\t\t\t\tname: 'temperature',\n\t\t\t\t\t\tdefault: 0.7,\n\t\t\t\t\t\ttypeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Timeout',\n\t\t\t\t\t\tname: 'timeout',\n\t\t\t\t\t\tdefault: 360000,\n\t\t\t\t\t\tdescription: 'Maximum amount of time a request is allowed to take in milliseconds',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Max Retries',\n\t\t\t\t\t\tname: 'maxRetries',\n\t\t\t\t\t\tdefault: 2,\n\t\t\t\t\t\tdescription: 'Maximum number of retries to attempt',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Top P',\n\t\t\t\t\t\tname: 'topP',\n\t\t\t\t\t\tdefault: 1,\n\t\t\t\t\t\ttypeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\t'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',\n\t\t\t\t\t\ttype: 'number',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tconst credentials = await this.getCredentials<OpenAICompatibleCredential>('nineRouterApi');\n\n\t\tconst modelName = this.getNodeParameter('model', itemIndex) as string;\n\t\tconst baseURL = normalizeOptionalString(credentials.url) ?? 
NINE_ROUTER_DEFAULT_BASE_URL;\n\t\tconst hasApiKey = normalizeOptionalString(credentials.apiKey) !== undefined;\n\n\t\tconst options = this.getNodeParameter('options', itemIndex, {}) as {\n\t\t\tfrequencyPenalty?: number;\n\t\t\tmaxTokens?: number;\n\t\t\tmaxRetries: number;\n\t\t\ttimeout: number;\n\t\t\tpresencePenalty?: number;\n\t\t\ttemperature?: number;\n\t\t\ttopP?: number;\n\t\t\tresponseFormat?: 'text' | 'json_object';\n\t\t};\n\n\t\tconst configuration: ClientOptions = {\n\t\t\tbaseURL,\n\t\t\tfetchOptions: {\n\t\t\t\tdispatcher: getProxyAgent(baseURL),\n\t\t\t},\n\t\t};\n\n\t\tconst model = new ChatOpenAI({\n\t\t\tapiKey: credentials.apiKey || 'no-key',\n\t\t\tmodel: modelName,\n\t\t\t...options,\n\t\t\ttimeout: options.timeout ?? 60000,\n\t\t\tmaxRetries: options.maxRetries ?? 2,\n\t\t\tconfiguration,\n\t\t\tcallbacks: [new N8nLlmTracing(this)],\n\t\t\tmodelKwargs: options.responseFormat\n\t\t\t\t? {\n\t\t\t\t\t\tresponse_format: { type: options.responseFormat },\n\t\t\t\t\t}\n\t\t\t\t: undefined,\n\t\t\tonFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),\n\t\t});\n\n\t\tconst openClawModelConfig: ModelConfig = {\n\t\t\tmodelId: toOpenClawNineRouterModelId(modelName),\n\t\t\tmodelSource: NINE_ROUTER_OPENCLAW_MODEL_SOURCE,\n\t\t\textra: {\n\t\t\t\tbaseUrl: baseURL,\n\t\t\t\tapi: NINE_ROUTER_OPENCLAW_API,\n\t\t\t\thasApiKey,\n\t\t\t} satisfies IDataObject,\n\t\t};\n\n\t\tObject.assign(model, openClawModelConfig);\n\n\t\tconsole.log('[LmChat9Router] returning model with OpenClaw metadata', {\n\t\t\titemIndex,\n\t\t\tmodelName,\n\t\t\topenClawModelId: openClawModelConfig.modelId,\n\t\t\tmodelSource: openClawModelConfig.modelSource,\n\t\t\tbaseUrl: baseURL,\n\t\t\tapi: NINE_ROUTER_OPENCLAW_API,\n\t\t\thasApiKey,\n\t\t});\n\n\t\treturn {\n\t\t\tresponse: 
model,\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAA+C;AAC/C,0BAOO;AAEP,4BAA8B;AAC9B,0BAA6C;AAG7C,4BAA2C;AAC3C,wCAA+C;AAC/C,2BAA8B;AAG9B,MAAM,gCAAgC;AACtC,MAAM,oCAAoC;AAC1C,MAAM,2BAA2B;AACjC,MAAM,+BAA+B;AAErC,SAAS,wBAAwB,OAAoC;AACpE,MAAI,OAAO,UAAU,UAAU;AAC9B,WAAO;AAAA,EACR;AACA,QAAM,UAAU,MAAM,KAAK;AAC3B,SAAO,WAAW;AACnB;AAEA,SAAS,4BAA4B,WAA2B;AAC/D,QAAM,sBAAsB,wBAAwB,SAAS,KAAK;AAClE,QAAM,iBAAiB,GAAG,6BAA6B;AACvD,SAAO,oBAAoB,YAAY,EAAE,WAAW,cAAc,IAC/D,sBACA,GAAG,6BAA6B,IAAI,mBAAmB;AAC3D;AAEO,MAAM,cAAmC;AAAA,EAAzC;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM,EAAE,OAAO,oBAAoB,MAAM,wBAAwB;AAAA,MACjE,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,CAAC;AAAA,MACX,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,mBAAmB,YAAY;AAAA,UACpC,mBAAmB,CAAC,2BAA2B;AAAA,QAChD;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MAEA,QAAQ,CAAC;AAAA,MAET,SAAS,CAAC,wCAAoB,eAAe;AAAA,MAC7C,aAAa,CAAC,OAAO;AAAA,MACrB,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MACA,iBAAiB;AAAA,QAChB,wBAAwB;AAAA,QACxB,SAAS;AAAA,MACV;AAAA,MACA,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,SAAS,wCAAoB,OAAO,CAAC;AAAA,QACvF;AAAA,UACC,aACC;AAAA,UACD,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,2BAA2B,CAAC,aAAa;AAAA,YAC1C;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aACC;AAAA,UACD,aAAa;AAAA,YACZ,aAAa;AAAA,cACZ,SAAS;AAAA,gBACR,SAAS;AAAA,kBACR,QAAQ;AAAA,kBACR,KAAK;AAAA,gBACN;AAAA,gBACA,QAAQ;AAAA,kBACP,aAAa;AAAA,oBACZ;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,UAAU;AAAA,sBACX;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,MAAM;AAAA,wBACN,OAAO;AAAA,sBACR;AAAA,oBACD;AAAA,oBACA;AAAA,sBACC,MAAM;AAAA,sBACN,YAAY;AAAA,wBACX,KAAK;AAAA,sBACN;AAAA,oBACD;AAAA,kBACD;AAAA,gBACD;AAAA,cACD;AAAA,YACD;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR,MAAM;AAAA,cACL,MAAM;AAAA,cACN,UAAU;AAAA,YACX;AAAA,UACD;AAAA,UACA,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,aAAa;AAAA,UACb,aAAa;AAAA,UACb,MAAM;AAAA,UACN,SAAS,CAAC;AAAA,UACV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aACC;AAAA,cACD,MAAM;AAAA,cACN,aAAa;AAAA,gBACZ,UAAU;AAAA,cACX;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,MAAM;AAAA,cACN,SAAS;AAAA,gBACR;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aAAa;AAAA,gBACd;AAAA,gBACA;AAAA,kBACC,MAAM;AAAA,kBACN,OAAO;AAAA,kBACP,aACC;AAAA,gBACF;AAAA,cACD;AAAA,YACD;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,IAAI,iBAAiB,EAAE;AAAA,cAC7D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa;AAAA,cACb,MAAM;AAAA,YACP;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,SAAS;AAAA,cACT,aAAa,EAAE,UAAU,GAAG,UAAU,GAAG,iBAAiB,EAAE;AAAA,cAC5D,aACC;AAAA,cACD,MAAM;AAAA,YACP;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,UAAM,cAAc,MAAM,KAAK,eAA2C,eAAe;AAEzF,UAAM,YAAY,KAAK,iBAAiB,SAAS,SAAS;AAC1D,UAAM,UAAU,wBAAwB,YAAY,GAAG,KAAK;AAC5D,UAAM,YAAY,wBAAwB,YAAY,MAAM,MAAM;AAElE,UAA
M,UAAU,KAAK,iBAAiB,WAAW,WAAW,CAAC,CAAC;AAW9D,UAAM,gBAA+B;AAAA,MACpC;AAAA,MACA,cAAc;AAAA,QACb,gBAAY,qCAAc,OAAO;AAAA,MAClC;AAAA,IACD;AAEA,UAAM,QAAQ,IAAI,yBAAW;AAAA,MAC5B,QAAQ,YAAY,UAAU;AAAA,MAC9B,OAAO;AAAA,MACP,GAAG;AAAA,MACH,SAAS,QAAQ,WAAW;AAAA,MAC5B,YAAY,QAAQ,cAAc;AAAA,MAClC;AAAA,MACA,WAAW,CAAC,IAAI,mCAAc,IAAI,CAAC;AAAA,MACnC,aAAa,QAAQ,iBAClB;AAAA,QACA,iBAAiB,EAAE,MAAM,QAAQ,eAAe;AAAA,MACjD,IACC;AAAA,MACH,qBAAiB,kEAA+B,MAAM,gDAA0B;AAAA,IACjF,CAAC;AAED,UAAM,sBAAmC;AAAA,MACxC,SAAS,4BAA4B,SAAS;AAAA,MAC9C,aAAa;AAAA,MACb,OAAO;AAAA,QACN,SAAS;AAAA,QACT,KAAK;AAAA,QACL;AAAA,MACD;AAAA,IACD;AAEA,WAAO,OAAO,OAAO,mBAAmB;AAExC,YAAQ,IAAI,0DAA0D;AAAA,MACrE;AAAA,MACA;AAAA,MACA,iBAAiB,oBAAoB;AAAA,MACrC,aAAa,oBAAoB;AAAA,MACjC,SAAS;AAAA,MACT,KAAK;AAAA,MACL;AAAA,IACD,CAAC;AAED,WAAO;AAAA,MACN,UAAU;AAAA,IACX;AAAA,EACD;AACD;","names":[]}
package/dist/types/nodes.json
CHANGED

@@ -10,6 +10,13 @@
{"displayName":"AI Agent Tool","name":"agentTool","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]}},"defaultVersion":3,"version":[2.2],"defaults":{"name":"AI Agent Tool","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? [\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(false, hasOutputParser, needsFallback)\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["ai_tool"],"properties":[{"displayName":"Description","name":"toolDescription","type":"string","default":"AI Agent that can call other tools","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. 
This is useful for rate limiting."}]}],"displayOptions":{"hide":{"@version":[{"_cnd":{"lt":2.2}}]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]}],"displayOptions":{"show":{"@version":[{"_cnd":{"lt":2.2}}]}}}]},
{"displayName":"AI Agent Tool","name":"agentTool","icon":"fa:robot","iconColor":"black","group":["transform"],"description":"Generates an action plan and executes it. Can use external tools.","codex":{"alias":["LangChain","Chat","Conversational","Plan and Execute","ReAct","Tools"],"categories":["AI"],"subcategories":{"AI":["Tools"],"Tools":["Other Tools"]}},"defaultVersion":3,"version":[3],"defaults":{"name":"AI Agent Tool","color":"#404040"},"inputs":"={{\n\t\t\t\t((hasOutputParser, needsFallback) => {\n\t\t\t\t\tfunction getInputs(hasMainInput, hasOutputParser, needsFallback) {\n const getInputData = (inputs) => {\n return inputs.map(({ type, filter, displayName, required }) => {\n const input = {\n type,\n displayName,\n required,\n maxConnections: [\"ai_languageModel\", \"ai_memory\", \"ai_outputParser\"].includes(type) ? 1 : void 0\n };\n if (filter) {\n input.filter = filter;\n }\n return input;\n });\n };\n let specialInputs = [\n {\n type: \"ai_languageModel\",\n displayName: \"Chat Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n type: \"ai_languageModel\",\n displayName: \"Fallback Model\",\n required: true,\n filter: {\n excludedNodes: [\n \"@n8n/n8n-nodes-langchain.lmCohere\",\n \"@n8n/n8n-nodes-langchain.lmOllama\",\n \"n8n/n8n-nodes-langchain.lmOpenAi\",\n \"@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference\"\n ]\n }\n },\n {\n displayName: \"Memory\",\n type: \"ai_memory\"\n },\n {\n displayName: \"Tool\",\n type: \"ai_tool\"\n },\n {\n displayName: \"Output Parser\",\n type: \"ai_outputParser\"\n }\n ];\n if (hasOutputParser === false) {\n specialInputs = specialInputs.filter((input) => input.type !== \"ai_outputParser\");\n }\n if (needsFallback === false) {\n specialInputs = specialInputs.filter((input) => input.displayName !== \"Fallback Model\");\n }\n const mainInputs = hasMainInput ? [\"main\"] : [];\n return [...mainInputs, ...getInputData(specialInputs)];\n};\n\t\t\t\t\treturn getInputs(false, hasOutputParser, needsFallback)\n\t\t\t\t})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)\n\t\t\t}}","outputs":["ai_tool"],"properties":[{"displayName":"Description","name":"toolDescription","type":"string","default":"AI Agent that can call other tools","required":true,"typeOptions":{"rows":2},"description":"Explain to the LLM what this tool does, a good, specific description would allow LLMs to produce expected results much more often"},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. 
Hello, how can you help me?","typeOptions":{"rows":2}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"You are a helpful assistant","description":"The message that will be sent to the agent before the conversation starts","typeOptions":{"rows":6}},{"displayName":"Max Iterations","name":"maxIterations","type":"number","default":10,"description":"The maximum number of iterations the agent will run before stopping"},{"displayName":"Return Intermediate Steps","name":"returnIntermediateSteps","type":"boolean","default":false,"description":"Whether or not the output should include intermediate steps the agent took"},{"displayName":"Automatically Passthrough Binary Images","name":"passthroughBinaryImages","type":"boolean","default":true,"description":"Whether or not binary images should be automatically passed through to the agent as image type messages"},{"displayName":"Enable Streaming","name":"enableStreaming","type":"boolean","default":true,"description":"Whether this agent will stream the response in real-time as it generates text"},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":1,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}]},{"displayName":"Max Tokens To Read From Memory","name":"maxTokensFromMemory","type":"hidden","default":0,"description":"The maximum number of tokens to read from the chat memory history. Set to 0 to read all history."}]}]},
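
For readers tracing the dynamic `inputs` expression embedded above: it computes the port list from `hasOutputParser` and `needsFallback`, with the tool variant passing `hasMainInput = false`. A minimal sketch of the resolved value (derived from the embedded function; the variable name is ours):

    // Resolved input ports for AI Agent Tool when hasOutputParser === true
    // and needsFallback === false; no "main" input is prepended.
    const resolvedInputs = [
      {
        type: "ai_languageModel",
        displayName: "Chat Model",
        required: true,
        maxConnections: 1, // model, memory and output parser are capped at 1
        filter: { excludedNodes: [/* the four lm* nodes listed above */] },
      },
      { type: "ai_memory", displayName: "Memory", maxConnections: 1 },
      { type: "ai_tool", displayName: "Tool" }, // tools allow multiple connections
      { type: "ai_outputParser", displayName: "Output Parser", maxConnections: 1 },
    ];
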
12  12 |
{"displayName":"OpenAI Assistant","name":"openAiAssistant","hidden":true,"icon":"fa:robot","group":["transform"],"version":[1,1.1],"description":"Utilizes Assistant API from Open AI.","subtitle":"Open AI Assistant","defaults":{"name":"OpenAI Assistant","color":"#404040"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.openaiassistant/"}]}},"inputs":[{"type":"main"},{"type":"ai_tool","displayName":"Tools"}],"outputs":["main"],"credentials":[{"name":"openAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $parameter.options?.baseURL?.split(\"/\").slice(0,-1).join(\"/\") || \"https://api.openai.com\" }}"},"properties":[{"displayName":"Operation","name":"mode","type":"options","noDataExpression":true,"default":"existing","options":[{"name":"Use New Assistant","value":"new"},{"name":"Use Existing Assistant","value":"existing"}]},{"displayName":"Name","name":"name","type":"string","default":"","required":true,"displayOptions":{"show":{"/mode":["new"]}}},{"displayName":"Instructions","name":"instructions","type":"string","description":"How the Assistant and model should behave or respond","default":"","typeOptions":{"rows":5},"displayOptions":{"show":{"/mode":["new"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will be used to power the assistant. <a href=\"https://beta.openai.com/docs/models/overview\">Learn more</a>. The Retrieval tool requires gpt-3.5-turbo-1106 and gpt-4-1106-preview models.","required":true,"displayOptions":{"show":{"/mode":["new"]}},"typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ $responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct') }}"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"gpt-3.5-turbo-1106"},{"displayName":"Assistant","name":"assistantId","type":"options","noDataExpression":true,"displayOptions":{"show":{"/mode":["existing"]}},"description":"The assistant to use. 
<a href=\"https://beta.openai.com/docs/assistants/overview\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","headers":{"OpenAI-Beta":"assistants=v1"},"url":"={{ $parameter.options?.baseURL?.split(\"/\").slice(-1).pop() || \"v1\" }}/assistants"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.id}}","description":"={{$responseItem.model}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"assistant"}},"required":true,"default":""},{"displayName":"Text","name":"text","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Text","name":"text","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.1]}}},{"displayName":"OpenAI Tools","name":"nativeTools","type":"multiOptions","default":[],"options":[{"name":"Code Interpreter","value":"code_interpreter"},{"name":"Knowledge Retrieval","value":"retrieval"}]},{"displayName":"Connect your own custom tools to this node on the canvas","name":"noticeTools","type":"notice","default":""},{"displayName":"Upload files for retrieval using the <a href=\"https://platform.openai.com/playground\" target=\"_blank\">OpenAI website<a/>","name":"noticeTools","type":"notice","typeOptions":{"noticeTheme":"info"},"displayOptions":{"show":{"/nativeTools":["retrieval"]}},"default":""},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Base URL","name":"baseURL","default":"https://api.openai.com/v1","description":"Override the default base URL for the API","type":"string"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Timeout","name":"timeout","default":10000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"}]}]},
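
The legacy OpenAI Assistant definition derives two URLs from a single `baseURL` option. A worked sketch of those two expressions (values hypothetical):

    // requestDefaults.baseURL strips the trailing path segment, while the
    // loadOptions request reuses that segment ("v1" by default) as a prefix.
    const baseURL = "https://api.openai.com/v1"; // options.baseURL default
    const requestBase = baseURL.split("/").slice(0, -1).join("/"); // "https://api.openai.com"
    const version = baseURL.split("/").slice(-1).pop() ?? "v1";    // "v1"
    const modelsUrl = `${version}/models`;                         // "v1/models"
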
    13 | +
{"displayName":"OpenClaw AI Agent","name":"openClawAgent","group":["trigger","transform"],"description":"Runs a one-shot OpenClaw agent turn through the OpenClaw CLI","codex":{"alias":["OpenClaw","Agent","Gateway","Assistant","Channel"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.openclaw.ai/cli/agent"}]}},"defaultVersion":2,"version":2,"subtitle":"={{$parameter.selectorType === \"agent\" ? \"Agent: \" + $parameter.agentId : $parameter.selectorType === \"sessionId\" ? \"Session: \" + $parameter.sessionId : $parameter.selectorType === \"recipient\" ? \"To: \" + $parameter.to : \"Default route\"}}","defaults":{"name":"OpenClaw AI Agent"},"inputs":["main",{"type":"ai_channel","displayName":"Channel"},{"type":"ai_languageModel","displayName":"Model","required":false,"maxConnections":1},{"type":"ai_tool","displayName":"Plugin","required":false},{"type":"ai_tool","displayName":"MCP","required":false}],"outputs":["main"],"properties":[{"displayName":"Requires the OpenClaw CLI to be installed and configured on the n8n host. Connect Channel sub-nodes to configure messaging channels (Telegram, WhatsApp, etc.).","name":"openClawNotice","type":"notice","default":""},{"displayName":"Message","name":"message","type":"string","required":true,"default":"={{ $json.chatInput || $json.chat_input || $json.message || $json.text || \"\" }}","description":"Message body to send to the OpenClaw agent","typeOptions":{"rows":5}},{"displayName":"Route By","name":"selectorType","type":"options","default":"agent","noDataExpression":true,"description":"How to target the OpenClaw agent turn","options":[{"name":"Agent ID","value":"agent","description":"Run against a configured OpenClaw agent"},{"name":"Existing Session ID","value":"sessionId","description":"Continue an existing OpenClaw session"},{"name":"Recipient","value":"recipient","description":"Use a recipient/channel target to derive the session"},{"name":"OpenClaw Default","value":"default","description":"Let OpenClaw choose its default route"}]},{"displayName":"Agent ID","name":"agentId","type":"string","default":"main","description":"Configured OpenClaw agent ID","displayOptions":{"show":{"selectorType":["agent"]}}},{"displayName":"Session ID","name":"sessionId","type":"string","default":"","description":"OpenClaw session ID to continue","displayOptions":{"show":{"selectorType":["sessionId"]}}},{"displayName":"Recipient","name":"to","type":"string","default":"","description":"Recipient or channel target passed to OpenClaw as --to","displayOptions":{"show":{"selectorType":["recipient"]}}},{"displayName":"Thinking Level","name":"thinking","type":"options","default":"","description":"Optional OpenClaw thinking level override for this run","options":[{"name":"Adaptive","value":"adaptive"},{"name":"Extra High","value":"xhigh"},{"name":"High","value":"high"},{"name":"Low","value":"low"},{"name":"Max","value":"max"},{"name":"Medium","value":"medium"},{"name":"Minimal","value":"minimal"},{"name":"Off","value":"off"},{"name":"Use OpenClaw Default","value":""}]},{"displayName":"Run Locally","name":"local","type":"boolean","default":false,"description":"Whether to force OpenClaw embedded local runtime instead of Gateway mode"},{"displayName":"Deliver Reply","name":"deliver","type":"boolean","default":false,"description":"Whether OpenClaw should deliver the reply back to the selected channel/target"},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add 
Option","default":{},"options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"","description":"Additional system instructions for this OpenClaw run","typeOptions":{"rows":5}},{"displayName":"Binary Path","name":"binaryPath","type":"string","default":"openclaw","description":"Path to the openclaw binary"},{"displayName":"Working Directory","name":"workingDirectory","type":"string","default":"","description":"Working directory for the OpenClaw process"},{"displayName":"Timeout","name":"timeout","type":"number","default":300,"description":"OpenClaw agent timeout in seconds","typeOptions":{"minValue":1}},{"displayName":"Verbose","name":"verbose","type":"options","default":"","description":"Optional OpenClaw verbose setting","options":[{"name":"Full","value":"full"},{"name":"Leave Unchanged","value":""},{"name":"Off","value":"off"},{"name":"On","value":"on"}]},{"displayName":"Include Raw Output","name":"includeRawOutput","type":"boolean","default":false,"description":"Whether to include raw stdout and stderr from the OpenClaw CLI in the output"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/openclaw.svg"},
    14 | +
{"displayName":"OpenClaw AI Agent","name":"openClawAgent","group":["trigger","transform"],"description":"Runs a one-shot OpenClaw agent turn through the OpenClaw CLI","codex":{"alias":["OpenClaw","Agent","Gateway","Assistant","Channel"],"categories":["AI"],"subcategories":{"AI":["Agents","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.openclaw.ai/cli/agent"}]}},"defaultVersion":2,"version":1,"subtitle":"={{$parameter.selectorType === \"agent\" ? \"Agent: \" + $parameter.agentId : $parameter.selectorType === \"sessionId\" ? \"Session: \" + $parameter.sessionId : $parameter.selectorType === \"recipient\" ? \"To: \" + $parameter.to : \"Default route\"}}","defaults":{"name":"OpenClaw AI Agent"},"inputs":["main"],"outputs":["main"],"credentials":[{"name":"telegramApi","displayName":"Telegram Credential","required":false}],"properties":[{"displayName":"Requires the OpenClaw CLI to be installed and configured on the n8n host. The node runs <code>openclaw agent --json</code> and returns OpenClaw payloads and metadata.","name":"openClawNotice","type":"notice","default":""},{"displayName":"Message","name":"message","type":"string","required":true,"default":"={{ $json.chatInput || $json.chat_input || $json.message || $json.text || \"\" }}","description":"Message body to send to the OpenClaw agent","typeOptions":{"rows":5}},{"displayName":"Route By","name":"selectorType","type":"options","default":"agent","noDataExpression":true,"description":"How to target the OpenClaw agent turn","options":[{"name":"Agent ID","value":"agent","description":"Run against a configured OpenClaw agent"},{"name":"Existing Session ID","value":"sessionId","description":"Continue an existing OpenClaw session"},{"name":"Recipient","value":"recipient","description":"Use a recipient/channel target to derive the session"},{"name":"OpenClaw Default","value":"default","description":"Let OpenClaw choose its default route"}]},{"displayName":"Agent ID","name":"agentId","type":"string","default":"main","description":"Configured OpenClaw agent ID","displayOptions":{"show":{"selectorType":["agent"]}}},{"displayName":"Session ID","name":"sessionId","type":"string","default":"","description":"OpenClaw session ID to continue","displayOptions":{"show":{"selectorType":["sessionId"]}}},{"displayName":"Recipient","name":"to","type":"string","default":"","description":"Recipient or channel target passed to OpenClaw as --to","displayOptions":{"show":{"selectorType":["recipient"]}}},{"displayName":"Model","name":"model","type":"string","default":"openai-codex/gpt-5.5","description":"Model override for this run. 
Use an OpenClaw model reference such as openai-codex/gpt-5.5."},{"displayName":"Thinking Level","name":"thinking","type":"options","default":"","description":"Optional OpenClaw thinking level override for this run","options":[{"name":"Adaptive","value":"adaptive"},{"name":"Extra High","value":"xhigh"},{"name":"High","value":"high"},{"name":"Low","value":"low"},{"name":"Max","value":"max"},{"name":"Medium","value":"medium"},{"name":"Minimal","value":"minimal"},{"name":"Off","value":"off"},{"name":"Use OpenClaw Default","value":""}]},{"displayName":"Run Locally","name":"local","type":"boolean","default":false,"description":"Whether to force OpenClaw embedded local runtime instead of Gateway mode"},{"displayName":"Deliver Reply","name":"deliver","type":"boolean","default":false,"description":"Whether OpenClaw should deliver the reply back to the selected channel/target"},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"System Message","name":"systemMessage","type":"string","default":"","description":"Additional system instructions for this OpenClaw run. When set, the node sends the run through the OpenClaw Gateway agent RPC so the instructions are passed as extraSystemPrompt.","typeOptions":{"rows":5}},{"displayName":"Binary Path","name":"binaryPath","type":"string","default":"openclaw","description":"Path to the openclaw binary. Defaults to \"openclaw\" in PATH."},{"displayName":"Working Directory","name":"workingDirectory","type":"string","default":"","description":"Working directory for the OpenClaw process. Leave empty to use n8n default."},{"displayName":"Timeout","name":"timeout","type":"number","default":300,"description":"OpenClaw agent timeout in seconds. The node allows extra time for the CLI to return the final gateway result before stopping the process.","typeOptions":{"minValue":1}},{"displayName":"Channel","name":"channel","type":"string","default":"","description":"Delivery channel passed to OpenClaw as --channel"},{"displayName":"Reply To","name":"replyTo","type":"string","default":"","description":"Delivery target override passed to OpenClaw as --reply-to"},{"displayName":"Reply Channel","name":"replyChannel","type":"string","default":"","description":"Delivery channel override passed to OpenClaw as --reply-channel"},{"displayName":"Reply Account","name":"replyAccount","type":"string","default":"","description":"Delivery account ID override passed to OpenClaw as --reply-account"},{"displayName":"Verbose","name":"verbose","type":"options","default":"","description":"Optional OpenClaw verbose setting to persist for the session","options":[{"name":"Full","value":"full"},{"name":"Leave Unchanged","value":""},{"name":"Off","value":"off"},{"name":"On","value":"on"}]},{"displayName":"Include Raw Output","name":"includeRawOutput","type":"boolean","default":false,"description":"Whether to include raw stdout and stderr from the OpenClaw CLI in the output"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/openclaw.svg"},
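
The V1 notice says the node shells out to `openclaw agent --json`, and the delivery options name their CLI flags (`--to`, `--channel`, `--reply-to`, `--reply-channel`, `--reply-account`). A rough sketch of the resulting invocation, with hypothetical values; the actual argument assembly lives in OpenClawAgentV1.node.js and is not shown in this diff:

    // Assumption: flag spelling per the parameter descriptions above only.
    const args = [
      "agent", "--json",
      "--to", "+15550001111",        // Recipient ("Route By" = recipient)
      "--channel", "telegram",       // options.channel
      "--reply-to", "+15550002222",  // options.replyTo
    ];
    // e.g. spawn(options.binaryPath ?? "openclaw", args, { cwd: options.workingDirectory })
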
    15 | +
{"displayName":"Telegram Channel","name":"openClawTelegramChannel","iconColor":"blue","group":["transform"],"version":1,"description":"Provides Telegram channel configuration to an OpenClaw AI Agent","defaults":{"name":"Telegram Channel"},"codex":{"alias":["Telegram","Channel","Bot","OpenClaw"],"categories":["AI"],"subcategories":{"AI":["Other"]},"resources":{"primaryDocumentation":[{"url":"https://docs.openclaw.ai/channels/telegram"}]}},"inputs":[],"outputs":["ai_channel"],"outputNames":["Channel"],"credentials":[{"name":"telegramApi","required":true}],"properties":[{"displayName":"Connect this node to an OpenClaw AI Agent to provide Telegram channel configuration. The bot token is read from the Telegram credential.","name":"telegramNotice","type":"notice","default":""},{"displayName":"Account ID","name":"accountId","type":"string","default":"","description":"Optional OpenClaw account ID for multi-account setups. Leave empty for the default account."},{"displayName":"DM Policy","name":"dmPolicy","type":"options","default":"pairing","description":"Direct message policy for this Telegram channel","options":[{"name":"Pairing","value":"pairing"},{"name":"Open","value":"open"},{"name":"Allowlist","value":"allowlist"},{"name":"Disabled","value":"disabled"}]},{"displayName":"Allow From Names or IDs","name":"allowFrom","type":"multiOptions","typeOptions":{"loadOptionsMethod":"getAllowFromOptions","loadOptionsDependsOn":["accountId"]},"default":[],"allowArbitraryValues":true,"description":"Choose from the list, or specify IDs using an <a href=\"https://docs.n8n.io/code/expressions/\">expression</a>"},{"displayName":"Group Policy","name":"groupPolicy","type":"options","default":"allowlist","description":"Group message policy for this Telegram channel","options":[{"name":"Allowlist","value":"allowlist"},{"name":"Open","value":"open"},{"name":"Disabled","value":"disabled"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/channels/TelegramChannel/telegram-channel.svg"},
    16 | +
{"displayName":"WhatsApp Channel","name":"openClawWhatsAppChannel","iconColor":"green","group":["transform"],"version":1,"description":"Provides WhatsApp channel configuration to an OpenClaw AI Agent","defaults":{"name":"WhatsApp Channel"},"codex":{"alias":["WhatsApp","Channel","OpenClaw"],"categories":["AI"],"subcategories":{"AI":["Other"]},"resources":{"primaryDocumentation":[{"url":"https://docs.openclaw.ai/channels/whatsapp"}]}},"inputs":[],"outputs":["ai_channel"],"outputNames":["Channel"],"credentials":[{"name":"whatsAppBusinessApi","required":true}],"properties":[{"displayName":"Connect this node to an OpenClaw AI Agent to provide WhatsApp channel configuration.","name":"whatsappNotice","type":"notice","default":""},{"displayName":"Phone Number ID","name":"phoneNumberId","type":"string","default":"","required":true,"description":"WhatsApp Business phone number ID"},{"displayName":"Account ID","name":"accountId","type":"string","default":"","description":"Optional OpenClaw account ID for multi-account setups. Leave empty for the default account."}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/channels/WhatsAppChannel/whatsapp-channel.svg"},
    17 | +
{"displayName":"OpenCode Free Chat Model","name":"openClawOpenCodeFreeModel","iconColor":"green","group":["transform"],"version":1,"description":"Provides an OpenCode free-tier model to an OpenClaw AI Agent (no API key needed)","defaults":{"name":"OpenCode Free Chat Model"},"codex":{"alias":["OpenCode","Free","Model","OpenClaw"],"categories":["AI"],"subcategories":{"AI":["Language Models"]},"resources":{"primaryDocumentation":[{"url":"https://opencode.ai"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"properties":[{"displayName":"Connect this node to an OpenClaw AI Agent to provide a free OpenCode model. No API key is needed for these models.","name":"openCodeFreeNotice","type":"notice","default":""},{"displayName":"Model","name":"model","type":"options","default":"opencode/big-pickle","description":"Select a free OpenCode model to use with the OpenClaw agent","options":[{"name":"OpenCode Big Pickle","value":"opencode/big-pickle"},{"name":"OpenCode GPT-5 Nano","value":"opencode/gpt-5-nano"},{"name":"OpenCode Hy3 Preview Free","value":"opencode/hy3-preview-free"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/models/OpenCodeFreeModel/opencode-free-model.svg"},
    18 | +
{"displayName":"OpenClaw MCP Server","name":"openClawMcpServer","iconColor":"blue","group":["transform"],"version":1,"description":"Provides MCP server configuration to an OpenClaw AI Agent","defaults":{"name":"OpenClaw MCP Server"},"codex":{"alias":["OpenClaw","MCP","MCP Server","Model Context Protocol","BrowserOS"],"categories":["AI"],"subcategories":{"AI":["Model Context Protocol","Tools"]},"resources":{"primaryDocumentation":[{"url":"https://docs.openclaw.ai/cli/mcp"}]}},"inputs":[],"outputs":[{"type":"ai_tool","displayName":"MCP Server"}],"outputNames":["MCP Server"],"properties":[{"displayName":"Connect this node to the MCP Server input of an OpenClaw AI Agent to sync a named MCP endpoint into OpenClaw config.","name":"mcpServerNotice","type":"notice","default":""},{"displayName":"Server Name","name":"serverName","type":"string","required":true,"default":"browseros","description":"Name to use under OpenClaw mcp.servers"},{"displayName":"Endpoint URL","name":"endpointUrl","type":"string","required":true,"default":"http://127.0.0.1:9001/mcp","placeholder":"e.g. http://127.0.0.1:9001/mcp","description":"HTTP or HTTPS URL of the MCP server endpoint"},{"displayName":"Transport","name":"transport","type":"options","default":"streamable-http","noDataExpression":true,"description":"HTTP transport OpenClaw should use for this MCP server","options":[{"name":"OpenClaw Default","value":"","description":"Do not write a transport value"},{"name":"Server-Sent Events (SSE)","value":"sse","description":"Use SSE transport"},{"name":"Streamable HTTP","value":"streamable-http","description":"Use Streamable HTTP transport"}]},{"displayName":"Options","name":"options","type":"collection","placeholder":"Add Option","default":{},"options":[{"displayName":"Connection Timeout","name":"connectionTimeoutMs","type":"number","default":30000,"description":"Time in milliseconds to wait while connecting to the MCP server","typeOptions":{"minValue":1}},{"displayName":"Headers","name":"headers","type":"json","default":"{}","description":"Optional HTTP headers as a JSON object","typeOptions":{"rows":4}}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/mcpServers/OpenClawMcpServer/openclaw-mcp-server.svg"},
    19 | +
{"displayName":"OpenClaw Plugin","name":"openClawPlugin","iconColor":"purple","group":["transform"],"version":1,"description":"Provides plugin configuration to an OpenClaw AI Agent (local or ClawHub)","defaults":{"name":"OpenClaw Plugin"},"codex":{"alias":["OpenClaw","Plugin","Extension","ClawHub"],"categories":["AI"],"subcategories":{"AI":["Other"]},"resources":{"primaryDocumentation":[{"url":"https://docs.openclaw.ai/plugins"}]}},"inputs":[],"outputs":["ai_tool"],"outputNames":["Plugin"],"properties":[{"displayName":"Connect this node to an OpenClaw AI Agent to provide plugin configuration. Local plugins are scanned from a directory; Cloud plugins are loaded from ClawHub.","name":"pluginNotice","type":"notice","default":""},{"displayName":"Plugin Source","name":"pluginSource","type":"options","default":"local","noDataExpression":true,"description":"Where to load the plugin from","options":[{"name":"Local","value":"local","description":"Scan a directory for openclaw.plugin.json and load plugin info from it"},{"name":"Cloud (ClawHub)","value":"cloud","description":"Load a plugin from the ClawHub marketplace"}]},{"displayName":"Plugin Directory","name":"pluginDirectory","type":"string","required":true,"default":"={{ $workspace.__dirPath }}","description":"Directory path to scan for openclaw.plugin.json. Supports expressions like {{ $workspace.__dirPath }}.","displayOptions":{"show":{"pluginSource":["local"]}}},{"displayName":"Plugin ID","name":"pluginId","type":"string","required":true,"default":"","placeholder":"e.g. openai, @scope/my-plugin","description":"ClawHub plugin package name","displayOptions":{"show":{"pluginSource":["cloud"]}}},{"displayName":"Version","name":"pluginVersion","type":"string","default":"","placeholder":"latest","description":"ClawHub plugin version. Leave empty to use the latest available version.","displayOptions":{"show":{"pluginSource":["cloud"]}}}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/openclaw-plugin.svg"},
13  20 |
{"displayName":"Summarization Chain","name":"chainSummarization","icon":"fa:link","iconColor":"black","group":["transform"],"description":"Transforms text into a concise summary","codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainsummarization/"}]}},"defaultVersion":2.1,"version":[2,2.1],"defaults":{"name":"Summarization Chain","color":"#909298"},"inputs":"={{ ((parameter) => { function getInputs(parameters) {\n const chunkingMode = parameters?.chunkingMode;\n const operationMode = parameters?.operationMode;\n const inputs = [\n { displayName: \"\", type: \"main\" },\n {\n displayName: \"Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n }\n ];\n if (operationMode === \"documentLoader\") {\n inputs.push({\n displayName: \"Document\",\n type: \"ai_document\",\n required: true,\n maxConnections: 1\n });\n return inputs;\n }\n if (chunkingMode === \"advanced\") {\n inputs.push({\n displayName: \"Text Splitter\",\n type: \"ai_textSplitter\",\n required: false,\n maxConnections: 1\n });\n return inputs;\n }\n return inputs;\n}; return getInputs(parameter) })($parameter) }}","outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1951\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Data to Summarize","name":"operationMode","noDataExpression":true,"type":"options","description":"How to pass data into the summarization chain","default":"nodeInputJson","options":[{"name":"Use Node Input (JSON)","value":"nodeInputJson","description":"Summarize the JSON data coming into this node from the previous one"},{"name":"Use Node Input (Binary)","value":"nodeInputBinary","description":"Summarize the binary data coming into this node from the previous one"},{"name":"Use Document Loader","value":"documentLoader","description":"Use a loader sub-node with more configuration options"}]},{"displayName":"Chunking Strategy","name":"chunkingMode","noDataExpression":true,"type":"options","description":"Chunk splitting strategy","default":"simple","options":[{"name":"Simple (Define Below)","value":"simple"},{"name":"Advanced","value":"advanced","description":"Use a splitter sub-node with more configuration options"}],"displayOptions":{"show":{"/operationMode":["nodeInputJson","nodeInputBinary"]}}},{"displayName":"Characters Per Chunk","name":"chunkSize","description":"Controls the max size (in terms of number of characters) of the final document chunk","type":"number","default":1000,"displayOptions":{"show":{"/chunkingMode":["simple"]}}},{"displayName":"Chunk Overlap (Characters)","name":"chunkOverlap","type":"number","description":"Specifies how much characters overlap there should be between chunks","default":200,"displayOptions":{"show":{"/chunkingMode":["simple"]}}},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"Input Data Field Name","name":"binaryDataKey","type":"string","default":"data","description":"The name of the field in the agent or chain’s input that contains the binary file to be processed","displayOptions":{"show":{"/operationMode":["nodeInputBinary"]}}},{"displayName":"Summarization Method and 
Prompts","name":"summarizationMethodAndPrompts","type":"fixedCollection","default":{"values":{"summarizationMethod":"map_reduce","prompt":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","combineMapPrompt":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:"}},"placeholder":"Add Option","typeOptions":{},"options":[{"name":"values","displayName":"Values","values":[{"displayName":"Summarization Method","name":"summarizationMethod","type":"options","description":"The type of summarization to run","default":"map_reduce","options":[{"name":"Map Reduce (Recommended)","value":"map_reduce","description":"Summarize each document (or chunk) individually, then summarize those summaries"},{"name":"Refine","value":"refine","description":"Summarize the first document (or chunk). Then update that summary based on the next document (or chunk), and repeat."},{"name":"Stuff","value":"stuff","description":"Pass all documents (or chunks) at once. Ideal for small datasets."}]},{"displayName":"Individual Summary Prompt","name":"combineMapPrompt","type":"string","hint":"The prompt to summarize an individual document (or chunk)","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","refine"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","typeOptions":{"rows":9}},{"displayName":"Final Prompt to Combine","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt to combine individual summaries","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","refine"]}},"typeOptions":{"rows":9}},{"displayName":"Prompt","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["refine","map_reduce"]}},"typeOptions":{"rows":9}},{"displayName":"Subsequent (Refine) Prompt","name":"refinePrompt","type":"string","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","map_reduce"]}},"default":"Your job is to produce a final summary\nWe have provided an existing summary up to a certain point: \"{existing_answer}\"\nWe have the opportunity to refine the existing summary\n(only if needed) with some more context below.\n------------\n\"{text}\"\n------------\n\nGiven the new context, refine the original summary\nIf the context isn't useful, return the original summary.\n\nREFINED SUMMARY:","hint":"The prompt to refine the summary based on the next document (or chunk)","typeOptions":{"rows":9}},{"displayName":"Initial Prompt","name":"refineQuestionPrompt","type":"string","displayOptions":{"hide":{"/options.summarizationMethodAndPrompts.values.summarizationMethod":["stuff","map_reduce"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt for the first document (or chunk)","typeOptions":{"rows":9}}]}]},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. 
This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":2.1}}]}}}]}]},
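
A hypothetical parameter snapshot for the 2.x Summarization Chain, combining the simple chunking fields with the method/prompt collection declared above:

    const summarizationParams = {
      operationMode: "nodeInputJson", // or "nodeInputBinary" / "documentLoader"
      chunkingMode: "simple",         // "advanced" uses a Text Splitter sub-node instead
      chunkSize: 1000,                // characters per chunk
      chunkOverlap: 200,              // overlap in characters
      options: {
        summarizationMethodAndPrompts: {
          values: { summarizationMethod: "map_reduce" }, // or "refine" / "stuff"
        },
        batching: { batchSize: 5, delayBetweenBatches: 0 }, // shown for 2.1+ only
      },
    };
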
14  21 |
{"displayName":"Summarization Chain","name":"chainSummarization","icon":"fa:link","iconColor":"black","group":["transform"],"description":"Transforms text into a concise summary","codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainsummarization/"}]}},"defaultVersion":2.1,"version":1,"defaults":{"name":"Summarization Chain","color":"#909298"},"inputs":["main",{"displayName":"Model","maxConnections":1,"type":"ai_languageModel","required":true},{"displayName":"Document","maxConnections":1,"type":"ai_document","required":true}],"outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1951\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Type","name":"type","type":"options","description":"The type of summarization to run","default":"map_reduce","options":[{"name":"Map Reduce (Recommended)","value":"map_reduce","description":"Summarize each document (or chunk) individually, then summarize those summaries"},{"name":"Refine","value":"refine","description":"Summarize the first document (or chunk). Then update that summary based on the next document (or chunk), and repeat."},{"name":"Stuff","value":"stuff","description":"Pass all documents (or chunks) at once. Ideal for small datasets."}]},{"displayName":"Options","name":"options","type":"collection","default":{},"placeholder":"Add Option","options":[{"displayName":"Final Prompt to Combine","name":"combineMapPrompt","type":"string","hint":"The prompt to combine individual summaries","displayOptions":{"show":{"/type":["map_reduce"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","typeOptions":{"rows":6}},{"displayName":"Individual Summary Prompt","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt to summarize an individual document (or chunk)","displayOptions":{"show":{"/type":["map_reduce"]}},"typeOptions":{"rows":6}},{"displayName":"Prompt","name":"prompt","type":"string","default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","displayOptions":{"show":{"/type":["stuff"]}},"typeOptions":{"rows":6}},{"displayName":"Subsequent (Refine) Prompt","name":"refinePrompt","type":"string","displayOptions":{"show":{"/type":["refine"]}},"default":"Your job is to produce a final summary\nWe have provided an existing summary up to a certain point: \"{existing_answer}\"\nWe have the opportunity to refine the existing summary\n(only if needed) with some more context below.\n------------\n\"{text}\"\n------------\n\nGiven the new context, refine the original summary\nIf the context isn't useful, return the original summary.\n\nREFINED SUMMARY:","hint":"The prompt to refine the summary based on the next document (or chunk)","typeOptions":{"rows":6}},{"displayName":"Initial Prompt","name":"refineQuestionPrompt","type":"string","displayOptions":{"show":{"/type":["refine"]}},"default":"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:","hint":"The prompt for the first document (or chunk)","typeOptions":{"rows":6}}]}]},
15  22 |
{"displayName":"Basic LLM Chain","name":"chainLlm","icon":"fa:link","iconColor":"black","group":["transform"],"version":[1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9],"description":"A simple chain to prompt a large language model","defaults":{"name":"Basic LLM Chain","color":"#909298"},"codex":{"alias":["LangChain"],"categories":["AI"],"subcategories":{"AI":["Chains","Root Nodes"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/"}]}},"inputs":"={{ ((parameter) => { function getInputs(parameters) {\n const inputs = [\n { displayName: \"\", type: \"main\" },\n {\n displayName: \"Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n }\n ];\n const needsFallback = parameters?.needsFallback;\n if (needsFallback === true) {\n inputs.push({\n displayName: \"Fallback Model\",\n maxConnections: 1,\n type: \"ai_languageModel\",\n required: true\n });\n }\n const hasOutputParser = parameters?.hasOutputParser;\n if (hasOutputParser === void 0 || hasOutputParser === true) {\n inputs.push({\n displayName: \"Output Parser\",\n type: \"ai_outputParser\",\n maxConnections: 1,\n required: false\n });\n }\n return inputs;\n}; return getInputs(parameter) })($parameter) }}","outputs":["main"],"credentials":[],"properties":[{"displayName":"Save time with an <a href=\"/templates/1978\" target=\"_blank\">example</a> of how this node works","name":"notice","type":"notice","default":""},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.input }}","displayOptions":{"show":{"@version":[1]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.chat_input }}","displayOptions":{"show":{"@version":[1.1,1.2]}}},{"displayName":"Prompt","name":"prompt","type":"string","required":true,"default":"={{ $json.chatInput }}","displayOptions":{"show":{"@version":[1.3]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Connected Guardrails Node","value":"guardrails","description":"Looks for an input field called 'guardrailsInput' that is coming from a directly connected Guardrails Node"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"hide":{"@version":[{"_cnd":{"lte":1.3}},{"_cnd":{"gte":1.8}}]}}},{"displayName":"Source for Prompt (User Message)","name":"promptType","type":"options","options":[{"name":"Connected Chat Trigger Node","value":"auto","description":"Looks for an input field called 'chatInput' that is coming from a directly connected Chat Trigger"},{"name":"Define below","value":"define","description":"Use an expression to reference data in previous nodes or enter static text"}],"default":"auto","displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.8}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.guardrailsInput }}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["guardrails"]}},"displayOptions":{"show":{"promptType":["guardrails"],"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"={{ $json.chatInput 
}}","typeOptions":{"rows":2},"disabledOptions":{"show":{"promptType":["auto"]}},"displayOptions":{"show":{"promptType":["auto"],"@version":[{"_cnd":{"gte":1.5}}]}}},{"displayName":"Prompt (User Message)","name":"text","type":"string","required":true,"default":"","placeholder":"e.g. Hello, how can you help me?","typeOptions":{"rows":2},"displayOptions":{"show":{"promptType":["define"]}}},{"displayName":"Require Specific Output Format","name":"hasOutputParser","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[1,1.1,1.3]}}},{"displayName":"Enable Fallback Model","name":"needsFallback","type":"boolean","default":false,"noDataExpression":true,"displayOptions":{"hide":{"@version":[1,1.1,1.3]}}},{"displayName":"Chat Messages (if Using a Chat Model)","name":"messages","type":"fixedCollection","typeOptions":{"multipleValues":true},"default":{},"placeholder":"Add prompt","options":[{"name":"messageValues","displayName":"Prompt","values":[{"displayName":"Type Name or ID","name":"type","type":"options","options":[{"name":"AI","value":"AIMessagePromptTemplate"},{"name":"System","value":"SystemMessagePromptTemplate"},{"name":"User","value":"HumanMessagePromptTemplate"}],"default":"SystemMessagePromptTemplate"},{"displayName":"Message Type","name":"messageType","type":"options","displayOptions":{"show":{"type":["HumanMessagePromptTemplate"]}},"options":[{"name":"Text","value":"text","description":"Simple text message"},{"name":"Image (Binary)","value":"imageBinary","description":"Process the binary input from the previous node"},{"name":"Image (URL)","value":"imageUrl","description":"Process the image from the specified URL"}],"default":"text"},{"displayName":"Image Data Field Name","name":"binaryImageDataKey","type":"string","default":"data","required":true,"description":"The name of the field in the chain's input that contains the binary image file to be processed","displayOptions":{"show":{"messageType":["imageBinary"]}}},{"displayName":"Image URL","name":"imageUrl","type":"string","default":"","required":true,"description":"URL to the image to be processed","displayOptions":{"show":{"messageType":["imageUrl"]}}},{"displayName":"Image Details","description":"Control how the model processes the image and generates its textual understanding","name":"imageDetail","type":"options","displayOptions":{"show":{"type":["HumanMessagePromptTemplate"],"messageType":["imageBinary","imageUrl"]}},"options":[{"name":"Auto","value":"auto","description":"Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting"},{"name":"Low","value":"low","description":"The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail."},{"name":"High","value":"high","description":"Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. 
Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens."}],"default":"auto"},{"displayName":"Message","name":"message","type":"string","required":true,"displayOptions":{"hide":{"messageType":["imageBinary","imageUrl"]}},"default":""}]}]},{"displayName":"Batch Processing","name":"batching","type":"collection","placeholder":"Add Batch Processing Option","description":"Batch processing options for rate limiting","default":{},"options":[{"displayName":"Batch Size","name":"batchSize","default":5,"type":"number","description":"How many items to process in parallel. This is useful for rate limiting, but might impact the log output ordering."},{"displayName":"Delay Between Batches","name":"delayBetweenBatches","default":0,"type":"number","description":"Delay in milliseconds between batches. This is useful for rate limiting."}],"displayOptions":{"show":{"@version":[{"_cnd":{"gte":1.7}}]}}},{"displayName":"Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='ai_outputParser'>output parser</a> on the canvas to specify the output format you require","name":"notice","type":"notice","default":"","displayOptions":{"show":{"hasOutputParser":[true]}}},{"displayName":"Connect an additional language model on the canvas to use it as a fallback if the main model fails","name":"fallbackNotice","type":"notice","default":"","displayOptions":{"show":{"needsFallback":[true]}}}]},
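
A hypothetical snapshot of the Basic LLM Chain's `messages` fixed collection, showing one system prompt plus a binary-image user message (values illustrative):

    const chainLlmParams = {
      promptType: "define",
      text: "Describe the attached image",
      messages: {
        messageValues: [
          { type: "SystemMessagePromptTemplate", message: "You are a helpful assistant" },
          {
            type: "HumanMessagePromptTemplate",
            messageType: "imageBinary", // "text" | "imageBinary" | "imageUrl"
            binaryImageDataKey: "data", // binary field holding the image
            imageDetail: "auto",        // "auto" | "low" | "high"
          },
        ],
      },
    };
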
@@ -46,7 +53,7 @@
46  53 |
{"displayName":"Mistral Cloud Chat Model","name":"lmChatMistralCloud","group":["transform"],"version":1,"description":"For advanced usage with an AI chain","defaults":{"name":"Mistral Cloud Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatmistralcloud/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"mistralCloudApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"https://api.mistral.ai/v1"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.mistral.ai/platform/endpoints/\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"filter","properties":{"pass":"={{ !$responseItem.id.includes('embed') }}"}},{"type":"setKeyValue","properties":{"name":"={{ $responseItem.id }}","value":"={{ $responseItem.id }}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"mistral-small"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"},{"displayName":"Enable Safe Mode","name":"safeMode","default":false,"type":"boolean","description":"Whether to inject a safety prompt before all conversations"},{"displayName":"Random Seed","name":"randomSeed","type":"number","description":"The seed to use for random sampling. If set, different calls will generate deterministic results."}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatMistralCloud/mistral.svg"},
47  54 |
{"displayName":"Lemonade Chat Model","name":"lmChatLemonade","group":["transform"],"version":1,"description":"Language Model Lemonade Chat","defaults":{"name":"Lemonade Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatlemonade/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"lemonadeApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"","description":"The model which will generate the completion. Models are loaded and managed through the Lemonade server.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls the randomness of the generated text. Lower values make the output more focused and deterministic, while higher values make it more diverse and random.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. Helps generate more human-like text by reducing repetitions.","type":"number"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","type":"number","default":0,"typeOptions":{"minValue":-2,"maxValue":2,"numberPrecision":1},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Positive values discourage repetition, negative values encourage it."},{"displayName":"Presence Penalty","name":"presencePenalty","type":"number","default":0,"typeOptions":{"minValue":-2,"maxValue":2,"numberPrecision":1},"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Max Tokens to Generate","name":"maxTokens","type":"number","default":-1,"description":"The maximum number of tokens to generate. Set to -1 for no limit. Be cautious when setting this to a large value, as it can lead to very long outputs."},{"displayName":"Stop Sequences","name":"stop","type":"string","default":"","description":"Comma-separated list of sequences where the model will stop generating text"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatLemonade/lemonade.svg"},
48  55 |
{"displayName":"Ollama Chat Model","name":"lmChatOllama","group":["transform"],"version":1,"description":"Language Model Ollama","defaults":{"name":"Ollama Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatollama/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"ollamaApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials.baseUrl.replace(new RegExp(\"/$\"), \"\") }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"Model","name":"model","type":"options","default":"llama3.2","description":"The model which will generate the completion. To download models, visit <a href=\"https://ollama.ai/library\">Ollama Models Library</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"required":true},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls the randomness of the generated text. Lower values make the output more focused and deterministic, while higher values make it more diverse and random.","type":"number"},{"displayName":"Top K","name":"topK","default":-1,"typeOptions":{"maxValue":100,"minValue":-1,"numberPrecision":1},"description":"Limits the number of highest probability vocabulary tokens to consider at each step. A higher value increases diversity but may reduce coherence. Set to -1 to disable.","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Chooses from the smallest possible set of tokens whose cumulative probability exceeds the probability top_p. Helps generate more human-like text by reducing repetitions.","type":"number"},{"displayName":"Frequency Penalty","name":"frequencyPenalty","type":"number","default":0,"typeOptions":{"minValue":0},"description":"Adjusts the penalty for tokens that have already appeared in the generated text. Higher values discourage repetition."},{"displayName":"Keep Alive","name":"keepAlive","type":"string","default":"5m","description":"Specifies the duration to keep the loaded model in memory after use. Useful for frequently used models. Format: 1h30m (1 hour 30 minutes)."},{"displayName":"Low VRAM Mode","name":"lowVram","type":"boolean","default":false,"description":"Whether to Activate low VRAM mode, which reduces memory usage at the cost of slower generation speed. 
Useful for GPUs with limited memory."},{"displayName":"Main GPU ID","name":"mainGpu","type":"number","default":0,"description":"Specifies the ID of the GPU to use for the main computation. Only change this if you have multiple GPUs."},{"displayName":"Context Batch Size","name":"numBatch","type":"number","default":512,"description":"Sets the batch size for prompt processing. Larger batch sizes may improve generation speed but increase memory usage."},{"displayName":"Context Length","name":"numCtx","type":"number","default":2048,"description":"The maximum number of tokens to use as context for generating the next token. Smaller values reduce memory usage, while larger values provide more context to the model."},{"displayName":"Number of GPUs","name":"numGpu","type":"number","default":-1,"description":"Specifies the number of GPUs to use for parallel processing. Set to -1 for auto-detection."},{"displayName":"Max Tokens to Generate","name":"numPredict","type":"number","default":-1,"description":"The maximum number of tokens to generate. Set to -1 for no limit. Be cautious when setting this to a large value, as it can lead to very long outputs."},{"displayName":"Number of CPU Threads","name":"numThread","type":"number","default":0,"description":"Specifies the number of CPU threads to use for processing. Set to 0 for auto-detection."},{"displayName":"Penalize Newlines","name":"penalizeNewline","type":"boolean","default":true,"description":"Whether the model will be less likely to generate newline characters, encouraging longer continuous sequences of text"},{"displayName":"Presence Penalty","name":"presencePenalty","type":"number","default":0,"description":"Adjusts the penalty for tokens based on their presence in the generated text so far. Positive values penalize tokens that have already appeared, encouraging diversity."},{"displayName":"Repetition Penalty","name":"repeatPenalty","type":"number","default":1,"description":"Adjusts the penalty factor for repeated tokens. Higher values more strongly discourage repetition. Set to 1.0 to disable repetition penalty."},{"displayName":"Use Memory Locking","name":"useMLock","type":"boolean","default":false,"description":"Whether to lock the model in memory to prevent swapping. This can improve performance but requires sufficient available memory."},{"displayName":"Use Memory Mapping","name":"useMMap","type":"boolean","default":true,"description":"Whether to use memory mapping for loading the model. This can reduce memory usage but may impact performance. Recommended to keep enabled."},{"displayName":"Load Vocabulary Only","name":"vocabOnly","type":"boolean","default":false,"description":"Whether to only load the model vocabulary without the weights. Useful for quickly testing tokenization."},{"displayName":"Output Format","name":"format","type":"options","options":[{"name":"Default","value":"default"},{"name":"JSON","value":"json"}],"default":"default","description":"Specifies the format of the API response"}]}],"iconUrl":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LMChatOllama/ollama.svg"},
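
A hypothetical options snapshot for the Ollama Chat Model, picking a few of the runtime knobs listed above (values illustrative):

    const ollamaOptions = {
      temperature: 0.7,
      numCtx: 2048,      // context window in tokens
      numPredict: -1,    // -1 = no generation cap
      keepAlive: "5m",   // how long the model stays loaded after use
      format: "default", // "json" switches the API response format
    };
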
-{"displayName":"9Router Chat Model","name":"lmChat9Router","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain","defaults":{"name":"9Router Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://github.com/9router/9router"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"nineRouterApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials?.url }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"
+{"displayName":"9Router Chat Model","name":"lmChat9Router","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain","defaults":{"name":"9Router Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://github.com/9router/9router"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"nineRouterApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials?.url }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://github.com/9router/9router\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"openai/gpt-4o"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":360000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChat9Router/9router.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChat9Router/9router.dark.svg"}},
 {"displayName":"OpenRouter Chat Model","name":"lmChatOpenRouter","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain","defaults":{"name":"OpenRouter Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenrouter/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"openRouterApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials?.url }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://openrouter.ai/docs/models\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"openai/gpt-4.1-mini"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":360000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatOpenRouter/openrouter.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatOpenRouter/openrouter.dark.svg"}},
 {"displayName":"Vercel AI Gateway Chat Model","name":"lmChatVercelAiGateway","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain via Vercel AI Gateway","defaults":{"name":"Vercel AI Gateway Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatvercel/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"vercelAiGatewayApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials?.url }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"openai/gpt-4o"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":360000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatVercelAiGateway/vercel.dark.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatVercelAiGateway/vercel.svg"}},
 {"displayName":"xAI Grok Chat Model","name":"lmChatXAiGrok","group":["transform"],"version":[1],"description":"For advanced usage with an AI chain","defaults":{"name":"xAI Grok Chat Model"},"codex":{"categories":["AI"],"subcategories":{"AI":["Language Models","Root Nodes"],"Language Models":["Chat Models (Recommended)"]},"resources":{"primaryDocumentation":[{"url":"https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatxaigrok/"}]}},"inputs":[],"outputs":["ai_languageModel"],"outputNames":["Model"],"credentials":[{"name":"xAiApi","required":true}],"requestDefaults":{"ignoreHttpStatusErrors":true,"baseURL":"={{ $credentials?.url }}"},"properties":[{"displayName":"This node must be connected to an AI chain. <a data-action='openSelectiveNodeCreator' data-action-parameter-creatorview='AI'>Insert one</a>","name":"notice","type":"notice","default":"","typeOptions":{"containerClass":"ndv-connection-hint-notice"}},{"displayName":"If using JSON response format, you must include word \"json\" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.","name":"notice","type":"notice","default":"","displayOptions":{"show":{"/options.responseFormat":["json_object"]}}},{"displayName":"Model","name":"model","type":"options","description":"The model which will generate the completion. <a href=\"https://docs.x.ai/docs/models\">Learn more</a>.","typeOptions":{"loadOptions":{"routing":{"request":{"method":"GET","url":"/models"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"data"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.id}}","value":"={{$responseItem.id}}"}},{"type":"sort","properties":{"key":"name"}}]}}}},"routing":{"send":{"type":"body","property":"model"}},"default":"grok-2-vision-1212"},{"displayName":"Options","name":"options","placeholder":"Add Option","description":"Additional options to add","type":"collection","default":{},"options":[{"displayName":"Frequency Penalty","name":"frequencyPenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim","type":"number"},{"displayName":"Maximum Number of Tokens","name":"maxTokens","default":-1,"description":"The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).","type":"number","typeOptions":{"maxValue":32768}},{"displayName":"Response Format","name":"responseFormat","default":"text","type":"options","options":[{"name":"Text","value":"text","description":"Regular text response"},{"name":"JSON","value":"json_object","description":"Enables JSON mode, which should guarantee the message the model generates is valid JSON"}]},{"displayName":"Presence Penalty","name":"presencePenalty","default":0,"typeOptions":{"maxValue":2,"minValue":-2,"numberPrecision":1},"description":"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics","type":"number"},{"displayName":"Sampling Temperature","name":"temperature","default":0.7,"typeOptions":{"maxValue":2,"minValue":0,"numberPrecision":1},"description":"Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.","type":"number"},{"displayName":"Timeout","name":"timeout","default":360000,"description":"Maximum amount of time a request is allowed to take in milliseconds","type":"number"},{"displayName":"Max Retries","name":"maxRetries","default":2,"description":"Maximum number of retries to attempt","type":"number"},{"displayName":"Top P","name":"topP","default":1,"typeOptions":{"maxValue":1,"minValue":0,"numberPrecision":1},"description":"Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.","type":"number"}]}],"iconUrl":{"light":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatXAiGrok/logo.dark.svg","dark":"icons/@n8n/n8n-nodes-langchain/dist/nodes/llms/LmChatXAiGrok/logo.svg"}},
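All four router-style entries above (9Router, OpenRouter, Vercel AI Gateway, xAI Grok) carry the same notice for JSON mode: with `responseFormat` set to `json_object`, the word "json" must appear somewhere in the prompt. A minimal sketch of the request this option translates to; the `/chat/completions` path, header shape, and function name are illustrative assumptions, not code from this package:

```ts
// Sketch only: a chat completion request with Response Format = "JSON"
// (json_object) against one of these OpenAI-compatible routers. baseURL
// mirrors requestDefaults.baseURL = "={{ $credentials?.url }}".
interface ChatCompletionBody {
  model: string;
  messages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>;
  response_format?: { type: 'text' | 'json_object' };
  temperature?: number;
}

async function completeAsJson(baseURL: string, apiKey: string): Promise<unknown> {
  const body: ChatCompletionBody = {
    model: 'openai/gpt-4o', // the 9Router node's default model id
    // Per the node's notice, the word "json" must appear in the prompt.
    messages: [{ role: 'user', content: 'Return a json object with three colors.' }],
    response_format: { type: 'json_object' },
    temperature: 0.7, // the node's default Sampling Temperature
  };
  const res = await fetch(`${baseURL}/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
    body: JSON.stringify(body),
  });
  const data = (await res.json()) as { choices: Array<{ message: { content: string } }> };
  return JSON.parse(data.choices[0].message.content); // JSON mode should make this parse
}
```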
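The `typeOptions.loadOptions.routing` blocks in these entries are n8n's declarative recipe for populating the Model dropdown: issue a GET, pluck one root property (`models` for Ollama's `/api/tags`, `data` for the OpenAI-style `/models` endpoints), map each item to a name/value pair, and sort by name. A rough TypeScript equivalent of that `postReceive` chain, with hypothetical types and names standing in for n8n's internals:

```ts
// Illustrative re-implementation of the declarative postReceive chain
// (rootProperty -> setKeyValue -> sort) that n8n's request router runs
// from these JSON descriptions. Not the package's actual code.
interface LoadedOption {
  name: string;
  value: string;
}

async function loadModelOptions(
  baseURL: string,
  url: '/api/tags' | '/models', // Ollama vs. OpenAI-style endpoint
  rootProperty: 'models' | 'data', // the {"type":"rootProperty"} step
  key: 'name' | 'id', // $responseItem.name vs. $responseItem.id
): Promise<LoadedOption[]> {
  const res = await fetch(`${baseURL}${url}`); // the "request" step
  const payload = (await res.json()) as Record<string, Array<Record<string, string>>>;
  const items = payload[rootProperty] ?? [];
  return items
    .map((item) => ({ name: item[key], value: item[key] })) // the "setKeyValue" step
    .sort((a, b) => a.name.localeCompare(b.name)); // the "sort" step, key "name"
}

// Ollama:  loadModelOptions(base, '/api/tags', 'models', 'name')
// Routers: loadModelOptions(base, '/models', 'data', 'id')
```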
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@atom8n/n8n-nodes-langchain",
-  "version": "2.5.7",
+  "version": "2.5.8",
   "description": "",
   "main": "index.js",
   "scripts": {
@@ -61,6 +61,12 @@
       "dist/nodes/agents/Agent/Agent.node.js",
       "dist/nodes/agents/Agent/AgentTool.node.js",
       "dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js",
+      "dist/nodes/agents/OpenClawAgent/OpenClawAgent.node.js",
+      "dist/nodes/agents/OpenClawAgent/channels/TelegramChannel/TelegramChannel.node.js",
+      "dist/nodes/agents/OpenClawAgent/channels/WhatsAppChannel/WhatsAppChannel.node.js",
+      "dist/nodes/agents/OpenClawAgent/models/OpenCodeFreeModel/OpenCodeFreeModel.node.js",
+      "dist/nodes/agents/OpenClawAgent/mcpServers/OpenClawMcpServer/OpenClawMcpServer.node.js",
+      "dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/OpenClawPlugin.node.js",
       "dist/nodes/chains/ChainSummarization/ChainSummarization.node.js",
       "dist/nodes/chains/ChainLLM/ChainLlm.node.js",
       "dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js",
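These six additions register the new OpenClaw node bundles with n8n. Judging from the surrounding context lines ("nodeTypePrefix" closes the same block further down), this list is presumably the manifest's `n8n.nodes` array, which is how n8n discovers nodes in a community package. A sketch of the assumed shape; only the added paths are taken from the diff, the rest is abbreviated:

```ts
// Assumed excerpt of the "n8n" manifest block this hunk edits.
const manifestExcerpt = {
  n8n: {
    nodes: [
      'dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js',
      'dist/nodes/agents/OpenClawAgent/OpenClawAgent.node.js',
      'dist/nodes/agents/OpenClawAgent/channels/TelegramChannel/TelegramChannel.node.js',
      'dist/nodes/agents/OpenClawAgent/channels/WhatsAppChannel/WhatsAppChannel.node.js',
      'dist/nodes/agents/OpenClawAgent/models/OpenCodeFreeModel/OpenCodeFreeModel.node.js',
      'dist/nodes/agents/OpenClawAgent/mcpServers/OpenClawMcpServer/OpenClawMcpServer.node.js',
      'dist/nodes/agents/OpenClawAgent/plugins/OpenClawPlugin/OpenClawPlugin.node.js',
      'dist/nodes/chains/ChainSummarization/ChainSummarization.node.js',
      // ...remaining entries unchanged...
    ],
    nodeTypePrefix: '@n8n/n8n-nodes-langchain', // visible at new line 174 below
  },
};
```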
@@ -168,7 +174,7 @@
     "nodeTypePrefix": "@n8n/n8n-nodes-langchain"
   },
   "devDependencies": {
-    "@n8n/eslint-plugin-community-nodes": "npm:@atom8n/eslint-plugin-community-nodes@0.10.7",
+    "@n8n/eslint-plugin-community-nodes": "npm:@atom8n/eslint-plugin-community-nodes@0.10.8",
     "@types/basic-auth": "^1.1.3",
     "@types/cheerio": "^0.22.15",
     "@types/html-to-text": "^9.0.1",
@@ -179,7 +185,7 @@
     "@types/temp": "^0.9.1",
     "fast-glob": "3.2.12",
     "jest-mock-extended": "^3.0.4",
-    "n8n-core": "npm:@atom8n/n8n-core@2.5.7",
+    "n8n-core": "npm:@atom8n/n8n-core@2.5.8",
     "tsup": "^8.5.0"
   },
   "dependencies": {
@@ -211,13 +217,13 @@
     "@langchain/weaviate": "1.0.1",
     "@modelcontextprotocol/sdk": "1.24.0",
     "@mozilla/readability": "0.6.0",
-    "@n8n/client-oauth2": "npm:@atom8n/client-oauth2@1.3.7",
-    "@n8n/config": "npm:@atom8n/config@2.4.7",
-    "@n8n/di": "npm:@atom8n/di@0.13.7",
-    "@n8n/errors": "npm:@atom8n/errors@0.8.7",
-    "@n8n/json-schema-to-zod": "npm:@atom8n/json-schema-to-zod@1.9.7",
+    "@n8n/client-oauth2": "npm:@atom8n/client-oauth2@1.3.8",
+    "@n8n/config": "npm:@atom8n/config@2.4.8",
+    "@n8n/di": "npm:@atom8n/di@0.13.8",
+    "@n8n/errors": "npm:@atom8n/errors@0.8.8",
+    "@n8n/json-schema-to-zod": "npm:@atom8n/json-schema-to-zod@1.9.8",
     "@n8n/typeorm": "0.3.20-15",
-    "@n8n/typescript-config": "npm:@atom8n/typescript-config@1.6.7",
+    "@n8n/typescript-config": "npm:@atom8n/typescript-config@1.6.8",
     "@n8n/vm2": "3.9.25",
     "@pinecone-database/pinecone": "^5.0.2",
     "@qdrant/js-client-rest": "^1.16.2",
@@ -242,8 +248,8 @@
     "mammoth": "1.11.0",
     "mime-types": "3.0.1",
     "mongodb": "^6.17.0",
-    "n8n-nodes-base": "npm:@atom8n/n8n-nodes-base@2.5.7",
-    "n8n-workflow": "npm:@atom8n/n8n-workflow@2.5.7",
+    "n8n-nodes-base": "npm:@atom8n/n8n-nodes-base@2.5.8",
+    "n8n-workflow": "npm:@atom8n/n8n-workflow@2.5.8",
     "openai": "^6.9.0",
     "pdf-parse": "1.1.1",
     "pg": "8.12.0",
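Every bumped dependency uses npm's alias protocol, `npm:<actual-package>@<version>`, which pins the @atom8n forks under the upstream n8n package names so existing imports resolve unchanged. For example:

```ts
// "n8n-workflow": "npm:@atom8n/n8n-workflow@2.5.8" tells npm to install
// @atom8n/n8n-workflow@2.5.8 at node_modules/n8n-workflow, so upstream-style
// imports keep working without any source changes:
import { NodeOperationError } from 'n8n-workflow';

console.log(typeof NodeOperationError); // "function", served by the @atom8n fork
```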