@n8n/n8n-nodes-langchain 1.120.1 → 1.121.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/credentials/VercelAiGatewayApi.credentials.js +1 -1
- package/dist/credentials/VercelAiGatewayApi.credentials.js.map +1 -1
- package/dist/known/credentials.json +1 -0
- package/dist/known/nodes.json +4 -0
- package/dist/nodes/agents/Agent/V3/AgentV3.node.js +1 -1
- package/dist/nodes/agents/Agent/V3/AgentV3.node.js.map +1 -1
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js +20 -399
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/execute.js.map +1 -1
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildExecutionContext.js +74 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildExecutionContext.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildResponseMetadata.js +37 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/buildResponseMetadata.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/checkMaxIterations.js +40 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/checkMaxIterations.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/createAgentSequence.js +61 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/createAgentSequence.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/executeBatch.js +88 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/executeBatch.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/finalizeResult.js +58 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/finalizeResult.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/index.js +50 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/index.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/prepareItemContext.js +66 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/prepareItemContext.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.js +99 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/types.js +17 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/types.js.map +1 -0
- package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js +55 -19
- package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js.map +1 -1
- package/dist/nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.js +1 -0
- package/dist/nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.js.map +1 -1
- package/dist/nodes/mcp/McpClient/McpClient.node.js +335 -0
- package/dist/nodes/mcp/McpClient/McpClient.node.js.map +1 -0
- package/dist/nodes/mcp/McpClient/listSearch.js +58 -0
- package/dist/nodes/mcp/McpClient/listSearch.js.map +1 -0
- package/dist/nodes/mcp/McpClient/resourceMapping.js +61 -0
- package/dist/nodes/mcp/McpClient/resourceMapping.js.map +1 -0
- package/dist/nodes/mcp/McpClient/utils.js +248 -0
- package/dist/nodes/mcp/McpClient/utils.js.map +1 -0
- package/dist/nodes/mcp/McpClientTool/McpClientTool.node.js +13 -55
- package/dist/nodes/mcp/McpClientTool/McpClientTool.node.js.map +1 -1
- package/dist/nodes/mcp/McpClientTool/loadOptions.js +2 -3
- package/dist/nodes/mcp/McpClientTool/loadOptions.js.map +1 -1
- package/dist/nodes/mcp/McpClientTool/types.js.map +1 -1
- package/dist/nodes/mcp/McpClientTool/utils.js +2 -174
- package/dist/nodes/mcp/McpClientTool/utils.js.map +1 -1
- package/dist/nodes/mcp/{McpClientTool → shared}/descriptions.js +40 -0
- package/dist/nodes/mcp/shared/descriptions.js.map +1 -0
- package/dist/nodes/mcp/shared/types.js +17 -0
- package/dist/nodes/mcp/shared/types.js.map +1 -0
- package/dist/nodes/mcp/shared/utils.js +231 -0
- package/dist/nodes/mcp/shared/utils.js.map +1 -0
- package/dist/nodes/tools/ToolHttpRequest/utils.js +5 -11
- package/dist/nodes/tools/ToolHttpRequest/utils.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/OpenAi.node.js +3 -2
- package/dist/nodes/vendors/OpenAi/OpenAi.node.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/v1/actions/audio/index.js +2 -2
- package/dist/nodes/vendors/OpenAi/v1/actions/audio/index.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/v2/OpenAiV2.node.js +1 -1
- package/dist/nodes/vendors/OpenAi/v2/OpenAiV2.node.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/v2/actions/audio/index.js +2 -2
- package/dist/nodes/vendors/OpenAi/v2/actions/audio/index.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/v2/actions/text/classify.operation.js +12 -3
- package/dist/nodes/vendors/OpenAi/v2/actions/text/classify.operation.js.map +1 -1
- package/dist/types/credentials.json +2 -2
- package/dist/types/nodes.json +5 -4
- package/dist/utils/agent-execution/buildSteps.js +77 -0
- package/dist/utils/agent-execution/buildSteps.js.map +1 -0
- package/dist/utils/agent-execution/createEngineRequests.js +48 -0
- package/dist/utils/agent-execution/createEngineRequests.js.map +1 -0
- package/dist/utils/agent-execution/index.js +42 -0
- package/dist/utils/agent-execution/index.js.map +1 -0
- package/dist/utils/agent-execution/memoryManagement.js +66 -0
- package/dist/utils/agent-execution/memoryManagement.js.map +1 -0
- package/dist/utils/agent-execution/processEventStream.js +128 -0
- package/dist/utils/agent-execution/processEventStream.js.map +1 -0
- package/dist/utils/agent-execution/types.js +17 -0
- package/dist/utils/agent-execution/types.js.map +1 -0
- package/package.json +12 -10
- package/dist/nodes/mcp/McpClientTool/descriptions.js.map +0 -1
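The largest change in this release is visible in the file list above: the monolithic ToolsAgent V3 execute.js (-399 lines) has been split into small helper modules under V3/helpers/ plus reusable utilities under dist/utils/agent-execution/. The sketch below shows roughly how those pieces appear to fit together for a single input item. The runAgent signature and the EngineRequest return shape are taken from the bundled source maps shown further down; the surrounding wiring (how prepareItemContext builds the item context and how tool requests are dispatched) is simplified for illustration and is not the package's actual execute.js.

import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AgentRunnableSequence } from 'langchain/agents';
import type { BaseChatMemory } from 'langchain/memory';
import type { IExecuteFunctions } from 'n8n-workflow';

import { runAgent } from './helpers/runAgent';
import type { ItemContext } from './helpers/prepareItemContext';

// Hypothetical per-item driver, illustrating the new control flow only.
async function processItem(
	ctx: IExecuteFunctions,
	executor: AgentRunnableSequence,
	itemContext: ItemContext, // in the real node this comes from prepareItemContext()
	model: BaseChatModel,
	memory: BaseChatMemory | undefined,
) {
	const result = await runAgent(ctx, executor, itemContext, model, memory);

	if ('actions' in result) {
		// The model requested tool calls: runAgent returns an EngineRequest
		// ({ actions, metadata }); n8n executes the tools and then calls
		// runAgent again with an EngineResponse carrying the results.
		return { pendingToolCalls: result.actions, metadata: result.metadata };
	}

	// Otherwise this is the final AgentResult (optionally including
	// intermediateSteps when the returnIntermediateSteps option is set).
	return result;
}
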
package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.js.map
@@ -0,0 +1 @@
+
{"version":3,"sources":["../../../../../../../../nodes/agents/Agent/agents/ToolsAgent/V3/helpers/runAgent.ts"],"sourcesContent":["import type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport type { AgentRunnableSequence } from 'langchain/agents';\nimport type { BaseChatMemory } from 'langchain/memory';\nimport type {\n\tIExecuteFunctions,\n\tISupplyDataFunctions,\n\tEngineResponse,\n\tEngineRequest,\n} from 'n8n-workflow';\n\nimport {\n\tloadMemory,\n\tprocessEventStream,\n\tcreateEngineRequests,\n\tsaveToMemory,\n} from '@utils/agent-execution';\n\nimport { SYSTEM_MESSAGE } from '../../prompt';\nimport type { AgentResult, RequestResponseMetadata } from '../types';\nimport { buildResponseMetadata } from './buildResponseMetadata';\nimport type { ItemContext } from './prepareItemContext';\n\ntype RunAgentResult = AgentResult | EngineRequest<RequestResponseMetadata>;\n/**\n * Runs the agent for a single item, choosing between streaming or non-streaming execution.\n * Handles both regular execution and execution after tool calls.\n *\n * @param ctx - The execution context\n * @param executor - The agent runnable sequence\n * @param itemContext - Context for the current item\n * @param model - The chat model for token counting\n * @param memory - Optional memory for conversation context\n * @param response - Optional engine response with previous tool calls\n * @returns AgentResult or engine request with tool calls\n */\nexport async function runAgent(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\texecutor: AgentRunnableSequence,\n\titemContext: ItemContext,\n\tmodel: BaseChatModel,\n\tmemory: BaseChatMemory | undefined,\n\tresponse?: EngineResponse<RequestResponseMetadata>,\n): Promise<RunAgentResult> {\n\tconst { itemIndex, input, steps, tools, options } = itemContext;\n\n\tconst invokeParams = {\n\t\tsteps,\n\t\tinput,\n\t\tsystem_message: options.systemMessage ?? SYSTEM_MESSAGE,\n\t\tformatting_instructions:\n\t\t\t'IMPORTANT: For your response to user, you MUST use the `format_final_json_response` tool with your complete answer formatted according to the required schema. Do not attempt to format the JSON manually - always use this tool. Your response will be rejected if it is not properly formatted through this tool. Only use this tool once you are ready to provide your final answer.',\n\t};\n\tconst executeOptions = { signal: ctx.getExecutionCancelSignal() };\n\n\t// Check if streaming is actually available\n\tconst isStreamingAvailable = 'isStreaming' in ctx ? 
ctx.isStreaming?.() : undefined;\n\n\tif (\n\t\t'isStreaming' in ctx &&\n\t\toptions.enableStreaming &&\n\t\tisStreamingAvailable &&\n\t\tctx.getNode().typeVersion >= 2.1\n\t) {\n\t\tconst chatHistory = await loadMemory(memory, model, options.maxTokensFromMemory);\n\t\tconst eventStream = executor.streamEvents(\n\t\t\t{\n\t\t\t\t...invokeParams,\n\t\t\t\tchat_history: chatHistory,\n\t\t\t},\n\t\t\t{\n\t\t\t\tversion: 'v2',\n\t\t\t\t...executeOptions,\n\t\t\t},\n\t\t);\n\n\t\tconst result = await processEventStream(\n\t\t\tctx,\n\t\t\teventStream,\n\t\t\titemIndex,\n\t\t\toptions.returnIntermediateSteps,\n\t\t\tmemory,\n\t\t\tinput,\n\t\t);\n\n\t\t// If result contains tool calls, build the request object like the normal flow\n\t\tif (result.toolCalls && result.toolCalls.length > 0) {\n\t\t\tconst actions = await createEngineRequests(result.toolCalls, itemIndex, tools);\n\n\t\t\treturn {\n\t\t\t\tactions,\n\t\t\t\tmetadata: buildResponseMetadata(response, itemIndex),\n\t\t\t};\n\t\t}\n\n\t\treturn result;\n\t} else {\n\t\t// Handle regular execution\n\t\tconst chatHistory = await loadMemory(memory, model, options.maxTokensFromMemory);\n\n\t\tconst modelResponse = await executor.invoke({\n\t\t\t...invokeParams,\n\t\t\tchat_history: chatHistory,\n\t\t});\n\n\t\tif ('returnValues' in modelResponse) {\n\t\t\t// Save conversation to memory including any tool call context\n\t\t\tif (memory && input && modelResponse.returnValues.output) {\n\t\t\t\t// If there were tool calls in this conversation, include them in the context\n\t\t\t\tlet fullOutput = modelResponse.returnValues.output as string;\n\n\t\t\t\tif (steps.length > 0) {\n\t\t\t\t\t// Include tool call information in the conversation context\n\t\t\t\t\tconst toolContext = steps\n\t\t\t\t\t\t.map(\n\t\t\t\t\t\t\t(step) =>\n\t\t\t\t\t\t\t\t`Tool: ${step.action.tool}, Input: ${JSON.stringify(step.action.toolInput)}, Result: ${step.observation}`,\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.join('; ');\n\t\t\t\t\tfullOutput = `[Used tools: ${toolContext}] ${fullOutput}`;\n\t\t\t\t}\n\n\t\t\t\tawait saveToMemory(input, fullOutput, memory);\n\t\t\t}\n\t\t\t// Include intermediate steps if requested\n\t\t\tconst result = { ...modelResponse.returnValues };\n\t\t\tif (options.returnIntermediateSteps && steps.length > 0) {\n\t\t\t\tresult.intermediateSteps = steps;\n\t\t\t}\n\t\t\treturn result;\n\t\t}\n\n\t\t// If response contains tool calls, we need to return this in the right format\n\t\tconst actions = await createEngineRequests(modelResponse, itemIndex, tools);\n\n\t\treturn {\n\t\t\tactions,\n\t\t\tmetadata: buildResponseMetadata(response, 
itemIndex),\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAUA,6BAKO;AAEP,oBAA+B;AAE/B,mCAAsC;AAgBtC,eAAsB,SACrB,KACA,UACA,aACA,OACA,QACA,UAC0B;AAC1B,QAAM,EAAE,WAAW,OAAO,OAAO,OAAO,QAAQ,IAAI;AAEpD,QAAM,eAAe;AAAA,IACpB;AAAA,IACA;AAAA,IACA,gBAAgB,QAAQ,iBAAiB;AAAA,IACzC,yBACC;AAAA,EACF;AACA,QAAM,iBAAiB,EAAE,QAAQ,IAAI,yBAAyB,EAAE;AAGhE,QAAM,uBAAuB,iBAAiB,MAAM,IAAI,cAAc,IAAI;AAE1E,MACC,iBAAiB,OACjB,QAAQ,mBACR,wBACA,IAAI,QAAQ,EAAE,eAAe,KAC5B;AACD,UAAM,cAAc,UAAM,mCAAW,QAAQ,OAAO,QAAQ,mBAAmB;AAC/E,UAAM,cAAc,SAAS;AAAA,MAC5B;AAAA,QACC,GAAG;AAAA,QACH,cAAc;AAAA,MACf;AAAA,MACA;AAAA,QACC,SAAS;AAAA,QACT,GAAG;AAAA,MACJ;AAAA,IACD;AAEA,UAAM,SAAS,UAAM;AAAA,MACpB;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,MACR;AAAA,MACA;AAAA,IACD;AAGA,QAAI,OAAO,aAAa,OAAO,UAAU,SAAS,GAAG;AACpD,YAAM,UAAU,UAAM,6CAAqB,OAAO,WAAW,WAAW,KAAK;AAE7E,aAAO;AAAA,QACN;AAAA,QACA,cAAU,oDAAsB,UAAU,SAAS;AAAA,MACpD;AAAA,IACD;AAEA,WAAO;AAAA,EACR,OAAO;AAEN,UAAM,cAAc,UAAM,mCAAW,QAAQ,OAAO,QAAQ,mBAAmB;AAE/E,UAAM,gBAAgB,MAAM,SAAS,OAAO;AAAA,MAC3C,GAAG;AAAA,MACH,cAAc;AAAA,IACf,CAAC;AAED,QAAI,kBAAkB,eAAe;AAEpC,UAAI,UAAU,SAAS,cAAc,aAAa,QAAQ;AAEzD,YAAI,aAAa,cAAc,aAAa;AAE5C,YAAI,MAAM,SAAS,GAAG;AAErB,gBAAM,cAAc,MAClB;AAAA,YACA,CAAC,SACA,SAAS,KAAK,OAAO,IAAI,YAAY,KAAK,UAAU,KAAK,OAAO,SAAS,CAAC,aAAa,KAAK,WAAW;AAAA,UACzG,EACC,KAAK,IAAI;AACX,uBAAa,gBAAgB,WAAW,KAAK,UAAU;AAAA,QACxD;AAEA,kBAAM,qCAAa,OAAO,YAAY,MAAM;AAAA,MAC7C;AAEA,YAAM,SAAS,EAAE,GAAG,cAAc,aAAa;AAC/C,UAAI,QAAQ,2BAA2B,MAAM,SAAS,GAAG;AACxD,eAAO,oBAAoB;AAAA,MAC5B;AACA,aAAO;AAAA,IACR;AAGA,UAAM,UAAU,UAAM,6CAAqB,eAAe,WAAW,KAAK;AAE1E,WAAO;AAAA,MACN;AAAA,MACA,cAAU,oDAAsB,UAAU,SAAS;AAAA,IACpD;AAAA,EACD;AACD;","names":[]}
package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/types.js
@@ -0,0 +1,17 @@
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __copyProps = (to, from, except, desc) => {
+if (from && typeof from === "object" || typeof from === "function") {
+for (let key of __getOwnPropNames(from))
+if (!__hasOwnProp.call(to, key) && key !== except)
+__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+}
+return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var types_exports = {};
+module.exports = __toCommonJS(types_exports);
+//# sourceMappingURL=types.js.map
package/dist/nodes/agents/Agent/agents/ToolsAgent/V3/types.js.map
@@ -0,0 +1 @@
+
{"version":3,"sources":["../../../../../../../nodes/agents/Agent/agents/ToolsAgent/V3/types.ts"],"sourcesContent":["import type {\n\tToolCallData,\n\tToolCallRequest,\n\tAgentResult,\n\tRequestResponseMetadata as SharedRequestResponseMetadata,\n} from '@utils/agent-execution';\n\n// Re-export shared types for backwards compatibility\nexport type { ToolCallData, ToolCallRequest, AgentResult };\n\n// Use the shared metadata type directly (it already includes previousRequests)\nexport type RequestResponseMetadata = SharedRequestResponseMetadata;\n\n// Keep the IntermediateStep type for compatibility\nexport type IntermediateStep = {\n\taction: {\n\t\ttool: string;\n\t\ttoolInput: Record<string, unknown>;\n\t\tlog: string;\n\t\tmessageLog: unknown[];\n\t\ttoolCallId: string;\n\t\ttype: string;\n\t};\n\tobservation?: string;\n};\n\nexport type AgentOptions = {\n\tsystemMessage?: string;\n\tmaxIterations?: number;\n\treturnIntermediateSteps?: boolean;\n\tpassthroughBinaryImages?: boolean;\n\tenableStreaming?: boolean;\n\tmaxTokensFromMemory?: number;\n};\n"],"mappings":";;;;;;;;;;;;;;AAAA;AAAA;","names":[]}
package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js
@@ -44,27 +44,56 @@ function getOutputParserSchema(outputParser) {
 );
 return schema;
 }
+function isTextFile(mimeType) {
+return mimeType.startsWith("text/") || mimeType === "application/json" || mimeType === "application/xml" || mimeType === "application/csv" || mimeType === "application/x-yaml" || mimeType === "application/yaml";
+}
+function isImageFile(mimeType) {
+return mimeType.startsWith("image/");
+}
 async function extractBinaryMessages(ctx, itemIndex) {
 const binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};
 const binaryMessages = await Promise.all(
-Object.values(binaryData).filter((data) => data.mimeType.
-
-
-
-await ctx.helpers.
-
-
-
-
+Object.values(binaryData).filter((data) => isImageFile(data.mimeType) || isTextFile(data.mimeType)).map(async (data) => {
+if (isImageFile(data.mimeType)) {
+let binaryUrlString;
+if (data.id) {
+const binaryBuffer = await ctx.helpers.binaryToBuffer(
+await ctx.helpers.getBinaryStream(data.id)
+);
+binaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(
+import_n8n_workflow.BINARY_ENCODING
+)}`;
+} else {
+binaryUrlString = data.data.includes("base64") ? data.data : `data:${data.mimeType};base64,${data.data}`;
+}
+return {
+type: "image_url",
+image_url: {
+url: binaryUrlString
+}
+};
 } else {
-
-
-
-
-
-
+let textContent;
+if (data.id) {
+const binaryBuffer = await ctx.helpers.binaryToBuffer(
+await ctx.helpers.getBinaryStream(data.id)
+);
+textContent = binaryBuffer.toString("utf-8");
+} else {
+if (data.data.includes("base64,")) {
+const base64Data = data.data.split("base64,")[1];
+textContent = Buffer.from(base64Data, "base64").toString("utf-8");
+} else {
+textContent = Buffer.from(data.data, "base64").toString("utf-8");
+}
 }
-
+return {
+type: "text",
+text: `File: ${data.fileName ?? "attachment"}
+Content:
+${textContent}`
+};
+}
 })
 );
 return new import_messages.HumanMessage({
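The hunk above widens which binary inputs the agent passes through to the model: previously only image/* attachments were converted into message parts, whereas now text-like MIME types (text/*, JSON, XML, CSV, and YAML variants) are also accepted, decoded from the binary stream or from base64, and attached as plain-text parts. A minimal sketch of the HumanMessage the agent now receives for one image plus one JSON attachment; file names and payloads are invented for illustration.

import { HumanMessage } from '@langchain/core/messages';

// Mirrors the two part shapes produced by the new extractBinaryMessages():
// images become image_url parts, text-like files become "File: ...\nContent:\n..." text parts.
const binaryMessage = new HumanMessage({
	content: [
		{
			type: 'image_url',
			image_url: { url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==' },
		},
		{
			type: 'text',
			text: 'File: report.json\nContent:\n{"total": 42}',
		},
	],
});
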
@@ -96,9 +125,16 @@ function handleAgentFinishOutput(steps) {
 const isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);
 if (isMultiOutput) {
 const multiOutputSteps = agentFinishSteps.returnValues.output;
-const
-if (
-agentFinishSteps.returnValues.output =
+const textOutputs = multiOutputSteps.filter((output) => output.type === "text" && output.text).map((output) => output.text).join("\n").trim();
+if (textOutputs) {
+agentFinishSteps.returnValues.output = textOutputs;
+} else {
+const thinkingOutputs = multiOutputSteps.filter((output) => output.type === "thinking" && output.thinking).map((output) => output.thinking).join("\n").trim();
+if (thinkingOutputs) {
+agentFinishSteps.returnValues.output = thinkingOutputs;
+} else {
+agentFinishSteps.returnValues.output = "";
+}
 }
 return agentFinishSteps;
 }
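This second common.js hunk relaxes handleAgentFinishOutput for multi-part outputs (for example Anthropic responses that mix thinking and text blocks). Instead of merging only when every part is a text block, the new logic joins all text parts, falls back to the joined thinking parts when no text is present, and otherwise sets the output to an empty string. A small illustration with made-up values:

import { handleAgentFinishOutput } from './common'; // ToolsAgent/common.ts as patched above

// An Anthropic-style finish step containing a thinking block and a text block.
const finish = {
	returnValues: {
		output: [
			{ index: 0, type: 'thinking', thinking: 'Adding the two numbers...' },
			{ index: 1, type: 'text', text: 'The total is 42.' },
		],
	},
	log: '',
};

// Text blocks win: returnValues.output is collapsed to 'The total is 42.'.
// With only thinking blocks it would fall back to their joined text,
// and with neither it becomes '' instead of remaining an array.
const merged = handleAgentFinishOutput(finish);
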
package/dist/nodes/agents/Agent/agents/ToolsAgent/common.js.map
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../../../nodes/agents/Agent/agents/ToolsAgent/common.ts"],"sourcesContent":["import type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport { HumanMessage } from '@langchain/core/messages';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport { ChatPromptTemplate, type BaseMessagePromptTemplateLike } from '@langchain/core/prompts';\nimport type { AgentAction, AgentFinish } from 'langchain/agents';\nimport type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output_parser';\nimport type { BaseChatMemory } from 'langchain/memory';\nimport { DynamicStructuredTool, type Tool } from 'langchain/tools';\nimport { BINARY_ENCODING, jsonParse, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\nimport type { IExecuteFunctions, ISupplyDataFunctions } from 'n8n-workflow';\nimport type { ZodObject } from 'zod';\nimport { z } from 'zod';\n\nimport { isChatInstance, getConnectedTools } from '@utils/helpers';\nimport { type N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';\n/* -----------------------------------------------------------\n Output Parser Helper\n----------------------------------------------------------- */\n/**\n * Retrieve the output parser schema.\n * If the parser does not return a valid schema, default to a schema with a single text field.\n */\nexport function getOutputParserSchema(\n\toutputParser: N8nOutputParser,\n\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n): ZodObject<any, any, any, any> {\n\tconst schema =\n\t\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n\t\t(outputParser.getSchema() as ZodObject<any, any, any, any>) ?? z.object({ text: z.string() });\n\treturn schema;\n}\n\n/* -----------------------------------------------------------\n Binary Data Helpers\n----------------------------------------------------------- */\n/**\n * Extracts binary image messages from the input data.\n * When operating in filesystem mode, the binary stream is first converted to a buffer.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @returns A HumanMessage containing the binary image messages.\n */\nexport async function extractBinaryMessages(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\titemIndex: number,\n): Promise<HumanMessage> {\n\tconst binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};\n\tconst binaryMessages = await Promise.all(\n\t\tObject.values(binaryData)\n\t\t\t.filter((data) => data.mimeType.startsWith('image/'))\n\t\t\t.map(async (data) => {\n\t\t\t\tlet binaryUrlString: string;\n\n\t\t\t\t// In filesystem mode we need to get binary stream by id before converting it to buffer\n\t\t\t\tif (data.id) {\n\t\t\t\t\tconst binaryBuffer = await ctx.helpers.binaryToBuffer(\n\t\t\t\t\t\tawait ctx.helpers.getBinaryStream(data.id),\n\t\t\t\t\t);\n\t\t\t\t\tbinaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(\n\t\t\t\t\t\tBINARY_ENCODING,\n\t\t\t\t\t)}`;\n\t\t\t\t} else {\n\t\t\t\t\tbinaryUrlString = data.data.includes('base64')\n\t\t\t\t\t\t? 
data.data\n\t\t\t\t\t\t: `data:${data.mimeType};base64,${data.data}`;\n\t\t\t\t}\n\n\t\t\t\treturn {\n\t\t\t\t\ttype: 'image_url',\n\t\t\t\t\timage_url: {\n\t\t\t\t\t\turl: binaryUrlString,\n\t\t\t\t\t},\n\t\t\t\t};\n\t\t\t}),\n\t);\n\treturn new HumanMessage({\n\t\tcontent: [...binaryMessages],\n\t});\n}\n\n/* -----------------------------------------------------------\n Agent Output Format Helpers\n----------------------------------------------------------- */\n/**\n * Fixes empty content messages in agent steps.\n *\n * This function is necessary when using RunnableSequence.from in LangChain.\n * If a tool doesn't have any arguments, LangChain returns input: '' (empty string).\n * This can throw an error for some providers (like Anthropic) which expect the input to always be an object.\n * This function replaces empty string inputs with empty objects to prevent such errors.\n *\n * @param steps - The agent steps to fix\n * @returns The fixed agent steps\n */\nexport function fixEmptyContentMessage(\n\tsteps: AgentFinish | ToolsAgentAction[],\n): AgentFinish | ToolsAgentAction[] {\n\tif (!Array.isArray(steps)) return steps;\n\n\tsteps.forEach((step) => {\n\t\tif ('messageLog' in step && step.messageLog !== undefined) {\n\t\t\tif (Array.isArray(step.messageLog)) {\n\t\t\t\tstep.messageLog.forEach((message: BaseMessage) => {\n\t\t\t\t\tif ('content' in message && Array.isArray(message.content)) {\n\t\t\t\t\t\t(message.content as Array<{ input?: string | object }>).forEach((content) => {\n\t\t\t\t\t\t\tif (content.input === '') {\n\t\t\t\t\t\t\t\tcontent.input = {};\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t});\n\n\treturn steps;\n}\n\n/**\n * Ensures consistent handling of outputs regardless of the model used,\n * providing a unified output format for further processing.\n *\n * This method is necessary to handle different output formats from various language models.\n * Specifically, it checks if the agent step is the final step (contains returnValues) and determines\n * if the output is a simple string (e.g., from OpenAI models) or an array of outputs (e.g., from Anthropic models).\n *\n * Examples:\n * 1. Anthropic model output:\n * ```json\n * {\n * \"output\": [\n * {\n * \"index\": 0,\n * \"type\": \"text\",\n * \"text\": \"The result of the calculation is approximately 1001.8166...\"\n * }\n * ]\n * }\n *```\n * 2. 
OpenAI model output:\n * ```json\n * {\n * \"output\": \"The result of the calculation is approximately 1001.82...\"\n * }\n * ```\n *\n * @param steps - The agent finish or agent action steps.\n * @returns The modified agent finish steps or the original steps.\n */\nexport function handleAgentFinishOutput(\n\tsteps: AgentFinish | AgentAction[],\n): AgentFinish | AgentAction[] {\n\ttype AgentMultiOutputFinish = AgentFinish & {\n\t\treturnValues: { output: Array<{ text: string; type: string; index: number }> };\n\t};\n\tconst agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;\n\n\tif (agentFinishSteps.returnValues) {\n\t\tconst isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);\n\t\tif (isMultiOutput) {\n\t\t\t// If all items in the multi-output array are of type 'text', merge them into a single string\n\t\t\tconst multiOutputSteps = agentFinishSteps.returnValues.output as Array<{\n\t\t\t\tindex: number;\n\t\t\t\ttype: string;\n\t\t\t\ttext: string;\n\t\t\t}>;\n\t\t\tconst isTextOnly = multiOutputSteps.every((output) => 'text' in output);\n\t\t\tif (isTextOnly) {\n\t\t\t\tagentFinishSteps.returnValues.output = multiOutputSteps\n\t\t\t\t\t.map((output) => output.text)\n\t\t\t\t\t.join('\\n')\n\t\t\t\t\t.trim();\n\t\t\t}\n\t\t\treturn agentFinishSteps;\n\t\t}\n\t}\n\n\treturn agentFinishSteps;\n}\n\n/**\n * Wraps the parsed output so that it can be stored in memory.\n * If memory is connected, the output is stringified.\n *\n * @param output - The parsed output object\n * @param memory - The connected memory (if any)\n * @returns The formatted output object\n */\nexport function handleParsedStepOutput(\n\toutput: Record<string, unknown>,\n\tmemory?: BaseChatMemory,\n): { returnValues: Record<string, unknown>; log: string } {\n\treturn {\n\t\treturnValues: memory ? { output: JSON.stringify(output) } : output,\n\t\tlog: 'Final response formatted',\n\t};\n}\n\n/**\n * Parses agent steps using the provided output parser.\n * If the agent used the 'format_final_json_response' tool, the output is parsed accordingly.\n *\n * @param steps - The agent finish or action steps\n * @param outputParser - The output parser (if defined)\n * @param memory - The connected memory (if any)\n * @returns The parsed steps with the final output\n */\nexport const getAgentStepsParser =\n\t(outputParser?: N8nOutputParser, memory?: BaseChatMemory) =>\n\tasync (steps: AgentFinish | AgentAction[]): Promise<AgentFinish | AgentAction[]> => {\n\t\t// Check if the steps contain the 'format_final_json_response' tool invocation.\n\t\tif (Array.isArray(steps)) {\n\t\t\tconst responseParserTool = steps.find((step) => step.tool === 'format_final_json_response');\n\t\t\tif (responseParserTool && outputParser) {\n\t\t\t\tconst toolInput = responseParserTool.toolInput;\n\t\t\t\t// Ensure the tool input is a string\n\t\t\t\tconst parserInput = toolInput instanceof Object ? 
JSON.stringify(toolInput) : toolInput;\n\t\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t\t}\n\t\t}\n\n\t\t// Otherwise, if the steps contain a returnValues field, try to parse them manually.\n\t\tif (outputParser && typeof steps === 'object' && (steps as AgentFinish).returnValues) {\n\t\t\tconst finalResponse = (steps as AgentFinish).returnValues;\n\t\t\tlet parserInput: string;\n\n\t\t\tif (finalResponse instanceof Object) {\n\t\t\t\tif ('output' in finalResponse) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst parsedOutput = jsonParse<Record<string, unknown>>(finalResponse.output);\n\t\t\t\t\t\t// Check if the parsed output already has the expected structure\n\t\t\t\t\t\t// If it already has { output: ... }, use it as-is to avoid double wrapping\n\t\t\t\t\t\t// Otherwise, wrap it in { output: ... } as expected by the parser\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\tparsedOutput !== null &&\n\t\t\t\t\t\t\ttypeof parsedOutput === 'object' &&\n\t\t\t\t\t\t\t'output' in parsedOutput &&\n\t\t\t\t\t\t\tObject.keys(parsedOutput).length === 1\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t// Already has the expected structure, use as-is\n\t\t\t\t\t\t\tparserInput = JSON.stringify(parsedOutput);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Needs wrapping for the parser\n\t\t\t\t\t\t\tparserInput = JSON.stringify({ output: parsedOutput });\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t// Fallback to the raw output if parsing fails.\n\t\t\t\t\t\tparserInput = finalResponse.output;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If the output is not an object, we will stringify it as it is\n\t\t\t\t\tparserInput = JSON.stringify(finalResponse);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tparserInput = finalResponse;\n\t\t\t}\n\n\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t}\n\n\t\treturn handleAgentFinishOutput(steps);\n\t};\n\n/* -----------------------------------------------------------\n Agent Setup Helpers\n----------------------------------------------------------- */\n/**\n * Retrieves the language model from the input connection.\n * Throws an error if the model is not a valid chat instance or does not support tools.\n *\n * @param ctx - The execution context\n * @returns The validated chat model\n */\nexport async function getChatModel(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\tindex: number = 0,\n): Promise<BaseChatModel | undefined> {\n\tconst connectedModels = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);\n\n\tlet model;\n\n\tif (Array.isArray(connectedModels) && index !== undefined) {\n\t\tif (connectedModels.length <= index) {\n\t\t\treturn undefined;\n\t\t}\n\t\t// We get the models in reversed order from the workflow so we need to reverse them to match the right index\n\t\tconst reversedModels = [...connectedModels].reverse();\n\t\tmodel = reversedModels[index] as BaseChatModel;\n\t} else {\n\t\tmodel = connectedModels as BaseChatModel;\n\t}\n\n\tif (!isChatInstance(model) || !model.bindTools) {\n\t\tthrow new NodeOperationError(\n\t\t\tctx.getNode(),\n\t\t\t'Tools Agent requires Chat Model which supports Tools calling',\n\t\t);\n\t}\n\treturn model;\n}\n\n/**\n * Retrieves the memory instance from the input connection if it is connected\n *\n * @param ctx - The execution context\n * @returns The connected memory (if any)\n */\nexport async function 
getOptionalMemory(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n): Promise<BaseChatMemory | undefined> {\n\treturn (await ctx.getInputConnectionData(NodeConnectionTypes.AiMemory, 0)) as\n\t\t| BaseChatMemory\n\t\t| undefined;\n}\n\n/**\n * Retrieves the connected tools and (if an output parser is defined)\n * appends a structured output parser tool.\n *\n * @param ctx - The execution context\n * @param outputParser - The optional output parser\n * @returns The array of connected tools\n */\nexport async function getTools(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\toutputParser?: N8nOutputParser,\n): Promise<Array<DynamicStructuredTool | Tool>> {\n\tconst tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;\n\n\t// If an output parser is available, create a dynamic tool to validate the final output.\n\tif (outputParser) {\n\t\tconst schema = getOutputParserSchema(outputParser);\n\t\tconst structuredOutputParserTool = new DynamicStructuredTool({\n\t\t\tschema,\n\t\t\tname: 'format_final_json_response',\n\t\t\tdescription:\n\t\t\t\t'Use this tool to format your final response to the user in a structured JSON format. This tool validates your output against a schema to ensure it meets the required format. ONLY use this tool when you have completed all necessary reasoning and are ready to provide your final answer. Do not use this tool for intermediate steps or for asking questions. The output from this tool will be directly returned to the user.',\n\t\t\t// We do not use a function here because we intercept the output with the parser.\n\t\t\tfunc: async () => '',\n\t\t});\n\t\ttools.push(structuredOutputParserTool);\n\t}\n\treturn tools;\n}\n\n/**\n * Prepares the prompt messages for the agent.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @param options - Options containing systemMessage and other parameters\n * @returns The array of prompt messages\n */\nexport async function prepareMessages(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\titemIndex: number,\n\toptions: {\n\t\tsystemMessage?: string;\n\t\tpassthroughBinaryImages?: boolean;\n\t\toutputParser?: N8nOutputParser;\n\t},\n): Promise<BaseMessagePromptTemplateLike[]> {\n\tconst useSystemMessage = options.systemMessage ?? ctx.getNode().typeVersion < 1.9;\n\n\tconst messages: BaseMessagePromptTemplateLike[] = [];\n\n\tif (useSystemMessage) {\n\t\tmessages.push([\n\t\t\t'system',\n\t\t\t`{system_message}${options.outputParser ? 
'\\n\\n{formatting_instructions}' : ''}`,\n\t\t]);\n\t} else if (options.outputParser) {\n\t\tmessages.push(['system', '{formatting_instructions}']);\n\t}\n\n\tmessages.push(['placeholder', '{chat_history}'], ['human', '{input}']);\n\n\t// If there is binary data and the node option permits it, add a binary message\n\tconst hasBinaryData = ctx.getInputData()?.[itemIndex]?.binary !== undefined;\n\tif (hasBinaryData && options.passthroughBinaryImages) {\n\t\tconst binaryMessage = await extractBinaryMessages(ctx, itemIndex);\n\t\tif (binaryMessage.content.length !== 0) {\n\t\t\tmessages.push(binaryMessage);\n\t\t} else {\n\t\t\tctx.logger.debug('Not attaching binary message, since its content was empty');\n\t\t}\n\t}\n\n\t// We add the agent scratchpad last, so that the agent will not run in loops\n\t// by adding binary messages between each interaction\n\tmessages.push(['placeholder', '{agent_scratchpad}']);\n\treturn messages;\n}\n\n/**\n * Creates the chat prompt from messages.\n *\n * @param messages - The messages array\n * @returns The ChatPromptTemplate instance\n */\nexport function preparePrompt(messages: BaseMessagePromptTemplateLike[]): ChatPromptTemplate {\n\treturn ChatPromptTemplate.fromMessages(messages);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,sBAA6B;AAE7B,qBAAuE;AAIvE,mBAAiD;AACjD,0BAAoF;AAGpF,iBAAkB;AAElB,qBAAkD;AAS3C,SAAS,sBACf,cAEgC;AAChC,QAAM;AAAA;AAAA,IAEJ,aAAa,UAAU,KAAuC,aAAE,OAAO,EAAE,MAAM,aAAE,OAAO,EAAE,CAAC;AAAA;AAC7F,SAAO;AACR;AAaA,eAAsB,sBACrB,KACA,WACwB;AACxB,QAAM,aAAa,IAAI,aAAa,IAAI,SAAS,GAAG,UAAU,CAAC;AAC/D,QAAM,iBAAiB,MAAM,QAAQ;AAAA,IACpC,OAAO,OAAO,UAAU,EACtB,OAAO,CAAC,SAAS,KAAK,SAAS,WAAW,QAAQ,CAAC,EACnD,IAAI,OAAO,SAAS;AACpB,UAAI;AAGJ,UAAI,KAAK,IAAI;AACZ,cAAM,eAAe,MAAM,IAAI,QAAQ;AAAA,UACtC,MAAM,IAAI,QAAQ,gBAAgB,KAAK,EAAE;AAAA,QAC1C;AACA,0BAAkB,QAAQ,KAAK,QAAQ,WAAW,OAAO,KAAK,YAAY,EAAE;AAAA,UAC3E;AAAA,QACD,CAAC;AAAA,MACF,OAAO;AACN,0BAAkB,KAAK,KAAK,SAAS,QAAQ,IAC1C,KAAK,OACL,QAAQ,KAAK,QAAQ,WAAW,KAAK,IAAI;AAAA,MAC7C;AAEA,aAAO;AAAA,QACN,MAAM;AAAA,QACN,WAAW;AAAA,UACV,KAAK;AAAA,QACN;AAAA,MACD;AAAA,IACD,CAAC;AAAA,EACH;AACA,SAAO,IAAI,6BAAa;AAAA,IACvB,SAAS,CAAC,GAAG,cAAc;AAAA,EAC5B,CAAC;AACF;AAgBO,SAAS,uBACf,OACmC;AACnC,MAAI,CAAC,MAAM,QAAQ,KAAK,EAAG,QAAO;AAElC,QAAM,QAAQ,CAAC,SAAS;AACvB,QAAI,gBAAgB,QAAQ,KAAK,eAAe,QAAW;AAC1D,UAAI,MAAM,QAAQ,KAAK,UAAU,GAAG;AACnC,aAAK,WAAW,QAAQ,CAAC,YAAyB;AACjD,cAAI,aAAa,WAAW,MAAM,QAAQ,QAAQ,OAAO,GAAG;AAC3D,YAAC,QAAQ,QAA+C,QAAQ,CAAC,YAAY;AAC5E,kBAAI,QAAQ,UAAU,IAAI;AACzB,wBAAQ,QAAQ,CAAC;AAAA,cAClB;AAAA,YACD,CAAC;AAAA,UACF;AAAA,QACD,CAAC;AAAA,MACF;AAAA,IACD;AAAA,EACD,CAAC;AAED,SAAO;AACR;AAiCO,SAAS,wBACf,OAC8B;AAI9B,QAAM,mBAAmB;AAEzB,MAAI,iBAAiB,cAAc;AAClC,UAAM,gBAAgB,MAAM,QAAQ,iBAAiB,cAAc,MAAM;AACzE,QAAI,eAAe;AAElB,YAAM,mBAAmB,iBAAiB,aAAa;AAKvD,YAAM,aAAa,iBAAiB,MAAM,CAAC,WAAW,UAAU,MAAM;AACtE,UAAI,YAAY;AACf,yBAAiB,aAAa,SAAS,iBACrC,IAAI,CAAC,WAAW,OAAO,IAAI,EAC3B,KAAK,IAAI,EACT,KAAK;AAAA,MACR;AACA,aAAO;AAAA,IACR;AAAA,EACD;AAEA,SAAO;AACR;AAUO,SAAS,uBACf,QACA,QACyD;AACzD,SAAO;AAAA,IACN,cAAc,SAAS,EAAE,QAAQ,KAAK,UAAU,MAAM,EAAE,IAAI;AAAA,IAC5D,KAAK;AAAA,EACN;AACD;AAWO,MAAM,sBACZ,CAAC,cAAgC,WACjC,OAAO,UAA6E;AAEnF,MAAI,MAAM,QAAQ,KAAK,GAAG;AACzB,UAAM,qBAAqB,MAAM,KAAK,CAAC,SAAS,KAAK,SAAS,4BAA4B;AAC1F,QAAI,sBAAsB,cAAc;AACvC,YAAM,YAAY,mBAAmB;AAErC,YAAM,cAAc,qBAAqB,SAAS,KAAK,UAAU,SAAS,IAAI;AAC9E,YAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,aAAO,uBAAuB,cAAc,MAAM;AAAA,IACnD;AAAA,EACD;AAGA,MAAI,gBAAgB,OAAO,UAAU,YAAa,MAAsB,cAAc;AACrF,UAAM,gBAAiB,MAAsB;AAC7C,QAAI;AAEJ,QAAI,yBAAyB,QAAQ;AACpC,UAAI,YAAY
,eAAe;AAC9B,YAAI;AACH,gBAAM,mBAAe,+BAAmC,cAAc,MAAM;AAI5E,cACC,iBAAiB,QACjB,OAAO,iBAAiB,YACxB,YAAY,gBACZ,OAAO,KAAK,YAAY,EAAE,WAAW,GACpC;AAED,0BAAc,KAAK,UAAU,YAAY;AAAA,UAC1C,OAAO;AAEN,0BAAc,KAAK,UAAU,EAAE,QAAQ,aAAa,CAAC;AAAA,UACtD;AAAA,QACD,SAAS,OAAO;AAEf,wBAAc,cAAc;AAAA,QAC7B;AAAA,MACD,OAAO;AAEN,sBAAc,KAAK,UAAU,aAAa;AAAA,MAC3C;AAAA,IACD,OAAO;AACN,oBAAc;AAAA,IACf;AAEA,UAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,WAAO,uBAAuB,cAAc,MAAM;AAAA,EACnD;AAEA,SAAO,wBAAwB,KAAK;AACrC;AAYD,eAAsB,aACrB,KACA,QAAgB,GACqB;AACrC,QAAM,kBAAkB,MAAM,IAAI,uBAAuB,wCAAoB,iBAAiB,CAAC;AAE/F,MAAI;AAEJ,MAAI,MAAM,QAAQ,eAAe,KAAK,UAAU,QAAW;AAC1D,QAAI,gBAAgB,UAAU,OAAO;AACpC,aAAO;AAAA,IACR;AAEA,UAAM,iBAAiB,CAAC,GAAG,eAAe,EAAE,QAAQ;AACpD,YAAQ,eAAe,KAAK;AAAA,EAC7B,OAAO;AACN,YAAQ;AAAA,EACT;AAEA,MAAI,KAAC,+BAAe,KAAK,KAAK,CAAC,MAAM,WAAW;AAC/C,UAAM,IAAI;AAAA,MACT,IAAI,QAAQ;AAAA,MACZ;AAAA,IACD;AAAA,EACD;AACA,SAAO;AACR;AAQA,eAAsB,kBACrB,KACsC;AACtC,SAAQ,MAAM,IAAI,uBAAuB,wCAAoB,UAAU,CAAC;AAGzE;AAUA,eAAsB,SACrB,KACA,cAC+C;AAC/C,QAAM,QAAS,UAAM,kCAAkB,KAAK,MAAM,KAAK;AAGvD,MAAI,cAAc;AACjB,UAAM,SAAS,sBAAsB,YAAY;AACjD,UAAM,6BAA6B,IAAI,mCAAsB;AAAA,MAC5D;AAAA,MACA,MAAM;AAAA,MACN,aACC;AAAA;AAAA,MAED,MAAM,YAAY;AAAA,IACnB,CAAC;AACD,UAAM,KAAK,0BAA0B;AAAA,EACtC;AACA,SAAO;AACR;AAUA,eAAsB,gBACrB,KACA,WACA,SAK2C;AAC3C,QAAM,mBAAmB,QAAQ,iBAAiB,IAAI,QAAQ,EAAE,cAAc;AAE9E,QAAM,WAA4C,CAAC;AAEnD,MAAI,kBAAkB;AACrB,aAAS,KAAK;AAAA,MACb;AAAA,MACA,mBAAmB,QAAQ,eAAe,kCAAkC,EAAE;AAAA,IAC/E,CAAC;AAAA,EACF,WAAW,QAAQ,cAAc;AAChC,aAAS,KAAK,CAAC,UAAU,2BAA2B,CAAC;AAAA,EACtD;AAEA,WAAS,KAAK,CAAC,eAAe,gBAAgB,GAAG,CAAC,SAAS,SAAS,CAAC;AAGrE,QAAM,gBAAgB,IAAI,aAAa,IAAI,SAAS,GAAG,WAAW;AAClE,MAAI,iBAAiB,QAAQ,yBAAyB;AACrD,UAAM,gBAAgB,MAAM,sBAAsB,KAAK,SAAS;AAChE,QAAI,cAAc,QAAQ,WAAW,GAAG;AACvC,eAAS,KAAK,aAAa;AAAA,IAC5B,OAAO;AACN,UAAI,OAAO,MAAM,2DAA2D;AAAA,IAC7E;AAAA,EACD;AAIA,WAAS,KAAK,CAAC,eAAe,oBAAoB,CAAC;AACnD,SAAO;AACR;AAQO,SAAS,cAAc,UAA+D;AAC5F,SAAO,kCAAmB,aAAa,QAAQ;AAChD;","names":[]}
+
{"version":3,"sources":["../../../../../../nodes/agents/Agent/agents/ToolsAgent/common.ts"],"sourcesContent":["import type { BaseChatModel } from '@langchain/core/language_models/chat_models';\nimport { HumanMessage } from '@langchain/core/messages';\nimport type { BaseMessage } from '@langchain/core/messages';\nimport { ChatPromptTemplate, type BaseMessagePromptTemplateLike } from '@langchain/core/prompts';\nimport type { AgentAction, AgentFinish } from 'langchain/agents';\nimport type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output_parser';\nimport type { BaseChatMemory } from 'langchain/memory';\nimport { DynamicStructuredTool, type Tool } from 'langchain/tools';\nimport { BINARY_ENCODING, jsonParse, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';\nimport type { IExecuteFunctions, ISupplyDataFunctions } from 'n8n-workflow';\nimport type { ZodObject } from 'zod';\nimport { z } from 'zod';\n\nimport { isChatInstance, getConnectedTools } from '@utils/helpers';\nimport { type N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';\n\n/* -----------------------------------------------------------\n Output Parser Helper\n----------------------------------------------------------- */\n/**\n * Retrieve the output parser schema.\n * If the parser does not return a valid schema, default to a schema with a single text field.\n */\nexport function getOutputParserSchema(\n\toutputParser: N8nOutputParser,\n\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n): ZodObject<any, any, any, any> {\n\tconst schema =\n\t\t// eslint-disable-next-line @typescript-eslint/no-explicit-any\n\t\t(outputParser.getSchema() as ZodObject<any, any, any, any>) ?? z.object({ text: z.string() });\n\treturn schema;\n}\n\n/* -----------------------------------------------------------\n Binary Data Helpers\n----------------------------------------------------------- */\nfunction isTextFile(mimeType: string): boolean {\n\treturn (\n\t\tmimeType.startsWith('text/') ||\n\t\tmimeType === 'application/json' ||\n\t\tmimeType === 'application/xml' ||\n\t\tmimeType === 'application/csv' ||\n\t\tmimeType === 'application/x-yaml' ||\n\t\tmimeType === 'application/yaml'\n\t);\n}\n\nfunction isImageFile(mimeType: string): boolean {\n\treturn mimeType.startsWith('image/');\n}\n\n/**\n * Extracts binary messages (images and text files) from the input data.\n * When operating in filesystem mode, the binary stream is first converted to a buffer.\n *\n * Images are converted to base64 data URLs.\n * Text files are read as UTF-8 text and included in the message content.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @returns A HumanMessage containing the binary messages (images and text files).\n */\nexport async function extractBinaryMessages(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\titemIndex: number,\n): Promise<HumanMessage> {\n\tconst binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? 
{};\n\tconst binaryMessages = await Promise.all(\n\t\tObject.values(binaryData)\n\t\t\t// select only the files we can process\n\t\t\t.filter((data) => isImageFile(data.mimeType) || isTextFile(data.mimeType))\n\t\t\t.map(async (data) => {\n\t\t\t\t// Handle images\n\t\t\t\tif (isImageFile(data.mimeType)) {\n\t\t\t\t\tlet binaryUrlString: string;\n\n\t\t\t\t\t// In filesystem mode we need to get binary stream by id before converting it to buffer\n\t\t\t\t\tif (data.id) {\n\t\t\t\t\t\tconst binaryBuffer = await ctx.helpers.binaryToBuffer(\n\t\t\t\t\t\t\tawait ctx.helpers.getBinaryStream(data.id),\n\t\t\t\t\t\t);\n\t\t\t\t\t\tbinaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(\n\t\t\t\t\t\t\tBINARY_ENCODING,\n\t\t\t\t\t\t)}`;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbinaryUrlString = data.data.includes('base64')\n\t\t\t\t\t\t\t? data.data\n\t\t\t\t\t\t\t: `data:${data.mimeType};base64,${data.data}`;\n\t\t\t\t\t}\n\n\t\t\t\t\treturn {\n\t\t\t\t\t\ttype: 'image_url',\n\t\t\t\t\t\timage_url: {\n\t\t\t\t\t\t\turl: binaryUrlString,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t\t// Handle text files\n\t\t\t\telse {\n\t\t\t\t\tlet textContent: string;\n\t\t\t\t\tif (data.id) {\n\t\t\t\t\t\tconst binaryBuffer = await ctx.helpers.binaryToBuffer(\n\t\t\t\t\t\t\tawait ctx.helpers.getBinaryStream(data.id),\n\t\t\t\t\t\t);\n\t\t\t\t\t\ttextContent = binaryBuffer.toString('utf-8');\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Data might be base64 encoded with or without data URL prefix\n\t\t\t\t\t\tif (data.data.includes('base64,')) {\n\t\t\t\t\t\t\tconst base64Data = data.data.split('base64,')[1];\n\t\t\t\t\t\t\ttextContent = Buffer.from(base64Data, 'base64').toString('utf-8');\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Default: binary data is base64-encoded without prefix\n\t\t\t\t\t\t\ttextContent = Buffer.from(data.data, 'base64').toString('utf-8');\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn {\n\t\t\t\t\t\ttype: 'text',\n\t\t\t\t\t\ttext: `File: ${data.fileName ?? 
'attachment'}\\nContent:\\n${textContent}`,\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}),\n\t);\n\treturn new HumanMessage({\n\t\tcontent: [...binaryMessages],\n\t});\n}\n\n/* -----------------------------------------------------------\n Agent Output Format Helpers\n----------------------------------------------------------- */\n/**\n * Fixes empty content messages in agent steps.\n *\n * This function is necessary when using RunnableSequence.from in LangChain.\n * If a tool doesn't have any arguments, LangChain returns input: '' (empty string).\n * This can throw an error for some providers (like Anthropic) which expect the input to always be an object.\n * This function replaces empty string inputs with empty objects to prevent such errors.\n *\n * @param steps - The agent steps to fix\n * @returns The fixed agent steps\n */\nexport function fixEmptyContentMessage(\n\tsteps: AgentFinish | ToolsAgentAction[],\n): AgentFinish | ToolsAgentAction[] {\n\tif (!Array.isArray(steps)) return steps;\n\n\tsteps.forEach((step) => {\n\t\tif ('messageLog' in step && step.messageLog !== undefined) {\n\t\t\tif (Array.isArray(step.messageLog)) {\n\t\t\t\tstep.messageLog.forEach((message: BaseMessage) => {\n\t\t\t\t\tif ('content' in message && Array.isArray(message.content)) {\n\t\t\t\t\t\t(message.content as Array<{ input?: string | object }>).forEach((content) => {\n\t\t\t\t\t\t\tif (content.input === '') {\n\t\t\t\t\t\t\t\tcontent.input = {};\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t});\n\n\treturn steps;\n}\n\n/**\n * Ensures consistent handling of outputs regardless of the model used,\n * providing a unified output format for further processing.\n *\n * This method is necessary to handle different output formats from various language models.\n * Specifically, it checks if the agent step is the final step (contains returnValues) and determines\n * if the output is a simple string (e.g., from OpenAI models) or an array of outputs (e.g., from Anthropic models).\n *\n * Examples:\n * 1. Anthropic model output:\n * ```json\n * {\n * \"output\": [\n * {\n * \"index\": 0,\n * \"type\": \"text\",\n * \"text\": \"The result of the calculation is approximately 1001.8166...\"\n * }\n * ]\n * }\n *```\n * 2. 
OpenAI model output:\n * ```json\n * {\n * \"output\": \"The result of the calculation is approximately 1001.82...\"\n * }\n * ```\n *\n * @param steps - The agent finish or agent action steps.\n * @returns The modified agent finish steps or the original steps.\n */\nexport function handleAgentFinishOutput(\n\tsteps: AgentFinish | AgentAction[],\n): AgentFinish | AgentAction[] {\n\ttype AgentMultiOutputFinish = AgentFinish & {\n\t\treturnValues: { output: Array<{ text: string; type: string; index: number }> };\n\t};\n\tconst agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;\n\n\tif (agentFinishSteps.returnValues) {\n\t\tconst isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);\n\t\tif (isMultiOutput) {\n\t\t\tconst multiOutputSteps = agentFinishSteps.returnValues.output as Array<{\n\t\t\t\tindex: number;\n\t\t\t\ttype: string;\n\t\t\t\ttext?: string;\n\t\t\t\tthinking?: string;\n\t\t\t}>;\n\n\t\t\t// Filter out thinking blocks and join text blocks\n\t\t\tconst textOutputs = multiOutputSteps\n\t\t\t\t.filter((output) => output.type === 'text' && output.text)\n\t\t\t\t.map((output) => output.text)\n\t\t\t\t.join('\\n')\n\t\t\t\t.trim();\n\n\t\t\tif (textOutputs) {\n\t\t\t\tagentFinishSteps.returnValues.output = textOutputs;\n\t\t\t} else {\n\t\t\t\tconst thinkingOutputs = multiOutputSteps\n\t\t\t\t\t.filter((output) => output.type === 'thinking' && output.thinking)\n\t\t\t\t\t.map((output) => output.thinking)\n\t\t\t\t\t.join('\\n')\n\t\t\t\t\t.trim();\n\n\t\t\t\tif (thinkingOutputs) {\n\t\t\t\t\tagentFinishSteps.returnValues.output = thinkingOutputs;\n\t\t\t\t} else {\n\t\t\t\t\t// no output was found\n\t\t\t\t\tagentFinishSteps.returnValues.output = '';\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn agentFinishSteps;\n\t\t}\n\t}\n\n\treturn agentFinishSteps;\n}\n\n/**\n * Wraps the parsed output so that it can be stored in memory.\n * If memory is connected, the output is stringified.\n *\n * @param output - The parsed output object\n * @param memory - The connected memory (if any)\n * @returns The formatted output object\n */\nexport function handleParsedStepOutput(\n\toutput: Record<string, unknown>,\n\tmemory?: BaseChatMemory,\n): { returnValues: Record<string, unknown>; log: string } {\n\treturn {\n\t\treturnValues: memory ? { output: JSON.stringify(output) } : output,\n\t\tlog: 'Final response formatted',\n\t};\n}\n\n/**\n * Parses agent steps using the provided output parser.\n * If the agent used the 'format_final_json_response' tool, the output is parsed accordingly.\n *\n * @param steps - The agent finish or action steps\n * @param outputParser - The output parser (if defined)\n * @param memory - The connected memory (if any)\n * @returns The parsed steps with the final output\n */\nexport const getAgentStepsParser =\n\t(outputParser?: N8nOutputParser, memory?: BaseChatMemory) =>\n\tasync (steps: AgentFinish | AgentAction[]): Promise<AgentFinish | AgentAction[]> => {\n\t\t// Check if the steps contain the 'format_final_json_response' tool invocation.\n\t\tif (Array.isArray(steps)) {\n\t\t\tconst responseParserTool = steps.find((step) => step.tool === 'format_final_json_response');\n\t\t\tif (responseParserTool && outputParser) {\n\t\t\t\tconst toolInput = responseParserTool.toolInput;\n\t\t\t\t// Ensure the tool input is a string\n\t\t\t\tconst parserInput = toolInput instanceof Object ? 
JSON.stringify(toolInput) : toolInput;\n\t\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t\t}\n\t\t}\n\n\t\t// Otherwise, if the steps contain a returnValues field, try to parse them manually.\n\t\tif (outputParser && typeof steps === 'object' && (steps as AgentFinish).returnValues) {\n\t\t\tconst finalResponse = (steps as AgentFinish).returnValues;\n\t\t\tlet parserInput: string;\n\n\t\t\tif (finalResponse instanceof Object) {\n\t\t\t\tif ('output' in finalResponse) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst parsedOutput = jsonParse<Record<string, unknown>>(finalResponse.output);\n\t\t\t\t\t\t// Check if the parsed output already has the expected structure\n\t\t\t\t\t\t// If it already has { output: ... }, use it as-is to avoid double wrapping\n\t\t\t\t\t\t// Otherwise, wrap it in { output: ... } as expected by the parser\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\tparsedOutput !== null &&\n\t\t\t\t\t\t\ttypeof parsedOutput === 'object' &&\n\t\t\t\t\t\t\t'output' in parsedOutput &&\n\t\t\t\t\t\t\tObject.keys(parsedOutput).length === 1\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t// Already has the expected structure, use as-is\n\t\t\t\t\t\t\tparserInput = JSON.stringify(parsedOutput);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Needs wrapping for the parser\n\t\t\t\t\t\t\tparserInput = JSON.stringify({ output: parsedOutput });\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t// Fallback to the raw output if parsing fails.\n\t\t\t\t\t\tparserInput = finalResponse.output;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If the output is not an object, we will stringify it as it is\n\t\t\t\t\tparserInput = JSON.stringify(finalResponse);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tparserInput = finalResponse;\n\t\t\t}\n\n\t\t\tconst returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;\n\t\t\treturn handleParsedStepOutput(returnValues, memory);\n\t\t}\n\n\t\treturn handleAgentFinishOutput(steps);\n\t};\n\n/* -----------------------------------------------------------\n Agent Setup Helpers\n----------------------------------------------------------- */\n/**\n * Retrieves the language model from the input connection.\n * Throws an error if the model is not a valid chat instance or does not support tools.\n *\n * @param ctx - The execution context\n * @returns The validated chat model\n */\nexport async function getChatModel(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\tindex: number = 0,\n): Promise<BaseChatModel | undefined> {\n\tconst connectedModels = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);\n\n\tlet model;\n\n\tif (Array.isArray(connectedModels) && index !== undefined) {\n\t\tif (connectedModels.length <= index) {\n\t\t\treturn undefined;\n\t\t}\n\t\t// We get the models in reversed order from the workflow so we need to reverse them to match the right index\n\t\tconst reversedModels = [...connectedModels].reverse();\n\t\tmodel = reversedModels[index] as BaseChatModel;\n\t} else {\n\t\tmodel = connectedModels as BaseChatModel;\n\t}\n\n\tif (!isChatInstance(model) || !model.bindTools) {\n\t\tthrow new NodeOperationError(\n\t\t\tctx.getNode(),\n\t\t\t'Tools Agent requires Chat Model which supports Tools calling',\n\t\t);\n\t}\n\treturn model;\n}\n\n/**\n * Retrieves the memory instance from the input connection if it is connected\n *\n * @param ctx - The execution context\n * @returns The connected memory (if any)\n */\nexport async function 
getOptionalMemory(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n): Promise<BaseChatMemory | undefined> {\n\treturn (await ctx.getInputConnectionData(NodeConnectionTypes.AiMemory, 0)) as\n\t\t| BaseChatMemory\n\t\t| undefined;\n}\n\n/**\n * Retrieves the connected tools and (if an output parser is defined)\n * appends a structured output parser tool.\n *\n * @param ctx - The execution context\n * @param outputParser - The optional output parser\n * @returns The array of connected tools\n */\nexport async function getTools(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\toutputParser?: N8nOutputParser,\n): Promise<Array<DynamicStructuredTool | Tool>> {\n\tconst tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;\n\n\t// If an output parser is available, create a dynamic tool to validate the final output.\n\tif (outputParser) {\n\t\tconst schema = getOutputParserSchema(outputParser);\n\t\tconst structuredOutputParserTool = new DynamicStructuredTool({\n\t\t\tschema,\n\t\t\tname: 'format_final_json_response',\n\t\t\tdescription:\n\t\t\t\t'Use this tool to format your final response to the user in a structured JSON format. This tool validates your output against a schema to ensure it meets the required format. ONLY use this tool when you have completed all necessary reasoning and are ready to provide your final answer. Do not use this tool for intermediate steps or for asking questions. The output from this tool will be directly returned to the user.',\n\t\t\t// We do not use a function here because we intercept the output with the parser.\n\t\t\tfunc: async () => '',\n\t\t});\n\t\ttools.push(structuredOutputParserTool);\n\t}\n\treturn tools;\n}\n\n/**\n * Prepares the prompt messages for the agent.\n *\n * @param ctx - The execution context\n * @param itemIndex - The current item index\n * @param options - Options containing systemMessage and other parameters\n * @returns The array of prompt messages\n */\nexport async function prepareMessages(\n\tctx: IExecuteFunctions | ISupplyDataFunctions,\n\titemIndex: number,\n\toptions: {\n\t\tsystemMessage?: string;\n\t\tpassthroughBinaryImages?: boolean;\n\t\toutputParser?: N8nOutputParser;\n\t},\n): Promise<BaseMessagePromptTemplateLike[]> {\n\tconst useSystemMessage = options.systemMessage ?? ctx.getNode().typeVersion < 1.9;\n\n\tconst messages: BaseMessagePromptTemplateLike[] = [];\n\n\tif (useSystemMessage) {\n\t\tmessages.push([\n\t\t\t'system',\n\t\t\t`{system_message}${options.outputParser ? 
'\\n\\n{formatting_instructions}' : ''}`,\n\t\t]);\n\t} else if (options.outputParser) {\n\t\tmessages.push(['system', '{formatting_instructions}']);\n\t}\n\n\tmessages.push(['placeholder', '{chat_history}'], ['human', '{input}']);\n\n\t// If there is binary data and the node option permits it, add a binary message\n\tconst hasBinaryData = ctx.getInputData()?.[itemIndex]?.binary !== undefined;\n\tif (hasBinaryData && options.passthroughBinaryImages) {\n\t\tconst binaryMessage = await extractBinaryMessages(ctx, itemIndex);\n\t\tif (binaryMessage.content.length !== 0) {\n\t\t\tmessages.push(binaryMessage);\n\t\t} else {\n\t\t\tctx.logger.debug('Not attaching binary message, since its content was empty');\n\t\t}\n\t}\n\n\t// We add the agent scratchpad last, so that the agent will not run in loops\n\t// by adding binary messages between each interaction\n\tmessages.push(['placeholder', '{agent_scratchpad}']);\n\treturn messages;\n}\n\n/**\n * Creates the chat prompt from messages.\n *\n * @param messages - The messages array\n * @returns The ChatPromptTemplate instance\n */\nexport function preparePrompt(messages: BaseMessagePromptTemplateLike[]): ChatPromptTemplate {\n\treturn ChatPromptTemplate.fromMessages(messages);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,sBAA6B;AAE7B,qBAAuE;AAIvE,mBAAiD;AACjD,0BAAoF;AAGpF,iBAAkB;AAElB,qBAAkD;AAU3C,SAAS,sBACf,cAEgC;AAChC,QAAM;AAAA;AAAA,IAEJ,aAAa,UAAU,KAAuC,aAAE,OAAO,EAAE,MAAM,aAAE,OAAO,EAAE,CAAC;AAAA;AAC7F,SAAO;AACR;AAKA,SAAS,WAAW,UAA2B;AAC9C,SACC,SAAS,WAAW,OAAO,KAC3B,aAAa,sBACb,aAAa,qBACb,aAAa,qBACb,aAAa,wBACb,aAAa;AAEf;AAEA,SAAS,YAAY,UAA2B;AAC/C,SAAO,SAAS,WAAW,QAAQ;AACpC;AAaA,eAAsB,sBACrB,KACA,WACwB;AACxB,QAAM,aAAa,IAAI,aAAa,IAAI,SAAS,GAAG,UAAU,CAAC;AAC/D,QAAM,iBAAiB,MAAM,QAAQ;AAAA,IACpC,OAAO,OAAO,UAAU,EAEtB,OAAO,CAAC,SAAS,YAAY,KAAK,QAAQ,KAAK,WAAW,KAAK,QAAQ,CAAC,EACxE,IAAI,OAAO,SAAS;AAEpB,UAAI,YAAY,KAAK,QAAQ,GAAG;AAC/B,YAAI;AAGJ,YAAI,KAAK,IAAI;AACZ,gBAAM,eAAe,MAAM,IAAI,QAAQ;AAAA,YACtC,MAAM,IAAI,QAAQ,gBAAgB,KAAK,EAAE;AAAA,UAC1C;AACA,4BAAkB,QAAQ,KAAK,QAAQ,WAAW,OAAO,KAAK,YAAY,EAAE;AAAA,YAC3E;AAAA,UACD,CAAC;AAAA,QACF,OAAO;AACN,4BAAkB,KAAK,KAAK,SAAS,QAAQ,IAC1C,KAAK,OACL,QAAQ,KAAK,QAAQ,WAAW,KAAK,IAAI;AAAA,QAC7C;AAEA,eAAO;AAAA,UACN,MAAM;AAAA,UACN,WAAW;AAAA,YACV,KAAK;AAAA,UACN;AAAA,QACD;AAAA,MACD,OAEK;AACJ,YAAI;AACJ,YAAI,KAAK,IAAI;AACZ,gBAAM,eAAe,MAAM,IAAI,QAAQ;AAAA,YACtC,MAAM,IAAI,QAAQ,gBAAgB,KAAK,EAAE;AAAA,UAC1C;AACA,wBAAc,aAAa,SAAS,OAAO;AAAA,QAC5C,OAAO;AAEN,cAAI,KAAK,KAAK,SAAS,SAAS,GAAG;AAClC,kBAAM,aAAa,KAAK,KAAK,MAAM,SAAS,EAAE,CAAC;AAC/C,0BAAc,OAAO,KAAK,YAAY,QAAQ,EAAE,SAAS,OAAO;AAAA,UACjE,OAAO;AAEN,0BAAc,OAAO,KAAK,KAAK,MAAM,QAAQ,EAAE,SAAS,OAAO;AAAA,UAChE;AAAA,QACD;AAEA,eAAO;AAAA,UACN,MAAM;AAAA,UACN,MAAM,SAAS,KAAK,YAAY,YAAY;AAAA;AAAA,EAAe,WAAW;AAAA,QACvE;AAAA,MACD;AAAA,IACD,CAAC;AAAA,EACH;AACA,SAAO,IAAI,6BAAa;AAAA,IACvB,SAAS,CAAC,GAAG,cAAc;AAAA,EAC5B,CAAC;AACF;AAgBO,SAAS,uBACf,OACmC;AACnC,MAAI,CAAC,MAAM,QAAQ,KAAK,EAAG,QAAO;AAElC,QAAM,QAAQ,CAAC,SAAS;AACvB,QAAI,gBAAgB,QAAQ,KAAK,eAAe,QAAW;AAC1D,UAAI,MAAM,QAAQ,KAAK,UAAU,GAAG;AACnC,aAAK,WAAW,QAAQ,CAAC,YAAyB;AACjD,cAAI,aAAa,WAAW,MAAM,QAAQ,QAAQ,OAAO,GAAG;AAC3D,YAAC,QAAQ,QAA+C,QAAQ,CAAC,YAAY;AAC5E,kBAAI,QAAQ,UAAU,IAAI;AACzB,wBAAQ,QAAQ,CAAC;AAAA,cAClB;AAAA,YACD,CAAC;AAAA,UACF;AAAA,QACD,CAAC;AAAA,MACF;AAAA,IACD;AAAA,EACD,CAAC;AAED,SAAO;AACR;AAiCO,SAAS,wBACf,OAC8B;AAI9B,QAAM,mBAAmB;AAEzB,MAAI,iBAAiB,cAAc;AAClC,UAAM,gBAAgB,MAAM,QAAQ,iBAAiB,cAAc,MAAM;AACzE,QAAI,eAAe;AAClB,YAAM,mBAAmB,iBAAiB,aAAa;AAQvD,YAAM,cAAc,iBAClB,OAAO,CAAC,WAAW,OAAO,SAAS,UAA
U,OAAO,IAAI,EACxD,IAAI,CAAC,WAAW,OAAO,IAAI,EAC3B,KAAK,IAAI,EACT,KAAK;AAEP,UAAI,aAAa;AAChB,yBAAiB,aAAa,SAAS;AAAA,MACxC,OAAO;AACN,cAAM,kBAAkB,iBACtB,OAAO,CAAC,WAAW,OAAO,SAAS,cAAc,OAAO,QAAQ,EAChE,IAAI,CAAC,WAAW,OAAO,QAAQ,EAC/B,KAAK,IAAI,EACT,KAAK;AAEP,YAAI,iBAAiB;AACpB,2BAAiB,aAAa,SAAS;AAAA,QACxC,OAAO;AAEN,2BAAiB,aAAa,SAAS;AAAA,QACxC;AAAA,MACD;AACA,aAAO;AAAA,IACR;AAAA,EACD;AAEA,SAAO;AACR;AAUO,SAAS,uBACf,QACA,QACyD;AACzD,SAAO;AAAA,IACN,cAAc,SAAS,EAAE,QAAQ,KAAK,UAAU,MAAM,EAAE,IAAI;AAAA,IAC5D,KAAK;AAAA,EACN;AACD;AAWO,MAAM,sBACZ,CAAC,cAAgC,WACjC,OAAO,UAA6E;AAEnF,MAAI,MAAM,QAAQ,KAAK,GAAG;AACzB,UAAM,qBAAqB,MAAM,KAAK,CAAC,SAAS,KAAK,SAAS,4BAA4B;AAC1F,QAAI,sBAAsB,cAAc;AACvC,YAAM,YAAY,mBAAmB;AAErC,YAAM,cAAc,qBAAqB,SAAS,KAAK,UAAU,SAAS,IAAI;AAC9E,YAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,aAAO,uBAAuB,cAAc,MAAM;AAAA,IACnD;AAAA,EACD;AAGA,MAAI,gBAAgB,OAAO,UAAU,YAAa,MAAsB,cAAc;AACrF,UAAM,gBAAiB,MAAsB;AAC7C,QAAI;AAEJ,QAAI,yBAAyB,QAAQ;AACpC,UAAI,YAAY,eAAe;AAC9B,YAAI;AACH,gBAAM,mBAAe,+BAAmC,cAAc,MAAM;AAI5E,cACC,iBAAiB,QACjB,OAAO,iBAAiB,YACxB,YAAY,gBACZ,OAAO,KAAK,YAAY,EAAE,WAAW,GACpC;AAED,0BAAc,KAAK,UAAU,YAAY;AAAA,UAC1C,OAAO;AAEN,0BAAc,KAAK,UAAU,EAAE,QAAQ,aAAa,CAAC;AAAA,UACtD;AAAA,QACD,SAAS,OAAO;AAEf,wBAAc,cAAc;AAAA,QAC7B;AAAA,MACD,OAAO;AAEN,sBAAc,KAAK,UAAU,aAAa;AAAA,MAC3C;AAAA,IACD,OAAO;AACN,oBAAc;AAAA,IACf;AAEA,UAAM,eAAgB,MAAM,aAAa,MAAM,WAAW;AAC1D,WAAO,uBAAuB,cAAc,MAAM;AAAA,EACnD;AAEA,SAAO,wBAAwB,KAAK;AACrC;AAYD,eAAsB,aACrB,KACA,QAAgB,GACqB;AACrC,QAAM,kBAAkB,MAAM,IAAI,uBAAuB,wCAAoB,iBAAiB,CAAC;AAE/F,MAAI;AAEJ,MAAI,MAAM,QAAQ,eAAe,KAAK,UAAU,QAAW;AAC1D,QAAI,gBAAgB,UAAU,OAAO;AACpC,aAAO;AAAA,IACR;AAEA,UAAM,iBAAiB,CAAC,GAAG,eAAe,EAAE,QAAQ;AACpD,YAAQ,eAAe,KAAK;AAAA,EAC7B,OAAO;AACN,YAAQ;AAAA,EACT;AAEA,MAAI,KAAC,+BAAe,KAAK,KAAK,CAAC,MAAM,WAAW;AAC/C,UAAM,IAAI;AAAA,MACT,IAAI,QAAQ;AAAA,MACZ;AAAA,IACD;AAAA,EACD;AACA,SAAO;AACR;AAQA,eAAsB,kBACrB,KACsC;AACtC,SAAQ,MAAM,IAAI,uBAAuB,wCAAoB,UAAU,CAAC;AAGzE;AAUA,eAAsB,SACrB,KACA,cAC+C;AAC/C,QAAM,QAAS,UAAM,kCAAkB,KAAK,MAAM,KAAK;AAGvD,MAAI,cAAc;AACjB,UAAM,SAAS,sBAAsB,YAAY;AACjD,UAAM,6BAA6B,IAAI,mCAAsB;AAAA,MAC5D;AAAA,MACA,MAAM;AAAA,MACN,aACC;AAAA;AAAA,MAED,MAAM,YAAY;AAAA,IACnB,CAAC;AACD,UAAM,KAAK,0BAA0B;AAAA,EACtC;AACA,SAAO;AACR;AAUA,eAAsB,gBACrB,KACA,WACA,SAK2C;AAC3C,QAAM,mBAAmB,QAAQ,iBAAiB,IAAI,QAAQ,EAAE,cAAc;AAE9E,QAAM,WAA4C,CAAC;AAEnD,MAAI,kBAAkB;AACrB,aAAS,KAAK;AAAA,MACb;AAAA,MACA,mBAAmB,QAAQ,eAAe,kCAAkC,EAAE;AAAA,IAC/E,CAAC;AAAA,EACF,WAAW,QAAQ,cAAc;AAChC,aAAS,KAAK,CAAC,UAAU,2BAA2B,CAAC;AAAA,EACtD;AAEA,WAAS,KAAK,CAAC,eAAe,gBAAgB,GAAG,CAAC,SAAS,SAAS,CAAC;AAGrE,QAAM,gBAAgB,IAAI,aAAa,IAAI,SAAS,GAAG,WAAW;AAClE,MAAI,iBAAiB,QAAQ,yBAAyB;AACrD,UAAM,gBAAgB,MAAM,sBAAsB,KAAK,SAAS;AAChE,QAAI,cAAc,QAAQ,WAAW,GAAG;AACvC,eAAS,KAAK,aAAa;AAAA,IAC5B,OAAO;AACN,UAAI,OAAO,MAAM,2DAA2D;AAAA,IAC7E;AAAA,EACD;AAIA,WAAS,KAAK,CAAC,eAAe,oBAAoB,CAAC;AACnD,SAAO;AACR;AAQO,SAAS,cAAc,UAA+D;AAC5F,SAAO,kCAAmB,aAAa,QAAQ;AAChD;","names":[]}
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.ts"],"sourcesContent":["import { GithubRepoLoader } from '@langchain/community/document_loaders/web/github';\nimport type { TextSplitter } from '@langchain/textsplitters';\nimport { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';\nimport { logWrapper } from '@utils/logWrapper';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n\ttype IDataObject,\n\ttype INodeInputConfiguration,\n} from 'n8n-workflow';\n\nfunction getInputs(parameters: IDataObject) {\n\tconst inputs: INodeInputConfiguration[] = [];\n\n\tconst textSplittingMode = parameters?.textSplittingMode;\n\t// If text splitting mode is 'custom' or does not exist (v1), we need to add an input for the text splitter\n\tif (!textSplittingMode || textSplittingMode === 'custom') {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Text Splitter',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_textSplitter',\n\t\t\trequired: true,\n\t\t});\n\t}\n\n\treturn inputs;\n}\n\nexport class DocumentGithubLoader implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'GitHub Document Loader',\n\t\tname: 'documentGithubLoader',\n\t\ticon: 'file:github.svg',\n\t\tgroup: ['transform'],\n\t\tversion: [1, 1.1],\n\t\tdefaultVersion: 1.1,\n\t\tdescription: 'Use GitHub data as input to this chain',\n\t\tdefaults: {\n\t\t\tname: 'GitHub Document Loader',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Document Loaders'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.documentgithubloader/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'githubApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\n\t\tinputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,\n\t\tinputNames: ['Text Splitter'],\n\n\t\toutputs: [NodeConnectionTypes.AiDocument],\n\t\toutputNames: ['Document'],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiVectorStore]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Repository Link',\n\t\t\t\tname: 'repository',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Branch',\n\t\t\t\tname: 'branch',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: 'main',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Text Splitting',\n\t\t\t\tname: 'textSplittingMode',\n\t\t\t\ttype: 'options',\n\t\t\t\tdefault: 'simple',\n\t\t\t\trequired: true,\n\t\t\t\tnoDataExpression: true,\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'@version': [1.1],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Simple',\n\t\t\t\t\t\tvalue: 'simple',\n\t\t\t\t\t\tdescription: 'Splits every 1000 characters with a 200 character overlap',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Custom',\n\t\t\t\t\t\tvalue: 'custom',\n\t\t\t\t\t\tdescription: 'Connect a custom text-splitting sub-node',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'additionalOptions',\n\t\t\t\ttype: 'collection',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdefault: {},\n\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 
'Recursive',\n\t\t\t\t\t\tname: 'recursive',\n\t\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\t\tdefault: false,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Ignore Paths',\n\t\t\t\t\t\tname: 'ignorePaths',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdescription: 'Comma-separated list of paths to ignore, e.g. \"docs, src/tests',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tthis.logger.debug('Supplying data for Github Document Loader');\n\t\tconst node = this.getNode();\n\n\t\tconst repository = this.getNodeParameter('repository', itemIndex) as string;\n\t\tconst branch = this.getNodeParameter('branch', itemIndex) as string;\n\t\tconst credentials = await this.getCredentials('githubApi');\n\t\tconst { ignorePaths, recursive } = this.getNodeParameter('additionalOptions', 0) as {\n\t\t\trecursive: boolean;\n\t\t\tignorePaths: string;\n\t\t};\n\t\tlet textSplitter: TextSplitter | undefined;\n\n\t\tif (node.typeVersion === 1.1) {\n\t\t\tconst textSplittingMode = this.getNodeParameter('textSplittingMode', itemIndex, 'simple') as\n\t\t\t\t| 'simple'\n\t\t\t\t| 'custom';\n\n\t\t\tif (textSplittingMode === 'simple') {\n\t\t\t\ttextSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n\t\t\t} else if (textSplittingMode === 'custom') {\n\t\t\t\ttextSplitter = (await this.getInputConnectionData(NodeConnectionTypes.AiTextSplitter, 0)) as\n\t\t\t\t\t| TextSplitter\n\t\t\t\t\t| undefined;\n\t\t\t}\n\t\t} else {\n\t\t\ttextSplitter = (await this.getInputConnectionData(NodeConnectionTypes.AiTextSplitter, 0)) as\n\t\t\t\t| TextSplitter\n\t\t\t\t| undefined;\n\t\t}\n\n\t\tconst { index } = this.addInputData(NodeConnectionTypes.AiDocument, [\n\t\t\t[{ json: { repository, branch, ignorePaths, recursive } }],\n\t\t]);\n\t\tconst docs = new GithubRepoLoader(repository, {\n\t\t\tbranch,\n\t\t\tignorePaths: (ignorePaths ?? '').split(',').map((p) => p.trim()),\n\t\t\trecursive,\n\t\t\taccessToken: (credentials.accessToken as string) || '',\n\t\t\tapiUrl: credentials.server as string,\n\t\t});\n\n\t\tconst loadedDocs = textSplitter\n\t\t\t? 
await textSplitter.splitDocuments(await docs.load())\n\t\t\t: await docs.load();\n\n\t\tthis.addOutputData(NodeConnectionTypes.AiDocument, index, [[{ json: { loadedDocs } }]]);\n\t\treturn {\n\t\t\tresponse: logWrapper(loadedDocs, this),\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAAiC;AAEjC,2BAA+C;AAC/C,wBAA2B;AAC3B,0BAA6C;AAC7C,0BAQO;AAEP,SAAS,UAAU,YAAyB;AAC3C,QAAM,SAAoC,CAAC;AAE3C,QAAM,oBAAoB,YAAY;AAEtC,MAAI,CAAC,qBAAqB,sBAAsB,UAAU;AACzD,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAEA,SAAO;AACR;AAEO,MAAM,qBAA0C;AAAA,EAAhD;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,GAAG,GAAG;AAAA,MAChB,gBAAgB;AAAA,MAChB,aAAa;AAAA,MACb,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,kBAAkB;AAAA,QACxB;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MAEA,QAAQ,yBAAyB,UAAU,SAAS,CAAC;AAAA,MACrD,YAAY,CAAC,eAAe;AAAA,MAE5B,SAAS,CAAC,wCAAoB,UAAU;AAAA,MACxC,aAAa,CAAC,UAAU;AAAA,MACxB,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,aAAa,CAAC;AAAA,QAChE;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,UAAU;AAAA,UACV,kBAAkB;AAAA,UAClB,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,GAAG;AAAA,YACjB;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,cACP,aAAa;AAAA,YACd;AAAA,YACA;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,cACP,aAAa;AAAA,YACd;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aAAa;AAAA,UACb,SAAS,CAAC;AAAA,UAEV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,SAAS;AAAA,YACV;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,cACb,SAAS;AAAA,YACV;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,SAAK,OAAO,MAAM,2CAA2C;AAC7D,UAAM,OAAO,KAAK,QAAQ;AAE1B,UAAM,aAAa,KAAK,iBAAiB,cAAc,SAAS;AAChE,UAAM,SAAS,KAAK,iBAAiB,UAAU,SAAS;AACxD,UAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,UAAM,EAAE,aAAa,UAAU,IAAI,KAAK,iBAAiB,qBAAqB,CAAC;AAI/E,QAAI;AAEJ,QAAI,KAAK,gBAAgB,KAAK;AAC7B,YAAM,oBAAoB,KAAK,iBAAiB,qBAAqB,WAAW,QAAQ;AAIxF,UAAI,sBAAsB,UAAU;AACnC,uBAAe,IAAI,oDAA+B,EAAE,WAAW,KAAM,cAAc,IAAI,CAAC;AAAA,MACzF,WAAW,sBAAsB,UAAU;AAC1C,uBAAgB,MAAM,KAAK,uBAAuB,wCAAoB,gBAAgB,CAAC;AAAA,MAGxF;AAAA,IACD,OAAO;AACN,qBAAgB,MAAM,KAAK,uBAAuB,wCAAoB,gBAAgB,CAAC;AAAA,IAGxF;AAEA,UAAM,EAAE,MAAM,IAAI,KAAK,aAAa,wCAAoB,YAAY;AAAA,MACnE,CAAC,EAAE,MAAM,EAAE,YAAY,QAAQ,aAAa,UAAU,EAAE,CAAC;AAAA,IAC1D,CAAC;AACD,UAAM,OAAO,IAAI,+BAAiB,YAAY;AAAA,MAC7C;AAAA,MACA,cAAc,eAAe,IAAI,MAAM,GAAG,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC;AAAA,MAC/D;AAAA,MACA,aAAc,YAAY,eAA0B;AAAA,MACpD,QAAQ,YAAY;AAAA,IACrB,CAAC;AAED,UAAM,aAAa,eAChB,MAAM,aAAa,eAAe,MAAM,KAAK,KAAK,CAAC,IACnD,MAAM,KAAK,KAAK;AAEnB,SAAK,cAAc,wCAAoB,YAAY,OAAO,CAAC,CAAC,EAAE,MAAM,EAAE,WAAW,EAAE,CAAC,CAAC,CAAC;AACtF,WAAO;AAAA,MACN,cAAU,8BAAW,YAAY,IAAI;AAAA,IACtC;AAAA,EACD;AACD;","names":[]}
+
{"version":3,"sources":["../../../../nodes/document_loaders/DocumentGithubLoader/DocumentGithubLoader.node.ts"],"sourcesContent":["import { GithubRepoLoader } from '@langchain/community/document_loaders/web/github';\nimport type { TextSplitter } from '@langchain/textsplitters';\nimport { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';\nimport { logWrapper } from '@utils/logWrapper';\nimport { getConnectionHintNoticeField } from '@utils/sharedFields';\nimport {\n\tNodeConnectionTypes,\n\ttype INodeType,\n\ttype INodeTypeDescription,\n\ttype ISupplyDataFunctions,\n\ttype SupplyData,\n\ttype IDataObject,\n\ttype INodeInputConfiguration,\n} from 'n8n-workflow';\n\nfunction getInputs(parameters: IDataObject) {\n\tconst inputs: INodeInputConfiguration[] = [];\n\n\tconst textSplittingMode = parameters?.textSplittingMode;\n\t// If text splitting mode is 'custom' or does not exist (v1), we need to add an input for the text splitter\n\tif (!textSplittingMode || textSplittingMode === 'custom') {\n\t\tinputs.push({\n\t\t\tdisplayName: 'Text Splitter',\n\t\t\tmaxConnections: 1,\n\t\t\ttype: 'ai_textSplitter',\n\t\t\trequired: true,\n\t\t});\n\t}\n\n\treturn inputs;\n}\n\nexport class DocumentGithubLoader implements INodeType {\n\tdescription: INodeTypeDescription = {\n\t\tdisplayName: 'GitHub Document Loader',\n\t\tname: 'documentGithubLoader',\n\t\ticon: 'file:github.svg',\n\t\tgroup: ['transform'],\n\t\tversion: [1, 1.1],\n\t\tdefaultVersion: 1.1,\n\t\tdescription: 'Use GitHub data as input to this chain',\n\t\thidden: true,\n\t\tdefaults: {\n\t\t\tname: 'GitHub Document Loader',\n\t\t},\n\t\tcodex: {\n\t\t\tcategories: ['AI'],\n\t\t\tsubcategories: {\n\t\t\t\tAI: ['Document Loaders'],\n\t\t\t},\n\t\t\tresources: {\n\t\t\t\tprimaryDocumentation: [\n\t\t\t\t\t{\n\t\t\t\t\t\turl: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.documentgithubloader/',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t},\n\t\tcredentials: [\n\t\t\t{\n\t\t\t\tname: 'githubApi',\n\t\t\t\trequired: true,\n\t\t\t},\n\t\t],\n\n\t\tinputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,\n\t\tinputNames: ['Text Splitter'],\n\n\t\toutputs: [NodeConnectionTypes.AiDocument],\n\t\toutputNames: ['Document'],\n\t\tproperties: [\n\t\t\tgetConnectionHintNoticeField([NodeConnectionTypes.AiVectorStore]),\n\t\t\t{\n\t\t\t\tdisplayName: 'Repository Link',\n\t\t\t\tname: 'repository',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: '',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Branch',\n\t\t\t\tname: 'branch',\n\t\t\t\ttype: 'string',\n\t\t\t\tdefault: 'main',\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Text Splitting',\n\t\t\t\tname: 'textSplittingMode',\n\t\t\t\ttype: 'options',\n\t\t\t\tdefault: 'simple',\n\t\t\t\trequired: true,\n\t\t\t\tnoDataExpression: true,\n\t\t\t\tdisplayOptions: {\n\t\t\t\t\tshow: {\n\t\t\t\t\t\t'@version': [1.1],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Simple',\n\t\t\t\t\t\tvalue: 'simple',\n\t\t\t\t\t\tdescription: 'Splits every 1000 characters with a 200 character overlap',\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tname: 'Custom',\n\t\t\t\t\t\tvalue: 'custom',\n\t\t\t\t\t\tdescription: 'Connect a custom text-splitting sub-node',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t\t{\n\t\t\t\tdisplayName: 'Options',\n\t\t\t\tname: 'additionalOptions',\n\t\t\t\ttype: 'collection',\n\t\t\t\tplaceholder: 'Add Option',\n\t\t\t\tdefault: {},\n\n\t\t\t\toptions: [\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 
'Recursive',\n\t\t\t\t\t\tname: 'recursive',\n\t\t\t\t\t\ttype: 'boolean',\n\t\t\t\t\t\tdefault: false,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tdisplayName: 'Ignore Paths',\n\t\t\t\t\t\tname: 'ignorePaths',\n\t\t\t\t\t\ttype: 'string',\n\t\t\t\t\t\tdescription: 'Comma-separated list of paths to ignore, e.g. \"docs, src/tests',\n\t\t\t\t\t\tdefault: '',\n\t\t\t\t\t},\n\t\t\t\t],\n\t\t\t},\n\t\t],\n\t};\n\n\tasync supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {\n\t\tthis.logger.debug('Supplying data for Github Document Loader');\n\t\tconst node = this.getNode();\n\n\t\tconst repository = this.getNodeParameter('repository', itemIndex) as string;\n\t\tconst branch = this.getNodeParameter('branch', itemIndex) as string;\n\t\tconst credentials = await this.getCredentials('githubApi');\n\t\tconst { ignorePaths, recursive } = this.getNodeParameter('additionalOptions', 0) as {\n\t\t\trecursive: boolean;\n\t\t\tignorePaths: string;\n\t\t};\n\t\tlet textSplitter: TextSplitter | undefined;\n\n\t\tif (node.typeVersion === 1.1) {\n\t\t\tconst textSplittingMode = this.getNodeParameter('textSplittingMode', itemIndex, 'simple') as\n\t\t\t\t| 'simple'\n\t\t\t\t| 'custom';\n\n\t\t\tif (textSplittingMode === 'simple') {\n\t\t\t\ttextSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n\t\t\t} else if (textSplittingMode === 'custom') {\n\t\t\t\ttextSplitter = (await this.getInputConnectionData(NodeConnectionTypes.AiTextSplitter, 0)) as\n\t\t\t\t\t| TextSplitter\n\t\t\t\t\t| undefined;\n\t\t\t}\n\t\t} else {\n\t\t\ttextSplitter = (await this.getInputConnectionData(NodeConnectionTypes.AiTextSplitter, 0)) as\n\t\t\t\t| TextSplitter\n\t\t\t\t| undefined;\n\t\t}\n\n\t\tconst { index } = this.addInputData(NodeConnectionTypes.AiDocument, [\n\t\t\t[{ json: { repository, branch, ignorePaths, recursive } }],\n\t\t]);\n\t\tconst docs = new GithubRepoLoader(repository, {\n\t\t\tbranch,\n\t\t\tignorePaths: (ignorePaths ?? '').split(',').map((p) => p.trim()),\n\t\t\trecursive,\n\t\t\taccessToken: (credentials.accessToken as string) || '',\n\t\t\tapiUrl: credentials.server as string,\n\t\t});\n\n\t\tconst loadedDocs = textSplitter\n\t\t\t? 
await textSplitter.splitDocuments(await docs.load())\n\t\t\t: await docs.load();\n\n\t\tthis.addOutputData(NodeConnectionTypes.AiDocument, index, [[{ json: { loadedDocs } }]]);\n\t\treturn {\n\t\t\tresponse: logWrapper(loadedDocs, this),\n\t\t};\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oBAAiC;AAEjC,2BAA+C;AAC/C,wBAA2B;AAC3B,0BAA6C;AAC7C,0BAQO;AAEP,SAAS,UAAU,YAAyB;AAC3C,QAAM,SAAoC,CAAC;AAE3C,QAAM,oBAAoB,YAAY;AAEtC,MAAI,CAAC,qBAAqB,sBAAsB,UAAU;AACzD,WAAO,KAAK;AAAA,MACX,aAAa;AAAA,MACb,gBAAgB;AAAA,MAChB,MAAM;AAAA,MACN,UAAU;AAAA,IACX,CAAC;AAAA,EACF;AAEA,SAAO;AACR;AAEO,MAAM,qBAA0C;AAAA,EAAhD;AACN,uBAAoC;AAAA,MACnC,aAAa;AAAA,MACb,MAAM;AAAA,MACN,MAAM;AAAA,MACN,OAAO,CAAC,WAAW;AAAA,MACnB,SAAS,CAAC,GAAG,GAAG;AAAA,MAChB,gBAAgB;AAAA,MAChB,aAAa;AAAA,MACb,QAAQ;AAAA,MACR,UAAU;AAAA,QACT,MAAM;AAAA,MACP;AAAA,MACA,OAAO;AAAA,QACN,YAAY,CAAC,IAAI;AAAA,QACjB,eAAe;AAAA,UACd,IAAI,CAAC,kBAAkB;AAAA,QACxB;AAAA,QACA,WAAW;AAAA,UACV,sBAAsB;AAAA,YACrB;AAAA,cACC,KAAK;AAAA,YACN;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,MACA,aAAa;AAAA,QACZ;AAAA,UACC,MAAM;AAAA,UACN,UAAU;AAAA,QACX;AAAA,MACD;AAAA,MAEA,QAAQ,yBAAyB,UAAU,SAAS,CAAC;AAAA,MACrD,YAAY,CAAC,eAAe;AAAA,MAE5B,SAAS,CAAC,wCAAoB,UAAU;AAAA,MACxC,aAAa,CAAC,UAAU;AAAA,MACxB,YAAY;AAAA,YACX,kDAA6B,CAAC,wCAAoB,aAAa,CAAC;AAAA,QAChE;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,QACV;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,SAAS;AAAA,UACT,UAAU;AAAA,UACV,kBAAkB;AAAA,UAClB,gBAAgB;AAAA,YACf,MAAM;AAAA,cACL,YAAY,CAAC,GAAG;AAAA,YACjB;AAAA,UACD;AAAA,UACA,SAAS;AAAA,YACR;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,cACP,aAAa;AAAA,YACd;AAAA,YACA;AAAA,cACC,MAAM;AAAA,cACN,OAAO;AAAA,cACP,aAAa;AAAA,YACd;AAAA,UACD;AAAA,QACD;AAAA,QACA;AAAA,UACC,aAAa;AAAA,UACb,MAAM;AAAA,UACN,MAAM;AAAA,UACN,aAAa;AAAA,UACb,SAAS,CAAC;AAAA,UAEV,SAAS;AAAA,YACR;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,SAAS;AAAA,YACV;AAAA,YACA;AAAA,cACC,aAAa;AAAA,cACb,MAAM;AAAA,cACN,MAAM;AAAA,cACN,aAAa;AAAA,cACb,SAAS;AAAA,YACV;AAAA,UACD;AAAA,QACD;AAAA,MACD;AAAA,IACD;AAAA;AAAA,EAEA,MAAM,WAAuC,WAAwC;AACpF,SAAK,OAAO,MAAM,2CAA2C;AAC7D,UAAM,OAAO,KAAK,QAAQ;AAE1B,UAAM,aAAa,KAAK,iBAAiB,cAAc,SAAS;AAChE,UAAM,SAAS,KAAK,iBAAiB,UAAU,SAAS;AACxD,UAAM,cAAc,MAAM,KAAK,eAAe,WAAW;AACzD,UAAM,EAAE,aAAa,UAAU,IAAI,KAAK,iBAAiB,qBAAqB,CAAC;AAI/E,QAAI;AAEJ,QAAI,KAAK,gBAAgB,KAAK;AAC7B,YAAM,oBAAoB,KAAK,iBAAiB,qBAAqB,WAAW,QAAQ;AAIxF,UAAI,sBAAsB,UAAU;AACnC,uBAAe,IAAI,oDAA+B,EAAE,WAAW,KAAM,cAAc,IAAI,CAAC;AAAA,MACzF,WAAW,sBAAsB,UAAU;AAC1C,uBAAgB,MAAM,KAAK,uBAAuB,wCAAoB,gBAAgB,CAAC;AAAA,MAGxF;AAAA,IACD,OAAO;AACN,qBAAgB,MAAM,KAAK,uBAAuB,wCAAoB,gBAAgB,CAAC;AAAA,IAGxF;AAEA,UAAM,EAAE,MAAM,IAAI,KAAK,aAAa,wCAAoB,YAAY;AAAA,MACnE,CAAC,EAAE,MAAM,EAAE,YAAY,QAAQ,aAAa,UAAU,EAAE,CAAC;AAAA,IAC1D,CAAC;AACD,UAAM,OAAO,IAAI,+BAAiB,YAAY;AAAA,MAC7C;AAAA,MACA,cAAc,eAAe,IAAI,MAAM,GAAG,EAAE,IAAI,CAAC,MAAM,EAAE,KAAK,CAAC;AAAA,MAC/D;AAAA,MACA,aAAc,YAAY,eAA0B;AAAA,MACpD,QAAQ,YAAY;AAAA,IACrB,CAAC;AAED,UAAM,aAAa,eAChB,MAAM,aAAa,eAAe,MAAM,KAAK,KAAK,CAAC,IACnD,MAAM,KAAK,KAAK;AAEnB,SAAK,cAAc,wCAAoB,YAAY,OAAO,CAAC,CAAC,EAAE,MAAM,EAAE,WAAW,EAAE,CAAC,CAAC,CAAC;AACtF,WAAO;AAAA,MACN,cAAU,8BAAW,YAAY,IAAI;AAAA,IACtC;AAAA,EACD;AACD;","names":[]}
@@ -0,0 +1,335 @@
+"use strict";
+var __create = Object.create;
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+  mod
+));
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+var McpClient_node_exports = {};
+__export(McpClient_node_exports, {
+  McpClient: () => McpClient
+});
+module.exports = __toCommonJS(McpClient_node_exports);
+var import_n8n_workflow = require("n8n-workflow");
+var import_zod = require("zod");
+var import_core = require("zod/v4/core");
+var listSearch = __toESM(require("./listSearch"));
+var resourceMapping = __toESM(require("./resourceMapping"));
+var import_descriptions = require("../shared/descriptions");
+var import_utils = require("../shared/utils");
+class McpClient {
+  constructor() {
+    this.description = {
+      displayName: "MCP Client",
+      description: "Standalone MCP Client",
+      name: "mcpClient",
+      icon: {
+        light: "file:../mcp.svg",
+        dark: "file:../mcp.dark.svg"
+      },
+      group: ["transform"],
+      version: 1,
+      defaults: {
+        name: "MCP Client"
+      },
+      credentials: import_descriptions.credentials,
+      inputs: [import_n8n_workflow.NodeConnectionTypes.Main],
+      outputs: [import_n8n_workflow.NodeConnectionTypes.Main],
+      properties: [
+        (0, import_descriptions.transportSelect)({
+          defaultOption: "httpStreamable"
+        }),
+        {
+          displayName: "MCP Endpoint URL",
+          name: "endpointUrl",
+          type: "string",
+          default: "",
+          placeholder: "e.g. https://my-mcp-server.ai/mcp",
+          required: true,
+          description: "The URL of the MCP server to connect to"
+        },
+        {
+          displayName: "Authentication",
+          name: "authentication",
+          type: "options",
+          options: [
+            {
+              name: "Bearer Auth",
+              value: "bearerAuth"
+            },
+            {
+              name: "Header Auth",
+              value: "headerAuth"
+            },
+            {
+              name: "MCP OAuth2",
+              value: "mcpOAuth2Api"
+            },
+            {
+              name: "Multiple Headers Auth",
+              value: "multipleHeadersAuth"
+            },
+            {
+              name: "None",
+              value: "none"
+            }
+          ],
+          default: "none",
+          description: "The way to authenticate with your endpoint"
+        },
+        {
+          displayName: "Credentials",
+          name: "credentials",
+          type: "credentials",
+          default: "",
+          displayOptions: {
+            show: {
+              authentication: ["headerAuth", "bearerAuth", "mcpOAuth2Api", "multipleHeadersAuth"]
+            }
+          }
+        },
+        {
+          displayName: "Tool",
+          name: "tool",
+          type: "resourceLocator",
+          default: { mode: "list", value: "" },
+          required: true,
+          description: "The tool to use",
+          modes: [
+            {
+              displayName: "From List",
+              name: "list",
+              type: "list",
+              typeOptions: {
+                searchListMethod: "getTools",
+                searchable: true,
+                skipCredentialsCheckInRLC: true
+              }
+            },
+            {
+              displayName: "ID",
+              name: "id",
+              type: "string"
+            }
+          ]
+        },
+        {
+          displayName: "Input Mode",
+          name: "inputMode",
+          type: "options",
+          default: "manual",
+          noDataExpression: true,
+          options: [
+            {
+              name: "Manual",
+              value: "manual",
+              description: "Manually specify the input data for each tool parameter"
+            },
+            {
+              name: "JSON",
+              value: "json",
+              description: "Specify the input data as a JSON object"
+            }
+          ]
+        },
+        {
+          displayName: "Parameters",
+          name: "parameters",
+          type: "resourceMapper",
+          default: {
+            mappingMode: "defineBelow",
+            value: null
+          },
+          noDataExpression: true,
+          required: true,
+          typeOptions: {
+            loadOptionsDependsOn: ["tool.value"],
+            resourceMapper: {
+              resourceMapperMethod: "getToolParameters",
+              hideNoDataError: true,
+              addAllFields: false,
+              supportAutoMap: false,
+              mode: "add",
+              fieldWords: {
+                singular: "parameter",
+                plural: "parameters"
+              }
+            }
+          },
+          displayOptions: {
+            show: {
+              inputMode: ["manual"]
+            }
+          }
+        },
+        {
+          displayName: "JSON",
+          name: "jsonInput",
+          type: "json",
+          typeOptions: {
+            rows: 5
+          },
+          default: '{\n "my_field_1": "value",\n "my_field_2": 1\n}\n',
+          validateType: "object",
+          displayOptions: {
+            show: {
+              inputMode: ["json"]
+            }
+          }
+        },
+        {
+          displayName: "Options",
+          name: "options",
+          placeholder: "Add Option",
+          description: "Additional options to add",
+          type: "collection",
+          default: {},
+          options: [
+            {
+              displayName: "Convert to Binary",
+              name: "convertToBinary",
+              type: "boolean",
+              default: true,
+              description: "Whether to convert images and audio to binary data. If false, images and audio will be returned as base64 encoded strings."
+            },
+            {
+              displayName: "Timeout",
+              name: "timeout",
+              type: "number",
+              typeOptions: {
+                minValue: 1
+              },
+              default: 6e4,
+              description: "Time in ms to wait for tool calls to finish"
+            }
+          ]
+        }
+      ]
+    };
+    this.methods = {
+      listSearch,
+      resourceMapping
+    };
+  }
+  async execute() {
+    const authentication = this.getNodeParameter("authentication", 0);
+    const serverTransport = this.getNodeParameter("serverTransport", 0);
+    const endpointUrl = this.getNodeParameter("endpointUrl", 0);
+    const node = this.getNode();
+    const { headers } = await (0, import_utils.getAuthHeaders)(this, authentication);
+    const client = await (0, import_utils.connectMcpClient)({
+      serverTransport,
+      endpointUrl,
+      headers,
+      name: node.type,
+      version: node.typeVersion,
+      onUnauthorized: async (headers2) => await (0, import_utils.tryRefreshOAuth2Token)(this, authentication, headers2)
+    });
+    if (!client.ok) {
+      throw (0, import_utils.mapToNodeOperationError)(node, client.error);
+    }
+    const inputMode = this.getNodeParameter("inputMode", 0, "manual");
+    const items = this.getInputData();
+    const returnData = [];
+    for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
+      try {
+        const tool = this.getNodeParameter("tool.value", itemIndex);
+        const options = this.getNodeParameter("options", itemIndex);
+        let parameters = {};
+        if (inputMode === "manual") {
+          parameters = this.getNodeParameter("parameters.value", itemIndex);
+        } else {
+          parameters = this.getNodeParameter("jsonInput", itemIndex);
+        }
+        const result = await client.result.callTool(
+          {
+            name: tool,
+            arguments: parameters
+          },
+          void 0,
+          {
+            timeout: options.timeout ? Number(options.timeout) : void 0
+          }
+        );
+        let binaryIndex = 0;
+        const binary = {};
+        const content = [];
+        const convertToBinary = options.convertToBinary ?? true;
+        for (const contentItem of result.content) {
+          if (contentItem.type === "text") {
+            content.push({
+              ...contentItem,
+              text: (0, import_n8n_workflow.jsonParse)(contentItem.text, { fallbackValue: contentItem.text })
+            });
+            continue;
+          }
+          if (convertToBinary && (contentItem.type === "image" || contentItem.type === "audio")) {
+            binary[`data_${binaryIndex}`] = await this.helpers.prepareBinaryData(
+              Buffer.from(contentItem.data, "base64"),
+              void 0,
+              contentItem.mimeType
+            );
+            binaryIndex++;
+            continue;
+          }
+          content.push(contentItem);
+        }
+        returnData.push({
+          json: {
+            content: content.length > 0 ? content : void 0
+          },
+          binary: Object.keys(binary).length > 0 ? binary : void 0,
+          pairedItem: {
+            item: itemIndex
+          }
+        });
+      } catch (e) {
+        const errorMessage = e instanceof import_zod.ZodError ? (0, import_core.prettifyError)(e) : e instanceof Error ? e.message : String(e);
+        if (this.continueOnFail()) {
+          returnData.push({
+            json: {
+              error: {
+                message: errorMessage,
+                issues: e instanceof import_zod.ZodError ? e.issues : void 0
+              }
+            },
+            pairedItem: {
+              item: itemIndex
+            }
+          });
+          continue;
+        }
+        throw new import_n8n_workflow.NodeOperationError(node, errorMessage, {
+          itemIndex
+        });
+      }
+    }
+    return [returnData];
+  }
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  McpClient
+});
+//# sourceMappingURL=McpClient.node.js.map