n8n-nodes-agnicwallet 1.0.6 → 1.0.8

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -0,0 +1,5 @@
+ import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription } from "n8n-workflow";
+ export declare class AgnicAI implements INodeType {
+ description: INodeTypeDescription;
+ execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
+ }
@@ -0,0 +1,422 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.AgnicAI = void 0;
4
+ const n8n_workflow_1 = require("n8n-workflow");
5
+ class AgnicAI {
6
+ constructor() {
7
+ this.description = {
8
+ displayName: "AgnicAI",
9
+ name: "agnicAI",
10
+ group: ["transform"],
11
+ version: 1.0,
12
+ description: "Access various language models through AgnicPay AI Gateway with X402 payment support. Use this node in regular workflows to call AI models.",
13
+ defaults: {
14
+ name: "AgnicAI",
15
+ },
16
+ icon: "file:AgnicAI.png",
17
+ inputs: [n8n_workflow_1.NodeConnectionTypes.Main],
18
+ outputs: [n8n_workflow_1.NodeConnectionTypes.Main],
19
+ credentials: [
20
+ {
21
+ name: "agnicWalletOAuth2Api",
22
+ required: false,
23
+ displayOptions: {
24
+ show: {
25
+ authentication: ["oAuth2"],
26
+ },
27
+ },
28
+ },
29
+ {
30
+ name: "agnicWalletApi",
31
+ required: false,
32
+ displayOptions: {
33
+ show: {
34
+ authentication: ["apiKey"],
35
+ },
36
+ },
37
+ },
38
+ ],
39
+ properties: [
40
+ {
41
+ displayName: "Authentication",
42
+ name: "authentication",
43
+ type: "options",
44
+ options: [
45
+ {
46
+ name: "OAuth2",
47
+ value: "oAuth2",
48
+ description: "Recommended: Connect your account",
49
+ },
50
+ {
51
+ name: "API Key",
52
+ value: "apiKey",
53
+ description: "For CI/CD or programmatic access",
54
+ },
55
+ ],
56
+ default: "oAuth2",
57
+ description: "How to authenticate with AgnicWallet",
58
+ },
59
+ {
60
+ displayName: "Model",
61
+ name: "model",
62
+ type: "options",
63
+ typeOptions: {
64
+ allowCustomValues: true,
65
+ },
66
+ options: [
67
+ {
68
+ name: "Simple (GPT-4o Mini)",
69
+ value: "openai/gpt-4o-mini",
70
+ description: "Simple and fast default option - GPT-4o Mini",
71
+ },
72
+ {
73
+ name: "OpenAI - GPT-4o",
74
+ value: "openai/gpt-4o",
75
+ description: "OpenAI's latest GPT-4o model",
76
+ },
77
+ {
78
+ name: "OpenAI - GPT-4o Mini",
79
+ value: "openai/gpt-4o-mini",
80
+ description: "Fast and efficient GPT-4o Mini",
81
+ },
82
+ {
83
+ name: "OpenAI - GPT-4 Turbo",
84
+ value: "openai/gpt-4-turbo",
85
+ description: "GPT-4 Turbo with extended context",
86
+ },
87
+ {
88
+ name: "OpenAI - GPT-4",
89
+ value: "openai/gpt-4",
90
+ description: "OpenAI GPT-4",
91
+ },
92
+ {
93
+ name: "OpenAI - GPT-3.5 Turbo",
94
+ value: "openai/gpt-3.5-turbo",
95
+ description: "Fast GPT-3.5 Turbo model",
96
+ },
97
+ {
98
+ name: "Anthropic - Claude 3.5 Sonnet",
99
+ value: "anthropic/claude-3.5-sonnet",
100
+ description: "Anthropic's Claude 3.5 Sonnet",
101
+ },
102
+ {
103
+ name: "Anthropic - Claude 3 Opus",
104
+ value: "anthropic/claude-3-opus",
105
+ description: "Anthropic's Claude 3 Opus",
106
+ },
107
+ {
108
+ name: "Anthropic - Claude 3 Sonnet",
109
+ value: "anthropic/claude-3-sonnet",
110
+ description: "Anthropic's Claude 3 Sonnet",
111
+ },
112
+ {
113
+ name: "Anthropic - Claude 3 Haiku",
114
+ value: "anthropic/claude-3-haiku",
115
+ description: "Fast Claude 3 Haiku model",
116
+ },
117
+ {
118
+ name: "Google - Gemini Pro 1.5",
119
+ value: "google/gemini-pro-1.5",
120
+ description: "Google Gemini Pro 1.5",
121
+ },
122
+ {
123
+ name: "Google - Gemini Pro",
124
+ value: "google/gemini-pro",
125
+ description: "Google Gemini Pro",
126
+ },
127
+ {
128
+ name: "Google - Gemini Flash 1.5",
129
+ value: "google/gemini-flash-1.5",
130
+ description: "Fast Gemini Flash 1.5",
131
+ },
132
+ {
133
+ name: "Meta - Llama 3.1 405B",
134
+ value: "meta-llama/llama-3.1-405b-instruct",
135
+ description: "Meta Llama 3.1 405B Instruct",
136
+ },
137
+ {
138
+ name: "Meta - Llama 3.1 70B",
139
+ value: "meta-llama/llama-3.1-70b-instruct",
140
+ description: "Meta Llama 3.1 70B Instruct",
141
+ },
142
+ {
143
+ name: "Meta - Llama 3 70B",
144
+ value: "meta-llama/llama-3-70b-instruct",
145
+ description: "Meta Llama 3 70B Instruct",
146
+ },
147
+ {
148
+ name: "Mistral AI - Mistral Large",
149
+ value: "mistralai/mistral-large",
150
+ description: "Mistral AI Large model",
151
+ },
152
+ {
153
+ name: "Mistral AI - Mixtral 8x7B",
154
+ value: "mistralai/mixtral-8x7b-instruct",
155
+ description: "Mistral Mixtral 8x7B Instruct",
156
+ },
157
+ {
158
+ name: "Mistral AI - Mistral 7B",
159
+ value: "mistralai/mistral-7b-instruct",
160
+ description: "Mistral 7B Instruct",
161
+ },
162
+ {
163
+ name: "Cohere - Command R+",
164
+ value: "cohere/command-r-plus",
165
+ description: "Cohere Command R+",
166
+ },
167
+ {
168
+ name: "Perplexity - Sonar",
169
+ value: "perplexity/sonar",
170
+ description: "Perplexity Sonar model",
171
+ },
172
+ {
173
+ name: "xAI - Grok Beta",
174
+ value: "x-ai/grok-beta",
175
+ description: "xAI Grok Beta",
176
+ },
177
+ ],
178
+ default: "openai/gpt-4o-mini",
179
+ description: "Select a model or type any OpenRouter model ID. See https://openrouter.ai/models for all available models. Examples: 'openai/gpt-4o', 'anthropic/claude-3.5-sonnet', 'google/gemini-pro-1.5'",
180
+ },
181
+ {
182
+ displayName: "Messages",
183
+ name: "messages",
184
+ type: "fixedCollection",
185
+ typeOptions: {
186
+ multipleValues: true,
187
+ },
188
+ default: {},
189
+ options: [
190
+ {
191
+ name: "message",
192
+ displayName: "Message",
193
+ values: [
194
+ {
195
+ displayName: "Role",
196
+ name: "role",
197
+ type: "options",
198
+ options: [
199
+ {
200
+ name: "System",
201
+ value: "system",
202
+ description: "System message to set behavior",
203
+ },
204
+ {
205
+ name: "User",
206
+ value: "user",
207
+ description: "User message",
208
+ },
209
+ {
210
+ name: "Assistant",
211
+ value: "assistant",
212
+ description: "Assistant message (for conversation history)",
213
+ },
214
+ ],
215
+ default: "user",
216
+ description: "The role of the message",
217
+ },
218
+ {
219
+ displayName: "Content",
220
+ name: "content",
221
+ type: "string",
222
+ default: "",
223
+ typeOptions: {
224
+ rows: 4,
225
+ },
226
+ description: "The content of the message",
227
+ },
228
+ ],
229
+ },
230
+ ],
231
+ description: "The messages to send to the model",
232
+ },
233
+ {
234
+ displayName: "Options",
235
+ name: "options",
236
+ type: "collection",
237
+ placeholder: "Add Option",
238
+ default: {},
239
+ options: [
240
+ {
241
+ displayName: "Temperature",
242
+ name: "temperature",
243
+ type: "number",
244
+ typeOptions: {
245
+ minValue: 0,
246
+ maxValue: 2,
247
+ numberStepSize: 0.1,
248
+ },
249
+ default: 1,
250
+ description: "Controls randomness. Lower values make output more deterministic",
251
+ },
252
+ {
253
+ displayName: "Max Tokens",
254
+ name: "max_tokens",
255
+ type: "number",
256
+ typeOptions: {
257
+ minValue: 1,
258
+ },
259
+ default: 2048,
260
+ description: "Maximum number of tokens to generate",
261
+ },
262
+ {
263
+ displayName: "Top P",
264
+ name: "top_p",
265
+ type: "number",
266
+ typeOptions: {
267
+ minValue: 0,
268
+ maxValue: 1,
269
+ numberStepSize: 0.1,
270
+ },
271
+ default: 1,
272
+ description: "Nucleus sampling: consider tokens with top_p probability mass",
273
+ },
274
+ {
275
+ displayName: "Frequency Penalty",
276
+ name: "frequency_penalty",
277
+ type: "number",
278
+ typeOptions: {
279
+ minValue: -2,
280
+ maxValue: 2,
281
+ numberStepSize: 0.1,
282
+ },
283
+ default: 0,
284
+ description: "Penalize tokens based on their frequency in the text so far",
285
+ },
286
+ {
287
+ displayName: "Presence Penalty",
288
+ name: "presence_penalty",
289
+ type: "number",
290
+ typeOptions: {
291
+ minValue: -2,
292
+ maxValue: 2,
293
+ numberStepSize: 0.1,
294
+ },
295
+ default: 0,
296
+ description: "Penalize tokens based on whether they appear in the text so far",
297
+ },
298
+ ],
299
+ description: "Additional options for the chat completion",
300
+ },
301
+ ],
302
+ };
303
+ }
304
+ async execute() {
305
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
306
+ const items = this.getInputData();
307
+ const returnData = [];
308
+ for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
309
+ try {
310
+ // Get authentication type
311
+ const authentication = this.getNodeParameter("authentication", itemIndex);
312
+ // Get authentication header
313
+ let authHeader;
314
+ if (authentication === "oAuth2") {
315
+ // OAuth2 authentication
316
+ const credentials = (await this.getCredentials("agnicWalletOAuth2Api", itemIndex));
317
+ authHeader = `Bearer ${String(credentials.oauthTokenData.access_token)}`;
318
+ }
319
+ else {
320
+ // API Key authentication
321
+ const credentials = await this.getCredentials("agnicWalletApi", itemIndex);
322
+ const { apiToken } = credentials;
323
+ authHeader = `Bearer ${String(apiToken)}`;
324
+ }
325
+ // Get model parameter (supports both dropdown selection and custom input)
326
+ const modelParam = this.getNodeParameter("model", itemIndex);
327
+ const model = modelParam === null || modelParam === void 0 ? void 0 : modelParam.trim();
328
+ if (!model || model === "") {
329
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), "Model must be specified. Enter an OpenRouter model ID (e.g., 'openai/gpt-4o' or 'anthropic/claude-3.5-sonnet'). See https://openrouter.ai/models for all available models.", { itemIndex });
330
+ }
331
+ // Get messages
332
+ const messagesConfig = this.getNodeParameter("messages", itemIndex, {});
333
+ if (!messagesConfig.message || messagesConfig.message.length === 0) {
334
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), "At least one message is required", { itemIndex });
335
+ }
336
+ const messages = messagesConfig.message.map((msg) => ({
337
+ role: msg.role,
338
+ content: msg.content,
339
+ }));
340
+ // Get options
341
+ const options = this.getNodeParameter("options", itemIndex, {});
342
+ // Build request body
343
+ const requestBody = {
344
+ model: model.trim(),
345
+ messages,
346
+ };
347
+ if (options.temperature !== undefined) {
348
+ requestBody.temperature = options.temperature;
349
+ }
350
+ if (options.max_tokens !== undefined) {
351
+ requestBody.max_tokens = options.max_tokens;
352
+ }
353
+ if (options.top_p !== undefined) {
354
+ requestBody.top_p = options.top_p;
355
+ }
356
+ if (options.frequency_penalty !== undefined) {
357
+ requestBody.frequency_penalty = options.frequency_penalty;
358
+ }
359
+ if (options.presence_penalty !== undefined) {
360
+ requestBody.presence_penalty = options.presence_penalty;
361
+ }
362
+ // Make request to AgnicPay AI Gateway
363
+ const apiUrl = "https://api.agnicpay.xyz/v1/chat/completions";
364
+ (_a = this.logger) === null || _a === void 0 ? void 0 : _a.info(`[AgnicAI] Calling AgnicPay AI Gateway with model: ${model}`);
365
+ const response = await this.helpers.httpRequest({
366
+ method: "POST",
367
+ url: apiUrl,
368
+ headers: {
369
+ "Content-Type": "application/json",
370
+ Authorization: authHeader,
371
+ },
372
+ body: requestBody,
373
+ json: true,
374
+ });
375
+ // Format response
376
+ const formattedResponse = {
377
+ ...response,
378
+ content: ((_d = (_c = (_b = response.choices) === null || _b === void 0 ? void 0 : _b[0]) === null || _c === void 0 ? void 0 : _c.message) === null || _d === void 0 ? void 0 : _d.content) ||
379
+ ((_f = (_e = response.choices) === null || _e === void 0 ? void 0 : _e[0]) === null || _f === void 0 ? void 0 : _f.text) ||
380
+ response.content,
381
+ role: ((_j = (_h = (_g = response.choices) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.message) === null || _j === void 0 ? void 0 : _j.role) || "assistant",
382
+ };
383
+ returnData.push({
384
+ json: formattedResponse,
385
+ pairedItem: {
386
+ item: itemIndex,
387
+ },
388
+ });
389
+ }
390
+ catch (error) {
391
+ const errorMessage = error instanceof Error ? error.message : "Unknown error occurred";
392
+ // Extract more detailed error information if available
393
+ let detailedError = errorMessage;
394
+ if (error && typeof error === "object" && "response" in error) {
395
+ const responseError = error;
396
+ if ((_k = responseError.response) === null || _k === void 0 ? void 0 : _k.body) {
397
+ detailedError = JSON.stringify(responseError.response.body);
398
+ }
399
+ else if ((_l = responseError.response) === null || _l === void 0 ? void 0 : _l.statusCode) {
400
+ detailedError = `HTTP ${responseError.response.statusCode}: ${errorMessage}`;
401
+ }
402
+ }
403
+ if (this.continueOnFail()) {
404
+ returnData.push({
405
+ json: {
406
+ error: detailedError,
407
+ },
408
+ pairedItem: {
409
+ item: itemIndex,
410
+ },
411
+ });
412
+ continue;
413
+ }
414
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), detailedError, {
415
+ itemIndex,
416
+ });
417
+ }
418
+ }
419
+ return [returnData];
420
+ }
421
+ }
422
+ exports.AgnicAI = AgnicAI;
Binary file
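For orientation, the AgnicAI node above forwards an OpenAI-style chat-completions request to AgnicPay's gateway and surfaces the first choice as `content` on its output item. Below is a minimal standalone sketch of the equivalent HTTP call; it is not part of the published package, and the AGNIC_API_TOKEN variable is a placeholder for an AgnicWallet API key or OAuth2 access token.

    // Minimal sketch of the request the AgnicAI node assembles above (illustrative only).
    // AGNIC_API_TOKEN is a placeholder for an AgnicWallet API key or OAuth2 access token.
    async function callAgnicGateway(): Promise<void> {
      const response = await fetch("https://api.agnicpay.xyz/v1/chat/completions", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${process.env.AGNIC_API_TOKEN}`,
        },
        body: JSON.stringify({
          model: "openai/gpt-4o-mini",
          messages: [
            { role: "system", content: "You are a helpful assistant." },
            { role: "user", content: "Summarize what an X402 payment is in one sentence." },
          ],
          temperature: 0.7,
          max_tokens: 256,
        }),
      });
      const data = await response.json();
      // The node exposes choices[0].message.content as `content` on its output item.
      console.log(data.choices?.[0]?.message?.content);
    }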
@@ -0,0 +1,12 @@
+ import { INodeType, INodeTypeDescription, ISupplyDataFunctions, SupplyData } from "n8n-workflow";
+ /**
+ * AgnicAI Chat Model Node for n8n
+ *
+ * Uses LangChain's ChatOpenAI class with AgnicPay's OpenAI-compatible endpoint.
+ * This approach is identical to how n8n's built-in OpenAI Chat Model works,
+ * just pointing to AgnicPay's AI Gateway instead.
+ */
+ export declare class AgnicAILanguageModel implements INodeType {
+ description: INodeTypeDescription;
+ supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData>;
+ }
@@ -0,0 +1,365 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.AgnicAILanguageModel = void 0;
4
+ const n8n_workflow_1 = require("n8n-workflow");
5
+ const openai_1 = require("@langchain/openai");
6
+ const base_1 = require("@langchain/core/callbacks/base");
7
+ /**
8
+ * Custom LLM Tracing callback for AgnicAI
9
+ * This enables the spinning indicator and AI Agent logging
10
+ * Mirrors n8n's internal N8nLlmTracing implementation
11
+ */
12
+ class AgnicLlmTracing extends base_1.BaseCallbackHandler {
13
+ constructor(executionFunctions) {
14
+ super();
15
+ this.name = "AgnicLlmTracing";
16
+ // This flag makes LangChain wait for handlers before continuing
17
+ this.awaitHandlers = true;
18
+ this.connectionType = n8n_workflow_1.NodeConnectionTypes.AiLanguageModel;
19
+ this.runsMap = {};
20
+ this.executionFunctions = executionFunctions;
21
+ }
22
+ async handleLLMStart(llm, prompts, runId) {
23
+ const options = llm.kwargs || llm;
24
+ // Add input data to n8n's execution context
25
+ // This triggers the spinning indicator
26
+ const { index } = this.executionFunctions.addInputData(this.connectionType, [[{ json: { messages: prompts, options } }]]);
27
+ this.runsMap[runId] = {
28
+ index,
29
+ options,
30
+ messages: prompts,
31
+ };
32
+ // Log AI event for the AI Agent's log panel
33
+ this.logAiEvent("ai-llm-generated-output-started", {
34
+ messages: prompts,
35
+ options,
36
+ });
37
+ }
38
+ async handleLLMEnd(output, runId) {
39
+ var _a;
40
+ const runDetails = (_a = this.runsMap[runId]) !== null && _a !== void 0 ? _a : { index: 0 };
41
+ // Parse the response
42
+ const generations = output.generations.map((gen) => gen.map((g) => ({ text: g.text, generationInfo: g.generationInfo })));
43
+ const response = {
44
+ generations,
45
+ llmOutput: output.llmOutput,
46
+ };
47
+ // Add output data to n8n's execution context
48
+ // This stops the spinning indicator and shows success
49
+ this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [[{ json: response }]]);
50
+ // Log AI event for the AI Agent's log panel
51
+ this.logAiEvent("ai-llm-generated-output", {
52
+ messages: runDetails.messages,
53
+ options: runDetails.options,
54
+ response,
55
+ });
56
+ }
57
+ async handleLLMError(error, runId) {
58
+ var _a;
59
+ const runDetails = (_a = this.runsMap[runId]) !== null && _a !== void 0 ? _a : { index: 0 };
60
+ // Add error output
61
+ this.executionFunctions.addOutputData(this.connectionType, runDetails.index, new n8n_workflow_1.NodeOperationError(this.executionFunctions.getNode(), error, {
62
+ functionality: "configuration-node",
63
+ }));
64
+ // Log AI error event
65
+ this.logAiEvent("ai-llm-errored", {
66
+ error: error.message || String(error),
67
+ runId,
68
+ });
69
+ }
70
+ logAiEvent(event, data) {
71
+ var _a, _b;
72
+ try {
73
+ (_b = (_a = this.executionFunctions).logAiEvent) === null || _b === void 0 ? void 0 : _b.call(_a, event, data ? (0, n8n_workflow_1.jsonStringify)(data) : undefined);
74
+ }
75
+ catch {
76
+ // Silently ignore if logAiEvent is not available
77
+ }
78
+ }
79
+ }
80
+ /**
81
+ * AgnicAI Chat Model Node for n8n
82
+ *
83
+ * Uses LangChain's ChatOpenAI class with AgnicPay's OpenAI-compatible endpoint.
84
+ * This approach is identical to how n8n's built-in OpenAI Chat Model works,
85
+ * just pointing to AgnicPay's AI Gateway instead.
86
+ */
87
+ class AgnicAILanguageModel {
88
+ constructor() {
89
+ this.description = {
90
+ displayName: "AgnicAI Chat Model",
91
+ name: "lmChatAgnicAI",
92
+ icon: "file:AgnicAILanguageModel.png",
93
+ group: ["transform"],
94
+ version: [1, 1.1],
95
+ description: "Chat model using AgnicPay AI Gateway with X402 payment support",
96
+ defaults: {
97
+ name: "AgnicAI Chat Model",
98
+ },
99
+ codex: {
100
+ categories: ["AI"],
101
+ subcategories: {
102
+ AI: ["Language Models", "Root Nodes"],
103
+ "Language Models": ["Chat Models (Recommended)"],
104
+ },
105
+ resources: {
106
+ primaryDocumentation: [
107
+ {
108
+ url: "https://www.agnicpay.xyz/ai-gateway",
109
+ },
110
+ ],
111
+ },
112
+ },
113
+ inputs: [],
114
+ outputs: [n8n_workflow_1.NodeConnectionTypes.AiLanguageModel],
115
+ outputNames: ["Model"],
116
+ credentials: [
117
+ {
118
+ name: "agnicWalletOAuth2Api",
119
+ required: false,
120
+ displayOptions: {
121
+ show: {
122
+ authentication: ["oAuth2"],
123
+ },
124
+ },
125
+ },
126
+ {
127
+ name: "agnicWalletApi",
128
+ required: false,
129
+ displayOptions: {
130
+ show: {
131
+ authentication: ["apiKey"],
132
+ },
133
+ },
134
+ },
135
+ ],
136
+ properties: [
137
+ {
138
+ displayName: "Authentication",
139
+ name: "authentication",
140
+ type: "options",
141
+ options: [
142
+ {
143
+ name: "OAuth2",
144
+ value: "oAuth2",
145
+ description: "Recommended: Connect your account",
146
+ },
147
+ {
148
+ name: "API Key",
149
+ value: "apiKey",
150
+ description: "For CI/CD or programmatic access",
151
+ },
152
+ ],
153
+ default: "apiKey",
154
+ description: "How to authenticate with AgnicWallet",
155
+ },
156
+ {
157
+ displayName: "Model",
158
+ name: "model",
159
+ type: "options",
160
+ typeOptions: {
161
+ allowCustomValues: true,
162
+ },
163
+ options: [
164
+ {
165
+ name: "GPT-4o Mini (Fast & Affordable)",
166
+ value: "openai/gpt-4o-mini",
167
+ },
168
+ {
169
+ name: "GPT-4o (Best Quality)",
170
+ value: "openai/gpt-4o",
171
+ },
172
+ {
173
+ name: "GPT-4 Turbo",
174
+ value: "openai/gpt-4-turbo",
175
+ },
176
+ {
177
+ name: "GPT-3.5 Turbo",
178
+ value: "openai/gpt-3.5-turbo",
179
+ },
180
+ {
181
+ name: "Claude 3.5 Sonnet",
182
+ value: "anthropic/claude-3.5-sonnet",
183
+ },
184
+ {
185
+ name: "Claude 3 Opus",
186
+ value: "anthropic/claude-3-opus",
187
+ },
188
+ {
189
+ name: "Claude 3 Haiku",
190
+ value: "anthropic/claude-3-haiku",
191
+ },
192
+ {
193
+ name: "Gemini Pro 1.5",
194
+ value: "google/gemini-pro-1.5",
195
+ },
196
+ {
197
+ name: "Gemini Flash 1.5",
198
+ value: "google/gemini-flash-1.5",
199
+ },
200
+ {
201
+ name: "Llama 3.1 70B",
202
+ value: "meta-llama/llama-3.1-70b-instruct",
203
+ },
204
+ {
205
+ name: "Llama 3.1 8B",
206
+ value: "meta-llama/llama-3.1-8b-instruct",
207
+ },
208
+ {
209
+ name: "Mistral Large",
210
+ value: "mistralai/mistral-large",
211
+ },
212
+ {
213
+ name: "Mixtral 8x22B",
214
+ value: "mistralai/mixtral-8x22b-instruct",
215
+ },
216
+ {
217
+ name: "DeepSeek R1",
218
+ value: "deepseek/deepseek-r1",
219
+ },
220
+ {
221
+ name: "DeepSeek Chat",
222
+ value: "deepseek/deepseek-chat",
223
+ },
224
+ {
225
+ name: "Qwen 2.5 72B",
226
+ value: "qwen/qwen-2.5-72b-instruct",
227
+ },
228
+ ],
229
+ default: "openai/gpt-4o-mini",
230
+ description: "Select a model or type a custom OpenRouter model ID. See https://openrouter.ai/models for all available models.",
231
+ },
232
+ {
233
+ displayName: "Options",
234
+ name: "options",
235
+ type: "collection",
236
+ placeholder: "Add Option",
237
+ default: {},
238
+ options: [
239
+ {
240
+ displayName: "Temperature",
241
+ name: "temperature",
242
+ type: "number",
243
+ typeOptions: {
244
+ minValue: 0,
245
+ maxValue: 2,
246
+ numberStepSize: 0.1,
247
+ },
248
+ default: 0.7,
249
+ description: "Controls randomness: Lower = more focused and deterministic",
250
+ },
251
+ {
252
+ displayName: "Max Tokens",
253
+ name: "maxTokens",
254
+ type: "number",
255
+ typeOptions: {
256
+ minValue: 1,
257
+ },
258
+ default: 2048,
259
+ description: "Maximum number of tokens to generate",
260
+ },
261
+ {
262
+ displayName: "Top P",
263
+ name: "topP",
264
+ type: "number",
265
+ typeOptions: {
266
+ minValue: 0,
267
+ maxValue: 1,
268
+ numberStepSize: 0.1,
269
+ },
270
+ default: 1,
271
+ description: "Nucleus sampling: considers tokens with top_p probability mass",
272
+ },
273
+ {
274
+ displayName: "Frequency Penalty",
275
+ name: "frequencyPenalty",
276
+ type: "number",
277
+ typeOptions: {
278
+ minValue: -2,
279
+ maxValue: 2,
280
+ numberStepSize: 0.1,
281
+ },
282
+ default: 0,
283
+ description: "Penalizes new tokens based on frequency in text so far",
284
+ },
285
+ {
286
+ displayName: "Presence Penalty",
287
+ name: "presencePenalty",
288
+ type: "number",
289
+ typeOptions: {
290
+ minValue: -2,
291
+ maxValue: 2,
292
+ numberStepSize: 0.1,
293
+ },
294
+ default: 0,
295
+ description: "Penalizes new tokens based on presence in text so far",
296
+ },
297
+ {
298
+ displayName: "Timeout",
299
+ name: "timeout",
300
+ type: "number",
301
+ default: 60000,
302
+ description: "Request timeout in milliseconds",
303
+ },
304
+ ],
305
+ },
306
+ ],
307
+ };
308
+ }
309
+ async supplyData(itemIndex) {
310
+ var _a, _b;
311
+ // Get authentication type and credentials
312
+ const authentication = this.getNodeParameter("authentication", itemIndex);
313
+ let apiKey;
314
+ try {
315
+ if (authentication === "oAuth2") {
316
+ const credentials = (await this.getCredentials("agnicWalletOAuth2Api", itemIndex));
317
+ apiKey = (_a = credentials.oauthTokenData) === null || _a === void 0 ? void 0 : _a.access_token;
318
+ if (!apiKey) {
319
+ throw new Error("OAuth2 access token not found. Please reconnect your AgnicWallet account.");
320
+ }
321
+ }
322
+ else {
323
+ const credentials = await this.getCredentials("agnicWalletApi", itemIndex);
324
+ apiKey = credentials.apiToken;
325
+ if (!apiKey) {
326
+ throw new Error("API Key not found. Please configure your AgnicWallet API credentials.");
327
+ }
328
+ }
329
+ }
330
+ catch (error) {
331
+ const errorMsg = error instanceof Error ? error.message : String(error);
332
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Authentication failed: ${errorMsg}`, { itemIndex });
333
+ }
334
+ // Get model parameter
335
+ const model = this.getNodeParameter("model", itemIndex);
336
+ if (!(model === null || model === void 0 ? void 0 : model.trim())) {
337
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), "Model must be specified. Select from dropdown or enter a custom OpenRouter model ID.", { itemIndex });
338
+ }
339
+ // Get options
340
+ const options = this.getNodeParameter("options", itemIndex, {});
341
+ // Create ChatOpenAI instance pointing to AgnicPay's endpoint
342
+ // Pass our custom tracing callback to enable spinning indicator and logging
343
+ const chatModel = new openai_1.ChatOpenAI({
344
+ apiKey,
345
+ model: model.trim(),
346
+ temperature: options.temperature,
347
+ maxTokens: options.maxTokens,
348
+ topP: options.topP,
349
+ frequencyPenalty: options.frequencyPenalty,
350
+ presencePenalty: options.presencePenalty,
351
+ timeout: (_b = options.timeout) !== null && _b !== void 0 ? _b : 60000,
352
+ maxRetries: 2,
353
+ configuration: {
354
+ baseURL: "https://api.agnicpay.xyz/v1",
355
+ },
356
+ // Add our custom tracing callback for spinning indicator and AI Agent logging
357
+ callbacks: [new AgnicLlmTracing(this)],
358
+ });
359
+ // Return in the same format as n8n's built-in OpenAI Chat Model
360
+ return {
361
+ response: chatModel,
362
+ };
363
+ }
364
+ }
365
+ exports.AgnicAILanguageModel = AgnicAILanguageModel;
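In essence, supplyData() above builds a LangChain ChatOpenAI instance whose baseURL points at AgnicPay's OpenAI-compatible gateway and attaches the custom tracing callback. A minimal standalone sketch of that configuration follows; it is not part of the published package, and the apiKey value is a placeholder for the token resolved from AgnicWallet credentials.

    import { ChatOpenAI } from "@langchain/openai";

    // Minimal sketch of the model object supplyData() builds above (illustrative only).
    // apiKey stands in for the AgnicWallet API key or OAuth2 access token from credentials.
    const apiKey = process.env.AGNIC_API_TOKEN ?? "";

    const chatModel = new ChatOpenAI({
      apiKey,
      model: "openai/gpt-4o-mini",
      temperature: 0.7,
      maxTokens: 2048,
      maxRetries: 2,
      configuration: {
        baseURL: "https://api.agnicpay.xyz/v1", // AgnicPay's OpenAI-compatible gateway
      },
    });

    // Outside n8n the same instance can be called directly, e.g.:
    // const reply = await chatModel.invoke("Hello!");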
@@ -0,0 +1,6 @@
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 60 60">
+ <rect width="60" height="60" fill="none"/>
+ <path d="M30 10 L50 25 L50 35 L30 50 L10 35 L10 25 Z" fill="#6366f1" stroke="#4f46e5" stroke-width="2"/>
+ <circle cx="30" cy="30" r="8" fill="#ffffff"/>
+ <path d="M25 30 L28 33 L35 26" stroke="#6366f1" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" fill="none"/>
+ </svg>
@@ -0,0 +1,22 @@
+ import { INodeType, INodeTypeDescription, ISupplyDataFunctions, IExecuteFunctions, SupplyData, INodeExecutionData } from "n8n-workflow";
+ /**
+ * AgnicMCPTool - MCP Client for AgnicPay
+ *
+ * This is a supply-only AI tool node that connects to the AgnicPay MCP server
+ * and provides X402 payment tools to AI Agents via the MCP protocol.
+ *
+ * This node cannot be executed directly - it only supplies tools to AI Agents.
+ */
+ export declare class AgnicMCPTool implements INodeType {
+ description: INodeTypeDescription;
+ /**
+ * Execute method for direct tool invocation.
+ * This is called when input data is passed directly to this node.
+ */
+ execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
+ /**
+ * Supply MCP tools to AI Agent.
+ * This is the main method that provides tools to the AI Agent.
+ */
+ supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData>;
+ }
@@ -0,0 +1,370 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.AgnicMCPTool = void 0;
4
+ const n8n_workflow_1 = require("n8n-workflow");
5
+ const index_js_1 = require("@modelcontextprotocol/sdk/client/index.js");
6
+ const streamableHttp_js_1 = require("@modelcontextprotocol/sdk/client/streamableHttp.js");
7
+ const tools_1 = require("@langchain/core/tools");
8
+ const zod_1 = require("zod");
9
+ const json_schema_to_zod_1 = require("@n8n/json-schema-to-zod");
10
+ /**
11
+ * Minimal Toolkit interface compatible with n8n AI Agent
12
+ * This replaces the deprecated langchain/agents Toolkit to avoid version conflicts
13
+ */
14
+ class AgnicMcpToolkit {
15
+ constructor(tools) {
16
+ this.tools = tools;
17
+ this.lc_namespace = ["langchain", "agents", "toolkits"];
18
+ }
19
+ getTools() {
20
+ return this.tools;
21
+ }
22
+ }
23
+ // Pre-configured AgnicPay MCP endpoint (uses HTTP Streamable transport)
24
+ const AGNIC_MCP_ENDPOINT = "https://mcp.agnicpay.xyz/sse";
25
+ /**
26
+ * Convert JSON Schema to Zod schema using n8n's library
27
+ * Returns actual Zod schema objects (not strings)
28
+ */
29
+ function convertJsonSchemaToZod(schema) {
30
+ if (!schema || typeof schema !== "object") {
31
+ return zod_1.z.object({});
32
+ }
33
+ try {
34
+ // @n8n/json-schema-to-zod returns actual Zod objects, not strings
35
+ const zodSchema = (0, json_schema_to_zod_1.jsonSchemaToZod)(schema);
36
+ // Ensure we return an object schema for structured tools
37
+ if (zodSchema instanceof zod_1.z.ZodObject) {
38
+ return zodSchema;
39
+ }
40
+ // Wrap non-object schemas in an object
41
+ return zod_1.z.object({ value: zodSchema });
42
+ }
43
+ catch {
44
+ // Fallback to empty object schema if conversion fails
45
+ return zod_1.z.object({});
46
+ }
47
+ }
48
+ /**
49
+ * Convert an MCP tool to a LangChain DynamicStructuredTool
50
+ */
51
+ function mcpToolToDynamicTool(tool, callTool) {
52
+ // Convert JSON Schema to Zod schema using proper library
53
+ const zodSchema = convertJsonSchemaToZod(tool.inputSchema);
54
+ // Use type assertion to avoid deep type instantiation issues with DynamicStructuredTool
55
+ const toolConfig = {
56
+ name: tool.name,
57
+ description: tool.description || `MCP tool: ${tool.name}`,
58
+ schema: zodSchema,
59
+ func: async (input) => {
60
+ try {
61
+ const result = await callTool(tool.name, input);
62
+ if (typeof result === "string") {
63
+ return result;
64
+ }
65
+ return JSON.stringify(result);
66
+ }
67
+ catch (error) {
68
+ const errorMessage = error instanceof Error ? error.message : String(error);
69
+ return `Error calling ${tool.name}: ${errorMessage}`;
70
+ }
71
+ },
72
+ // Required metadata for proper tool serialization in n8n
73
+ metadata: { isFromToolkit: true },
74
+ };
75
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
76
+ return new tools_1.DynamicStructuredTool(toolConfig);
77
+ }
78
+ /**
79
+ * AgnicMCPTool - MCP Client for AgnicPay
80
+ *
81
+ * This is a supply-only AI tool node that connects to the AgnicPay MCP server
82
+ * and provides X402 payment tools to AI Agents via the MCP protocol.
83
+ *
84
+ * This node cannot be executed directly - it only supplies tools to AI Agents.
85
+ */
86
+ class AgnicMCPTool {
87
+ constructor() {
88
+ this.description = {
89
+ displayName: "Agnic MCP Tool",
90
+ name: "agnicMcpTool",
91
+ icon: "file:AgnicMCPTool.png",
92
+ group: ["output"],
93
+ version: 1,
94
+ description: "MCP client for AgnicPay - X402 payment tools for AI Agents",
95
+ defaults: {
96
+ name: "Agnic MCP Tool",
97
+ },
98
+ // Supply-only AI tool configuration
99
+ inputs: [],
100
+ outputs: [{ type: n8n_workflow_1.NodeConnectionTypes.AiTool, displayName: "Tools" }],
101
+ codex: {
102
+ categories: ["AI"],
103
+ subcategories: {
104
+ AI: ["Tools"],
105
+ },
106
+ resources: {
107
+ primaryDocumentation: [
108
+ {
109
+ url: "https://www.agnicpay.xyz/mcp",
110
+ },
111
+ ],
112
+ },
113
+ },
114
+ credentials: [
115
+ {
116
+ name: "agnicWalletOAuth2Api",
117
+ required: false,
118
+ displayOptions: {
119
+ show: {
120
+ authentication: ["oAuth2"],
121
+ },
122
+ },
123
+ },
124
+ {
125
+ name: "agnicWalletApi",
126
+ required: false,
127
+ displayOptions: {
128
+ show: {
129
+ authentication: ["apiKey"],
130
+ },
131
+ },
132
+ },
133
+ ],
134
+ properties: [
135
+ {
136
+ displayName: "Authentication",
137
+ name: "authentication",
138
+ type: "options",
139
+ default: "apiKey",
140
+ options: [
141
+ {
142
+ name: "OAuth2",
143
+ value: "oAuth2",
144
+ description: "Recommended: Connect your account",
145
+ },
146
+ {
147
+ name: "API Key",
148
+ value: "apiKey",
149
+ description: "For CI/CD or programmatic access",
150
+ },
151
+ ],
152
+ description: "How to authenticate with AgnicWallet",
153
+ },
154
+ {
155
+ displayName: "Connects to AgnicPay MCP server. Tools are discovered automatically and include: make X402 API requests, check balance, view payment history, and discover APIs.",
156
+ name: "notice",
157
+ type: "notice",
158
+ default: "",
159
+ },
160
+ ],
161
+ };
162
+ }
163
+ /**
164
+ * Execute method for direct tool invocation.
165
+ * This is called when input data is passed directly to this node.
166
+ */
167
+ async execute() {
168
+ var _a;
169
+ const node = this.getNode();
170
+ const items = this.getInputData();
171
+ const returnData = [];
172
+ // Get authentication
173
+ const authentication = this.getNodeParameter("authentication", 0);
174
+ let accessToken;
175
+ try {
176
+ if (authentication === "oAuth2") {
177
+ const creds = (await this.getCredentials("agnicWalletOAuth2Api"));
178
+ accessToken = (_a = creds === null || creds === void 0 ? void 0 : creds.oauthTokenData) === null || _a === void 0 ? void 0 : _a.access_token;
179
+ }
180
+ else {
181
+ const creds = (await this.getCredentials("agnicWalletApi"));
182
+ accessToken = creds === null || creds === void 0 ? void 0 : creds.apiToken;
183
+ }
184
+ }
185
+ catch {
186
+ throw new n8n_workflow_1.NodeOperationError(node, "Failed to load AgnicWallet credentials.");
187
+ }
188
+ if (!accessToken) {
189
+ throw new n8n_workflow_1.NodeOperationError(node, "Missing AgnicWallet authentication token.");
190
+ }
191
+ // Connect to MCP server
192
+ const transport = new streamableHttp_js_1.StreamableHTTPClientTransport(new URL(AGNIC_MCP_ENDPOINT), {
193
+ requestInit: { headers: { Authorization: `Bearer ${accessToken}` } },
194
+ });
195
+ const client = new index_js_1.Client({ name: "agnic-mcp-client", version: "1.0.0" }, { capabilities: {} });
196
+ try {
197
+ await client.connect(transport);
198
+ const toolsResult = await client.listTools();
199
+ const mcpTools = toolsResult.tools || [];
200
+ for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
201
+ const item = items[itemIndex];
202
+ // Expect input to have a 'tool' property with the tool name
203
+ if (!item.json.tool || typeof item.json.tool !== "string") {
204
+ throw new n8n_workflow_1.NodeOperationError(node, "Tool name not found in item.json.tool", { itemIndex });
205
+ }
206
+ const toolName = item.json.tool;
207
+ const matchingTool = mcpTools.find((t) => t.name === toolName);
208
+ if (!matchingTool) {
209
+ throw new n8n_workflow_1.NodeOperationError(node, `Tool "${toolName}" not found`, {
210
+ itemIndex,
211
+ });
212
+ }
213
+ // Extract tool arguments (everything except 'tool' property)
214
+ const { tool: _, ...toolArguments } = item.json;
215
+ const result = await client.callTool({
216
+ name: toolName,
217
+ arguments: toolArguments,
218
+ });
219
+ // Extract text content from result
220
+ let responseContent = result;
221
+ if (result.content && Array.isArray(result.content)) {
222
+ const textContent = result.content.find((c) => c.type === "text");
223
+ if (textContent && "text" in textContent) {
224
+ responseContent = textContent.text;
225
+ }
226
+ }
227
+ returnData.push({
228
+ json: { response: responseContent },
229
+ pairedItem: { item: itemIndex },
230
+ });
231
+ }
232
+ }
233
+ finally {
234
+ try {
235
+ await client.close();
236
+ }
237
+ catch {
238
+ // Ignore cleanup errors
239
+ }
240
+ try {
241
+ await transport.close();
242
+ }
243
+ catch {
244
+ // Ignore cleanup errors
245
+ }
246
+ }
247
+ return [returnData];
248
+ }
249
+ /**
250
+ * Supply MCP tools to AI Agent.
251
+ * This is the main method that provides tools to the AI Agent.
252
+ */
253
+ async supplyData(itemIndex) {
254
+ var _a;
255
+ // ─────────────────────────────────────────────
256
+ // Authentication
257
+ // ─────────────────────────────────────────────
258
+ const authentication = this.getNodeParameter("authentication", itemIndex);
259
+ let accessToken;
260
+ try {
261
+ if (authentication === "oAuth2") {
262
+ const creds = (await this.getCredentials("agnicWalletOAuth2Api", itemIndex));
263
+ accessToken = (_a = creds === null || creds === void 0 ? void 0 : creds.oauthTokenData) === null || _a === void 0 ? void 0 : _a.access_token;
264
+ }
265
+ else {
266
+ const creds = (await this.getCredentials("agnicWalletApi", itemIndex));
267
+ accessToken = creds === null || creds === void 0 ? void 0 : creds.apiToken;
268
+ }
269
+ }
270
+ catch (err) {
271
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), "Failed to load AgnicWallet credentials. Please configure your credentials.", { itemIndex });
272
+ }
273
+ if (!accessToken) {
274
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), "Missing AgnicWallet authentication token. Please check your credentials configuration.", { itemIndex });
275
+ }
276
+ // ─────────────────────────────────────────────
277
+ // MCP Client Setup
278
+ // ─────────────────────────────────────────────
279
+ let client;
280
+ let transport;
281
+ try {
282
+ // Create HTTP Streamable transport with authentication
283
+ // This transport uses POST requests and accepts both JSON and SSE responses
284
+ transport = new streamableHttp_js_1.StreamableHTTPClientTransport(new URL(AGNIC_MCP_ENDPOINT), {
285
+ requestInit: {
286
+ headers: {
287
+ Authorization: `Bearer ${accessToken}`,
288
+ },
289
+ },
290
+ });
291
+ // Create MCP client
292
+ client = new index_js_1.Client({ name: "agnic-mcp-client", version: "1.0.0" }, { capabilities: {} });
293
+ // Connect to MCP server
294
+ await client.connect(transport);
295
+ // ─────────────────────────────────────────────
296
+ // Discover and wrap MCP tools
297
+ // ─────────────────────────────────────────────
298
+ const toolsResult = await client.listTools();
299
+ const mcpTools = toolsResult.tools || [];
300
+ if (mcpTools.length === 0) {
301
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), "No tools available from AgnicPay MCP server. Please check your authentication and try again.", { itemIndex });
302
+ }
303
+ // Create a tool caller function
304
+ const callTool = async (name, args) => {
305
+ if (!client) {
306
+ throw new Error("MCP client is not connected");
307
+ }
308
+ const result = await client.callTool({
309
+ name,
310
+ arguments: args,
311
+ });
312
+ // Extract content from the result
313
+ if (result.content && Array.isArray(result.content)) {
314
+ const textContent = result.content.find((c) => c.type === "text");
315
+ if (textContent && "text" in textContent) {
316
+ return textContent.text;
317
+ }
318
+ }
319
+ return result;
320
+ };
321
+ // Convert MCP tools to LangChain DynamicStructuredTools
322
+ const langchainTools = mcpTools.map((tool) => mcpToolToDynamicTool(tool, callTool));
323
+ // Wrap tools in a Toolkit for n8n AI Agent compatibility
324
+ const toolkit = new AgnicMcpToolkit(langchainTools);
325
+ // Store references for cleanup
326
+ const clientRef = client;
327
+ const transportRef = transport;
328
+ // Return toolkit with cleanup function
329
+ return {
330
+ response: toolkit,
331
+ closeFunction: async () => {
332
+ try {
333
+ await clientRef.close();
334
+ }
335
+ catch {
336
+ // Ignore cleanup errors
337
+ }
338
+ try {
339
+ await transportRef.close();
340
+ }
341
+ catch {
342
+ // Ignore cleanup errors
343
+ }
344
+ },
345
+ };
346
+ }
347
+ catch (error) {
348
+ // Clean up on error
349
+ if (client) {
350
+ try {
351
+ await client.close();
352
+ }
353
+ catch {
354
+ // Ignore cleanup errors
355
+ }
356
+ }
357
+ if (transport) {
358
+ try {
359
+ await transport.close();
360
+ }
361
+ catch {
362
+ // Ignore cleanup errors
363
+ }
364
+ }
365
+ const errorMessage = error instanceof Error ? error.message : String(error);
366
+ throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Failed to connect to AgnicPay MCP server: ${errorMessage}`, { itemIndex });
367
+ }
368
+ }
369
+ }
370
+ exports.AgnicMCPTool = AgnicMCPTool;
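At its core, the MCP tool node above connects to the AgnicPay MCP server over the Streamable HTTP transport, discovers the available tools, and invokes them by name. A minimal standalone sketch of that connect → listTools → callTool flow follows; it is not part of the published package, and the accessToken parameter is a placeholder for the token resolved from AgnicWallet credentials.

    import { Client } from "@modelcontextprotocol/sdk/client/index.js";
    import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";

    // Minimal sketch of the connect → listTools → callTool flow used above (illustrative only).
    // accessToken is a placeholder for an AgnicWallet API key or OAuth2 access token.
    async function listAgnicTools(accessToken: string): Promise<void> {
      const transport = new StreamableHTTPClientTransport(new URL("https://mcp.agnicpay.xyz/sse"), {
        requestInit: { headers: { Authorization: `Bearer ${accessToken}` } },
      });
      const client = new Client({ name: "agnic-mcp-client", version: "1.0.0" }, { capabilities: {} });

      try {
        await client.connect(transport);
        const { tools } = await client.listTools();
        console.log(tools.map((t) => t.name));

        // Each discovered tool can then be invoked by name, e.g.:
        // const result = await client.callTool({ name: tools[0].name, arguments: {} });
      } finally {
        await client.close();
        await transport.close();
      }
    }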
@@ -13,6 +13,7 @@ class X402HttpRequest
  defaults: {
  name: "AgnicWallet X402",
  },
+ icon: "file:X402HttpRequest.png",
  usableAsTool: true,
  inputs: ["main"],
  outputs: ["main"],
@@ -0,0 +1,19 @@
+ <?xml version="1.0" standalone="no"?>
+ <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
+ "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+ <svg version="1.0" xmlns="http://www.w3.org/2000/svg"
+ width="44.000000pt" height="45.000000pt" viewBox="0 0 44.000000 45.000000"
+ preserveAspectRatio="xMidYMid meet">
+
+ <g transform="translate(0.000000,45.000000) scale(0.100000,-0.100000)"
+ fill="#000000" stroke="none">
+ <path d="M175 380 l-39 -40 42 -43 42 -42 42 42 42 43 -39 40 c-21 22 -42 40
+ -45 40 -3 0 -24 -18 -45 -40z"/>
+ <path d="M55 260 l-39 -40 42 -43 42 -42 42 42 42 43 -39 40 c-21 22 -42 40
+ -45 40 -3 0 -24 -18 -45 -40z"/>
+ <path d="M295 260 l-39 -40 42 -43 42 -42 42 42 42 43 -39 40 c-21 22 -42 40
+ -45 40 -3 0 -24 -18 -45 -40z"/>
+ <path d="M175 140 l-39 -40 42 -43 42 -42 42 42 42 43 -39 40 c-21 22 -42 40
+ -45 40 -3 0 -24 -18 -45 -40z"/>
+ </g>
+ </svg>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "n8n-nodes-agnicwallet",
- "version": "1.0.6",
+ "version": "1.0.8",
  "description": "n8n community node for AgnicWallet - automated Web3 payments for X402 APIs",
  "keywords": [
  "n8n-community-node-package",
@@ -15,7 +15,10 @@
  "automation",
  "workflow",
  "base",
- "solana"
+ "solana",
+ "mcp",
+ "model-context-protocol",
+ "ai-agent"
  ],
  "license": "MIT",
  "homepage": "https://github.com/agnicpay/n8n-X402-AgnicWallet#readme",
@@ -48,10 +51,20 @@
  "dist/credentials/AgnicWalletOAuth2Api.credentials.js"
  ],
  "nodes": [
- "dist/nodes/X402HttpRequest/X402HttpRequest.node.js"
+ "dist/nodes/X402HttpRequest/X402HttpRequest.node.js",
+ "dist/nodes/AgnicAILanguageModel/AgnicAILanguageModel.node.js",
+ "dist/nodes/AgnicAI/AgnicAI.node.js",
+ "dist/nodes/AgnicMCPTool/AgnicMCPTool.node.js"
  ]
  },
+ "dependencies": {
+ "@modelcontextprotocol/sdk": "^1.24.0",
+ "@n8n/json-schema-to-zod": "^1.6.0",
+ "zod": "^3.23.0"
+ },
  "devDependencies": {
+ "@langchain/core": "^0.3.68",
+ "@langchain/openai": "^0.6.16",
  "@types/node": "^20.10.0",
  "@typescript-eslint/parser": "^6.13.0",
  "eslint": "^8.54.0",
@@ -62,6 +75,7 @@
  "typescript": "^5.3.0"
  },
  "peerDependencies": {
+ "@langchain/core": ">=0.2.0",
  "n8n-workflow": "*"
  }
  }