n8n-nodes-agnicwallet 1.0.5 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/credentials/AgnicWalletApi.credentials.d.ts +1 -1
- package/dist/credentials/AgnicWalletApi.credentials.js +17 -17
- package/dist/credentials/AgnicWalletOAuth2Api.credentials.d.ts +1 -1
- package/dist/credentials/AgnicWalletOAuth2Api.credentials.js +51 -51
- package/dist/nodes/AgnicAI/AgnicAI.node.d.ts +5 -0
- package/dist/nodes/AgnicAI/AgnicAI.node.js +422 -0
- package/dist/nodes/AgnicAI/AgnicAI.png +0 -0
- package/dist/nodes/AgnicAILanguageModel/AgnicAILanguageModel.node.d.ts +12 -0
- package/dist/nodes/AgnicAILanguageModel/AgnicAILanguageModel.node.js +365 -0
- package/dist/nodes/AgnicAILanguageModel/AgnicAILanguageModel.png +0 -0
- package/dist/nodes/AgnicAILanguageModel/AgnicAILanguageModel.svg +6 -0
- package/dist/nodes/AgnicMCPTool/AgnicMCPTool.node.d.ts +22 -0
- package/dist/nodes/AgnicMCPTool/AgnicMCPTool.node.js +368 -0
- package/dist/nodes/AgnicMCPTool/AgnicMCPTool.png +0 -0
- package/dist/nodes/X402HttpRequest/X402HttpRequest.node.d.ts +1 -1
- package/dist/nodes/X402HttpRequest/X402HttpRequest.node.js +91 -89
- package/dist/nodes/X402HttpRequest/X402HttpRequest.png +0 -0
- package/dist/nodes/X402HttpRequest/X402HttpRequest.svg +19 -0
- package/package.json +21 -8
@@ -0,0 +1,365 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AgnicAILanguageModel = void 0;
+const n8n_workflow_1 = require("n8n-workflow");
+const openai_1 = require("@langchain/openai");
+const base_1 = require("@langchain/core/callbacks/base");
+/**
+ * Custom LLM Tracing callback for AgnicAI
+ * This enables the spinning indicator and AI Agent logging
+ * Mirrors n8n's internal N8nLlmTracing implementation
+ */
+class AgnicLlmTracing extends base_1.BaseCallbackHandler {
+    constructor(executionFunctions) {
+        super();
+        this.name = "AgnicLlmTracing";
+        // This flag makes LangChain wait for handlers before continuing
+        this.awaitHandlers = true;
+        this.connectionType = n8n_workflow_1.NodeConnectionTypes.AiLanguageModel;
+        this.runsMap = {};
+        this.executionFunctions = executionFunctions;
+    }
+    async handleLLMStart(llm, prompts, runId) {
+        const options = llm.kwargs || llm;
+        // Add input data to n8n's execution context
+        // This triggers the spinning indicator
+        const { index } = this.executionFunctions.addInputData(this.connectionType, [[{ json: { messages: prompts, options } }]]);
+        this.runsMap[runId] = {
+            index,
+            options,
+            messages: prompts,
+        };
+        // Log AI event for the AI Agent's log panel
+        this.logAiEvent("ai-llm-generated-output-started", {
+            messages: prompts,
+            options,
+        });
+    }
+    async handleLLMEnd(output, runId) {
+        var _a;
+        const runDetails = (_a = this.runsMap[runId]) !== null && _a !== void 0 ? _a : { index: 0 };
+        // Parse the response
+        const generations = output.generations.map((gen) => gen.map((g) => ({ text: g.text, generationInfo: g.generationInfo })));
+        const response = {
+            generations,
+            llmOutput: output.llmOutput,
+        };
+        // Add output data to n8n's execution context
+        // This stops the spinning indicator and shows success
+        this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [[{ json: response }]]);
+        // Log AI event for the AI Agent's log panel
+        this.logAiEvent("ai-llm-generated-output", {
+            messages: runDetails.messages,
+            options: runDetails.options,
+            response,
+        });
+    }
+    async handleLLMError(error, runId) {
+        var _a;
+        const runDetails = (_a = this.runsMap[runId]) !== null && _a !== void 0 ? _a : { index: 0 };
+        // Add error output
+        this.executionFunctions.addOutputData(this.connectionType, runDetails.index, new n8n_workflow_1.NodeOperationError(this.executionFunctions.getNode(), error, {
+            functionality: "configuration-node",
+        }));
+        // Log AI error event
+        this.logAiEvent("ai-llm-errored", {
+            error: error.message || String(error),
+            runId,
+        });
+    }
+    logAiEvent(event, data) {
+        var _a, _b;
+        try {
+            (_b = (_a = this.executionFunctions).logAiEvent) === null || _b === void 0 ? void 0 : _b.call(_a, event, data ? (0, n8n_workflow_1.jsonStringify)(data) : undefined);
+        }
+        catch {
+            // Silently ignore if logAiEvent is not available
+        }
+    }
+}
+/**
+ * AgnicAI Chat Model Node for n8n
+ *
+ * Uses LangChain's ChatOpenAI class with AgnicPay's OpenAI-compatible endpoint.
+ * This approach is identical to how n8n's built-in OpenAI Chat Model works,
+ * just pointing to AgnicPay's AI Gateway instead.
+ */
+class AgnicAILanguageModel {
+    constructor() {
+        this.description = {
+            displayName: "AgnicAI Chat Model",
+            name: "lmChatAgnicAI",
+            icon: "file:AgnicAILanguageModel.png",
+            group: ["transform"],
+            version: [1, 1.1],
+            description: "Chat model using AgnicPay AI Gateway with X402 payment support",
+            defaults: {
+                name: "AgnicAI Chat Model",
+            },
+            codex: {
+                categories: ["AI"],
+                subcategories: {
+                    AI: ["Language Models", "Root Nodes"],
+                    "Language Models": ["Chat Models (Recommended)"],
+                },
+                resources: {
+                    primaryDocumentation: [
+                        {
+                            url: "https://www.agnicpay.xyz/ai-gateway",
+                        },
+                    ],
+                },
+            },
+            inputs: [],
+            outputs: [n8n_workflow_1.NodeConnectionTypes.AiLanguageModel],
+            outputNames: ["Model"],
+            credentials: [
+                {
+                    name: "agnicWalletOAuth2Api",
+                    required: false,
+                    displayOptions: {
+                        show: {
+                            authentication: ["oAuth2"],
+                        },
+                    },
+                },
+                {
+                    name: "agnicWalletApi",
+                    required: false,
+                    displayOptions: {
+                        show: {
+                            authentication: ["apiKey"],
+                        },
+                    },
+                },
+            ],
+            properties: [
+                {
+                    displayName: "Authentication",
+                    name: "authentication",
+                    type: "options",
+                    options: [
+                        {
+                            name: "OAuth2",
+                            value: "oAuth2",
+                            description: "Recommended: Connect your account",
+                        },
+                        {
+                            name: "API Key",
+                            value: "apiKey",
+                            description: "For CI/CD or programmatic access",
+                        },
+                    ],
+                    default: "apiKey",
+                    description: "How to authenticate with AgnicWallet",
+                },
+                {
+                    displayName: "Model",
+                    name: "model",
+                    type: "options",
+                    typeOptions: {
+                        allowCustomValues: true,
+                    },
+                    options: [
+                        {
+                            name: "GPT-4o Mini (Fast & Affordable)",
+                            value: "openai/gpt-4o-mini",
+                        },
+                        {
+                            name: "GPT-4o (Best Quality)",
+                            value: "openai/gpt-4o",
+                        },
+                        {
+                            name: "GPT-4 Turbo",
+                            value: "openai/gpt-4-turbo",
+                        },
+                        {
+                            name: "GPT-3.5 Turbo",
+                            value: "openai/gpt-3.5-turbo",
+                        },
+                        {
+                            name: "Claude 3.5 Sonnet",
+                            value: "anthropic/claude-3.5-sonnet",
+                        },
+                        {
+                            name: "Claude 3 Opus",
+                            value: "anthropic/claude-3-opus",
+                        },
+                        {
+                            name: "Claude 3 Haiku",
+                            value: "anthropic/claude-3-haiku",
+                        },
+                        {
+                            name: "Gemini Pro 1.5",
+                            value: "google/gemini-pro-1.5",
+                        },
+                        {
+                            name: "Gemini Flash 1.5",
+                            value: "google/gemini-flash-1.5",
+                        },
+                        {
+                            name: "Llama 3.1 70B",
+                            value: "meta-llama/llama-3.1-70b-instruct",
+                        },
+                        {
+                            name: "Llama 3.1 8B",
+                            value: "meta-llama/llama-3.1-8b-instruct",
+                        },
+                        {
+                            name: "Mistral Large",
+                            value: "mistralai/mistral-large",
+                        },
+                        {
+                            name: "Mixtral 8x22B",
+                            value: "mistralai/mixtral-8x22b-instruct",
+                        },
+                        {
+                            name: "DeepSeek R1",
+                            value: "deepseek/deepseek-r1",
+                        },
+                        {
+                            name: "DeepSeek Chat",
+                            value: "deepseek/deepseek-chat",
+                        },
+                        {
+                            name: "Qwen 2.5 72B",
+                            value: "qwen/qwen-2.5-72b-instruct",
+                        },
+                    ],
+                    default: "openai/gpt-4o-mini",
+                    description: "Select a model or type a custom OpenRouter model ID. See https://openrouter.ai/models for all available models.",
+                },
+                {
+                    displayName: "Options",
+                    name: "options",
+                    type: "collection",
+                    placeholder: "Add Option",
+                    default: {},
+                    options: [
+                        {
+                            displayName: "Temperature",
+                            name: "temperature",
+                            type: "number",
+                            typeOptions: {
+                                minValue: 0,
+                                maxValue: 2,
+                                numberStepSize: 0.1,
+                            },
+                            default: 0.7,
+                            description: "Controls randomness: Lower = more focused and deterministic",
+                        },
+                        {
+                            displayName: "Max Tokens",
+                            name: "maxTokens",
+                            type: "number",
+                            typeOptions: {
+                                minValue: 1,
+                            },
+                            default: 2048,
+                            description: "Maximum number of tokens to generate",
+                        },
+                        {
+                            displayName: "Top P",
+                            name: "topP",
+                            type: "number",
+                            typeOptions: {
+                                minValue: 0,
+                                maxValue: 1,
+                                numberStepSize: 0.1,
+                            },
+                            default: 1,
+                            description: "Nucleus sampling: considers tokens with top_p probability mass",
+                        },
+                        {
+                            displayName: "Frequency Penalty",
+                            name: "frequencyPenalty",
+                            type: "number",
+                            typeOptions: {
+                                minValue: -2,
+                                maxValue: 2,
+                                numberStepSize: 0.1,
+                            },
+                            default: 0,
+                            description: "Penalizes new tokens based on frequency in text so far",
+                        },
+                        {
+                            displayName: "Presence Penalty",
+                            name: "presencePenalty",
+                            type: "number",
+                            typeOptions: {
+                                minValue: -2,
+                                maxValue: 2,
+                                numberStepSize: 0.1,
+                            },
+                            default: 0,
+                            description: "Penalizes new tokens based on presence in text so far",
+                        },
+                        {
+                            displayName: "Timeout",
+                            name: "timeout",
+                            type: "number",
+                            default: 60000,
+                            description: "Request timeout in milliseconds",
+                        },
+                    ],
+                },
+            ],
+        };
+    }
+    async supplyData(itemIndex) {
+        var _a, _b;
+        // Get authentication type and credentials
+        const authentication = this.getNodeParameter("authentication", itemIndex);
+        let apiKey;
+        try {
+            if (authentication === "oAuth2") {
+                const credentials = (await this.getCredentials("agnicWalletOAuth2Api", itemIndex));
+                apiKey = (_a = credentials.oauthTokenData) === null || _a === void 0 ? void 0 : _a.access_token;
+                if (!apiKey) {
+                    throw new Error("OAuth2 access token not found. Please reconnect your AgnicWallet account.");
+                }
+            }
+            else {
+                const credentials = await this.getCredentials("agnicWalletApi", itemIndex);
+                apiKey = credentials.apiToken;
+                if (!apiKey) {
+                    throw new Error("API Key not found. Please configure your AgnicWallet API credentials.");
+                }
+            }
+        }
+        catch (error) {
+            const errorMsg = error instanceof Error ? error.message : String(error);
+            throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Authentication failed: ${errorMsg}`, { itemIndex });
+        }
+        // Get model parameter
+        const model = this.getNodeParameter("model", itemIndex);
+        if (!(model === null || model === void 0 ? void 0 : model.trim())) {
+            throw new n8n_workflow_1.NodeOperationError(this.getNode(), "Model must be specified. Select from dropdown or enter a custom OpenRouter model ID.", { itemIndex });
+        }
+        // Get options
+        const options = this.getNodeParameter("options", itemIndex, {});
+        // Create ChatOpenAI instance pointing to AgnicPay's endpoint
+        // Pass our custom tracing callback to enable spinning indicator and logging
+        const chatModel = new openai_1.ChatOpenAI({
+            apiKey,
+            model: model.trim(),
+            temperature: options.temperature,
+            maxTokens: options.maxTokens,
+            topP: options.topP,
+            frequencyPenalty: options.frequencyPenalty,
+            presencePenalty: options.presencePenalty,
+            timeout: (_b = options.timeout) !== null && _b !== void 0 ? _b : 60000,
+            maxRetries: 2,
+            configuration: {
+                baseURL: "https://api.agnicpay.xyz/v1",
+            },
+            // Add our custom tracing callback for spinning indicator and AI Agent logging
+            callbacks: [new AgnicLlmTracing(this)],
+        });
+        // Return in the same format as n8n's built-in OpenAI Chat Model
+        return {
+            response: chatModel,
+        };
+    }
+}
+exports.AgnicAILanguageModel = AgnicAILanguageModel;
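Note: the base URL (https://api.agnicpay.xyz/v1), the default model ID, and the ChatOpenAI options above are taken directly from the compiled node. As a minimal standalone sketch of the same gateway configuration outside n8n, the AGNIC_API_KEY environment variable and the invoke() call below are illustrative assumptions, not part of the package:

import { ChatOpenAI } from "@langchain/openai";

// Same ChatOpenAI setup the node builds in supplyData(), driven directly.
// AGNIC_API_KEY is a hypothetical env var standing in for the AgnicWallet
// API token or OAuth2 access token that the node resolves from credentials.
const model = new ChatOpenAI({
    apiKey: process.env.AGNIC_API_KEY,
    model: "openai/gpt-4o-mini", // node default; any OpenRouter-style ID from the dropdown
    temperature: 0.7,
    maxRetries: 2,
    configuration: {
        baseURL: "https://api.agnicpay.xyz/v1", // AgnicPay's OpenAI-compatible gateway
    },
});

// One-off call; inside n8n the AI Agent drives the model instead.
const reply = await model.invoke("Say hello");
console.log(reply.content);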
Binary file
@@ -0,0 +1,6 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 60 60">
+  <rect width="60" height="60" fill="none"/>
+  <path d="M30 10 L50 25 L50 35 L30 50 L10 35 L10 25 Z" fill="#6366f1" stroke="#4f46e5" stroke-width="2"/>
+  <circle cx="30" cy="30" r="8" fill="#ffffff"/>
+  <path d="M25 30 L28 33 L35 26" stroke="#6366f1" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" fill="none"/>
+</svg>
@@ -0,0 +1,22 @@
+import { INodeType, INodeTypeDescription, ISupplyDataFunctions, IExecuteFunctions, SupplyData, INodeExecutionData } from "n8n-workflow";
+/**
+ * AgnicMCPTool - MCP Client for AgnicPay
+ *
+ * This is a supply-only AI tool node that connects to the AgnicPay MCP server
+ * and provides X402 payment tools to AI Agents via the MCP protocol.
+ *
+ * This node cannot be executed directly - it only supplies tools to AI Agents.
+ */
+export declare class AgnicMCPTool implements INodeType {
+    description: INodeTypeDescription;
+    /**
+     * Execute method for direct tool invocation.
+     * This is called when input data is passed directly to this node.
+     */
+    execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
+    /**
+     * Supply MCP tools to AI Agent.
+     * This is the main method that provides tools to the AI Agent.
+     */
+    supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData>;
+}
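The compiled AgnicMCPTool.node.js (368 added lines) is not shown in this diff, so the declaration above is the only visible surface. Purely as a hedged sketch of the supplyData pattern such a declaration implies — the tool name, description, endpoint, and callAgnicMcp helper below are hypothetical placeholders, not the package's actual implementation:

import type { ISupplyDataFunctions, SupplyData } from "n8n-workflow";
import { DynamicTool } from "@langchain/core/tools";

// Hypothetical helper: forwards a tool invocation to an MCP-style HTTP endpoint.
// The URL and payload shape are assumptions for illustration only.
async function callAgnicMcp(tool: string, args: string): Promise<string> {
    const res = await fetch("https://api.agnicpay.xyz/mcp", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ tool, arguments: args }),
    });
    return await res.text();
}

// Sketch of a supplyData() that hands a single LangChain tool to the AI Agent.
export async function supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
    const payTool = new DynamicTool({
        name: "x402_pay", // placeholder tool name
        description: "Make an X402 payment through AgnicPay (placeholder description)",
        func: async (input: string) => callAgnicMcp("x402_pay", input),
    });
    // n8n expects the supplied tool(s) under `response`.
    return { response: payTool };
}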