n8n-nodes-github-copilot 3.2.7 → 3.3.0

This diff compares the publicly released contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -1,4 +1,3 @@
  export * from './types';
  export * from './helpers';
- export * from './audioProcessor';
  export * from './imageProcessor';
@@ -16,5 +16,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  Object.defineProperty(exports, "__esModule", { value: true });
  __exportStar(require("./types"), exports);
  __exportStar(require("./helpers"), exports);
- __exportStar(require("./audioProcessor"), exports);
  __exportStar(require("./imageProcessor"), exports);
@@ -0,0 +1,5 @@
+ import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription } from 'n8n-workflow';
+ export declare class N8nAiAgent implements INodeType {
+     description: INodeTypeDescription;
+     execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
+ }
@@ -0,0 +1,214 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.N8nAiAgent = void 0;
+ const n8n_workflow_1 = require("n8n-workflow");
+ const nodeProperties_1 = require("./nodeProperties");
+ const mediaDetection_1 = require("../GitHubCopilotChatAPI/utils/mediaDetection");
+ class N8nAiAgent {
+     constructor() {
+         this.description = {
+             displayName: 'N8N AI Agent',
+             name: 'n8nAiAgent',
+             icon: 'file:n8n-ai.svg',
+             group: ['AI'],
+             version: 1,
+             subtitle: '={{$parameter["operation"] + ": " + $parameter["model"]}}',
+             description: 'Connect to N8N AI Agent service for advanced AI capabilities with tool calling and memory',
+             defaults: {
+                 name: 'N8N AI Agent',
+             },
+             inputs: ["main"],
+             outputs: ["main"],
+             credentials: [
+                 {
+                     name: 'n8nApi',
+                     required: true,
+                 },
+             ],
+             properties: nodeProperties_1.nodeProperties,
+         };
+     }
+     async execute() {
+         const items = this.getInputData();
+         const returnData = [];
+         for (let i = 0; i < items.length; i++) {
+             try {
+                 const operation = this.getNodeParameter('operation', i);
+                 const model = this.getNodeParameter('model', i);
+                 if (operation === 'chat') {
+                     const message = this.getNodeParameter('message', i);
+                     const includeMedia = this.getNodeParameter('includeMedia', i, false);
+                     const messages = [];
+                     const systemMessage = this.getNodeParameter('systemMessage', i, '');
+                     if (systemMessage) {
+                         messages.push({
+                             role: 'system',
+                             content: systemMessage,
+                         });
+                     }
+                     const messageContent = [
+                         {
+                             type: 'text',
+                             text: message,
+                         },
+                     ];
+                     if (includeMedia) {
+                         const mediaSource = this.getNodeParameter('mediaSource', i);
+                         const processedMedia = await (0, mediaDetection_1.processMediaFile)(this, i, mediaSource, this.getNodeParameter('mediaFile', i, ''), this.getNodeParameter('mediaUrl', i, ''), this.getNodeParameter('mediaProperty', i, ''));
+                         if (processedMedia && processedMedia.dataUrl) {
+                             messageContent.push({
+                                 type: 'image_url',
+                                 image_url: {
+                                     url: processedMedia.dataUrl,
+                                 },
+                             });
+                         }
+                     }
+                     messages.push({
+                         role: 'user',
+                         content: messageContent,
+                     });
+                     const includeHistory = this.getNodeParameter('includeHistory', i, false);
+                     if (includeHistory) {
+                         const historyMessages = this.getNodeParameter('conversationHistory', i, []);
+                         messages.splice(-1, 0, ...historyMessages);
+                     }
+                     const advancedOptions = {};
+                     const maxTokens = this.getNodeParameter('maxTokens', i, 1000);
+                     const temperature = this.getNodeParameter('temperature', i, 0.7);
+                     const enableTools = this.getNodeParameter('enableTools', i, false);
+                     if (maxTokens > 0) {
+                         advancedOptions.max_tokens = maxTokens;
+                     }
+                     advancedOptions.temperature = temperature;
+                     if (enableTools) {
+                         const toolsConfig = this.getNodeParameter('toolsConfig', i, {});
+                         if (toolsConfig.tools && Array.isArray(toolsConfig.tools)) {
+                             advancedOptions.tools = toolsConfig.tools;
+                         }
+                     }
+                     const requestBody = {
+                         model,
+                         messages,
+                         stream: false,
+                         ...advancedOptions,
+                     };
+                     const response = await makeN8nAiAgentRequest(this, '/chat', requestBody, includeMedia);
+                     const result = {
+                         message: response.response || response.message || '',
+                         model,
+                         operation,
+                         usage: response.usage || null,
+                         tool_calls: response.tool_calls || null,
+                         memory: response.memory || null,
+                         finish_reason: response.finish_reason || 'completed',
+                     };
+                     returnData.push({
+                         json: result,
+                         pairedItem: { item: i },
+                     });
+                 }
+                 else if (operation === 'tools') {
+                     const toolName = this.getNodeParameter('toolName', i);
+                     const toolArguments = this.getNodeParameter('toolArguments', i, {});
+                     const context = this.getNodeParameter('context', i, '');
+                     const requestBody = {
+                         tool: toolName,
+                         arguments: toolArguments,
+                         context,
+                         model,
+                     };
+                     const response = await makeN8nAiAgentRequest(this, '/tools', requestBody, false);
+                     const result = {
+                         tool_name: toolName,
+                         result: response.result || response.response,
+                         execution_time: response.execution_time || null,
+                         operation,
+                         model,
+                     };
+                     returnData.push({
+                         json: result,
+                         pairedItem: { item: i },
+                     });
+                 }
+                 else if (operation === 'memory') {
+                     const memoryAction = this.getNodeParameter('memoryAction', i);
+                     const sessionId = this.getNodeParameter('sessionId', i, '');
+                     const requestBody = {
+                         action: memoryAction,
+                         session_id: sessionId,
+                     };
+                     if (memoryAction === 'store') {
+                         const memoryData = this.getNodeParameter('memoryData', i);
+                         requestBody.data = memoryData;
+                     }
+                     const response = await makeN8nAiAgentRequest(this, '/memory', requestBody, false);
+                     const result = {
+                         action: memoryAction,
+                         session_id: sessionId,
+                         data: response.data || response.memory,
+                         operation,
+                     };
+                     returnData.push({
+                         json: result,
+                         pairedItem: { item: i },
+                     });
+                 }
+             }
+             catch (error) {
+                 if (this.continueOnFail()) {
+                     const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+                     returnData.push({
+                         json: {
+                             error: errorMessage,
+                             operation: this.getNodeParameter('operation', i),
+                             model: this.getNodeParameter('model', i),
+                         },
+                         pairedItem: { item: i },
+                     });
+                 }
+                 else {
+                     throw error;
+                 }
+             }
+         }
+         return [returnData];
+     }
+ }
+ exports.N8nAiAgent = N8nAiAgent;
+ async function makeN8nAiAgentRequest(context, endpoint, requestBody, hasMedia) {
+     const credentials = await context.getCredentials('n8nApi');
+     const baseUrl = credentials.baseUrl || 'http://localhost:5678';
+     const apiKey = credentials.apiKey;
+     if (!apiKey) {
+         throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'N8N API key is required');
+     }
+     const options = {
+         method: 'POST',
+         headers: {
+             'Content-Type': 'application/json',
+             'Authorization': `Bearer ${apiKey}`,
+             'User-Agent': 'N8nAiAgentNode/1.0',
+             ...(hasMedia && { 'AI-Vision-Request': 'true' }),
+         },
+         body: JSON.stringify(requestBody),
+         uri: `${baseUrl}/api/v1/ai-agent${endpoint}`,
+         json: true,
+     };
+     try {
+         const response = await context.helpers.request(options);
+         return response;
+     }
+     catch (error) {
+         const apiError = error;
+         if (apiError.statusCode === 401) {
+             throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'Invalid N8N API key');
+         }
+         else if (apiError.statusCode === 404) {
+             throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'N8N AI Agent endpoint not found. Make sure AI Agent is enabled in your N8N instance.');
+         }
+         else {
+             throw new n8n_workflow_1.NodeOperationError(context.getNode(), `N8N AI Agent API error: ${apiError.message || String(error)}`);
+         }
+     }
+ }
@@ -0,0 +1,35 @@
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" width="100" height="100">
+   <!-- Background circle -->
+   <circle cx="50" cy="50" r="45" fill="#ff6d5a" stroke="#e55a4a" stroke-width="2"/>
+
+   <!-- N8N Logo style -->
+   <g transform="translate(50,50)" fill="white">
+     <!-- Brain/Neural network pattern -->
+     <circle cx="-15" cy="-15" r="3" opacity="0.9"/>
+     <circle cx="0" cy="-20" r="3" opacity="0.9"/>
+     <circle cx="15" cy="-15" r="3" opacity="0.9"/>
+     <circle cx="-20" cy="0" r="3" opacity="0.9"/>
+     <circle cx="0" cy="0" r="4" opacity="1"/>
+     <circle cx="20" cy="0" r="3" opacity="0.9"/>
+     <circle cx="-15" cy="15" r="3" opacity="0.9"/>
+     <circle cx="0" cy="20" r="3" opacity="0.9"/>
+     <circle cx="15" cy="15" r="3" opacity="0.9"/>
+
+     <!-- Connections -->
+     <line x1="-15" y1="-15" x2="0" y2="-20" stroke="white" stroke-width="1" opacity="0.6"/>
+     <line x1="0" y1="-20" x2="15" y2="-15" stroke="white" stroke-width="1" opacity="0.6"/>
+     <line x1="-20" y1="0" x2="-15" y2="-15" stroke="white" stroke-width="1" opacity="0.6"/>
+     <line x1="-20" y1="0" x2="0" y2="0" stroke="white" stroke-width="2" opacity="0.8"/>
+     <line x1="0" y1="0" x2="20" y2="0" stroke="white" stroke-width="2" opacity="0.8"/>
+     <line x1="20" y1="0" x2="15" y2="15" stroke="white" stroke-width="1" opacity="0.6"/>
+     <line x1="0" y1="0" x2="0" y2="20" stroke="white" stroke-width="2" opacity="0.8"/>
+     <line x1="-15" y1="15" x2="0" y2="20" stroke="white" stroke-width="1" opacity="0.6"/>
+     <line x1="0" y1="20" x2="15" y2="15" stroke="white" stroke-width="1" opacity="0.6"/>
+
+     <!-- AI indicator -->
+     <text x="0" y="35" text-anchor="middle" font-family="Arial, sans-serif" font-size="12" font-weight="bold" fill="white">AI</text>
+   </g>
+
+   <!-- N8N text at bottom -->
+   <text x="50" y="85" text-anchor="middle" font-family="Arial, sans-serif" font-size="10" font-weight="bold" fill="white">N8N</text>
+ </svg>
@@ -0,0 +1,2 @@
+ import { INodeProperties } from 'n8n-workflow';
+ export declare const nodeProperties: INodeProperties[];
@@ -0,0 +1,432 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.nodeProperties = void 0;
+ exports.nodeProperties = [
+     {
+         displayName: 'Operation',
+         name: 'operation',
+         type: 'options',
+         noDataExpression: true,
+         options: [
+             {
+                 name: 'Chat',
+                 value: 'chat',
+                 description: 'Send a chat message to N8N AI Agent',
+                 action: 'Send a chat message',
+             },
+             {
+                 name: 'Use Tools',
+                 value: 'tools',
+                 description: 'Execute specific tools through AI Agent',
+                 action: 'Execute tools',
+             },
+             {
+                 name: 'Memory',
+                 value: 'memory',
+                 description: 'Manage AI Agent memory/context',
+                 action: 'Manage memory',
+             },
+         ],
+         default: 'chat',
+     },
+     {
+         displayName: 'AI Model',
+         name: 'model',
+         type: 'options',
+         options: [
+             {
+                 name: 'GPT-4 Turbo',
+                 value: 'gpt-4-turbo',
+                 description: 'OpenAI GPT-4 Turbo - Latest and most capable',
+             },
+             {
+                 name: 'GPT-4',
+                 value: 'gpt-4',
+                 description: 'OpenAI GPT-4 - High quality reasoning',
+             },
+             {
+                 name: 'GPT-3.5 Turbo',
+                 value: 'gpt-3.5-turbo',
+                 description: 'OpenAI GPT-3.5 Turbo - Fast and efficient',
+             },
+             {
+                 name: 'Claude 3 Opus',
+                 value: 'claude-3-opus',
+                 description: 'Anthropic Claude 3 Opus - Superior reasoning',
+             },
+             {
+                 name: 'Claude 3 Sonnet',
+                 value: 'claude-3-sonnet',
+                 description: 'Anthropic Claude 3 Sonnet - Balanced performance',
+             },
+             {
+                 name: 'Claude 3 Haiku',
+                 value: 'claude-3-haiku',
+                 description: 'Anthropic Claude 3 Haiku - Fast responses',
+             },
+             {
+                 name: 'Gemini Pro',
+                 value: 'gemini-pro',
+                 description: 'Google Gemini Pro - Multimodal capabilities',
+             },
+         ],
+         default: 'gpt-4-turbo',
+         description: 'Select the AI model to use',
+     },
+     {
+         displayName: 'Message',
+         name: 'message',
+         type: 'string',
+         typeOptions: {
+             rows: 3,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+             },
+         },
+         default: '',
+         placeholder: 'Enter your message here...',
+         description: 'The message to send to the AI Agent',
+         required: true,
+     },
+     {
+         displayName: 'System Message',
+         name: 'systemMessage',
+         type: 'string',
+         typeOptions: {
+             rows: 2,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+             },
+         },
+         default: '',
+         placeholder: 'You are a helpful AI assistant...',
+         description: 'System prompt to set the AI behavior and context',
+     },
+     {
+         displayName: 'Include Media',
+         name: 'includeMedia',
+         type: 'boolean',
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+             },
+         },
+         default: false,
+         description: 'Whether to include images in the chat message',
+     },
+     {
+         displayName: 'Media Source',
+         name: 'mediaSource',
+         type: 'options',
+         options: [
+             {
+                 name: 'Upload File',
+                 value: 'manual',
+                 description: 'Upload an image file directly',
+             },
+             {
+                 name: 'From URL',
+                 value: 'url',
+                 description: 'Use an image from URL',
+             },
+             {
+                 name: 'From Binary Data',
+                 value: 'binary',
+                 description: 'Use image from previous node binary data',
+             },
+         ],
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+                 includeMedia: [true],
+             },
+         },
+         default: 'manual',
+         description: 'Source of the media file',
+     },
+     {
+         displayName: 'Image File',
+         name: 'mediaFile',
+         type: 'string',
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+                 includeMedia: [true],
+                 mediaSource: ['manual'],
+             },
+         },
+         default: '',
+         placeholder: 'Paste base64 image data...',
+         description: 'Base64 encoded image data',
+     },
+     {
+         displayName: 'Image URL',
+         name: 'mediaUrl',
+         type: 'string',
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+                 includeMedia: [true],
+                 mediaSource: ['url'],
+             },
+         },
+         default: '',
+         placeholder: 'https://example.com/image.jpg',
+         description: 'URL of the image to analyze',
+     },
+     {
+         displayName: 'Binary Property',
+         name: 'mediaProperty',
+         type: 'string',
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+                 includeMedia: [true],
+                 mediaSource: ['binary'],
+             },
+         },
+         default: 'data',
+         placeholder: 'data',
+         description: 'Name of the binary property containing the image',
+     },
+     {
+         displayName: 'Include Conversation History',
+         name: 'includeHistory',
+         type: 'boolean',
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+             },
+         },
+         default: false,
+         description: 'Include previous messages for context',
+     },
+     {
+         displayName: 'Conversation History',
+         name: 'conversationHistory',
+         type: 'json',
+         typeOptions: {
+             rows: 4,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+                 includeHistory: [true],
+             },
+         },
+         default: '[]',
+         placeholder: '[{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}]',
+         description: 'Previous messages in OpenAI chat format',
+     },
+     {
+         displayName: 'Enable Tools',
+         name: 'enableTools',
+         type: 'boolean',
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+             },
+         },
+         default: false,
+         description: 'Allow AI to use tools and function calling',
+     },
+     {
+         displayName: 'Tools Configuration',
+         name: 'toolsConfig',
+         type: 'json',
+         typeOptions: {
+             rows: 6,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+                 enableTools: [true],
+             },
+         },
+         default: '{"tools": []}',
+         placeholder: '{"tools": [{"type": "function", "function": {"name": "get_weather", "description": "Get weather info"}}]}',
+         description: 'Tools available to the AI Agent',
+     },
+     {
+         displayName: 'Tool Name',
+         name: 'toolName',
+         type: 'string',
+         displayOptions: {
+             show: {
+                 operation: ['tools'],
+             },
+         },
+         default: '',
+         placeholder: 'get_weather',
+         description: 'Name of the tool to execute',
+         required: true,
+     },
+     {
+         displayName: 'Tool Arguments',
+         name: 'toolArguments',
+         type: 'json',
+         typeOptions: {
+             rows: 3,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['tools'],
+             },
+         },
+         default: '{}',
+         placeholder: '{"location": "São Paulo", "units": "celsius"}',
+         description: 'Arguments to pass to the tool',
+     },
+     {
+         displayName: 'Context',
+         name: 'context',
+         type: 'string',
+         typeOptions: {
+             rows: 2,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['tools'],
+             },
+         },
+         default: '',
+         placeholder: 'User is asking about weather in their city...',
+         description: 'Context for tool execution',
+     },
+     {
+         displayName: 'Memory Action',
+         name: 'memoryAction',
+         type: 'options',
+         options: [
+             {
+                 name: 'Store',
+                 value: 'store',
+                 description: 'Store data in AI Agent memory',
+             },
+             {
+                 name: 'Retrieve',
+                 value: 'retrieve',
+                 description: 'Retrieve data from AI Agent memory',
+             },
+             {
+                 name: 'Clear',
+                 value: 'clear',
+                 description: 'Clear AI Agent memory',
+             },
+         ],
+         displayOptions: {
+             show: {
+                 operation: ['memory'],
+             },
+         },
+         default: 'retrieve',
+         description: 'Action to perform on AI Agent memory',
+     },
+     {
+         displayName: 'Session ID',
+         name: 'sessionId',
+         type: 'string',
+         displayOptions: {
+             show: {
+                 operation: ['memory'],
+             },
+         },
+         default: '',
+         placeholder: 'user-123',
+         description: 'Unique session identifier for memory isolation',
+     },
+     {
+         displayName: 'Memory Data',
+         name: 'memoryData',
+         type: 'json',
+         typeOptions: {
+             rows: 3,
+         },
+         displayOptions: {
+             show: {
+                 operation: ['memory'],
+                 memoryAction: ['store'],
+             },
+         },
+         default: '{}',
+         placeholder: '{"user_preferences": {"language": "pt-BR", "timezone": "America/Sao_Paulo"}}',
+         description: 'Data to store in memory',
+     },
+     {
+         displayName: 'Advanced Options',
+         name: 'advancedOptions',
+         type: 'collection',
+         placeholder: 'Add Option',
+         default: {},
+         displayOptions: {
+             show: {
+                 operation: ['chat'],
+             },
+         },
+         options: [
+             {
+                 displayName: 'Max Tokens',
+                 name: 'maxTokens',
+                 type: 'number',
+                 default: 1000,
+                 description: 'Maximum number of tokens to generate (0 for model default)',
+                 typeOptions: {
+                     minValue: 0,
+                     maxValue: 4000,
+                 },
+             },
+             {
+                 displayName: 'Temperature',
+                 name: 'temperature',
+                 type: 'number',
+                 default: 0.7,
+                 description: 'Controls randomness (0.0 = deterministic, 1.0 = very random)',
+                 typeOptions: {
+                     minValue: 0,
+                     maxValue: 1,
+                     numberPrecision: 2,
+                 },
+             },
+             {
+                 displayName: 'Top P',
+                 name: 'topP',
+                 type: 'number',
+                 default: 1,
+                 description: 'Controls diversity via nucleus sampling',
+                 typeOptions: {
+                     minValue: 0.01,
+                     maxValue: 1,
+                     numberPrecision: 2,
+                 },
+             },
+             {
+                 displayName: 'Frequency Penalty',
+                 name: 'frequencyPenalty',
+                 type: 'number',
+                 default: 0,
+                 description: 'Reduces repetition of words',
+                 typeOptions: {
+                     minValue: -2,
+                     maxValue: 2,
+                     numberPrecision: 2,
+                 },
+             },
+             {
+                 displayName: 'Presence Penalty',
+                 name: 'presencePenalty',
+                 type: 'number',
+                 default: 0,
+                 description: 'Encourages new topics',
+                 typeOptions: {
+                     minValue: -2,
+                     maxValue: 2,
+                     numberPrecision: 2,
+                 },
+             },
+         ],
+     },
+ ];
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "n8n-nodes-github-copilot",
-   "version": "3.2.7",
-   "description": "n8n community node for GitHub Copilot with CLI integration and official Chat API access to GPT-5, Claude, Gemini and more using your existing Copilot credits",
+   "version": "3.3.0",
+   "description": "n8n community node for GitHub Copilot and N8N AI Agent with CLI integration and official Chat API access to GPT-5, Claude, Gemini and more using your existing Copilot credits",
    "license": "MIT",
    "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
    "author": {
@@ -32,13 +32,15 @@
      ],
      "nodes": [
        "dist/nodes/GitHubCopilot/GitHubCopilot.node.js",
-       "dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js"
+       "dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js",
+       "dist/nodes/N8nAiAgent/N8nAiAgent.node.js"
      ]
    },
    "keywords": [
      "n8n-community-node-package",
      "github",
      "copilot",
+     "n8n-ai-agent",
      "ai",
      "gpt-5",
      "claude",