n8n-nodes-github-copilot 3.2.7 → 3.4.0

This diff shows the publicly released contents of the two package versions as they appear in the supported public registries; it is provided for informational purposes only.
@@ -0,0 +1,7 @@
+ import { ICredentialType, INodeProperties } from 'n8n-workflow';
+ export declare class N8nApi implements ICredentialType {
+     name: string;
+     displayName: string;
+     documentationUrl: string;
+     properties: INodeProperties[];
+ }
@@ -0,0 +1,31 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.N8nApi = void 0;
+ class N8nApi {
+     constructor() {
+         this.name = 'n8nApi';
+         this.displayName = 'N8N API';
+         this.documentationUrl = 'https://docs.n8n.io/api/';
+         this.properties = [
+             {
+                 displayName: 'API Key',
+                 name: 'apiKey',
+                 type: 'string',
+                 typeOptions: { password: true },
+                 default: '',
+                 required: true,
+                 description: 'The API key for N8N API access',
+             },
+             {
+                 displayName: 'Base URL',
+                 name: 'baseUrl',
+                 type: 'string',
+                 default: 'https://app.n8n.cloud/api/v1',
+                 required: true,
+                 description: 'The base URL for your N8N instance API',
+                 placeholder: 'https://your-instance.app.n8n.cloud/api/v1',
+             },
+         ];
+     }
+ }
+ exports.N8nApi = N8nApi;
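
For orientation, the new n8nApi credential exposes two fields, apiKey and baseUrl. Below is a minimal sketch of how a node context could read it at runtime, assuming Node 18+ for the global fetch; the listWorkflows helper and the /workflows endpoint follow the public n8n API documented at https://docs.n8n.io/api/ and are not part of this package (the N8nAiAgent node added later in this diff instead sends the key as a Bearer token to its own endpoint).

    import type { IExecuteFunctions } from 'n8n-workflow';

    // Illustrative helper (not shipped with the package): read the n8nApi credential
    // and call the n8n public REST API with the X-N8N-API-KEY header it expects.
    async function listWorkflows(this: IExecuteFunctions): Promise<unknown> {
        const credentials = await this.getCredentials('n8nApi');
        const baseUrl = (credentials.baseUrl as string) || 'https://app.n8n.cloud/api/v1';
        const response = await fetch(`${baseUrl}/workflows`, {
            headers: { 'X-N8N-API-KEY': credentials.apiKey as string },
        });
        if (!response.ok) {
            throw new Error(`n8n API request failed: ${response.status} ${response.statusText}`);
        }
        return response.json();
    }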
@@ -1,4 +1,3 @@
  export * from './types';
  export * from './helpers';
- export * from './audioProcessor';
  export * from './imageProcessor';
@@ -16,5 +16,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  Object.defineProperty(exports, "__esModule", { value: true });
  __exportStar(require("./types"), exports);
  __exportStar(require("./helpers"), exports);
- __exportStar(require("./audioProcessor"), exports);
  __exportStar(require("./imageProcessor"), exports);
@@ -0,0 +1,5 @@
+ import { INodeType, INodeTypeDescription } from 'n8n-workflow';
+ export declare class GitHubCopilotChatModel implements INodeType {
+     description: INodeTypeDescription;
+     supplyData(this: any, itemIndex: number): Promise<any>;
+ }
@@ -0,0 +1,211 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GitHubCopilotChatModel = void 0;
+ class GitHubCopilotChatModel {
+     constructor() {
+         this.description = {
+             displayName: 'GitHub Copilot Chat Model',
+             name: 'gitHubCopilotChatModel',
+             icon: 'file:copilot.svg',
+             group: ['transform'],
+             version: 1,
+             description: 'GitHub Copilot chat model for AI workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription',
+             defaults: {
+                 name: 'GitHub Copilot Chat Model',
+             },
+             codex: {
+                 categories: ['AI'],
+                 subcategories: {
+                     AI: ['Language Models', 'Root Nodes'],
+                     'Language Models': ['Chat Models (Recommended)'],
+                 },
+                 resources: {
+                     primaryDocumentation: [
+                         {
+                             url: 'https://docs.github.com/copilot/using-github-copilot/using-github-copilot-chat',
+                         },
+                     ],
+                 },
+             },
+             inputs: [],
+             outputs: ["ai_languageModel"],
+             outputNames: ['Model'],
+             credentials: [
+                 {
+                     name: 'gitHubApiManual',
+                     required: true,
+                 },
+             ],
+             properties: [
+                 {
+                     displayName: 'Model',
+                     name: 'model',
+                     type: 'options',
+                     default: 'gpt-4o',
+                     description: 'The GitHub Copilot model to use',
+                     options: [
+                         {
+                             name: 'GPT-4o (Recommended)',
+                             value: 'gpt-4o',
+                             description: 'Latest GPT-4 model with vision and reasoning capabilities',
+                         },
+                         {
+                             name: 'GPT-5 (Preview)',
+                             value: 'gpt-5',
+                             description: 'Next generation GPT model (requires access)',
+                         },
+                         {
+                             name: 'Claude 3.5 Sonnet',
+                             value: 'claude-3-5-sonnet-20241022',
+                             description: 'Anthropic Claude with excellent reasoning',
+                         },
+                         {
+                             name: 'Claude 3.5 Haiku',
+                             value: 'claude-3-5-haiku-20241022',
+                             description: 'Faster Claude model for simple tasks',
+                         },
+                         {
+                             name: 'Gemini 2.0 Flash',
+                             value: 'gemini-2.0-flash-exp',
+                             description: 'Google\'s latest Gemini model',
+                         },
+                         {
+                             name: 'Gemini 1.5 Pro',
+                             value: 'gemini-1.5-pro-002',
+                             description: 'Google Gemini Pro with large context',
+                         },
+                         {
+                             name: 'o1 Preview',
+                             value: 'o1-preview',
+                             description: 'OpenAI o1 with advanced reasoning',
+                         },
+                         {
+                             name: 'o1 Mini',
+                             value: 'o1-mini',
+                             description: 'Faster o1 model for coding tasks',
+                         },
+                     ],
+                 },
+                 {
+                     displayName: 'Options',
+                     name: 'options',
+                     placeholder: 'Add Option',
+                     description: 'Additional options for the GitHub Copilot model',
+                     type: 'collection',
+                     default: {},
+                     options: [
+                         {
+                             displayName: 'Temperature',
+                             name: 'temperature',
+                             default: 0.7,
+                             typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
+                             description: 'Controls randomness in output. Lower values make responses more focused.',
+                             type: 'number',
+                         },
+                         {
+                             displayName: 'Maximum Number of Tokens',
+                             name: 'maxTokens',
+                             default: 1000,
+                             description: 'The maximum number of tokens to generate',
+                             type: 'number',
+                             typeOptions: {
+                                 maxValue: 32768,
+                             },
+                         },
+                         {
+                             displayName: 'Top P',
+                             name: 'topP',
+                             default: 1,
+                             typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 2 },
+                             description: 'Controls diversity of output by nucleus sampling',
+                             type: 'number',
+                         },
+                         {
+                             displayName: 'Enable Vision',
+                             name: 'enableVision',
+                             type: 'boolean',
+                             default: true,
+                             description: 'Whether to enable image processing capabilities',
+                         },
+                         {
+                             displayName: 'System Message',
+                             name: 'systemMessage',
+                             type: 'string',
+                             default: '',
+                             description: 'System message to set the behavior of the assistant',
+                             typeOptions: {
+                                 rows: 3,
+                             },
+                         },
+                     ],
+                 },
+             ],
+         };
+     }
+     async supplyData(itemIndex) {
+         const model = this.getNodeParameter('model', itemIndex);
+         const options = this.getNodeParameter('options', itemIndex, {});
+         const copilotModel = {
+             _call: async (messages) => {
+                 var _a;
+                 const requestBody = {
+                     intent: true,
+                     model: model,
+                     messages: messages,
+                     n: 1,
+                     stream: false,
+                     temperature: options.temperature || 0.7,
+                     max_tokens: options.maxTokens || 1000,
+                     top_p: options.topP || 1,
+                 };
+                 if (options.systemMessage) {
+                     const systemMessage = {
+                         role: 'system',
+                         content: options.systemMessage,
+                     };
+                     requestBody.messages = [systemMessage, ...messages];
+                 }
+                 try {
+                     const credentials = await this.getCredentials('gitHubApiManual');
+                     const response = await fetch('https://api.githubcopilot.com/chat/completions', {
+                         method: 'POST',
+                         headers: {
+                             'Authorization': `Bearer ${credentials.accessToken}`,
+                             'Content-Type': 'application/json',
+                             'User-Agent': 'n8n-github-copilot-chat-model',
+                             ...(options.enableVision && {
+                                 'Copilot-Vision-Request': 'true',
+                                 'Copilot-Media-Request': 'true'
+                             }),
+                         },
+                         body: JSON.stringify(requestBody),
+                     });
+                     if (!response.ok) {
+                         throw new Error(`GitHub Copilot API error: ${response.status} ${response.statusText}`);
+                     }
+                     const result = await response.json();
+                     if (result.choices && result.choices.length > 0) {
+                         const choice = result.choices[0];
+                         return ((_a = choice.message) === null || _a === void 0 ? void 0 : _a.content) || 'No response from GitHub Copilot';
+                     }
+                     return 'No response from GitHub Copilot';
+                 }
+                 catch (error) {
+                     throw new Error(`GitHub Copilot API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
+                 }
+             },
+             _modelType: 'chat_model',
+             _llmType: 'github-copilot',
+             modelName: model,
+             temperature: options.temperature || 0.7,
+             maxTokens: options.maxTokens || 1000,
+             topP: options.topP || 1,
+             enableVision: options.enableVision || true,
+             systemMessage: options.systemMessage || '',
+         };
+         return {
+             response: copilotModel,
+         };
+     }
+ }
+ exports.GitHubCopilotChatModel = GitHubCopilotChatModel;
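
Outside n8n, the request that _call assembles above can be reproduced directly. Below is a minimal standalone sketch (TypeScript, Node 18+ global fetch); COPILOT_TOKEN stands in for the accessToken field of the gitHubApiManual credential, and the model and sampling values are simply the node defaults shown above.

    // Minimal sketch of the HTTP call GitHubCopilotChatModel._call makes.
    // COPILOT_TOKEN is an assumption: supply a valid GitHub Copilot access token.
    const COPILOT_TOKEN = process.env.COPILOT_TOKEN ?? '';

    async function copilotChat(prompt: string): Promise<string> {
        const response = await fetch('https://api.githubcopilot.com/chat/completions', {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${COPILOT_TOKEN}`,
                'Content-Type': 'application/json',
                'User-Agent': 'n8n-github-copilot-chat-model',
            },
            body: JSON.stringify({
                intent: true,
                model: 'gpt-4o',
                messages: [{ role: 'user', content: prompt }],
                n: 1,
                stream: false,
                temperature: 0.7,
                max_tokens: 1000,
                top_p: 1,
            }),
        });
        if (!response.ok) {
            throw new Error(`GitHub Copilot API error: ${response.status} ${response.statusText}`);
        }
        const result = (await response.json()) as {
            choices?: Array<{ message?: { content?: string } }>;
        };
        return result.choices?.[0]?.message?.content ?? 'No response from GitHub Copilot';
    }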
@@ -0,0 +1,34 @@
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24">
+   <defs>
+     <linearGradient id="copilotGradient" x1="0%" y1="0%" x2="100%" y2="100%">
+       <stop offset="0%" style="stop-color:#1f6feb;stop-opacity:1" />
+       <stop offset="100%" style="stop-color:#0969da;stop-opacity:1" />
+     </linearGradient>
+   </defs>
+ 
+   <!-- GitHub Copilot inspired icon -->
+   <circle cx="12" cy="12" r="11" fill="url(#copilotGradient)" stroke="#ffffff" stroke-width="1"/>
+ 
+   <!-- Copilot face -->
+   <ellipse cx="12" cy="10" rx="8" ry="6" fill="#ffffff" opacity="0.9"/>
+ 
+   <!-- Eyes -->
+   <circle cx="9" cy="9" r="1.5" fill="#1f6feb"/>
+   <circle cx="15" cy="9" r="1.5" fill="#1f6feb"/>
+ 
+   <!-- Light reflection in eyes -->
+   <circle cx="9.5" cy="8.5" r="0.5" fill="#ffffff"/>
+   <circle cx="15.5" cy="8.5" r="0.5" fill="#ffffff"/>
+ 
+   <!-- Mouth/Interface line -->
+   <path d="M8 12 L16 12" stroke="#1f6feb" stroke-width="1.5" stroke-linecap="round"/>
+ 
+   <!-- Code brackets -->
+   <path d="M6 15 L8 17 L6 19" stroke="#ffffff" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" fill="none"/>
+   <path d="M18 15 L16 17 L18 19" stroke="#ffffff" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" fill="none"/>
+ 
+   <!-- AI indicator dots -->
+   <circle cx="10" cy="17" r="0.5" fill="#ffffff" opacity="0.8"/>
+   <circle cx="12" cy="17" r="0.5" fill="#ffffff" opacity="0.6"/>
+   <circle cx="14" cy="17" r="0.5" fill="#ffffff" opacity="0.4"/>
+ </svg>
@@ -0,0 +1,5 @@
+ import { INodeType, INodeTypeDescription } from 'n8n-workflow';
+ export declare class N8nAiAgent implements INodeType {
+     description: INodeTypeDescription;
+     supplyData(this: any, itemIndex: number): Promise<any>;
+ }
@@ -0,0 +1,152 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.N8nAiAgent = void 0;
+ class N8nAiAgent {
+     constructor() {
+         this.description = {
+             displayName: 'N8N AI Agent Model',
+             name: 'n8nAiAgent',
+             icon: 'file:n8n-ai.svg',
+             group: ['transform'],
+             version: 1,
+             description: 'N8N AI Agent chat model for advanced AI capabilities with tool calling and memory',
+             defaults: {
+                 name: 'N8N AI Agent Model',
+             },
+             codex: {
+                 categories: ['AI'],
+                 subcategories: {
+                     AI: ['Language Models', 'Root Nodes'],
+                     'Language Models': ['Chat Models (Recommended)'],
+                 },
+                 resources: {
+                     primaryDocumentation: [
+                         {
+                             url: 'https://docs.n8n.io/',
+                         },
+                     ],
+                 },
+             },
+             inputs: [],
+             outputs: ["ai_languageModel"],
+             outputNames: ['Model'],
+             credentials: [
+                 {
+                     name: 'n8nApi',
+                     required: true,
+                 },
+             ],
+             properties: [
+                 {
+                     displayName: 'Model',
+                     name: 'model',
+                     type: 'options',
+                     default: 'n8n-ai-agent',
+                     description: 'The N8N AI Agent model to use',
+                     options: [
+                         {
+                             name: 'N8N AI Agent',
+                             value: 'n8n-ai-agent',
+                             description: 'N8N AI Agent with tool calling and memory capabilities',
+                         },
+                         {
+                             name: 'N8N AI Agent Pro',
+                             value: 'n8n-ai-agent-pro',
+                             description: 'Enhanced version with advanced features',
+                         },
+                     ],
+                 },
+                 {
+                     displayName: 'Options',
+                     name: 'options',
+                     placeholder: 'Add Option',
+                     description: 'Additional options for the AI model',
+                     type: 'collection',
+                     default: {},
+                     options: [
+                         {
+                             displayName: 'Temperature',
+                             name: 'temperature',
+                             default: 0.7,
+                             typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
+                             description: 'Controls randomness in output. Lower values make responses more focused.',
+                             type: 'number',
+                         },
+                         {
+                             displayName: 'Maximum Number of Tokens',
+                             name: 'maxTokens',
+                             default: 1000,
+                             description: 'The maximum number of tokens to generate',
+                             type: 'number',
+                             typeOptions: {
+                                 maxValue: 32768,
+                             },
+                         },
+                         {
+                             displayName: 'Enable Tools',
+                             name: 'enableTools',
+                             type: 'boolean',
+                             default: false,
+                             description: 'Whether to enable tool calling capabilities',
+                         },
+                         {
+                             displayName: 'Enable Memory',
+                             name: 'enableMemory',
+                             type: 'boolean',
+                             default: false,
+                             description: 'Whether to enable conversation memory',
+                         },
+                     ],
+                 },
+             ],
+         };
+     }
+     async supplyData(itemIndex) {
+         const credentials = await this.getCredentials('n8nApi');
+         const model = this.getNodeParameter('model', itemIndex);
+         const options = this.getNodeParameter('options', itemIndex, {});
+         const mockModel = {
+             _call: async (messages, options) => {
+                 const apiKey = credentials.apiKey;
+                 const baseUrl = credentials.baseUrl || 'http://localhost:5678';
+                 const requestBody = {
+                     model: model,
+                     messages: messages,
+                     temperature: (options === null || options === void 0 ? void 0 : options.temperature) || 0.7,
+                     max_tokens: (options === null || options === void 0 ? void 0 : options.maxTokens) || 1000,
+                     tools: (options === null || options === void 0 ? void 0 : options.enableTools) || false,
+                     memory: (options === null || options === void 0 ? void 0 : options.enableMemory) || false,
+                 };
+                 try {
+                     const response = await fetch(`${baseUrl}/api/v1/ai-agent/chat`, {
+                         method: 'POST',
+                         headers: {
+                             'Content-Type': 'application/json',
+                             'Authorization': `Bearer ${apiKey}`,
+                         },
+                         body: JSON.stringify(requestBody),
+                     });
+                     if (!response.ok) {
+                         throw new Error(`API request failed: ${response.statusText}`);
+                     }
+                     const result = await response.json();
+                     return result.response || result.message || 'No response from AI agent';
+                 }
+                 catch (error) {
+                     return `N8N AI Agent response using model: ${model} (${error instanceof Error ? error.message : 'Error'})`;
+                 }
+             },
+             _modelType: 'chat_model',
+             _llmType: 'n8n-ai-agent',
+             modelName: model,
+             temperature: options.temperature || 0.7,
+             maxTokens: options.maxTokens || 1000,
+             enableTools: options.enableTools || false,
+             enableMemory: options.enableMemory || false,
+         };
+         return {
+             response: mockModel,
+         };
+     }
+ }
+ exports.N8nAiAgent = N8nAiAgent;
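
Like the GitHub Copilot node above, this node hands its model object back through supplyData as { response: model }, and a downstream consumer is expected to call that object's _call method with an array of chat messages. A minimal sketch of that contract follows; the CopilotLikeModel interface name and the consumer function are illustrative, not part of the package.

    // Illustrative shape of the object both new nodes return from supplyData().
    interface CopilotLikeModel {
        _call(
            messages: Array<{ role: string; content: string }>,
            options?: Record<string, unknown>,
        ): Promise<string>;
        _modelType: string;
        _llmType: string;
        modelName: string;
    }

    // How a downstream consumer would use the supplied data.
    async function runSuppliedModel(supplied: { response: CopilotLikeModel }): Promise<void> {
        const reply = await supplied.response._call([
            { role: 'user', content: 'Summarize the last workflow run.' },
        ]);
        console.log(`${supplied.response.modelName}: ${reply}`);
    }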
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "n8n-nodes-github-copilot",
-   "version": "3.2.7",
-   "description": "n8n community node for GitHub Copilot with CLI integration and official Chat API access to GPT-5, Claude, Gemini and more using your existing Copilot credits",
+   "version": "3.4.0",
+   "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription",
    "license": "MIT",
    "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
    "author": {
@@ -32,13 +32,15 @@
      ],
      "nodes": [
        "dist/nodes/GitHubCopilot/GitHubCopilot.node.js",
-       "dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js"
+       "dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js",
+       "dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js"
      ]
    },
    "keywords": [
      "n8n-community-node-package",
      "github",
      "copilot",
+     "n8n-ai-agent",
      "ai",
      "gpt-5",
      "claude",