n8n-nodes-github-copilot 3.2.2 → 3.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,55 +3,8 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.GitHubCopilotChatAPI = void 0;
  const utils_1 = require("./utils");
  const nodeProperties_1 = require("./nodeProperties");
- async function processAudioFileLegacy(context, itemIndex) {
- const audioSource = context.getNodeParameter('audioSource', itemIndex);
- try {
- const audioFile = context.getNodeParameter('audioFile', itemIndex, '');
- const audioUrl = context.getNodeParameter('audioUrl', itemIndex, '');
- const audioProperty = context.getNodeParameter('audioBinaryProperty', itemIndex, '');
- const result = await (0, utils_1.processAudioFile)(context, itemIndex, audioSource, audioFile, audioUrl, audioProperty);
- const tokenValidation = (0, utils_1.validateTokenLimit)(result.estimatedTokens, 100000);
- if (!tokenValidation.valid) {
- return {
- dataUrl: '',
- description: (0, utils_1.createAudioSummary)(result.filename, result.size)
- };
- }
- const optimized = (0, utils_1.optimizeAudioForTokens)(result.data, 80000);
- let description = `Audio file: ${result.filename} (${Math.round(result.size / 1024)}KB)`;
- if (optimized.truncated) {
- description += ` - Compressed from ${optimized.originalTokens} to ${optimized.finalTokens} tokens`;
- }
- return {
- dataUrl: `data:${result.mimeType};base64,${optimized.data}`,
- description
- };
- }
- catch (error) {
- return {
- dataUrl: '',
- description: `Error processing audio: ${error instanceof Error ? error.message : 'Unknown error'}`
- };
- }
- }
- async function processImageFileLegacy(context, itemIndex) {
- const imageSource = context.getNodeParameter('imageSource', itemIndex);
- try {
- const imageFile = context.getNodeParameter('imageFile', itemIndex, '');
- const imageUrl = context.getNodeParameter('imageUrl', itemIndex, '');
- const imageProperty = context.getNodeParameter('imageBinaryProperty', itemIndex, '');
- const result = await (0, utils_1.processImageFile)(context, itemIndex, imageSource, imageFile, imageUrl, imageProperty);
- const tokenValidation = (0, utils_1.validateTokenLimit)(result.estimatedTokens, 50000);
- if (!tokenValidation.valid) {
- throw new Error(`Image too large: ${result.estimatedTokens} estimated tokens. Please use a smaller image.`);
- }
- const optimizedData = (0, utils_1.compressImageToTokenLimit)(result.data, 40000);
- return `data:${result.mimeType};base64,${optimizedData}`;
- }
- catch (error) {
- throw new Error(`Error processing image: ${error instanceof Error ? error.message : 'Unknown error'}`);
- }
- }
+ const modelCapabilities_1 = require("./utils/modelCapabilities");
+ const mediaDetection_1 = require("./utils/mediaDetection");
  class GitHubCopilotChatAPI {
  constructor() {
  this.description = {
@@ -88,8 +41,18 @@ class GitHubCopilotChatAPI {
  const userMessage = this.getNodeParameter('message', i);
  const systemMessage = this.getNodeParameter('systemMessage', i, '');
  const advancedOptions = this.getNodeParameter('advancedOptions', i, {});
- const includeImage = this.getNodeParameter('includeImage', i, false);
- const includeAudio = this.getNodeParameter('includeAudio', i, false);
+ const includeMedia = this.getNodeParameter('includeMedia', i, false);
+ if (includeMedia) {
+ const validation = (0, modelCapabilities_1.validateModelCapabilities)(model, true, true);
+ if (!validation.isValid) {
+ throw new Error(validation.errorMessage || 'Model validation failed');
+ }
+ if (validation.warnings) {
+ for (const warning of validation.warnings) {
+ console.warn(`GitHub Copilot API Warning: ${warning}`);
+ }
+ }
+ }
  const messages = [];
  if (systemMessage) {
  messages.push({
@@ -98,7 +61,11 @@ class GitHubCopilotChatAPI {
  });
  }
  let userContent = userMessage;
- if (includeImage || includeAudio) {
+ if (includeMedia) {
+ const mediaSource = this.getNodeParameter('mediaSource', i);
+ const mediaFile = this.getNodeParameter('mediaFile', i, '');
+ const mediaUrl = this.getNodeParameter('mediaUrl', i, '');
+ const mediaBinaryProperty = this.getNodeParameter('mediaBinaryProperty', i, '');
  const contentArray = [];
  if (userMessage.trim()) {
  contentArray.push({
@@ -106,37 +73,35 @@ class GitHubCopilotChatAPI {
  text: userMessage,
  });
  }
- if (includeImage) {
- const imageDataUrl = await processImageFileLegacy(this, i);
- contentArray.push({
- type: 'image_url',
- image_url: {
- url: imageDataUrl,
- },
- });
- }
- if (includeAudio) {
- const audioResult = await processAudioFileLegacy(this, i);
- if (audioResult.dataUrl) {
+ try {
+ const mediaResult = await (0, mediaDetection_1.processMediaFile)(this, i, mediaSource, mediaFile, mediaUrl, mediaBinaryProperty);
+ if (mediaResult.type === 'image' && mediaResult.dataUrl) {
  contentArray.push({
- type: 'input_audio',
- input_audio: {
- data: audioResult.dataUrl.split(',')[1],
- format: audioResult.dataUrl.includes('mp3') ? 'mp3' : 'wav'
+ type: 'image_url',
+ image_url: {
+ url: mediaResult.dataUrl,
  },
  });
+ }
+ else if (mediaResult.type === 'audio' && mediaResult.dataUrl) {
  contentArray.push({
  type: 'text',
- text: `Please transcribe this audio: ${audioResult.description}`,
+ text: `Please analyze this audio file: ${mediaResult.description}\n\nAudio data (base64): ${mediaResult.dataUrl}`,
  });
  }
  else {
  contentArray.push({
  type: 'text',
- text: `[Audio processing failed: ${audioResult.description}]`,
+ text: `[Media processing failed: ${mediaResult.description}]`,
  });
  }
  }
+ catch (error) {
+ contentArray.push({
+ type: 'text',
+ text: `[Media processing error: ${error instanceof Error ? error.message : 'Unknown error'}]`,
+ });
+ }
  userContent = contentArray;
  }
  messages.push({
@@ -149,7 +114,7 @@ class GitHubCopilotChatAPI {
  stream: false,
  ...advancedOptions,
  };
- const hasMedia = includeImage || includeAudio;
+ const hasMedia = includeMedia;
  const response = await (0, utils_1.makeApiRequest)(this, '/chat/completions', requestBody, hasMedia);
  const result = {
  message: ((_b = (_a = response.choices[0]) === null || _a === void 0 ? void 0 : _a.message) === null || _b === void 0 ? void 0 : _b.content) || '',
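
Note: the two legacy helpers (processAudioFileLegacy, processImageFileLegacy) and the separate includeImage/includeAudio switches are replaced by a single includeMedia path: the model is validated first, then processMediaFile decides whether the payload is an image or audio. A minimal TypeScript sketch of that flow, written against the helpers introduced in this release (the buildUserContent wrapper itself is illustrative, not part of the package):

import type { IExecuteFunctions } from 'n8n-workflow';
import { validateModelCapabilities } from './utils/modelCapabilities';
import { processMediaFile } from './utils/mediaDetection';

// Illustrative wrapper: reject unsupported models early, then attach the media part.
async function buildUserContent(ctx: IExecuteFunctions, i: number, model: string, userMessage: string) {
  const includeMedia = ctx.getNodeParameter('includeMedia', i, false) as boolean;
  if (!includeMedia) return userMessage;

  const validation = validateModelCapabilities(model, true, true);
  if (!validation.isValid) throw new Error(validation.errorMessage || 'Model validation failed');
  validation.warnings?.forEach((w) => console.warn(`GitHub Copilot API Warning: ${w}`));

  const content: Array<Record<string, unknown>> = [];
  if (userMessage.trim()) content.push({ type: 'text', text: userMessage });

  const media = await processMediaFile(
    ctx,
    i,
    ctx.getNodeParameter('mediaSource', i) as 'manual' | 'url' | 'binary',
    ctx.getNodeParameter('mediaFile', i, '') as string,
    ctx.getNodeParameter('mediaUrl', i, '') as string,
    ctx.getNodeParameter('mediaBinaryProperty', i, '') as string,
  );
  if (media.type === 'image' && media.dataUrl) {
    content.push({ type: 'image_url', image_url: { url: media.dataUrl } });
  } else {
    // Audio and failures are passed through as text, matching the compiled node code above.
    content.push({ type: 'text', text: media.description });
  }
  return content;
}
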
@@ -24,32 +24,32 @@ exports.nodeProperties = [
  {
  name: 'GPT-5',
  value: 'gpt-5',
- description: 'OpenAI GPT-5 (Latest and most capable)',
+ description: 'OpenAI GPT-5 (Latest and most capable) ✓ Images ✓ Audio',
  },
  {
  name: 'GPT-5 Mini',
  value: 'gpt-5-mini',
- description: 'OpenAI GPT-5 Mini (Faster, cost-effective)',
+ description: 'OpenAI GPT-5 Mini (Faster, cost-effective) ✓ Images ✓ Audio',
  },
  {
  name: 'Claude Opus 4.1',
  value: 'claude-opus-4.1',
- description: 'Anthropic Claude Opus 4.1 (Advanced reasoning)',
+ description: 'Anthropic Claude Opus 4.1 (Advanced reasoning) ✗ Text only',
  },
  {
  name: 'Gemini 2.5 Pro',
  value: 'gemini-2.5-pro',
- description: 'Google Gemini 2.5 Pro (Multimodal capabilities)',
+ description: 'Google Gemini 2.5 Pro (Multimodal capabilities) ✓ Images ✓ Audio',
  },
  {
  name: 'Grok Code Fast 1',
  value: 'grok-code-fast-1',
- description: 'xAI Grok Code Fast 1 (Optimized for coding)',
+ description: 'xAI Grok Code Fast 1 (Optimized for coding) ✗ Text only',
  },
  {
  name: 'GPT-4.1 Copilot',
  value: 'gpt-4.1-copilot',
- description: 'OpenAI GPT-4.1 specialized for coding assistance',
+ description: 'OpenAI GPT-4.1 specialized for coding assistance ✓ Images ✓ Audio',
  },
  ],
  default: 'gpt-5-mini',
@@ -78,31 +78,31 @@ exports.nodeProperties = [
  description: 'System message to set the behavior of the AI model',
  },
  {
- displayName: 'Include Image',
- name: 'includeImage',
+ displayName: 'Include Media',
+ name: 'includeMedia',
  type: 'boolean',
  default: false,
- description: 'Whether to include an image in the message',
+ description: 'Whether to include a media file (image or audio) in the message. Type is auto-detected.',
  },
  {
- displayName: 'Image Source',
- name: 'imageSource',
+ displayName: 'Media Source',
+ name: 'mediaSource',
  type: 'options',
  displayOptions: {
  show: {
- includeImage: [true],
+ includeMedia: [true],
  },
  },
  options: [
  {
  name: 'Manual Input',
  value: 'manual',
- description: 'Provide image as base64 string or file path',
+ description: 'Provide media as base64 string or file path',
  },
  {
  name: 'URL',
  value: 'url',
- description: 'Download image from URL',
+ description: 'Download media from URL',
  },
  {
  name: 'Binary Data',
@@ -111,127 +111,49 @@ exports.nodeProperties = [
  },
  ],
  default: 'manual',
- description: 'Source of the image data',
+ description: 'Source of the media data',
  },
  {
- displayName: 'Image File',
- name: 'imageFile',
+ displayName: 'Media File',
+ name: 'mediaFile',
  type: 'string',
  displayOptions: {
  show: {
- includeImage: [true],
- imageSource: ['manual'],
+ includeMedia: [true],
+ mediaSource: ['manual'],
  },
  },
  default: '',
  placeholder: 'Paste base64 string or file path',
- description: 'Image as base64 string or file path',
+ description: 'Media file as base64 string or file path (auto-detects image/audio)',
  },
  {
- displayName: 'Image URL',
- name: 'imageUrl',
+ displayName: 'Media URL',
+ name: 'mediaUrl',
  type: 'string',
  displayOptions: {
  show: {
- includeImage: [true],
- imageSource: ['url'],
+ includeMedia: [true],
+ mediaSource: ['url'],
  },
  },
  default: '',
- placeholder: 'https://example.com/image.jpg',
- description: 'URL of the image to download and include',
+ placeholder: 'https://example.com/file.jpg',
+ description: 'URL of the media file to download and include',
  },
  {
- displayName: 'Image Binary Property',
- name: 'imageBinaryProperty',
+ displayName: 'Media Binary Property',
+ name: 'mediaBinaryProperty',
  type: 'string',
  displayOptions: {
  show: {
- includeImage: [true],
- imageSource: ['binary'],
+ includeMedia: [true],
+ mediaSource: ['binary'],
  },
  },
  default: 'data',
  placeholder: 'data',
- description: 'Name of the binary property containing the image',
- },
- {
- displayName: 'Include Audio',
- name: 'includeAudio',
- type: 'boolean',
- default: false,
- description: 'Whether to include an audio file in the message',
- },
- {
- displayName: 'Audio Source',
- name: 'audioSource',
- type: 'options',
- displayOptions: {
- show: {
- includeAudio: [true],
- },
- },
- options: [
- {
- name: 'Manual Input',
- value: 'manual',
- description: 'Provide audio as base64 string or file path',
- },
- {
- name: 'URL',
- value: 'url',
- description: 'Download audio from URL',
- },
- {
- name: 'Binary Data',
- value: 'binary',
- description: 'Use binary data from previous node',
- },
- ],
- default: 'manual',
- description: 'Source of the audio data',
- },
- {
- displayName: 'Audio File',
- name: 'audioFile',
- type: 'string',
- displayOptions: {
- show: {
- includeAudio: [true],
- audioSource: ['manual'],
- },
- },
- default: '',
- placeholder: 'Paste base64 string or file path',
- description: 'Audio as base64 string or file path',
- },
- {
- displayName: 'Audio URL',
- name: 'audioUrl',
- type: 'string',
- displayOptions: {
- show: {
- includeAudio: [true],
- audioSource: ['url'],
- },
- },
- default: '',
- placeholder: 'https://example.com/audio.mp3',
- description: 'URL of the audio file to download and include',
- },
- {
- displayName: 'Audio Binary Property',
- name: 'audioBinaryProperty',
- type: 'string',
- displayOptions: {
- show: {
- includeAudio: [true],
- audioSource: ['binary'],
- },
- },
- default: 'data',
- placeholder: 'data',
- description: 'Name of the binary property containing the audio file',
+ description: 'Name of the binary property containing the media file',
  },
  {
  displayName: 'Advanced Options',
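
Note: every image* and audio* property collapses into a single media* set gated by includeMedia. A workflow that previously set includeImage/imageUrl would now configure something like the following TypeScript sketch (values are placeholders, not part of the package):

// Hypothetical parameter values for the consolidated media fields.
const exampleParameters = {
  model: 'gpt-5-mini',
  message: 'Describe the attached file.',
  includeMedia: true,
  mediaSource: 'url', // 'manual' | 'url' | 'binary'
  mediaUrl: 'https://example.com/file.jpg',
  // mediaFile and mediaBinaryProperty apply only to the 'manual' and 'binary' sources.
};
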
@@ -0,0 +1,9 @@
+ import { IExecuteFunctions } from 'n8n-workflow';
+ export declare function processMediaFile(context: IExecuteFunctions, itemIndex: number, source: 'manual' | 'url' | 'binary', mediaFile?: string, mediaUrl?: string, binaryProperty?: string): Promise<{
+ type: 'image' | 'audio' | 'unknown';
+ dataUrl?: string;
+ description: string;
+ mimeType: string;
+ }>;
+ export declare function isImageMimeType(mimeType: string): boolean;
+ export declare function isAudioMimeType(mimeType: string): boolean;
@@ -0,0 +1,49 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.isAudioMimeType = exports.isImageMimeType = exports.processMediaFile = void 0;
+ const index_1 = require("./index");
+ async function processMediaFile(context, itemIndex, source, mediaFile, mediaUrl, binaryProperty) {
+ try {
+ try {
+ const imageResult = await (0, index_1.processImageFile)(context, itemIndex, source, mediaFile, mediaUrl, binaryProperty);
+ return {
+ type: 'image',
+ dataUrl: `data:${imageResult.mimeType};base64,${imageResult.data}`,
+ description: `Image file: ${imageResult.filename} (${Math.round(imageResult.size / 1024)}KB)`,
+ mimeType: imageResult.mimeType,
+ };
+ }
+ catch (imageError) {
+ try {
+ const audioResult = await (0, index_1.processAudioFile)(context, itemIndex, source, mediaFile, mediaUrl, binaryProperty);
+ return {
+ type: 'audio',
+ dataUrl: `data:${audioResult.mimeType};base64,${audioResult.data}`,
+ description: `Audio file: ${audioResult.filename} (${Math.round(audioResult.size / 1024)}KB)`,
+ mimeType: audioResult.mimeType,
+ };
+ }
+ catch (audioError) {
+ throw new Error(`File is neither a valid image nor audio file. Image error: ${imageError instanceof Error ? imageError.message : 'Unknown'}. Audio error: ${audioError instanceof Error ? audioError.message : 'Unknown'}`);
+ }
+ }
+ }
+ catch (error) {
+ return {
+ type: 'unknown',
+ description: `Error processing media file: ${error instanceof Error ? error.message : 'Unknown error'}`,
+ mimeType: 'unknown',
+ };
+ }
+ }
+ exports.processMediaFile = processMediaFile;
+ function isImageMimeType(mimeType) {
+ return mimeType.startsWith('image/') && !mimeType.includes('svg');
+ }
+ exports.isImageMimeType = isImageMimeType;
+ function isAudioMimeType(mimeType) {
+ return mimeType.startsWith('audio/') ||
+ mimeType === 'application/ogg' ||
+ mimeType === 'video/mp4';
+ }
+ exports.isAudioMimeType = isAudioMimeType;
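
Note: processMediaFile tries image decoding first and falls back to audio, so callers only branch on the returned type. A short TypeScript usage sketch against the declared signature (the classifyItemMedia wrapper and the 'data' binary property name are illustrative):

import type { IExecuteFunctions } from 'n8n-workflow';
import { processMediaFile, isImageMimeType, isAudioMimeType } from './utils/mediaDetection';

// Turn one input item's binary data into a chat content part.
async function classifyItemMedia(ctx: IExecuteFunctions, itemIndex: number) {
  const media = await processMediaFile(ctx, itemIndex, 'binary', '', '', 'data');
  if (media.type === 'image' && isImageMimeType(media.mimeType)) {
    return { type: 'image_url', image_url: { url: media.dataUrl } };
  }
  if (media.type === 'audio' && isAudioMimeType(media.mimeType)) {
    return { type: 'text', text: media.description };
  }
  // type === 'unknown': description carries the combined image/audio error.
  return { type: 'text', text: `[Media processing error: ${media.description}]` };
}
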
@@ -0,0 +1,5 @@
+ import { ModelCapabilities, ModelValidationResult } from './types';
+ export declare const MODEL_CAPABILITIES: Record<string, ModelCapabilities>;
+ export declare function validateModelCapabilities(model: string, includeImage: boolean, includeAudio: boolean): ModelValidationResult;
+ export declare function getSupportedModels(requireImages?: boolean, requireAudio?: boolean): string[];
+ export declare function getModelInfo(model: string): ModelCapabilities | null;
@@ -0,0 +1,113 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.getModelInfo = exports.getSupportedModels = exports.validateModelCapabilities = exports.MODEL_CAPABILITIES = void 0;
+ exports.MODEL_CAPABILITIES = {
+ 'gpt-5': {
+ supportsImages: true,
+ supportsAudio: true,
+ maxContextTokens: 200000,
+ description: 'OpenAI GPT-5 with image support and audio analysis via GitHub Copilot API',
+ },
+ 'gpt-5-mini': {
+ supportsImages: true,
+ supportsAudio: true,
+ maxContextTokens: 128000,
+ description: 'OpenAI GPT-5 Mini with image support and audio analysis via GitHub Copilot API',
+ },
+ 'gpt-4.1-copilot': {
+ supportsImages: true,
+ supportsAudio: true,
+ maxContextTokens: 128000,
+ description: 'OpenAI GPT-4.1 with image support and audio analysis via GitHub Copilot API',
+ },
+ 'claude-opus-4.1': {
+ supportsImages: false,
+ supportsAudio: false,
+ maxContextTokens: 200000,
+ description: 'Anthropic Claude Opus 4.1 - Text only via GitHub Copilot API',
+ },
+ 'claude-3.5-sonnet': {
+ supportsImages: false,
+ supportsAudio: false,
+ maxContextTokens: 200000,
+ description: 'Anthropic Claude 3.5 Sonnet - Text only via GitHub Copilot API',
+ },
+ 'gemini-2.5-pro': {
+ supportsImages: true,
+ supportsAudio: true,
+ maxContextTokens: 1000000,
+ description: 'Google Gemini 2.5 Pro with multimodal support via GitHub Copilot API',
+ },
+ 'gemini-2.0-flash': {
+ supportsImages: true,
+ supportsAudio: true,
+ maxContextTokens: 1000000,
+ description: 'Google Gemini 2.0 Flash with multimodal support via GitHub Copilot API',
+ },
+ 'grok-code-fast-1': {
+ supportsImages: false,
+ supportsAudio: false,
+ maxContextTokens: 128000,
+ description: 'xAI Grok Code Fast 1 - Text only via GitHub Copilot API',
+ },
+ 'o3': {
+ supportsImages: false,
+ supportsAudio: false,
+ maxContextTokens: 200000,
+ description: 'OpenAI o3 - Text only via GitHub Copilot API',
+ },
+ 'o3-mini': {
+ supportsImages: false,
+ supportsAudio: false,
+ maxContextTokens: 128000,
+ description: 'OpenAI o3-mini - Text only via GitHub Copilot API',
+ },
+ };
+ function validateModelCapabilities(model, includeImage, includeAudio) {
+ const capabilities = exports.MODEL_CAPABILITIES[model];
+ if (!capabilities) {
+ return {
+ isValid: false,
+ errorMessage: `Unknown model: ${model}. Please check if the model name is correct.`,
+ };
+ }
+ const warnings = [];
+ let isValid = true;
+ let errorMessage;
+ if (includeImage && !capabilities.supportsImages) {
+ isValid = false;
+ errorMessage = `Model ${model} does not support image input. Please disable image upload or choose a different model (e.g., GPT-5, Gemini 2.5 Pro).`;
+ }
+ if (includeAudio && !capabilities.supportsAudio) {
+ isValid = false;
+ errorMessage = `Model ${model} does not support audio input. Please disable audio upload or choose a different model (e.g., GPT-5, Gemini 2.5 Pro).`;
+ }
+ if (model.includes('claude') && (includeImage || includeAudio)) {
+ warnings.push('Claude models typically work best with text-only input via GitHub Copilot API.');
+ }
+ if (model.includes('grok') && (includeImage || includeAudio)) {
+ warnings.push('Grok models are optimized for coding tasks and work best with text input.');
+ }
+ return {
+ isValid,
+ errorMessage,
+ warnings: warnings.length > 0 ? warnings : undefined,
+ };
+ }
+ exports.validateModelCapabilities = validateModelCapabilities;
+ function getSupportedModels(requireImages = false, requireAudio = false) {
+ return Object.entries(exports.MODEL_CAPABILITIES)
+ .filter(([, capabilities]) => {
+ if (requireImages && !capabilities.supportsImages)
+ return false;
+ if (requireAudio && !capabilities.supportsAudio)
+ return false;
+ return true;
+ })
+ .map(([model]) => model);
+ }
+ exports.getSupportedModels = getSupportedModels;
+ function getModelInfo(model) {
+ return exports.MODEL_CAPABILITIES[model] || null;
+ }
+ exports.getModelInfo = getModelInfo;
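
Note: validateModelCapabilities is what the chat node calls before attaching media; getSupportedModels and getModelInfo are convenience lookups over the same capability table. A brief TypeScript usage sketch (the output comments follow the table above):

import { validateModelCapabilities, getSupportedModels, getModelInfo } from './utils/modelCapabilities';

// A text-only model with an image attached fails validation with a helpful message.
const check = validateModelCapabilities('claude-opus-4.1', true, false);
console.log(check.isValid);      // false
console.log(check.errorMessage); // "Model claude-opus-4.1 does not support image input. ..."

// List models that accept both images and audio, and inspect one of them.
const multimodal = getSupportedModels(true, true);
// ['gpt-5', 'gpt-5-mini', 'gpt-4.1-copilot', 'gemini-2.5-pro', 'gemini-2.0-flash']
console.log(multimodal, getModelInfo('gemini-2.5-pro')?.maxContextTokens); // ..., 1000000
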
@@ -9,10 +9,6 @@ export interface ChatMessageContent {
  image_url?: {
  url: string;
  };
- input_audio?: {
- data: string;
- format: string;
- };
  }
  export interface CopilotResponse {
  choices: Array<{
@@ -48,3 +44,14 @@ export interface OptimizationOptions {
  quality?: number;
  maxSizeKB?: number;
  }
+ export interface ModelCapabilities {
+ supportsImages: boolean;
+ supportsAudio: boolean;
+ maxContextTokens: number;
+ description: string;
+ }
+ export interface ModelValidationResult {
+ isValid: boolean;
+ errorMessage?: string;
+ warnings?: string[];
+ }
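
Note: the two new interfaces are the contract between the capability table and its callers. A TypeScript sketch of extending the table with an extra entry (the model id and the './utils/...' import paths are assumptions for illustration, not shipped with the package):

import type { ModelCapabilities, ModelValidationResult } from './utils/types';
import { MODEL_CAPABILITIES, validateModelCapabilities } from './utils/modelCapabilities';

// Hypothetical entry, typed against ModelCapabilities.
const experimental: ModelCapabilities = {
  supportsImages: true,
  supportsAudio: false,
  maxContextTokens: 128000,
  description: 'Example entry used only to illustrate the ModelCapabilities shape',
};
MODEL_CAPABILITIES['example-model'] = experimental;

const result: ModelValidationResult = validateModelCapabilities('example-model', false, true);
// result.isValid === false; result.errorMessage explains the missing audio support
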
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "n8n-nodes-github-copilot",
- "version": "3.2.2",
+ "version": "3.2.4",
  "description": "n8n community node for GitHub Copilot with CLI integration and official Chat API access to GPT-5, Claude, Gemini and more using your existing Copilot credits",
  "license": "MIT",
  "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",