n8n-nodes-github-copilot 3.2.1 → 3.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.GitHubCopilotChatAPI = void 0;
 const utils_1 = require("./utils");
 const nodeProperties_1 = require("./nodeProperties");
+const modelCapabilities_1 = require("./utils/modelCapabilities");
 async function processAudioFileLegacy(context, itemIndex) {
 const audioSource = context.getNodeParameter('audioSource', itemIndex);
 try {
@@ -90,6 +91,15 @@ class GitHubCopilotChatAPI {
 const advancedOptions = this.getNodeParameter('advancedOptions', i, {});
 const includeImage = this.getNodeParameter('includeImage', i, false);
 const includeAudio = this.getNodeParameter('includeAudio', i, false);
+const validation = (0, modelCapabilities_1.validateModelCapabilities)(model, includeImage, includeAudio);
+if (!validation.isValid) {
+throw new Error(validation.errorMessage || 'Model validation failed');
+}
+if (validation.warnings) {
+for (const warning of validation.warnings) {
+console.warn(`GitHub Copilot API Warning: ${warning}`);
+}
+}
 const messages = [];
 if (systemMessage) {
 messages.push({
@@ -120,7 +130,7 @@ class GitHubCopilotChatAPI {
 if (audioResult.dataUrl) {
 contentArray.push({
 type: 'text',
-text: `[Audio included: ${audioResult.description}]`,
+text: `Please analyze this audio file: ${audioResult.description}\n\nAudio data (base64): ${audioResult.dataUrl}`,
 });
 }
 else {
@@ -142,7 +152,8 @@ class GitHubCopilotChatAPI {
 stream: false,
 ...advancedOptions,
 };
-const response = await (0, utils_1.makeApiRequest)(this, '/chat/completions', requestBody, includeImage);
+const hasMedia = includeImage || includeAudio;
+const response = await (0, utils_1.makeApiRequest)(this, '/chat/completions', requestBody, hasMedia);
 const result = {
 message: ((_b = (_a = response.choices[0]) === null || _a === void 0 ? void 0 : _a.message) === null || _b === void 0 ? void 0 : _b.content) || '',
 model,
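Taken together, these hunks add a pre-flight capability check before the request body is assembled, and widen the old image-only flag into a general media flag. A TypeScript sketch of the resulting flow, assuming the source layout implied by the requires above (the wrapper `sendChatRequest` is hypothetical; `validateModelCapabilities` and `makeApiRequest` are the package exports shown elsewhere in this diff):

```typescript
import { IExecuteFunctions } from 'n8n-workflow';
import { validateModelCapabilities } from './utils/modelCapabilities';
import { makeApiRequest } from './utils';

// Hypothetical wrapper mirroring the new execute() logic: validate first,
// surface warnings, then flag the request as a media request when either
// images or audio are attached (previously only includeImage did this).
export async function sendChatRequest(
    context: IExecuteFunctions,
    model: string,
    requestBody: Record<string, unknown>,
    includeImage: boolean,
    includeAudio: boolean,
) {
    const validation = validateModelCapabilities(model, includeImage, includeAudio);
    if (!validation.isValid) {
        // e.g. "Model claude-opus-4.1 does not support image input. ..."
        throw new Error(validation.errorMessage || 'Model validation failed');
    }
    for (const warning of validation.warnings ?? []) {
        console.warn(`GitHub Copilot API Warning: ${warning}`);
    }
    const hasMedia = includeImage || includeAudio;
    return makeApiRequest(context, '/chat/completions', requestBody, hasMedia);
}
```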
@@ -24,32 +24,32 @@ exports.nodeProperties = [
 {
 name: 'GPT-5',
 value: 'gpt-5',
-description: 'OpenAI GPT-5 (Latest and most capable)',
+description: 'OpenAI GPT-5 (Latest and most capable) ✓ Images',
 },
 {
 name: 'GPT-5 Mini',
 value: 'gpt-5-mini',
-description: 'OpenAI GPT-5 Mini (Faster, cost-effective)',
+description: 'OpenAI GPT-5 Mini (Faster, cost-effective) ✓ Images',
 },
 {
 name: 'Claude Opus 4.1',
 value: 'claude-opus-4.1',
-description: 'Anthropic Claude Opus 4.1 (Advanced reasoning)',
+description: 'Anthropic Claude Opus 4.1 (Advanced reasoning) ✗ Text only',
 },
 {
 name: 'Gemini 2.5 Pro',
 value: 'gemini-2.5-pro',
-description: 'Google Gemini 2.5 Pro (Multimodal capabilities)',
+description: 'Google Gemini 2.5 Pro (Multimodal capabilities) ✓ Images',
 },
 {
 name: 'Grok Code Fast 1',
 value: 'grok-code-fast-1',
-description: 'xAI Grok Code Fast 1 (Optimized for coding)',
+description: 'xAI Grok Code Fast 1 (Optimized for coding) ✗ Text only',
 },
 {
 name: 'GPT-4.1 Copilot',
 value: 'gpt-4.1-copilot',
-description: 'OpenAI GPT-4.1 specialized for coding assistance',
+description: 'OpenAI GPT-4.1 specialized for coding assistance ✓ Images',
 },
 ],
 default: 'gpt-5-mini',
@@ -82,7 +82,7 @@ exports.nodeProperties = [
 name: 'includeImage',
 type: 'boolean',
 default: false,
-description: 'Whether to include an image in the message',
+description: 'Whether to include an image in the message. ✓ Supported by GPT and Gemini models via GitHub Copilot API.',
 },
 {
 displayName: 'Image Source',
@@ -160,7 +160,7 @@ exports.nodeProperties = [
 name: 'includeAudio',
 type: 'boolean',
 default: false,
-description: 'Whether to include an audio file in the message',
+description: 'Whether to include an audio file in the message. ⚠️ Audio is sent as base64 text to models for analysis (GitHub Copilot API limitation).',
 },
 {
 displayName: 'Audio Source',
@@ -2,7 +2,7 @@
 /// <reference types="node" />
 import { IExecuteFunctions } from 'n8n-workflow';
 import { CopilotResponse } from './types';
-export declare function makeApiRequest(context: IExecuteFunctions, endpoint: string, body: Record<string, unknown>, hasVision?: boolean): Promise<CopilotResponse>;
+export declare function makeApiRequest(context: IExecuteFunctions, endpoint: string, body: Record<string, unknown>, hasMedia?: boolean): Promise<CopilotResponse>;
 export declare function downloadFileFromUrl(url: string): Promise<Buffer>;
 export declare function getFileFromBinary(context: IExecuteFunctions, itemIndex: number, propertyName: string): Promise<Buffer>;
 export declare function getImageMimeType(filename: string): string;
@@ -1,15 +1,16 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.truncateToTokenLimit = exports.validateTokenLimit = exports.estimateTokens = exports.validateFileSize = exports.getAudioMimeType = exports.getImageMimeType = exports.getFileFromBinary = exports.downloadFileFromUrl = exports.makeApiRequest = void 0;
-async function makeApiRequest(context, endpoint, body, hasVision = false) {
+async function makeApiRequest(context, endpoint, body, hasMedia = false) {
 const credentials = await context.getCredentials('githubApi');
 const headers = {
 'Authorization': `Bearer ${credentials.accessToken}`,
 'Content-Type': 'application/json',
 'User-Agent': 'n8n-github-copilot-chat-api-node',
 };
-if (hasVision) {
+if (hasMedia) {
 headers['Copilot-Vision-Request'] = 'true';
+headers['Copilot-Media-Request'] = 'true';
 }
 const options = {
 method: 'POST',
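The `hasVision` → `hasMedia` rename keeps the existing vision header and adds a second header whenever any media is attached. Restated as a standalone TypeScript helper (the name `buildCopilotHeaders` is not part of the package; it only isolates the branch above):

```typescript
// Sketch of the header construction after the change: attached media of either
// kind (image or audio) now sets both Copilot request headers.
export function buildCopilotHeaders(accessToken: string, hasMedia: boolean): Record<string, string> {
    const headers: Record<string, string> = {
        'Authorization': `Bearer ${accessToken}`,
        'Content-Type': 'application/json',
        'User-Agent': 'n8n-github-copilot-chat-api-node',
    };
    if (hasMedia) {
        headers['Copilot-Vision-Request'] = 'true';
        headers['Copilot-Media-Request'] = 'true';
    }
    return headers;
}
```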
@@ -0,0 +1,9 @@
+import { IExecuteFunctions } from 'n8n-workflow';
+export declare function processMediaFile(context: IExecuteFunctions, itemIndex: number, source: 'manual' | 'url' | 'binary', mediaFile?: string, mediaUrl?: string, binaryProperty?: string): Promise<{
+type: 'image' | 'audio' | 'unknown';
+dataUrl?: string;
+description: string;
+mimeType: string;
+}>;
+export declare function isImageMimeType(mimeType: string): boolean;
+export declare function isAudioMimeType(mimeType: string): boolean;
@@ -0,0 +1,49 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.isAudioMimeType = exports.isImageMimeType = exports.processMediaFile = void 0;
+const index_1 = require("./index");
+async function processMediaFile(context, itemIndex, source, mediaFile, mediaUrl, binaryProperty) {
+try {
+try {
+const imageResult = await (0, index_1.processImageFile)(context, itemIndex, source, mediaFile, mediaUrl, binaryProperty);
+return {
+type: 'image',
+dataUrl: `data:${imageResult.mimeType};base64,${imageResult.data}`,
+description: `Image file: ${imageResult.filename} (${Math.round(imageResult.size / 1024)}KB)`,
+mimeType: imageResult.mimeType,
+};
+}
+catch (imageError) {
+try {
+const audioResult = await (0, index_1.processAudioFile)(context, itemIndex, source, mediaFile, mediaUrl, binaryProperty);
+return {
+type: 'audio',
+dataUrl: `data:${audioResult.mimeType};base64,${audioResult.data}`,
+description: `Audio file: ${audioResult.filename} (${Math.round(audioResult.size / 1024)}KB)`,
+mimeType: audioResult.mimeType,
+};
+}
+catch (audioError) {
+throw new Error(`File is neither a valid image nor audio file. Image error: ${imageError instanceof Error ? imageError.message : 'Unknown'}. Audio error: ${audioError instanceof Error ? audioError.message : 'Unknown'}`);
+}
+}
+}
+catch (error) {
+return {
+type: 'unknown',
+description: `Error processing media file: ${error instanceof Error ? error.message : 'Unknown error'}`,
+mimeType: 'unknown',
+};
+}
+}
+exports.processMediaFile = processMediaFile;
+function isImageMimeType(mimeType) {
+return mimeType.startsWith('image/') && !mimeType.includes('svg');
+}
+exports.isImageMimeType = isImageMimeType;
+function isAudioMimeType(mimeType) {
+return mimeType.startsWith('audio/') ||
+mimeType === 'application/ogg' ||
+mimeType === 'video/mp4';
+}
+exports.isAudioMimeType = isAudioMimeType;
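The new module attempts image processing first, falls back to audio, and on a double failure returns `type: 'unknown'` rather than throwing. A consumer sketch under that contract (the `describeMedia` helper, item index, and URL are illustrative; the import path assumes `mediaProcessor` sits in `utils/` alongside the helpers it requires):

```typescript
import { IExecuteFunctions } from 'n8n-workflow';
import { processMediaFile, isImageMimeType } from './utils/mediaProcessor';

// Illustrative consumer of the unified media helper.
export async function describeMedia(context: IExecuteFunctions, itemIndex: number): Promise<string> {
    const media = await processMediaFile(context, itemIndex, 'url', undefined, 'https://example.com/photo.png');
    if (media.type === 'unknown') {
        // Failures are reported through `description`, not thrown.
        throw new Error(media.description);
    }
    // Images become image_url content parts; audio ends up as a base64 text part.
    const kind = isImageMimeType(media.mimeType) ? 'image_url part' : 'base64 text part';
    return `${media.description} -> sent as ${kind}`;
}
```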
@@ -0,0 +1,5 @@
+import { ModelCapabilities, ModelValidationResult } from './types';
+export declare const MODEL_CAPABILITIES: Record<string, ModelCapabilities>;
+export declare function validateModelCapabilities(model: string, includeImage: boolean, includeAudio: boolean): ModelValidationResult;
+export declare function getSupportedModels(requireImages?: boolean, requireAudio?: boolean): string[];
+export declare function getModelInfo(model: string): ModelCapabilities | null;
@@ -0,0 +1,113 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getModelInfo = exports.getSupportedModels = exports.validateModelCapabilities = exports.MODEL_CAPABILITIES = void 0;
+exports.MODEL_CAPABILITIES = {
+'gpt-5': {
+supportsImages: true,
+supportsAudio: false,
+maxContextTokens: 200000,
+description: 'OpenAI GPT-5 with image support via GitHub Copilot API',
+},
+'gpt-5-mini': {
+supportsImages: true,
+supportsAudio: false,
+maxContextTokens: 128000,
+description: 'OpenAI GPT-5 Mini with image support via GitHub Copilot API',
+},
+'gpt-4.1-copilot': {
+supportsImages: true,
+supportsAudio: false,
+maxContextTokens: 128000,
+description: 'OpenAI GPT-4.1 with image support via GitHub Copilot API',
+},
+'claude-opus-4.1': {
+supportsImages: false,
+supportsAudio: false,
+maxContextTokens: 200000,
+description: 'Anthropic Claude Opus 4.1 - Text only via GitHub Copilot API',
+},
+'claude-3.5-sonnet': {
+supportsImages: false,
+supportsAudio: false,
+maxContextTokens: 200000,
+description: 'Anthropic Claude 3.5 Sonnet - Text only via GitHub Copilot API',
+},
+'gemini-2.5-pro': {
+supportsImages: true,
+supportsAudio: false,
+maxContextTokens: 1000000,
+description: 'Google Gemini 2.5 Pro via GitHub Copilot API',
+},
+'gemini-2.0-flash': {
+supportsImages: true,
+supportsAudio: false,
+maxContextTokens: 1000000,
+description: 'Google Gemini 2.0 Flash via GitHub Copilot API',
+},
+'grok-code-fast-1': {
+supportsImages: false,
+supportsAudio: false,
+maxContextTokens: 128000,
+description: 'xAI Grok Code Fast 1 - Text only via GitHub Copilot API',
+},
+'o3': {
+supportsImages: false,
+supportsAudio: false,
+maxContextTokens: 200000,
+description: 'OpenAI o3 - Text only via GitHub Copilot API',
+},
+'o3-mini': {
+supportsImages: false,
+supportsAudio: false,
+maxContextTokens: 128000,
+description: 'OpenAI o3-mini - Text only via GitHub Copilot API',
+},
+};
+function validateModelCapabilities(model, includeImage, includeAudio) {
+const capabilities = exports.MODEL_CAPABILITIES[model];
+if (!capabilities) {
+return {
+isValid: false,
+errorMessage: `Unknown model: ${model}. Please check if the model name is correct.`,
+};
+}
+const warnings = [];
+let isValid = true;
+let errorMessage;
+if (includeImage && !capabilities.supportsImages) {
+isValid = false;
+errorMessage = `Model ${model} does not support image input. Please disable image upload or choose a different model (e.g., GPT-5, Gemini 2.5 Pro).`;
+}
+if (includeAudio && !capabilities.supportsAudio) {
+isValid = false;
+errorMessage = `Model ${model} does not support audio input. Please disable audio upload or choose a different model (e.g., GPT-5, Gemini 2.5 Pro).`;
+}
+if (model.includes('claude') && (includeImage || includeAudio)) {
+warnings.push('Claude models typically work best with text-only input via GitHub Copilot API.');
+}
+if (model.includes('grok') && (includeImage || includeAudio)) {
+warnings.push('Grok models are optimized for coding tasks and work best with text input.');
+}
+return {
+isValid,
+errorMessage,
+warnings: warnings.length > 0 ? warnings : undefined,
+};
+}
+exports.validateModelCapabilities = validateModelCapabilities;
+function getSupportedModels(requireImages = false, requireAudio = false) {
+return Object.entries(exports.MODEL_CAPABILITIES)
+.filter(([, capabilities]) => {
+if (requireImages && !capabilities.supportsImages)
+return false;
+if (requireAudio && !capabilities.supportsAudio)
+return false;
+return true;
+})
+.map(([model]) => model);
+}
+exports.getSupportedModels = getSupportedModels;
+function getModelInfo(model) {
+return exports.MODEL_CAPABILITIES[model] || null;
+}
+exports.getModelInfo = getModelInfo;
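A few call sites the capability table enables, as a sketch (the commented results follow directly from the `MODEL_CAPABILITIES` entries above; the import path mirrors the `./utils/modelCapabilities` require earlier in this diff):

```typescript
import { getModelInfo, getSupportedModels, validateModelCapabilities } from './utils/modelCapabilities';

// Models registered with image support:
// ['gpt-5', 'gpt-5-mini', 'gpt-4.1-copilot', 'gemini-2.5-pro', 'gemini-2.0-flash']
const visionModels = getSupportedModels(true, false);

// No entry sets supportsAudio, so requiring audio currently yields an empty array.
const audioModels = getSupportedModels(false, true);
console.log(visionModels, audioModels);

// Invalid combination: errorMessage points the user to GPT-5 or Gemini 2.5 Pro,
// and a Grok-specific warning is attached as well.
const check = validateModelCapabilities('grok-code-fast-1', true, false);
console.log(check.isValid, check.errorMessage, check.warnings);

// Unknown models return null rather than a default capability set.
console.log(getModelInfo('not-a-model')); // null
```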
@@ -1,13 +1,14 @@
 import { IExecuteFunctions } from 'n8n-workflow';
 export interface ChatMessage {
 role: 'system' | 'user' | 'assistant';
-content: string | Array<{
-type: string;
-text?: string;
-image_url?: {
-url: string;
-};
-}>;
+content: string | Array<ChatMessageContent>;
+}
+export interface ChatMessageContent {
+type: string;
+text?: string;
+image_url?: {
+url: string;
+};
 }
 export interface CopilotResponse {
 choices: Array<{
@@ -43,3 +44,14 @@ export interface OptimizationOptions {
 quality?: number;
 maxSizeKB?: number;
 }
+export interface ModelCapabilities {
+supportsImages: boolean;
+supportsAudio: boolean;
+maxContextTokens: number;
+description: string;
+}
+export interface ModelValidationResult {
+isValid: boolean;
+errorMessage?: string;
+warnings?: string[];
+}
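Extracting `ChatMessageContent` lets multimodal content parts be typed independently of the message that carries them. A minimal sketch (values are placeholders; the import path assumes `types` lives in `utils/`, as the declaration imports suggest):

```typescript
import { ChatMessage, ChatMessageContent } from './utils/types';

// Content parts can now be built and typed on their own before being attached.
const parts: ChatMessageContent[] = [
    { type: 'text', text: 'What is in this picture?' },
    { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGgo...' } },
];

const message: ChatMessage = {
    role: 'user',
    content: parts,
};
console.log(JSON.stringify(message));
```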
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "n8n-nodes-github-copilot",
-"version": "3.2.1",
+"version": "3.2.3",
 "description": "n8n community node for GitHub Copilot with CLI integration and official Chat API access to GPT-5, Claude, Gemini and more using your existing Copilot credits",
 "license": "MIT",
 "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",