n8n-nodes-github-copilot 3.38.26 → 3.38.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,9 @@
-import { INodeType, INodeTypeDescription, ISupplyDataFunctions, SupplyData, ILoadOptionsFunctions, INodePropertyOptions } from 'n8n-workflow';
+import { ISupplyDataFunctions, INodeType, INodeTypeDescription, SupplyData } from 'n8n-workflow';
 export declare class GitHubCopilotChatModel implements INodeType {
     description: INodeTypeDescription;
     methods: {
         loadOptions: {
-            getAvailableModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
+            getAvailableModels(this: import("n8n-workflow").ILoadOptionsFunctions): Promise<import("n8n-workflow").INodePropertyOptions[]>;
         };
     };
     supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData>;
@@ -2,6 +2,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.GitHubCopilotChatModel = void 0;
 const openai_1 = require("@langchain/openai");
+const messages_1 = require("@langchain/core/messages");
 const GitHubCopilotModels_1 = require("../../shared/models/GitHubCopilotModels");
 const GitHubCopilotEndpoints_1 = require("../../shared/utils/GitHubCopilotEndpoints");
 const DynamicModelLoader_1 = require("../../shared/models/DynamicModelLoader");
@@ -24,6 +25,7 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
         if (!messages || messages.length === 0) {
             throw new Error('No messages provided for generation');
         }
+        let hasVisionContent = false;
         let copilotMessages = messages.map((msg) => {
             let role;
             switch (msg._getType()) {
@@ -40,19 +42,66 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
                     console.warn(`⚠️ Unknown message type: ${msg._getType()}, defaulting to 'user'`);
                     role = 'user';
             }
-            let content = msg.content;
-            if (typeof content === 'string') {
+            let content = '';
+            const rawContent = msg.content;
+            if (typeof rawContent === 'string') {
+                if (rawContent.includes('data:image/') || rawContent.match(/\[.*image.*\]/i)) {
+                    hasVisionContent = true;
+                    console.log(`👁️ Vision content detected in string message (data URL or image reference)`);
+                }
+                content = rawContent;
             }
-            else if (Array.isArray(content)) {
-                console.warn(`⚠️ Complex content detected, stringifying:`, content);
-                content = JSON.stringify(content);
+            else if (Array.isArray(rawContent)) {
+                const hasImageContent = rawContent.some((part) => {
+                    if (typeof part === 'object' && part !== null) {
+                        const p = part;
+                        if (p.type === 'image_url' || p.type === 'image' || p.image_url !== undefined) {
+                            return true;
+                        }
+                        if (typeof p.url === 'string' && p.url.startsWith('data:image/')) {
+                            return true;
+                        }
+                        if (p.image || p.imageUrl || p.image_data) {
+                            return true;
+                        }
+                    }
+                    return false;
+                });
+                if (hasImageContent) {
+                    hasVisionContent = true;
+                    console.log(`👁️ Vision content detected in array message`);
+                    content = rawContent.map((part) => {
+                        if (typeof part === 'object' && part !== null) {
+                            const p = part;
+                            if (p.type === 'text') {
+                                return { type: 'text', text: String(p.text || '') };
+                            }
+                            else if (p.type === 'image_url' || p.type === 'image' || p.image_url) {
+                                const imageUrl = (p.image_url || p.image || p);
+                                const url = String((imageUrl === null || imageUrl === void 0 ? void 0 : imageUrl.url) || p.url || p.imageUrl || p.image_data || '');
+                                return {
+                                    type: 'image_url',
+                                    image_url: {
+                                        url,
+                                        detail: (imageUrl === null || imageUrl === void 0 ? void 0 : imageUrl.detail) || 'auto',
+                                    },
+                                };
+                            }
+                        }
+                        return { type: 'text', text: String(part) };
+                    });
+                }
+                else {
+                    console.warn(`⚠️ Complex content detected, stringifying:`, rawContent);
+                    content = JSON.stringify(rawContent);
+                }
             }
-            else if (content === null || content === undefined) {
+            else if (rawContent === null || rawContent === undefined) {
                 content = '';
             }
             else {
-                console.warn(`⚠️ Non-string content detected, stringifying:`, typeof content);
-                content = JSON.stringify(content);
+                console.warn(`⚠️ Non-string content detected, stringifying:`, typeof rawContent);
+                content = JSON.stringify(rawContent);
             }
             return {
                 role,
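
The array branch above normalizes LangChain-style multimodal content into the OpenAI chat-completions shape. A minimal sketch of the mapping, with a hypothetical input that is not taken from the package:

    // Hypothetical LangChain multimodal content array (illustration only).
    const rawContent = [
        { type: 'text', text: 'What is in this picture?' },
        { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0...', detail: 'high' } },
    ];
    // After the mapping: text parts stay { type: 'text', text }, image parts are
    // normalized to { type: 'image_url', image_url: { url, detail } }, and the
    // presence of any image part sets hasVisionContent = true, which later adds
    // the Copilot-Vision-Request header.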
@@ -70,7 +119,10 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
             }
         }
         const validMessages = copilotMessages.filter((msg) => {
-            if (!msg.content || msg.content.trim() === '') {
+            const isEmpty = Array.isArray(msg.content)
+                ? msg.content.length === 0
+                : (!msg.content || (typeof msg.content === 'string' && msg.content.trim() === ''));
+            if (isEmpty) {
                 console.warn(`⚠️ Filtering out empty message with role: ${msg.role}`);
                 return false;
             }
@@ -80,21 +132,25 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
             throw new Error('No valid messages after filtering empty content');
         }
         const requestBody = {
-            model: this.modelName || this.model,
+            model: this.model,
             messages: validMessages,
             temperature: this.temperature,
             max_tokens: this.maxTokens,
             top_p: this.topP,
             stream: this.options.enableStreaming || false,
         };
-        if (this.options.tools && this.options.tools.length > 0) {
-            requestBody.tools = this.options.tools;
+        if (this.options.tools && JSON.parse(this.options.tools).length > 0) {
+            requestBody.tools = JSON.parse(this.options.tools);
             requestBody.tool_choice = this.options.tool_choice || 'auto';
-            console.log(`🔧 Request includes ${this.options.tools.length} tools`);
+            console.log(`🔧 Request includes ${requestBody.tools.length} tools`);
         }
         const startTime = Date.now();
+        const shouldUseVision = hasVisionContent || this.options.enableVision === true;
+        if (shouldUseVision) {
+            console.log(`👁️ Sending vision request with Copilot-Vision-Request header (auto=${hasVisionContent}, manual=${this.options.enableVision})`);
+        }
         try {
-            const response = await (0, GitHubCopilotApiUtils_1.makeGitHubCopilotRequest)(this.context, GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.ENDPOINTS.CHAT_COMPLETIONS, requestBody, false);
+            const response = await (0, GitHubCopilotApiUtils_1.makeGitHubCopilotRequest)(this.context, GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.ENDPOINTS.CHAT_COMPLETIONS, requestBody, shouldUseVision);
             const endTime = Date.now();
             const latency = endTime - startTime;
             console.log(`⏱️ GitHub Copilot API call completed in ${latency}ms`);
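
Note that this.options.tools is now consumed as a JSON string, parsed once for the length check and again for the assignment. A sketch of the serialized value the branch expects, with a hypothetical tool definition:

    // Hypothetical serialized tools option (the tool name is illustrative only).
    const tools = JSON.stringify([
        {
            type: 'function',
            function: {
                name: 'get_weather',
                description: 'Look up the current weather for a city',
                parameters: {
                    type: 'object',
                    properties: { city: { type: 'string' } },
                    required: ['city'],
                },
            },
        },
    ]);
    // JSON.parse(tools).length > 0, so the request gains tools and tool_choice: 'auto'.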
@@ -105,22 +161,19 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
             if (!choice.message) {
                 throw new Error('GitHub Copilot API returned choice without message');
             }
-            const langchainMessage = {
-                _getType: () => choice.message.role,
+            const langchainMessage = new messages_1.AIMessage({
                 content: choice.message.content || '',
-                tool_calls: choice.message.tool_calls,
-            };
+            });
             console.log(`📝 Response: role=${choice.message.role}, content_length=${((_a = choice.message.content) === null || _a === void 0 ? void 0 : _a.length) || 0}, finish_reason=${choice.finish_reason}`);
+            const generation = {
+                text: choice.message.content || '',
+                generationInfo: {
+                    finish_reason: choice.finish_reason,
+                },
+                message: langchainMessage,
+            };
             return {
-                generations: [
-                    {
-                        text: choice.message.content || '',
-                        generationInfo: {
-                            finish_reason: choice.finish_reason,
-                        },
-                        message: langchainMessage,
-                    },
-                ],
+                generations: [generation],
                 llmOutput: {
                     tokenUsage: response.usage,
                 },
@@ -280,6 +333,13 @@ class GitHubCopilotChatModel {
                         },
                     },
                 },
+                {
+                    displayName: 'Enable Vision (Image Processing)',
+                    name: 'enableVision',
+                    type: 'boolean',
+                    default: false,
+                    description: 'Enable vision capabilities for processing images. Required when sending images via chat. Only works with vision-capable models (GPT-4o, GPT-5, Claude, etc.).',
+                },
             ],
         },
     ],
@@ -322,7 +382,7 @@ class GitHubCopilotChatModel {
             !token.startsWith('github_pat_')) {
             console.warn(`⚠️ Unexpected token format: ${tokenPrefix}...${tokenSuffix}. Trying API call anyway.`);
         }
-        const safeModel = modelInfo ? model : GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL;
+        const safeModel = model || GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL;
         const safeModelInfo = modelInfo || GitHubCopilotModels_1.GitHubCopilotModelsManager.getModelByValue(GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL);
         const minVSCodeVersion = (0, ModelVersionRequirements_1.getMinVSCodeVersion)(safeModel);
         const additionalHeaders = (0, ModelVersionRequirements_1.getAdditionalHeaders)(safeModel);
@@ -357,11 +417,15 @@ class GitHubCopilotChatModel {
             baseURL: GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.BASE_URL,
             apiKey: token,
             defaultHeaders: {
-                'User-Agent': 'GitHubCopilotChat/1.0.0 n8n/3.10.1',
-                Accept: 'application/json',
+                'User-Agent': 'GitHubCopilotChat/0.35.0',
+                'Accept': 'application/json',
                 'Editor-Version': `vscode/${minVSCodeVersion}`,
-                'Editor-Plugin-Version': 'copilot-chat/0.12.0',
+                'Editor-Plugin-Version': 'copilot-chat/0.35.0',
                 'X-Request-Id': `n8n-chatmodel-${Date.now()}-${Math.random().toString(36).substring(7)}`,
+                'X-GitHub-Api-Version': '2025-05-01',
+                'X-Interaction-Type': 'copilot-chat',
+                'OpenAI-Intent': 'conversation-panel',
+                'Copilot-Integration-Id': 'vscode-chat',
                 ...additionalHeaders,
                 ...(options.enableVision &&
                     (safeModelInfo === null || safeModelInfo === void 0 ? void 0 : safeModelInfo.capabilities.vision) && {
@@ -1,8 +1,14 @@
-import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription } from 'n8n-workflow';
+import { IExecuteFunctions, INodeType, INodeTypeDescription, INodeExecutionData } from 'n8n-workflow';
+interface ISpeechOptions {
+    temperature?: number;
+    maxTokens?: number;
+    timeout?: number;
+}
 export declare class GitHubCopilotSpeech implements INodeType {
     description: INodeTypeDescription;
     execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
-    private static transcribeWithMicrosoftSpeech;
-    private static detectAudioFormat;
-    private static isSupportedFormat;
+    static transcribeWithMicrosoftSpeech(audioBuffer: Buffer, format: string, language: string, oauthToken: string, options: ISpeechOptions, context: IExecuteFunctions): Promise<string>;
+    static detectAudioFormat(buffer: Buffer): string;
+    static isSupportedFormat(format: string): boolean;
 }
+export {};
@@ -212,8 +212,7 @@ class GitHubCopilotSpeech {
         const returnData = [];
         const operation = this.getNodeParameter('operation', 0);
         const audioSource = this.getNodeParameter('audioSource', 0);
-        const credentials = await this.getCredentials('gitHubCopilotApi');
-        const tokenManager = new OAuthTokenManager_1.OAuthTokenManager();
+        const credentials = (await this.getCredentials('gitHubCopilotApi'));
         for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
             try {
                 let audioBuffer;
@@ -245,7 +244,9 @@ class GitHubCopilotSpeech {
                 const language = this.getNodeParameter('language', itemIndex);
                 const formatParam = this.getNodeParameter('audioFormat', itemIndex);
                 const options = this.getNodeParameter('options', itemIndex);
-                const actualFormat = formatParam === 'auto' ? GitHubCopilotSpeech.detectAudioFormat(audioBuffer) : formatParam;
+                const actualFormat = formatParam === 'auto'
+                    ? GitHubCopilotSpeech.detectAudioFormat(audioBuffer)
+                    : formatParam;
                 if (!GitHubCopilotSpeech.isSupportedFormat(actualFormat)) {
                     throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Unsupported audio format: ${actualFormat}. Supported: wav, mp3, m4a, flac, ogg`);
                 }
@@ -297,19 +298,19 @@ class GitHubCopilotSpeech {
     static async transcribeWithMicrosoftSpeech(audioBuffer, format, language, oauthToken, options, context) {
         const endpoint = 'https://speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1';
         const headers = {
-            'Authorization': `Bearer ${oauthToken}`,
+            Authorization: `Bearer ${oauthToken}`,
             'User-Agent': 'GitHub-Copilot/1.0 (n8n-node)',
             'Editor-Version': 'vscode/1.95.0',
             'Editor-Plugin-Version': 'copilot/1.0.0',
             'Content-Type': `audio/${format}; codecs=audio/pcm; samplerate=16000`,
-            'Accept': 'application/json',
+            Accept: 'application/json',
         };
         if (language !== 'auto') {
             headers['Accept-Language'] = language;
         }
         const timeout = (options === null || options === void 0 ? void 0 : options.timeout) || 30;
         try {
-            const response = await context.helpers.httpRequest({
+            await context.helpers.httpRequest({
                 method: 'POST',
                 url: endpoint,
                 headers,
@@ -327,7 +328,7 @@ class GitHubCopilotSpeech {
         if (buffer.length >= 12 && buffer.toString('ascii', 0, 4) === 'RIFF') {
             return 'wav';
         }
-        if (buffer.length >= 3 && buffer[0] === 0xFF && (buffer[1] & 0xE0) === 0xE0) {
+        if (buffer.length >= 3 && buffer[0] === 0xff && (buffer[1] & 0xe0) === 0xe0) {
             return 'mp3';
         }
         if (buffer.length >= 12 && buffer.toString('ascii', 4, 8) === 'ftyp') {
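
The lowercased hex literals above are the standard MPEG frame-sync test: an MP3 frame header starts with eleven 1-bits. A self-contained sketch of the same check:

    // Byte 0 must be 0xff and the top three bits of byte 1 must be set,
    // i.e. the first eleven bits of the frame header are all ones.
    const looksLikeMp3 = (b: Buffer): boolean =>
        b.length >= 3 && b[0] === 0xff && (b[1] & 0xe0) === 0xe0;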
@@ -1,5 +1,10 @@
-import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription } from 'n8n-workflow';
+import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription, ILoadOptionsFunctions, INodePropertyOptions } from 'n8n-workflow';
 export declare class GitHubCopilotTest implements INodeType {
     description: INodeTypeDescription;
+    methods: {
+        loadOptions: {
+            getModels(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]>;
+        };
+    };
     execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
 }
@@ -361,6 +361,66 @@ async function consolidatedModelTest(token, enableRetry = true, maxRetries = 3,
         };
     }
 }
+async function testSingleModel(token, modelId, testMessage, enableRetry = true, maxRetries = 3) {
+    const testStart = Date.now();
+    try {
+        console.log(`🧪 Testing single model: ${modelId}`);
+        const response = await fetch(GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getChatCompletionsUrl(), {
+            method: 'POST',
+            headers: GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getAuthHeaders(token),
+            body: JSON.stringify({
+                model: modelId,
+                messages: [
+                    {
+                        role: 'user',
+                        content: testMessage,
+                    },
+                ],
+                max_tokens: 100,
+                temperature: 0.1,
+            }),
+        });
+        const testEnd = Date.now();
+        const responseTime = testEnd - testStart;
+        if (response.ok) {
+            const data = (await response.json());
+            const choices = data.choices || [];
+            const firstChoice = choices[0] || {};
+            const message = firstChoice.message || {};
+            const usage = data.usage || {};
+            return {
+                success: true,
+                modelId: modelId,
+                responseTime: `${responseTime}ms`,
+                response: message.content || 'No content',
+                usage: usage,
+                finishReason: firstChoice.finish_reason || 'unknown',
+                timestamp: new Date().toISOString(),
+                rawResponse: data,
+            };
+        }
+        else {
+            const errorText = await response.text();
+            return {
+                success: false,
+                modelId: modelId,
+                responseTime: `${responseTime}ms`,
+                error: `HTTP ${response.status}: ${errorText}`,
+                timestamp: new Date().toISOString(),
+            };
+        }
+    }
+    catch (error) {
+        const testEnd = Date.now();
+        return {
+            success: false,
+            modelId: modelId,
+            responseTime: `${testEnd - testStart}ms`,
+            error: error instanceof Error ? error.message : 'Unknown error',
+            timestamp: new Date().toISOString(),
+        };
+    }
+}
 function generateTestRecommendations(testResults) {
     const recommendations = [];
     const modelStats = Object.entries(testResults).map(([modelId, results]) => {
@@ -589,10 +649,41 @@ class GitHubCopilotTest {
                         value: 'consolidatedTest',
                         description: 'Test all available chat models 5 times each and generate comprehensive report ⚠️ This test may take up to 2 minutes to complete',
                     },
+                    {
+                        name: 'Test Single Chat Model',
+                        value: 'testSingleModel',
+                        description: 'Test a specific chat model with a custom message',
+                    },
                 ],
                 default: 'listModels',
                 description: 'Select the test function to execute',
             },
+            {
+                displayName: 'Model Name or ID',
+                name: 'modelId',
+                type: 'options',
+                description: 'Select the model to test. Choose from the list, or specify an ID using an expression.',
+                typeOptions: {
+                    loadOptionsMethod: 'getModels',
+                },
+                displayOptions: {
+                    show: {
+                        testFunction: ['testSingleModel'],
+                    },
+                },
+                default: '',
+            },
+            {
+                displayName: 'Test Message',
+                name: 'testMessage',
+                type: 'string',
+                default: "Hello! Please respond with just 'OK' to confirm you're working.",
+                displayOptions: {
+                    show: {
+                        testFunction: ['testSingleModel'],
+                    },
+                },
+            },
             {
                 displayName: 'Tests Per Model',
                 name: 'testsPerModel',
@@ -640,6 +731,43 @@ class GitHubCopilotTest {
                 },
             ],
         };
+        this.methods = {
+            loadOptions: {
+                async getModels() {
+                    const credentials = await this.getCredentials('githubCopilotApi');
+                    const token = credentials.token;
+                    if (!token) {
+                        throw new Error('Credentials are required to load models');
+                    }
+                    try {
+                        const oauthToken = await OAuthTokenManager_1.OAuthTokenManager.getValidOAuthToken(token);
+                        const models = await DynamicModelsManager_1.DynamicModelsManager.getAvailableModels(oauthToken);
+                        return models
+                            .filter((model) => {
+                            var _a;
+                            const type = (_a = model.capabilities) === null || _a === void 0 ? void 0 : _a.type;
+                            return type !== 'embeddings';
+                        })
+                            .map((model) => {
+                            var _a;
+                            const multiplier = ((_a = model.billing) === null || _a === void 0 ? void 0 : _a.multiplier)
+                                ? ` (${model.billing.multiplier}x)`
+                                : '';
+                            return {
+                                name: `${model.name || model.id}${multiplier}`,
+                                value: model.id,
+                                description: `${model.vendor || 'GitHub'} - ${model.id}`,
+                            };
+                        })
+                            .sort((a, b) => a.name.localeCompare(b.name));
+                    }
+                    catch (error) {
+                        const errorMessage = error instanceof Error ? error.message : String(error);
+                        throw new Error(`Failed to load models: ${errorMessage}`);
+                    }
+                },
+            },
+        };
     }
     async execute() {
         const items = this.getInputData();
@@ -675,6 +803,11 @@ class GitHubCopilotTest {
                 case 'consolidatedTest':
                     result = await consolidatedModelTest(token, enableRetry, maxRetries, testsPerModel);
                     break;
+                case 'testSingleModel':
+                    const modelId = this.getNodeParameter('modelId', i);
+                    const testMessage = this.getNodeParameter('testMessage', i);
+                    result = await testSingleModel(token, modelId, testMessage, enableRetry, maxRetries);
+                    break;
                 default:
                     throw new Error(`Unknown test function: ${testFunction}`);
             }
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.38.26",
+  "version": "3.38.35",
   "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows with full tools and function calling support - access GPT-5, Claude, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
@@ -93,8 +93,8 @@ exports.GITHUB_COPILOT_MODELS = [
         description: "Faster and more cost-effective GPT-4o - VERIFIED WORKING",
         capabilities: {
             toolsCalling: true,
-            vision: false,
-            multimodal: false,
+            vision: true,
+            multimodal: true,
             maxContextTokens: 128000,
             maxOutputTokens: 4096,
             streaming: true,
@@ -121,6 +121,23 @@ exports.GITHUB_COPILOT_MODELS = [
         recommended: true,
         status: "stable"
     },
+    {
+        value: "oswe-vscode-prime",
+        name: "Raptor mini (Preview)",
+        description: "Fast and versatile model optimized for VS Code by Microsoft (Azure OpenAI)",
+        capabilities: {
+            toolsCalling: true,
+            vision: true,
+            multimodal: true,
+            maxContextTokens: 264000,
+            maxOutputTokens: 64000,
+            streaming: true,
+            provider: "Microsoft",
+            category: "chat"
+        },
+        recommended: true,
+        status: "preview"
+    },
     {
         value: "claude-sonnet-4",
         name: "Claude Sonnet 4",
@@ -45,6 +45,11 @@ exports.MODEL_VERSION_REQUIREMENTS = {
         supportedEndpoints: ["/chat/completions", "/responses"],
         preview: true,
     },
+    "oswe-vscode-prime": {
+        minVSCodeVersion: "1.96.0",
+        supportedEndpoints: ["/chat/completions", "/responses"],
+        preview: true,
+    },
 };
 exports.DEFAULT_MODEL_REQUIREMENTS = {
     minVSCodeVersion: "1.95.0",
@@ -3,10 +3,18 @@ interface CopilotModel {
     name: string;
     display_name?: string;
     model_picker_enabled?: boolean;
+    model_picker_category?: "lightweight" | "versatile" | "powerful" | string;
     capabilities?: any;
     vendor?: string;
     version?: string;
     preview?: boolean;
+    billing?: {
+        is_premium: boolean;
+        multiplier: number;
+        restricted_to?: string[];
+    };
+    is_chat_default?: boolean;
+    is_chat_fallback?: boolean;
 }
 export declare class DynamicModelsManager {
     private static cache;
@@ -16,6 +24,7 @@ export declare class DynamicModelsManager {
     private static fetchModelsFromAPI;
     static getAvailableModels(oauthToken: string): Promise<CopilotModel[]>;
     static filterModelsByType(models: CopilotModel[], type: string): CopilotModel[];
+    private static getCostMultiplier;
     static modelsToN8nOptions(models: CopilotModel[]): Array<{
         name: string;
         value: string;
@@ -21,9 +21,13 @@ class DynamicModelsManager {
                 Authorization: `Bearer ${oauthToken}`,
                 Accept: "application/json",
                 "Content-Type": "application/json",
-                "User-Agent": "GitHub-Copilot/1.0 (n8n-node)",
-                "Editor-Version": "vscode/1.95.0",
-                "Editor-Plugin-Version": "copilot/1.0.0",
+                "User-Agent": "GitHubCopilotChat/0.35.0",
+                "Editor-Version": "vscode/1.96.0",
+                "Editor-Plugin-Version": "copilot-chat/0.35.0",
+                "X-GitHub-Api-Version": "2025-05-01",
+                "X-Interaction-Type": "model-access",
+                "OpenAI-Intent": "model-access",
+                "Copilot-Integration-Id": "vscode-chat",
             },
         });
         if (!response.ok) {
@@ -76,6 +80,38 @@ class DynamicModelsManager {
             return modelType === type;
         });
     }
+    static getCostMultiplier(model) {
+        var _a;
+        if (((_a = model.billing) === null || _a === void 0 ? void 0 : _a.multiplier) !== undefined) {
+            return `${model.billing.multiplier}x`;
+        }
+        const id = model.id.toLowerCase();
+        if (id === 'gpt-4.1' || id.startsWith('gpt-4.1-'))
+            return '0x';
+        if (id === 'gpt-4o' || id.startsWith('gpt-4o-'))
+            return '0x';
+        if (id === 'gpt-4' || id === 'gpt-4-0613')
+            return '0x';
+        if (id === 'gpt-5-mini')
+            return '0x';
+        if (id === 'gpt-4o-mini' || id.startsWith('gpt-4o-mini-'))
+            return '0x';
+        if (id.includes('grok') && id.includes('fast'))
+            return '0x';
+        if (id === 'oswe-vscode-prime' || id.includes('oswe-vscode'))
+            return '0x';
+        if (id.includes('haiku'))
+            return '0.33x';
+        if (id.includes('flash'))
+            return '0.33x';
+        if (id.includes('codex-mini'))
+            return '0.33x';
+        if (id === 'claude-opus-41' || id === 'claude-opus-4.1')
+            return '10x';
+        if (id.includes('opus'))
+            return '3x';
+        return '1x';
+    }
     static modelsToN8nOptions(models) {
         const nameCount = new Map();
         models.forEach((model) => {
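
The id-based table above is only a fallback for when the /models payload omits billing.multiplier. A few illustrative inputs and the labels the branches produce (hypothetical model IDs, not an exhaustive list):

    // 'gpt-4o-mini'      -> '0x'    (caught by the gpt-4o- prefix branch)
    // 'claude-3.5-haiku' -> '0.33x' (contains 'haiku')
    // 'claude-opus-41'   -> '10x'   (exact match)
    // 'unknown-model'    -> '1x'    (default)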
@@ -100,8 +136,12 @@ class DynamicModelsManager {
             badges.push("🧠 Reasoning");
         }
         const displayName = model.display_name || model.name || model.id;
+        const costMultiplier = this.getCostMultiplier(model);
         const badgesText = badges.length > 0 ? ` [${badges.join(" • ")}]` : "";
         const hasDuplicates = (nameCount.get(displayName) || 0) > 1;
+        const category = model.model_picker_category || "";
+        const categoryLabel = category ? ` - ${category.charAt(0).toUpperCase() + category.slice(1)}` : "";
+        const multiplierDisplay = ` • ${costMultiplier}${categoryLabel}`;
         let description = "";
         if (model.capabilities) {
             const limits = model.capabilities.limits || {};
@@ -120,11 +160,13 @@ class DynamicModelsManager {
             }
             description = parts.join(" • ");
         }
-        else if (hasDuplicates) {
-            description = `ID: ${model.id}`;
+        else {
+            if (hasDuplicates) {
+                description = `ID: ${model.id}`;
+            }
         }
         return {
-            name: `${displayName}${badgesText}`,
+            name: `${displayName}${multiplierDisplay}${badgesText}`,
             value: model.id,
             description: description || undefined,
         };
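
With the multiplier and category wired in, a picker entry is now assembled as displayName + multiplierDisplay + badgesText. A hypothetical rendered option, assuming a '1x' multiplier, a 'versatile' picker category, and a reasoning badge:

    // name:  'Claude Sonnet 4 • 1x - Versatile [🧠 Reasoning]'
    // value: 'claude-sonnet-4'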
@@ -1,7 +1,7 @@
 export interface ChunkRequest {
     content: string;
     embed: boolean;
-    qos: "Batch" | "Online";
+    qos: 'Batch' | 'Online';
 }
 export interface Chunk {
     content: string;
@@ -15,7 +15,7 @@ export interface ChunkResponse {
     total: number;
     contentLength: number;
 }
-export declare function chunkFile(token: string, fileContent: string, embeddings?: boolean, qos?: "Batch" | "Online"): Promise<ChunkResponse>;
+export declare function chunkFile(token: string, fileContent: string, embeddings?: boolean, qos?: 'Batch' | 'Online'): Promise<ChunkResponse>;
 export declare function selectRelevantChunks(chunks: Chunk[], queryEmbedding: number[], maxTokens?: number, minRelevance?: number): string;
 export declare function selectTopChunks(chunks: Chunk[], maxTokens?: number): string;
 export declare function estimateTokens(text: string): number;
@@ -6,8 +6,8 @@ exports.selectTopChunks = selectTopChunks;
 exports.estimateTokens = estimateTokens;
 exports.getQueryEmbedding = getQueryEmbedding;
 const GitHubCopilotEndpoints_1 = require("./GitHubCopilotEndpoints");
-async function chunkFile(token, fileContent, embeddings = true, qos = "Online") {
-    const url = "https://api.githubcopilot.com/chunks";
+async function chunkFile(token, fileContent, embeddings = true, qos = 'Online') {
+    const url = 'https://api.githubcopilot.com/chunks';
     const headers = GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getAuthHeaders(token, true);
     const requestBody = {
         content: fileContent,
@@ -16,7 +16,7 @@ async function chunkFile(token, fileContent, embeddings = true, qos = "Online")
     };
     console.log(`🔪 Chunking file (${fileContent.length} chars, embed=${embeddings}, qos=${qos})`);
     const response = await fetch(url, {
-        method: "POST",
+        method: 'POST',
         headers,
         body: JSON.stringify(requestBody),
     });
@@ -24,13 +24,13 @@ async function chunkFile(token, fileContent, embeddings = true, qos = "Online")
         const errorText = await response.text();
         throw new Error(`Chunking API error: ${response.status} ${response.statusText}. ${errorText}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
     console.log(`✅ Chunked into ${data.chunks.length} chunks`);
     return data;
 }
 function cosineSimilarity(a, b) {
     if (a.length !== b.length) {
-        throw new Error("Vectors must have same length");
+        throw new Error('Vectors must have same length');
     }
     let dotProduct = 0;
     let normA = 0;
@@ -44,15 +44,15 @@ function cosineSimilarity(a, b) {
 }
 function selectRelevantChunks(chunks, queryEmbedding, maxTokens = 10000, minRelevance = 0.5) {
     if (!chunks.length) {
-        return "";
+        return '';
     }
     const rankedChunks = chunks
-        .filter(chunk => chunk.embedding)
-        .map(chunk => ({
+        .filter((chunk) => chunk.embedding)
+        .map((chunk) => ({
         chunk,
         relevance: cosineSimilarity(chunk.embedding, queryEmbedding),
     }))
-        .filter(item => item.relevance >= minRelevance)
+        .filter((item) => item.relevance >= minRelevance)
         .sort((a, b) => b.relevance - a.relevance);
     const selectedChunks = [];
     let totalTokens = 0;
@@ -66,7 +66,7 @@ function selectRelevantChunks(chunks, queryEmbedding, maxTokens = 10000, minRele
         console.log(`   ✓ Selected chunk (relevance: ${item.relevance.toFixed(3)}, tokens: ~${chunkTokens})`);
     }
     console.log(`📊 Selected ${selectedChunks.length}/${rankedChunks.length} chunks (~${totalTokens} tokens)`);
-    return selectedChunks.join("\n\n---\n\n");
+    return selectedChunks.join('\n\n---\n\n');
 }
 function selectTopChunks(chunks, maxTokens = 10000) {
     const selectedChunks = [];
@@ -80,26 +80,26 @@ function selectTopChunks(chunks, maxTokens = 10000) {
         totalTokens += chunkTokens;
     }
     console.log(`📊 Selected ${selectedChunks.length}/${chunks.length} chunks (~${totalTokens} tokens)`);
-    return selectedChunks.join("\n\n---\n\n");
+    return selectedChunks.join('\n\n---\n\n');
 }
 function estimateTokens(text) {
     return Math.ceil(text.length / 4);
 }
 async function getQueryEmbedding(token, query) {
-    const url = "https://api.githubcopilot.com/embeddings";
+    const url = 'https://api.githubcopilot.com/embeddings';
     const headers = GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getEmbeddingsHeaders(token);
     const response = await fetch(url, {
-        method: "POST",
+        method: 'POST',
         headers,
         body: JSON.stringify({
             input: [query],
-            model: "text-embedding-3-small",
+            model: 'text-embedding-3-small',
         }),
     });
     if (!response.ok) {
         const errorText = await response.text();
         throw new Error(`Embeddings API error: ${response.status} ${response.statusText}. ${errorText}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
     return data.data[0].embedding;
 }
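
estimateTokens above assumes roughly four characters per token, so the chunk-selection budgets are heuristic. For example:

    // Math.ceil(40000 / 4) === 10000, which is exactly the default
    // maxTokens budget of selectRelevantChunks and selectTopChunks.
    console.log(estimateTokens('x'.repeat(40000))); // 10000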
@@ -1,4 +1,4 @@
-export type FileProcessingMode = "direct" | "chunking" | "summarize" | "auto";
+export type FileProcessingMode = 'direct' | 'chunking' | 'summarize' | 'auto';
 export interface FileOptimizationOptions {
     mode: FileProcessingMode;
     model: string;
@@ -7,7 +7,7 @@ export interface FileOptimizationOptions {
     minRelevance?: number;
 }
 export interface OptimizationResult {
-    mode: "direct" | "chunking" | "summarize";
+    mode: 'direct' | 'chunking' | 'summarize';
     reason: string;
     estimatedTokens: number;
     maxAllowedTokens: number;
@@ -24,12 +24,12 @@ function selectFileProcessingMode(options) {
     console.log(`   Estimated tokens: ${estimatedTokens.toLocaleString()}`);
     console.log(`   Max allowed (${(maxContextUsage * 100).toFixed(0)}%): ${maxAllowedTokens.toLocaleString()}`);
     console.log(`   Model context: ${maxContextTokens.toLocaleString()} tokens`);
-    if (mode !== "auto") {
-        if (mode === "direct" && !fitsInContext) {
+    if (mode !== 'auto') {
+        if (mode === 'direct' && !fitsInContext) {
             console.warn(`⚠️ Warning: Direct mode requested but file exceeds token limit`);
         }
         return {
-            mode,
+            mode: mode,
             reason: `User requested ${mode} mode`,
             estimatedTokens,
             maxAllowedTokens,
@@ -38,7 +38,7 @@ function selectFileProcessingMode(options) {
     }
     if (estimatedTokens < maxAllowedTokens * 0.3) {
         return {
-            mode: "direct",
+            mode: 'direct',
             reason: `File is small (${estimatedTokens.toLocaleString()} tokens < 30% of limit)`,
             estimatedTokens,
             maxAllowedTokens,
@@ -47,7 +47,7 @@ function selectFileProcessingMode(options) {
     }
     else if (estimatedTokens < maxAllowedTokens * 0.8) {
         return {
-            mode: "chunking",
+            mode: 'chunking',
             reason: `File is medium-sized (${estimatedTokens.toLocaleString()} tokens, 30-80% of limit) - chunking recommended`,
             estimatedTokens,
             maxAllowedTokens,
@@ -56,7 +56,7 @@ function selectFileProcessingMode(options) {
     }
     else if (fitsInContext) {
         return {
-            mode: "chunking",
+            mode: 'chunking',
             reason: `File is large (${estimatedTokens.toLocaleString()} tokens, >80% of limit) - chunking strongly recommended`,
             estimatedTokens,
             maxAllowedTokens,
@@ -65,7 +65,7 @@ function selectFileProcessingMode(options) {
     }
     else {
         return {
-            mode: "summarize",
+            mode: 'summarize',
             reason: `File exceeds token limit (${estimatedTokens.toLocaleString()} > ${maxAllowedTokens.toLocaleString()} tokens) - summarization required`,
             estimatedTokens,
             maxAllowedTokens,
@@ -86,12 +86,12 @@ function getOptimalChunkSettings(model, maxContextUsage = 0.5) {
 }
 function compressText(text) {
     return text
-        .replace(/ {2,}/g, " ")
-        .replace(/\n{3,}/g, "\n\n")
-        .replace(/\t/g, " ")
-        .split("\n")
-        .map(line => line.trim())
-        .join("\n")
+        .replace(/ {2,}/g, ' ')
+        .replace(/\n{3,}/g, '\n\n')
+        .replace(/\t/g, ' ')
+        .split('\n')
+        .map((line) => line.trim())
+        .join('\n')
         .trim();
 }
 function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
@@ -106,7 +106,7 @@ function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
     }
     const maxChars = maxTokens * 4;
     const truncated = text.slice(0, maxChars);
-    const ellipsis = addEllipsis ? "\n\n...[truncated]" : "";
+    const ellipsis = addEllipsis ? '\n\n...[truncated]' : '';
     const finalContent = truncated + ellipsis;
     const finalTokens = (0, FileChunkingApiUtils_1.estimateTokens)(finalContent);
     return {
118
118
  }
119
119
  function getFileSizeCategory(sizeBytes) {
120
120
  if (sizeBytes < 10 * 1024)
121
- return "tiny (<10KB)";
121
+ return 'tiny (<10KB)';
122
122
  if (sizeBytes < 50 * 1024)
123
- return "small (<50KB)";
123
+ return 'small (<50KB)';
124
124
  if (sizeBytes < 200 * 1024)
125
- return "medium (<200KB)";
125
+ return 'medium (<200KB)';
126
126
  if (sizeBytes < 500 * 1024)
127
- return "large (<500KB)";
127
+ return 'large (<500KB)';
128
128
  if (sizeBytes < 1024 * 1024)
129
- return "very large (<1MB)";
130
- return "huge (>1MB)";
129
+ return 'very large (<1MB)';
130
+ return 'huge (>1MB)';
131
131
  }
132
132
  function formatTokenCount(tokens) {
133
133
  if (tokens < 1000)
@@ -69,9 +69,13 @@ async function makeGitHubCopilotRequest(context, endpoint, body, hasMedia = fals
     }
     const headers = {
         ...GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
-        "User-Agent": "GitHub-Copilot/1.0 (n8n-node)",
+        "User-Agent": "GitHubCopilotChat/0.35.0",
         "Editor-Version": `vscode/${minVSCodeVersion}`,
-        "Editor-Plugin-Version": "copilot/1.0.0",
+        "Editor-Plugin-Version": "copilot-chat/0.35.0",
+        "X-GitHub-Api-Version": "2025-05-01",
+        "X-Interaction-Type": "copilot-chat",
+        "OpenAI-Intent": "conversation-panel",
+        "Copilot-Integration-Id": "vscode-chat",
         ...additionalHeaders,
     };
     if (hasMedia) {
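
Assuming WITH_AUTH(token) contributes the Authorization and Content-Type fields (an assumption; its definition is not part of this diff), a chat request now carries roughly this header set:

    // Sketch of the assembled headers after this change.
    const headers = {
        Authorization: `Bearer ${token}`,         // assumed from WITH_AUTH(token)
        'Content-Type': 'application/json',       // assumed from WITH_AUTH(token)
        'User-Agent': 'GitHubCopilotChat/0.35.0',
        'Editor-Version': `vscode/${minVSCodeVersion}`,
        'Editor-Plugin-Version': 'copilot-chat/0.35.0',
        'X-GitHub-Api-Version': '2025-05-01',
        'X-Interaction-Type': 'copilot-chat',
        'OpenAI-Intent': 'conversation-panel',
        'Copilot-Integration-Id': 'vscode-chat',
    };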
@@ -76,7 +76,16 @@ class GitHubCopilotEndpoints {
         return exports.GITHUB_COPILOT_API.URLS.USER_COPILOT;
     }
     static getAuthHeaders(token, includeVSCodeHeaders = false) {
-        const headers = exports.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token);
+        const headers = {
+            ...exports.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
+            "User-Agent": "GitHubCopilotChat/0.35.0",
+            "Editor-Version": "vscode/1.96.0",
+            "Editor-Plugin-Version": "copilot-chat/0.35.0",
+            "X-GitHub-Api-Version": "2025-05-01",
+            "X-Interaction-Type": "copilot-chat",
+            "OpenAI-Intent": "conversation-panel",
+            "Copilot-Integration-Id": "vscode-chat",
+        };
         if (includeVSCodeHeaders) {
             return {
                 ...headers,
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.38.26",
+  "version": "3.38.35",
   "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows with full tools and function calling support - access GPT-5, Claude, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",