n8n-nodes-github-copilot 3.38.25 → 3.38.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dist/credentials/GitHubCopilotApi.credentials.d.ts +1 -1
  2. package/dist/credentials/GitHubCopilotApi.credentials.js +25 -25
  3. package/dist/nodes/GitHubCopilot/GitHubCopilot.node.d.ts +1 -1
  4. package/dist/nodes/GitHubCopilot/GitHubCopilot.node.js +166 -166
  5. package/dist/nodes/GitHubCopilotAuthHelper/GitHubCopilotAuthHelper.node.d.ts +1 -1
  6. package/dist/nodes/GitHubCopilotAuthHelper/GitHubCopilotAuthHelper.node.js +539 -539
  7. package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.d.ts +1 -1
  8. package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js +46 -44
  9. package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.d.ts +1 -1
  10. package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.js +82 -82
  11. package/dist/nodes/GitHubCopilotChatAPI/utils/helpers.d.ts +2 -2
  12. package/dist/nodes/GitHubCopilotChatAPI/utils/helpers.js +26 -26
  13. package/dist/nodes/GitHubCopilotChatAPI/utils/imageProcessor.d.ts +2 -2
  14. package/dist/nodes/GitHubCopilotChatAPI/utils/imageProcessor.js +12 -12
  15. package/dist/nodes/GitHubCopilotChatAPI/utils/index.d.ts +4 -4
  16. package/dist/nodes/GitHubCopilotChatAPI/utils/mediaDetection.d.ts +3 -3
  17. package/dist/nodes/GitHubCopilotChatAPI/utils/mediaDetection.js +19 -19
  18. package/dist/nodes/GitHubCopilotChatAPI/utils/modelCapabilities.d.ts +1 -1
  19. package/dist/nodes/GitHubCopilotChatAPI/utils/modelCapabilities.js +23 -23
  20. package/dist/nodes/GitHubCopilotChatAPI/utils/types.d.ts +5 -5
  21. package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.d.ts +2 -2
  22. package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js +198 -125
  23. package/dist/nodes/GitHubCopilotEmbeddings/GitHubCopilotEmbeddings.node.d.ts +1 -1
  24. package/dist/nodes/GitHubCopilotEmbeddings/GitHubCopilotEmbeddings.node.js +114 -114
  25. package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.d.ts +1 -1
  26. package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.js +74 -69
  27. package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.d.ts +1 -1
  28. package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.js +181 -181
  29. package/dist/nodes/GitHubCopilotOpenAI/utils/index.d.ts +2 -2
  30. package/dist/nodes/GitHubCopilotOpenAI/utils/openaiCompat.d.ts +10 -10
  31. package/dist/nodes/GitHubCopilotOpenAI/utils/openaiCompat.js +53 -53
  32. package/dist/nodes/GitHubCopilotOpenAI/utils/types.d.ts +12 -12
  33. package/dist/nodes/GitHubCopilotSpeech/GitHubCopilotSpeech.node.d.ts +10 -4
  34. package/dist/nodes/GitHubCopilotSpeech/GitHubCopilotSpeech.node.js +8 -7
  35. package/dist/nodes/GitHubCopilotTest/GitHubCopilotTest.node.d.ts +6 -1
  36. package/dist/nodes/GitHubCopilotTest/GitHubCopilotTest.node.js +253 -116
  37. package/dist/package.json +1 -1
  38. package/dist/shared/models/GitHubCopilotModels.js +19 -2
  39. package/dist/shared/models/ModelVersionRequirements.js +5 -0
  40. package/dist/shared/utils/DynamicModelsManager.d.ts +9 -0
  41. package/dist/shared/utils/DynamicModelsManager.js +48 -6
  42. package/dist/shared/utils/FileChunkingApiUtils.d.ts +2 -2
  43. package/dist/shared/utils/FileChunkingApiUtils.js +15 -15
  44. package/dist/shared/utils/FileOptimizationUtils.d.ts +2 -2
  45. package/dist/shared/utils/FileOptimizationUtils.js +20 -20
  46. package/dist/shared/utils/GitHubCopilotApiUtils.js +6 -2
  47. package/dist/shared/utils/GitHubCopilotEndpoints.js +10 -1
  48. package/package.json +1 -1
package/dist/shared/models/GitHubCopilotModels.js CHANGED
@@ -93,8 +93,8 @@ exports.GITHUB_COPILOT_MODELS = [
         description: "Faster and more cost-effective GPT-4o - VERIFIED WORKING",
         capabilities: {
             toolsCalling: true,
-            vision: false,
-            multimodal: false,
+            vision: true,
+            multimodal: true,
             maxContextTokens: 128000,
             maxOutputTokens: 4096,
             streaming: true,
@@ -121,6 +121,23 @@ exports.GITHUB_COPILOT_MODELS = [
         recommended: true,
         status: "stable"
     },
+    {
+        value: "oswe-vscode-prime",
+        name: "Raptor mini (Preview)",
+        description: "Fast and versatile model optimized for VS Code by Microsoft (Azure OpenAI)",
+        capabilities: {
+            toolsCalling: true,
+            vision: true,
+            multimodal: true,
+            maxContextTokens: 264000,
+            maxOutputTokens: 64000,
+            streaming: true,
+            provider: "Microsoft",
+            category: "chat"
+        },
+        recommended: true,
+        status: "preview"
+    },
     {
         value: "claude-sonnet-4",
         name: "Claude Sonnet 4",
package/dist/shared/models/ModelVersionRequirements.js CHANGED
@@ -45,6 +45,11 @@ exports.MODEL_VERSION_REQUIREMENTS = {
         supportedEndpoints: ["/chat/completions", "/responses"],
         preview: true,
     },
+    "oswe-vscode-prime": {
+        minVSCodeVersion: "1.96.0",
+        supportedEndpoints: ["/chat/completions", "/responses"],
+        preview: true,
+    },
 };
 exports.DEFAULT_MODEL_REQUIREMENTS = {
     minVSCodeVersion: "1.95.0",
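The new model id also gets a row in the version-requirements table. A sketch of the lookup-with-fallback pattern this implies; getRequirements is a hypothetical helper over the two exports shown above:

    import {
        MODEL_VERSION_REQUIREMENTS,
        DEFAULT_MODEL_REQUIREMENTS,
    } from "./ModelVersionRequirements";

    // Hypothetical helper: per-model requirements, falling back to the
    // package-wide defaults (vscode/1.95.0) for unlisted models.
    function getRequirements(modelId: string) {
        return MODEL_VERSION_REQUIREMENTS[modelId] ?? DEFAULT_MODEL_REQUIREMENTS;
    }

    getRequirements("oswe-vscode-prime").minVSCodeVersion; // "1.96.0"
    getRequirements("some-other-model").minVSCodeVersion;  // "1.95.0" (default)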
package/dist/shared/utils/DynamicModelsManager.d.ts CHANGED
@@ -3,10 +3,18 @@ interface CopilotModel {
     name: string;
     display_name?: string;
     model_picker_enabled?: boolean;
+    model_picker_category?: "lightweight" | "versatile" | "powerful" | string;
     capabilities?: any;
     vendor?: string;
     version?: string;
     preview?: boolean;
+    billing?: {
+        is_premium: boolean;
+        multiplier: number;
+        restricted_to?: string[];
+    };
+    is_chat_default?: boolean;
+    is_chat_fallback?: boolean;
 }
 export declare class DynamicModelsManager {
     private static cache;
@@ -16,6 +24,7 @@ export declare class DynamicModelsManager {
     private static fetchModelsFromAPI;
     static getAvailableModels(oauthToken: string): Promise<CopilotModel[]>;
     static filterModelsByType(models: CopilotModel[], type: string): CopilotModel[];
+    private static getCostMultiplier;
     static modelsToN8nOptions(models: CopilotModel[]): Array<{
         name: string;
         value: string;
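These new optional fields mirror the per-model metadata the Copilot models endpoint reports: billing (premium flag and request multiplier), a model-picker category, and chat default/fallback markers. A small sketch of filtering on them; freeModels is hypothetical, and CopilotModel is the file-local interface above, which declares id earlier in the file:

    // Keep only models that don't consume premium requests, e.g. for
    // high-volume workflows. Models without billing data are kept.
    function freeModels(models: CopilotModel[]): CopilotModel[] {
        return models.filter((m) => !m.billing || !m.billing.is_premium || m.billing.multiplier === 0);
    }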
package/dist/shared/utils/DynamicModelsManager.js CHANGED
@@ -21,9 +21,13 @@ class DynamicModelsManager {
                 Authorization: `Bearer ${oauthToken}`,
                 Accept: "application/json",
                 "Content-Type": "application/json",
-                "User-Agent": "GitHub-Copilot/1.0 (n8n-node)",
-                "Editor-Version": "vscode/1.95.0",
-                "Editor-Plugin-Version": "copilot/1.0.0",
+                "User-Agent": "GitHubCopilotChat/0.35.0",
+                "Editor-Version": "vscode/1.96.0",
+                "Editor-Plugin-Version": "copilot-chat/0.35.0",
+                "X-GitHub-Api-Version": "2025-05-01",
+                "X-Interaction-Type": "model-access",
+                "OpenAI-Intent": "model-access",
+                "Copilot-Integration-Id": "vscode-chat",
             },
         });
         if (!response.ok) {
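The model-listing request now presents itself as the VS Code Copilot Chat client (copilot-chat/0.35.0 on vscode/1.96.0) instead of a generic n8n agent, presumably so the endpoint returns the same model metadata the editor sees. A minimal sketch of the equivalent request; the /models URL is an assumption, since the hunk only shows the headers inside fetchModelsFromAPI:

    async function listModels(oauthToken: string) {
        // Assumed endpoint; the diff changes only the headers, not the URL.
        const response = await fetch("https://api.githubcopilot.com/models", {
            headers: {
                Authorization: `Bearer ${oauthToken}`,
                Accept: "application/json",
                "Content-Type": "application/json",
                "User-Agent": "GitHubCopilotChat/0.35.0",
                "Editor-Version": "vscode/1.96.0",
                "Editor-Plugin-Version": "copilot-chat/0.35.0",
                "X-GitHub-Api-Version": "2025-05-01",
                "X-Interaction-Type": "model-access",
                "OpenAI-Intent": "model-access",
                "Copilot-Integration-Id": "vscode-chat",
            },
        });
        if (!response.ok) throw new Error(`models request failed: ${response.status}`);
        return response.json();
    }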
@@ -76,6 +80,38 @@ class DynamicModelsManager {
             return modelType === type;
         });
     }
+    static getCostMultiplier(model) {
+        var _a;
+        if (((_a = model.billing) === null || _a === void 0 ? void 0 : _a.multiplier) !== undefined) {
+            return `${model.billing.multiplier}x`;
+        }
+        const id = model.id.toLowerCase();
+        if (id === 'gpt-4.1' || id.startsWith('gpt-4.1-'))
+            return '0x';
+        if (id === 'gpt-4o' || id.startsWith('gpt-4o-'))
+            return '0x';
+        if (id === 'gpt-4' || id === 'gpt-4-0613')
+            return '0x';
+        if (id === 'gpt-5-mini')
+            return '0x';
+        if (id === 'gpt-4o-mini' || id.startsWith('gpt-4o-mini-'))
+            return '0x';
+        if (id.includes('grok') && id.includes('fast'))
+            return '0x';
+        if (id === 'oswe-vscode-prime' || id.includes('oswe-vscode'))
+            return '0x';
+        if (id.includes('haiku'))
+            return '0.33x';
+        if (id.includes('flash'))
+            return '0.33x';
+        if (id.includes('codex-mini'))
+            return '0.33x';
+        if (id === 'claude-opus-41' || id === 'claude-opus-4.1')
+            return '10x';
+        if (id.includes('opus'))
+            return '3x';
+        return '1x';
+    }
     static modelsToN8nOptions(models) {
         const nameCount = new Map();
         models.forEach((model) => {
@@ -100,8 +136,12 @@ class DynamicModelsManager {
                 badges.push("🧠 Reasoning");
             }
             const displayName = model.display_name || model.name || model.id;
+            const costMultiplier = this.getCostMultiplier(model);
             const badgesText = badges.length > 0 ? ` [${badges.join(" • ")}]` : "";
             const hasDuplicates = (nameCount.get(displayName) || 0) > 1;
+            const category = model.model_picker_category || "";
+            const categoryLabel = category ? ` - ${category.charAt(0).toUpperCase() + category.slice(1)}` : "";
+            const multiplierDisplay = ` • ${costMultiplier}${categoryLabel}`;
             let description = "";
             if (model.capabilities) {
                 const limits = model.capabilities.limits || {};
@@ -120,11 +160,13 @@ class DynamicModelsManager {
                 }
                 description = parts.join(" • ");
             }
-            else if (hasDuplicates) {
-                description = `ID: ${model.id}`;
+            else {
+                if (hasDuplicates) {
+                    description = `ID: ${model.id}`;
+                }
             }
             return {
-                name: `${displayName}${badgesText}`,
+                name: `${displayName}${multiplierDisplay}${badgesText}`,
                 value: model.id,
                 description: description || undefined,
             };
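Net effect on the n8n model dropdown: every option name now carries a cost multiplier and, when the API supplies one, a capitalized picker category. An illustrative before/after with assumed values:

    // Hypothetical model as returned by the API:
    const model = {
        id: "claude-opus-4.1",
        display_name: "Claude Opus 4.1",
        model_picker_category: "powerful",
        billing: { is_premium: true, multiplier: 10 },
    };
    // getCostMultiplier(model) -> "10x" (billing.multiplier wins over the id heuristics)
    // option name before: "Claude Opus 4.1"
    // option name after:  "Claude Opus 4.1 • 10x - Powerful"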
package/dist/shared/utils/FileChunkingApiUtils.d.ts CHANGED
@@ -1,7 +1,7 @@
 export interface ChunkRequest {
     content: string;
     embed: boolean;
-    qos: "Batch" | "Online";
+    qos: 'Batch' | 'Online';
 }
 export interface Chunk {
     content: string;
@@ -15,7 +15,7 @@ export interface ChunkResponse {
     total: number;
     contentLength: number;
 }
-export declare function chunkFile(token: string, fileContent: string, embeddings?: boolean, qos?: "Batch" | "Online"): Promise<ChunkResponse>;
+export declare function chunkFile(token: string, fileContent: string, embeddings?: boolean, qos?: 'Batch' | 'Online'): Promise<ChunkResponse>;
 export declare function selectRelevantChunks(chunks: Chunk[], queryEmbedding: number[], maxTokens?: number, minRelevance?: number): string;
 export declare function selectTopChunks(chunks: Chunk[], maxTokens?: number): string;
 export declare function estimateTokens(text: string): number;
package/dist/shared/utils/FileChunkingApiUtils.js CHANGED
@@ -6,8 +6,8 @@ exports.selectTopChunks = selectTopChunks;
 exports.estimateTokens = estimateTokens;
 exports.getQueryEmbedding = getQueryEmbedding;
 const GitHubCopilotEndpoints_1 = require("./GitHubCopilotEndpoints");
-async function chunkFile(token, fileContent, embeddings = true, qos = "Online") {
-    const url = "https://api.githubcopilot.com/chunks";
+async function chunkFile(token, fileContent, embeddings = true, qos = 'Online') {
+    const url = 'https://api.githubcopilot.com/chunks';
     const headers = GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getAuthHeaders(token, true);
     const requestBody = {
         content: fileContent,
@@ -16,7 +16,7 @@ async function chunkFile(token, fileContent, embeddings = true, qos = "Online")
     };
     console.log(`🔪 Chunking file (${fileContent.length} chars, embed=${embeddings}, qos=${qos})`);
     const response = await fetch(url, {
-        method: "POST",
+        method: 'POST',
         headers,
         body: JSON.stringify(requestBody),
     });
@@ -24,13 +24,13 @@ async function chunkFile(token, fileContent, embeddings = true, qos = "Online")
         const errorText = await response.text();
         throw new Error(`Chunking API error: ${response.status} ${response.statusText}. ${errorText}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
    console.log(`✅ Chunked into ${data.chunks.length} chunks`);
    return data;
}
 function cosineSimilarity(a, b) {
     if (a.length !== b.length) {
-        throw new Error("Vectors must have same length");
+        throw new Error('Vectors must have same length');
     }
     let dotProduct = 0;
     let normA = 0;
@@ -44,15 +44,15 @@ function cosineSimilarity(a, b) {
 }
 function selectRelevantChunks(chunks, queryEmbedding, maxTokens = 10000, minRelevance = 0.5) {
     if (!chunks.length) {
-        return "";
+        return '';
     }
     const rankedChunks = chunks
-        .filter(chunk => chunk.embedding)
-        .map(chunk => ({
+        .filter((chunk) => chunk.embedding)
+        .map((chunk) => ({
         chunk,
         relevance: cosineSimilarity(chunk.embedding, queryEmbedding),
     }))
-        .filter(item => item.relevance >= minRelevance)
+        .filter((item) => item.relevance >= minRelevance)
         .sort((a, b) => b.relevance - a.relevance);
     const selectedChunks = [];
     let totalTokens = 0;
@@ -66,7 +66,7 @@ function selectRelevantChunks(chunks, queryEmbedding, maxTokens = 10000, minRele
         console.log(` ✓ Selected chunk (relevance: ${item.relevance.toFixed(3)}, tokens: ~${chunkTokens})`);
     }
     console.log(`📊 Selected ${selectedChunks.length}/${rankedChunks.length} chunks (~${totalTokens} tokens)`);
-    return selectedChunks.join("\n\n---\n\n");
+    return selectedChunks.join('\n\n---\n\n');
 }
 function selectTopChunks(chunks, maxTokens = 10000) {
     const selectedChunks = [];
@@ -80,26 +80,26 @@ function selectTopChunks(chunks, maxTokens = 10000) {
         totalTokens += chunkTokens;
     }
     console.log(`📊 Selected ${selectedChunks.length}/${chunks.length} chunks (~${totalTokens} tokens)`);
-    return selectedChunks.join("\n\n---\n\n");
+    return selectedChunks.join('\n\n---\n\n');
 }
 function estimateTokens(text) {
     return Math.ceil(text.length / 4);
 }
 async function getQueryEmbedding(token, query) {
-    const url = "https://api.githubcopilot.com/embeddings";
+    const url = 'https://api.githubcopilot.com/embeddings';
     const headers = GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getEmbeddingsHeaders(token);
     const response = await fetch(url, {
-        method: "POST",
+        method: 'POST',
         headers,
         body: JSON.stringify({
             input: [query],
-            model: "text-embedding-3-small",
+            model: 'text-embedding-3-small',
         }),
     });
     if (!response.ok) {
         const errorText = await response.text();
         throw new Error(`Embeddings API error: ${response.status} ${response.statusText}. ${errorText}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
     return data.data[0].embedding;
 }
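Together these utilities form a small retrieval pipeline: chunk the file server-side with per-chunk embeddings, embed the query, then keep only the most relevant chunks. A hedged end-to-end sketch using the exports above; relevantContext is a hypothetical wrapper, and the defaults (maxTokens 10000, minRelevance 0.5) come from the signatures shown:

    import {
        chunkFile,
        getQueryEmbedding,
        selectRelevantChunks,
    } from "./FileChunkingApiUtils";

    async function relevantContext(token: string, fileContent: string, query: string): Promise<string> {
        // qos 'Online' is the interactive tier; 'Batch' is the other option.
        const { chunks } = await chunkFile(token, fileContent, true, 'Online');
        const queryEmbedding = await getQueryEmbedding(token, query);
        // Cosine-ranks chunks against the query and joins the winners
        // with "---" separators, up to ~10k tokens.
        return selectRelevantChunks(chunks, queryEmbedding);
    }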
package/dist/shared/utils/FileOptimizationUtils.d.ts CHANGED
@@ -1,4 +1,4 @@
-export type FileProcessingMode = "direct" | "chunking" | "summarize" | "auto";
+export type FileProcessingMode = 'direct' | 'chunking' | 'summarize' | 'auto';
 export interface FileOptimizationOptions {
     mode: FileProcessingMode;
     model: string;
@@ -7,7 +7,7 @@ export interface FileOptimizationOptions {
     minRelevance?: number;
 }
 export interface OptimizationResult {
-    mode: "direct" | "chunking" | "summarize";
+    mode: 'direct' | 'chunking' | 'summarize';
     reason: string;
     estimatedTokens: number;
     maxAllowedTokens: number;
package/dist/shared/utils/FileOptimizationUtils.js CHANGED
@@ -24,12 +24,12 @@ function selectFileProcessingMode(options) {
     console.log(` Estimated tokens: ${estimatedTokens.toLocaleString()}`);
     console.log(` Max allowed (${(maxContextUsage * 100).toFixed(0)}%): ${maxAllowedTokens.toLocaleString()}`);
     console.log(` Model context: ${maxContextTokens.toLocaleString()} tokens`);
-    if (mode !== "auto") {
-        if (mode === "direct" && !fitsInContext) {
+    if (mode !== 'auto') {
+        if (mode === 'direct' && !fitsInContext) {
             console.warn(`⚠️ Warning: Direct mode requested but file exceeds token limit`);
         }
         return {
-            mode,
+            mode: mode,
             reason: `User requested ${mode} mode`,
             estimatedTokens,
             maxAllowedTokens,
@@ -38,7 +38,7 @@ function selectFileProcessingMode(options) {
     }
     if (estimatedTokens < maxAllowedTokens * 0.3) {
         return {
-            mode: "direct",
+            mode: 'direct',
             reason: `File is small (${estimatedTokens.toLocaleString()} tokens < 30% of limit)`,
             estimatedTokens,
             maxAllowedTokens,
@@ -47,7 +47,7 @@ function selectFileProcessingMode(options) {
     }
     else if (estimatedTokens < maxAllowedTokens * 0.8) {
         return {
-            mode: "chunking",
+            mode: 'chunking',
             reason: `File is medium-sized (${estimatedTokens.toLocaleString()} tokens, 30-80% of limit) - chunking recommended`,
             estimatedTokens,
             maxAllowedTokens,
@@ -56,7 +56,7 @@ function selectFileProcessingMode(options) {
     }
     else if (fitsInContext) {
         return {
-            mode: "chunking",
+            mode: 'chunking',
             reason: `File is large (${estimatedTokens.toLocaleString()} tokens, >80% of limit) - chunking strongly recommended`,
             estimatedTokens,
             maxAllowedTokens,
@@ -65,7 +65,7 @@ function selectFileProcessingMode(options) {
     }
     else {
         return {
-            mode: "summarize",
+            mode: 'summarize',
             reason: `File exceeds token limit (${estimatedTokens.toLocaleString()} > ${maxAllowedTokens.toLocaleString()} tokens) - summarization required`,
             estimatedTokens,
             maxAllowedTokens,
@@ -86,12 +86,12 @@ function getOptimalChunkSettings(model, maxContextUsage = 0.5) {
 }
 function compressText(text) {
     return text
-        .replace(/ {2,}/g, " ")
-        .replace(/\n{3,}/g, "\n\n")
-        .replace(/\t/g, " ")
-        .split("\n")
-        .map(line => line.trim())
-        .join("\n")
+        .replace(/ {2,}/g, ' ')
+        .replace(/\n{3,}/g, '\n\n')
+        .replace(/\t/g, ' ')
+        .split('\n')
+        .map((line) => line.trim())
+        .join('\n')
         .trim();
 }
 function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
@@ -106,7 +106,7 @@ function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
     }
     const maxChars = maxTokens * 4;
     const truncated = text.slice(0, maxChars);
-    const ellipsis = addEllipsis ? "\n\n...[truncated]" : "";
+    const ellipsis = addEllipsis ? '\n\n...[truncated]' : '';
     const finalContent = truncated + ellipsis;
     const finalTokens = (0, FileChunkingApiUtils_1.estimateTokens)(finalContent);
     return {
@@ -118,16 +118,16 @@ function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
 }
 function getFileSizeCategory(sizeBytes) {
     if (sizeBytes < 10 * 1024)
-        return "tiny (<10KB)";
+        return 'tiny (<10KB)';
     if (sizeBytes < 50 * 1024)
-        return "small (<50KB)";
+        return 'small (<50KB)';
     if (sizeBytes < 200 * 1024)
-        return "medium (<200KB)";
+        return 'medium (<200KB)';
     if (sizeBytes < 500 * 1024)
-        return "large (<500KB)";
+        return 'large (<500KB)';
     if (sizeBytes < 1024 * 1024)
-        return "very large (<1MB)";
-    return "huge (>1MB)";
+        return 'very large (<1MB)';
+    return 'huge (>1MB)';
 }
 function formatTokenCount(tokens) {
     if (tokens < 1000)
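In auto mode the choice reduces to two thresholds on the rough 4-characters-per-token estimate: below 30% of the allowed budget the file goes direct, between 30% and 80% it is chunked, and above 80% it is chunked if it still fits or summarized otherwise. A worked example, assuming a 128k-context model and the 0.5 maxContextUsage default that getOptimalChunkSettings uses:

    import { estimateTokens } from "./FileChunkingApiUtils";

    const maxAllowedTokens = 128000 * 0.5;          // 64,000-token budget
    const fileContent = "x".repeat(200000);         // ~200 KB of text
    const estimated = estimateTokens(fileContent);  // ceil(200000 / 4) = 50,000

    const direct = estimated < maxAllowedTokens * 0.3;   // false: 50,000 >= 19,200
    const chunking = estimated < maxAllowedTokens * 0.8; // true:  50,000 <  51,200 -> 'chunking'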
package/dist/shared/utils/GitHubCopilotApiUtils.js CHANGED
@@ -69,9 +69,13 @@ async function makeGitHubCopilotRequest(context, endpoint, body, hasMedia = fals
     }
     const headers = {
         ...GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
-        "User-Agent": "GitHub-Copilot/1.0 (n8n-node)",
+        "User-Agent": "GitHubCopilotChat/0.35.0",
         "Editor-Version": `vscode/${minVSCodeVersion}`,
-        "Editor-Plugin-Version": "copilot/1.0.0",
+        "Editor-Plugin-Version": "copilot-chat/0.35.0",
+        "X-GitHub-Api-Version": "2025-05-01",
+        "X-Interaction-Type": "copilot-chat",
+        "OpenAI-Intent": "conversation-panel",
+        "Copilot-Integration-Id": "vscode-chat",
         ...additionalHeaders,
     };
     if (hasMedia) {
package/dist/shared/utils/GitHubCopilotEndpoints.js CHANGED
@@ -76,7 +76,16 @@ class GitHubCopilotEndpoints {
         return exports.GITHUB_COPILOT_API.URLS.USER_COPILOT;
     }
     static getAuthHeaders(token, includeVSCodeHeaders = false) {
-        const headers = exports.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token);
+        const headers = {
+            ...exports.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
+            "User-Agent": "GitHubCopilotChat/0.35.0",
+            "Editor-Version": "vscode/1.96.0",
+            "Editor-Plugin-Version": "copilot-chat/0.35.0",
+            "X-GitHub-Api-Version": "2025-05-01",
+            "X-Interaction-Type": "copilot-chat",
+            "OpenAI-Intent": "conversation-panel",
+            "Copilot-Integration-Id": "vscode-chat",
+        };
         if (includeVSCodeHeaders) {
             return {
                 ...headers,
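Because getAuthHeaders now bakes the Copilot Chat identity into the base header set, every caller (the chunking and embeddings utilities above included) inherits it automatically. A sketch of the call pattern used by chunkFile; postChunks is a hypothetical wrapper, with the body shape taken from the ChunkRequest interface:

    import { GitHubCopilotEndpoints } from "./GitHubCopilotEndpoints";

    async function postChunks(token: string, content: string) {
        // true adds the extra VS Code headers on top of the base set,
        // which now already carries the GitHubCopilotChat/0.35.0 identity.
        const headers = GitHubCopilotEndpoints.getAuthHeaders(token, true);
        return fetch("https://api.githubcopilot.com/chunks", {
            method: "POST",
            headers,
            body: JSON.stringify({ content, embed: true, qos: "Online" }),
        });
    }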
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.38.25",
+  "version": "3.38.35",
   "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows with full tools and function calling support - access GPT-5, Claude, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",