n8n-nodes-github-copilot 3.38.26 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,8 +6,8 @@ exports.selectTopChunks = selectTopChunks;
 exports.estimateTokens = estimateTokens;
 exports.getQueryEmbedding = getQueryEmbedding;
 const GitHubCopilotEndpoints_1 = require("./GitHubCopilotEndpoints");
-async function chunkFile(token, fileContent, embeddings = true, qos = "Online") {
-    const url = "https://api.githubcopilot.com/chunks";
+async function chunkFile(token, fileContent, embeddings = true, qos = 'Online') {
+    const url = 'https://api.githubcopilot.com/chunks';
     const headers = GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getAuthHeaders(token, true);
     const requestBody = {
         content: fileContent,
@@ -16,7 +16,7 @@ async function chunkFile(token, fileContent, embeddings = true, qos = "Online")
     };
     console.log(`🔪 Chunking file (${fileContent.length} chars, embed=${embeddings}, qos=${qos})`);
     const response = await fetch(url, {
-        method: "POST",
+        method: 'POST',
         headers,
         body: JSON.stringify(requestBody),
     });
@@ -24,13 +24,13 @@ async function chunkFile(token, fileContent, embeddings = true, qos = "Online")
         const errorText = await response.text();
         throw new Error(`Chunking API error: ${response.status} ${response.statusText}. ${errorText}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
     console.log(`✅ Chunked into ${data.chunks.length} chunks`);
     return data;
 }
 function cosineSimilarity(a, b) {
     if (a.length !== b.length) {
-        throw new Error("Vectors must have same length");
+        throw new Error('Vectors must have same length');
     }
     let dotProduct = 0;
     let normA = 0;
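The diff only shows the edges of cosineSimilarity; for orientation, a standard cosine-similarity implementation consistent with the visible dotProduct/normA/normB skeleton looks like this (a sketch of the textbook formula, not the package's exact code):

```ts
// Sketch: cos(a, b) = (a · b) / (||a|| * ||b||), matching the visible skeleton.
function cosineSimilarityExample(a: number[], b: number[]): number {
  if (a.length !== b.length) {
    throw new Error('Vectors must have same length');
  }
  let dotProduct = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dotProduct += a[i] * b[i]; // accumulate the dot product
    normA += a[i] * a[i];      // squared magnitude of a
    normB += b[i] * b[i];      // squared magnitude of b
  }
  return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
}
```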
@@ -44,15 +44,15 @@ function cosineSimilarity(a, b) {
 }
 function selectRelevantChunks(chunks, queryEmbedding, maxTokens = 10000, minRelevance = 0.5) {
     if (!chunks.length) {
-        return "";
+        return '';
     }
     const rankedChunks = chunks
-        .filter(chunk => chunk.embedding)
-        .map(chunk => ({
+        .filter((chunk) => chunk.embedding)
+        .map((chunk) => ({
         chunk,
         relevance: cosineSimilarity(chunk.embedding, queryEmbedding),
     }))
-        .filter(item => item.relevance >= minRelevance)
+        .filter((item) => item.relevance >= minRelevance)
         .sort((a, b) => b.relevance - a.relevance);
     const selectedChunks = [];
     let totalTokens = 0;
@@ -66,7 +66,7 @@ function selectRelevantChunks(chunks, queryEmbedding, maxTokens = 10000, minRele
         console.log(` ✓ Selected chunk (relevance: ${item.relevance.toFixed(3)}, tokens: ~${chunkTokens})`);
     }
     console.log(`📊 Selected ${selectedChunks.length}/${rankedChunks.length} chunks (~${totalTokens} tokens)`);
-    return selectedChunks.join("\n\n---\n\n");
+    return selectedChunks.join('\n\n---\n\n');
 }
 function selectTopChunks(chunks, maxTokens = 10000) {
     const selectedChunks = [];
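Taken together, chunkFile, getQueryEmbedding, and selectRelevantChunks form a small retrieval pipeline: chunk and embed the file, embed the query, then rank and select by cosine similarity under a token budget. A hypothetical caller (the wrapper function name is an assumption; the three called functions are the exports shown above) might wire them up like this:

```ts
// Sketch: retrieval pipeline over a large file, assuming the exported
// chunkFile/getQueryEmbedding/selectRelevantChunks from this module are in scope.
async function buildContext(token: string, fileContent: string, query: string): Promise<string> {
  const { chunks } = await chunkFile(token, fileContent); // embeddings enabled by default
  const queryEmbedding = await getQueryEmbedding(token, query);
  // Keep chunks at or above 0.5 relevance, up to roughly 10k tokens (the defaults above).
  return selectRelevantChunks(chunks, queryEmbedding, 10000, 0.5);
}
```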
@@ -80,26 +80,26 @@ function selectTopChunks(chunks, maxTokens = 10000) {
         totalTokens += chunkTokens;
     }
     console.log(`📊 Selected ${selectedChunks.length}/${chunks.length} chunks (~${totalTokens} tokens)`);
-    return selectedChunks.join("\n\n---\n\n");
+    return selectedChunks.join('\n\n---\n\n');
 }
 function estimateTokens(text) {
     return Math.ceil(text.length / 4);
 }
 async function getQueryEmbedding(token, query) {
-    const url = "https://api.githubcopilot.com/embeddings";
+    const url = 'https://api.githubcopilot.com/embeddings';
     const headers = GitHubCopilotEndpoints_1.GitHubCopilotEndpoints.getEmbeddingsHeaders(token);
     const response = await fetch(url, {
-        method: "POST",
+        method: 'POST',
         headers,
         body: JSON.stringify({
             input: [query],
-            model: "text-embedding-3-small",
+            model: 'text-embedding-3-small',
         }),
     });
     if (!response.ok) {
         const errorText = await response.text();
         throw new Error(`Embeddings API error: ${response.status} ${response.statusText}. ${errorText}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
     return data.data[0].embedding;
 }
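getQueryEmbedding reads `data.data[0].embedding`, i.e. it expects an OpenAI-style embeddings payload. A minimal sketch of the shape it relies on (field names inferred from the code above; anything beyond them is an assumption):

```ts
// Assumed response shape for the /embeddings call above (OpenAI-style).
interface EmbeddingsResponse {
  data: Array<{ embedding: number[] }>;
}

// Hypothetical usage:
// const queryEmbedding: number[] = await getQueryEmbedding(copilotToken, 'how is auth handled?');
```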
@@ -1,4 +1,4 @@
-export type FileProcessingMode = "direct" | "chunking" | "summarize" | "auto";
+export type FileProcessingMode = 'direct' | 'chunking' | 'summarize' | 'auto';
 export interface FileOptimizationOptions {
     mode: FileProcessingMode;
     model: string;
@@ -7,7 +7,7 @@ export interface FileOptimizationOptions {
     minRelevance?: number;
 }
 export interface OptimizationResult {
-    mode: "direct" | "chunking" | "summarize";
+    mode: 'direct' | 'chunking' | 'summarize';
     reason: string;
     estimatedTokens: number;
     maxAllowedTokens: number;
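These declarations pin the public shapes; a caller might construct the options like this (values are illustrative, and fields elided by the diff are omitted):

```ts
// Illustrative options object; only fields visible in the .d.ts hunks are used.
const options: FileOptimizationOptions = {
  mode: 'auto',      // FileProcessingMode: 'direct' | 'chunking' | 'summarize' | 'auto'
  model: 'gpt-4o',   // model id is an assumption; any supported id would do
  minRelevance: 0.5, // optional relevance floor for chunk selection
};
```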
@@ -24,12 +24,12 @@ function selectFileProcessingMode(options) {
     console.log(` Estimated tokens: ${estimatedTokens.toLocaleString()}`);
     console.log(` Max allowed (${(maxContextUsage * 100).toFixed(0)}%): ${maxAllowedTokens.toLocaleString()}`);
     console.log(` Model context: ${maxContextTokens.toLocaleString()} tokens`);
-    if (mode !== "auto") {
-        if (mode === "direct" && !fitsInContext) {
+    if (mode !== 'auto') {
+        if (mode === 'direct' && !fitsInContext) {
             console.warn(`⚠️ Warning: Direct mode requested but file exceeds token limit`);
         }
         return {
-            mode,
+            mode: mode,
             reason: `User requested ${mode} mode`,
             estimatedTokens,
             maxAllowedTokens,
@@ -38,7 +38,7 @@ function selectFileProcessingMode(options) {
     }
     if (estimatedTokens < maxAllowedTokens * 0.3) {
         return {
-            mode: "direct",
+            mode: 'direct',
             reason: `File is small (${estimatedTokens.toLocaleString()} tokens < 30% of limit)`,
             estimatedTokens,
             maxAllowedTokens,
@@ -47,7 +47,7 @@ function selectFileProcessingMode(options) {
     }
     else if (estimatedTokens < maxAllowedTokens * 0.8) {
         return {
-            mode: "chunking",
+            mode: 'chunking',
             reason: `File is medium-sized (${estimatedTokens.toLocaleString()} tokens, 30-80% of limit) - chunking recommended`,
             estimatedTokens,
             maxAllowedTokens,
@@ -56,7 +56,7 @@ function selectFileProcessingMode(options) {
     }
     else if (fitsInContext) {
         return {
-            mode: "chunking",
+            mode: 'chunking',
             reason: `File is large (${estimatedTokens.toLocaleString()} tokens, >80% of limit) - chunking strongly recommended`,
             estimatedTokens,
             maxAllowedTokens,
@@ -65,7 +65,7 @@ function selectFileProcessingMode(options) {
     }
     else {
         return {
-            mode: "summarize",
+            mode: 'summarize',
             reason: `File exceeds token limit (${estimatedTokens.toLocaleString()} > ${maxAllowedTokens.toLocaleString()} tokens) - summarization required`,
             estimatedTokens,
             maxAllowedTokens,
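Read together, the auto-mode branches in selectFileProcessingMode reduce to four bands of the token budget. A condensed paraphrase of the logic visible in the hunks above (the function name here is ours, not the package's):

```ts
// Paraphrase of the auto-mode decision ladder shown in the hunks above.
function pickMode(estimatedTokens: number, maxAllowedTokens: number, fitsInContext: boolean) {
  if (estimatedTokens < maxAllowedTokens * 0.3) return 'direct';   // small: send as-is
  if (estimatedTokens < maxAllowedTokens * 0.8) return 'chunking'; // medium: chunking recommended
  if (fitsInContext) return 'chunking';                            // large but fits: chunking strongly recommended
  return 'summarize';                                              // over the limit: summarization required
}
```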
@@ -86,12 +86,12 @@ function getOptimalChunkSettings(model, maxContextUsage = 0.5) {
 }
 function compressText(text) {
     return text
-        .replace(/ {2,}/g, " ")
-        .replace(/\n{3,}/g, "\n\n")
-        .replace(/\t/g, " ")
-        .split("\n")
-        .map(line => line.trim())
-        .join("\n")
+        .replace(/ {2,}/g, ' ')
+        .replace(/\n{3,}/g, '\n\n')
+        .replace(/\t/g, ' ')
+        .split('\n')
+        .map((line) => line.trim())
+        .join('\n')
         .trim();
 }
 function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
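The compressText chain is fully visible, so its behavior can be illustrated directly: collapse runs of spaces, cap blank lines at one, expand tabs, and trim each line.

```ts
// Worked example for the compressText chain above.
const input = 'a  b\tc\n\n\n\nd  e';
// '  ' -> ' ', '\n\n\n\n' -> '\n\n', '\t' -> ' ', then per-line trim:
// compressText(input) === 'a b c\n\nd e'
```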
@@ -106,7 +106,7 @@ function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
     }
     const maxChars = maxTokens * 4;
     const truncated = text.slice(0, maxChars);
-    const ellipsis = addEllipsis ? "\n\n...[truncated]" : "";
+    const ellipsis = addEllipsis ? '\n\n...[truncated]' : '';
     const finalContent = truncated + ellipsis;
     const finalTokens = (0, FileChunkingApiUtils_1.estimateTokens)(finalContent);
     return {
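Both estimateTokens and truncateToTokenLimit use the same 4-characters-per-token heuristic, so the arithmetic is easy to check by hand:

```ts
// 4 chars ≈ 1 token throughout: estimateTokens = Math.ceil(length / 4),
// and truncation allows maxTokens * 4 characters before the marker is appended.
const maxTokens = 10000;
const maxChars = maxTokens * 4; // 40,000 characters
// A 100,000-char file estimates to Math.ceil(100000 / 4) = 25,000 tokens,
// so it would be cut to 40,000 chars plus '\n\n...[truncated]'.
```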
@@ -118,16 +118,16 @@ function truncateToTokenLimit(text, maxTokens, addEllipsis = true) {
 }
 function getFileSizeCategory(sizeBytes) {
     if (sizeBytes < 10 * 1024)
-        return "tiny (<10KB)";
+        return 'tiny (<10KB)';
     if (sizeBytes < 50 * 1024)
-        return "small (<50KB)";
+        return 'small (<50KB)';
     if (sizeBytes < 200 * 1024)
-        return "medium (<200KB)";
+        return 'medium (<200KB)';
     if (sizeBytes < 500 * 1024)
-        return "large (<500KB)";
+        return 'large (<500KB)';
     if (sizeBytes < 1024 * 1024)
-        return "very large (<1MB)";
-    return "huge (>1MB)";
+        return 'very large (<1MB)';
+    return 'huge (>1MB)';
 }
 function formatTokenCount(tokens) {
     if (tokens < 1000)
@@ -69,9 +69,13 @@ async function makeGitHubCopilotRequest(context, endpoint, body, hasMedia = fals
     }
     const headers = {
         ...GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
-        "User-Agent": "GitHub-Copilot/1.0 (n8n-node)",
+        "User-Agent": "GitHubCopilotChat/0.35.0",
         "Editor-Version": `vscode/${minVSCodeVersion}`,
-        "Editor-Plugin-Version": "copilot/1.0.0",
+        "Editor-Plugin-Version": "copilot-chat/0.35.0",
+        "X-GitHub-Api-Version": "2025-05-01",
+        "X-Interaction-Type": "copilot-chat",
+        "OpenAI-Intent": "conversation-panel",
+        "Copilot-Integration-Id": "vscode-chat",
         ...additionalHeaders,
     };
     if (hasMedia) {
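The net effect of this hunk is that requests now identify themselves as the VS Code Copilot Chat client. Assembled, the outgoing header set looks roughly like this (the Authorization line is an assumption about what WITH_AUTH(token) produces, and the Editor-Version value stands in for the runtime-interpolated `vscode/${minVSCodeVersion}`):

```ts
// Approximate headers after this change; treat Authorization and the
// Editor-Version value as placeholders, not confirmed by the diff.
const headers: Record<string, string> = {
  'Authorization': `Bearer ${token}`,             // assumed shape of WITH_AUTH(token)
  'User-Agent': 'GitHubCopilotChat/0.35.0',
  'Editor-Version': 'vscode/1.96.0',              // placeholder for `vscode/${minVSCodeVersion}`
  'Editor-Plugin-Version': 'copilot-chat/0.35.0',
  'X-GitHub-Api-Version': '2025-05-01',
  'X-Interaction-Type': 'copilot-chat',
  'OpenAI-Intent': 'conversation-panel',
  'Copilot-Integration-Id': 'vscode-chat',
};
```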
@@ -76,7 +76,16 @@ class GitHubCopilotEndpoints {
         return exports.GITHUB_COPILOT_API.URLS.USER_COPILOT;
     }
     static getAuthHeaders(token, includeVSCodeHeaders = false) {
-        const headers = exports.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token);
+        const headers = {
+            ...exports.GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
+            "User-Agent": "GitHubCopilotChat/0.35.0",
+            "Editor-Version": "vscode/1.96.0",
+            "Editor-Plugin-Version": "copilot-chat/0.35.0",
+            "X-GitHub-Api-Version": "2025-05-01",
+            "X-Interaction-Type": "copilot-chat",
+            "OpenAI-Intent": "conversation-panel",
+            "Copilot-Integration-Id": "vscode-chat",
+        };
         if (includeVSCodeHeaders) {
             return {
                 ...headers,
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.38.26",
-  "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows with full tools and function calling support - access GPT-5, Claude, Gemini and more using your Copilot subscription",
+  "version": "4.0.0",
+  "description": "n8n community node for GitHub Copilot with NEW CLI programmatic mode, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude Sonnet 4.5, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
   "author": {