n8n-nodes-github-copilot 4.1.2 → 4.2.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dist/package.json +4 -3
- package/dist/shared/index.d.ts +2 -0
- package/dist/shared/index.js +15 -0
- package/dist/shared/utils/provider-injection.d.ts +15 -0
- package/dist/shared/utils/provider-injection.js +177 -0
- package/dist/shared/utils/version-detection.d.ts +11 -0
- package/dist/shared/utils/version-detection.js +78 -0
- package/package.json +4 -3
- package/shared/icons/copilot.svg +34 -0
- package/shared/index.ts +27 -0
- package/shared/models/DynamicModelLoader.ts +124 -0
- package/shared/models/GitHubCopilotModels.ts +420 -0
- package/shared/models/ModelVersionRequirements.ts +165 -0
- package/shared/properties/ModelProperties.ts +52 -0
- package/shared/properties/ModelSelectionProperty.ts +68 -0
- package/shared/utils/DynamicModelsManager.ts +355 -0
- package/shared/utils/EmbeddingsApiUtils.ts +135 -0
- package/shared/utils/FileChunkingApiUtils.ts +176 -0
- package/shared/utils/FileOptimizationUtils.ts +210 -0
- package/shared/utils/GitHubCopilotApiUtils.ts +407 -0
- package/shared/utils/GitHubCopilotEndpoints.ts +212 -0
- package/shared/utils/GitHubDeviceFlowHandler.ts +276 -0
- package/shared/utils/OAuthTokenManager.ts +196 -0
- package/shared/utils/provider-injection.ts +277 -0
- package/shared/utils/version-detection.ts +145 -0
@@ -0,0 +1,210 @@ package/shared/utils/FileOptimizationUtils.ts
+import { estimateTokens } from './FileChunkingApiUtils';
+
+export type FileProcessingMode = 'direct' | 'chunking' | 'summarize' | 'auto';
+
+export interface FileOptimizationOptions {
+  mode: FileProcessingMode;
+  model: string;
+  fileSize: number;
+  maxContextUsage?: number;
+  minRelevance?: number;
+}
+
+export interface OptimizationResult {
+  mode: 'direct' | 'chunking' | 'summarize';
+  reason: string;
+  estimatedTokens: number;
+  maxAllowedTokens: number;
+  fitsInContext: boolean;
+}
+
+/**
+ * Estimate tokens for a file based on size
+ */
+export function estimateFileTokens(fileSize: number, isBase64 = true): number {
+  const encodedSize = isBase64 ? fileSize * 1.33 : fileSize;
+  return Math.ceil(encodedSize / 4);
+}
+
+/**
+ * Select the optimal file processing mode based on file size and model constraints
+ */
+export function selectFileProcessingMode(options: FileOptimizationOptions): OptimizationResult {
+  const { model, fileSize, mode, maxContextUsage = 0.5 } = options;
+
+  const maxContextTokens = 128000;
+  const maxAllowedTokens = Math.floor(maxContextTokens * maxContextUsage);
+  const estimatedTokens = estimateFileTokens(fileSize, true);
+  const fitsInContext = estimatedTokens <= maxAllowedTokens;
+
+  console.log(`📊 File Optimization Analysis:`);
+  console.log(`  File size: ${(fileSize / 1024).toFixed(2)} KB`);
+  console.log(`  Estimated tokens: ${estimatedTokens.toLocaleString()}`);
+  console.log(`  Max allowed (${(maxContextUsage * 100).toFixed(0)}%): ${maxAllowedTokens.toLocaleString()}`);
+  console.log(`  Model context: ${maxContextTokens.toLocaleString()} tokens`);
+
+  if (mode !== 'auto') {
+    if (mode === 'direct' && !fitsInContext) {
+      console.warn(`⚠️ Warning: Direct mode requested but file exceeds token limit`);
+    }
+    return {
+      mode: mode as 'direct' | 'chunking' | 'summarize',
+      reason: `User requested ${mode} mode`,
+      estimatedTokens,
+      maxAllowedTokens,
+      fitsInContext,
+    };
+  }
+
+  if (estimatedTokens < maxAllowedTokens * 0.3) {
+    return {
+      mode: 'direct',
+      reason: `File is small (${estimatedTokens.toLocaleString()} tokens < 30% of limit)`,
+      estimatedTokens,
+      maxAllowedTokens,
+      fitsInContext: true,
+    };
+  } else if (estimatedTokens < maxAllowedTokens * 0.8) {
+    return {
+      mode: 'chunking',
+      reason: `File is medium-sized (${estimatedTokens.toLocaleString()} tokens, 30-80% of limit) - chunking recommended`,
+      estimatedTokens,
+      maxAllowedTokens,
+      fitsInContext: true,
+    };
+  } else if (fitsInContext) {
+    return {
+      mode: 'chunking',
+      reason: `File is large (${estimatedTokens.toLocaleString()} tokens, >80% of limit) - chunking strongly recommended`,
+      estimatedTokens,
+      maxAllowedTokens,
+      fitsInContext: true,
+    };
+  } else {
+    return {
+      mode: 'summarize',
+      reason: `File exceeds token limit (${estimatedTokens.toLocaleString()} > ${maxAllowedTokens.toLocaleString()} tokens) - summarization required`,
+      estimatedTokens,
+      maxAllowedTokens,
+      fitsInContext: false,
+    };
+  }
+}
+
+/**
+ * Get optimal chunk settings for a model
+ */
+export function getOptimalChunkSettings(
+  model: string,
+  maxContextUsage = 0.5,
+): {
+  maxChunks: number;
+  maxTokensPerChunk: number;
+  totalMaxTokens: number;
+} {
+  const maxContextTokens = 128000;
+  const totalMaxTokens = Math.floor(maxContextTokens * maxContextUsage);
+  const maxChunks = 10;
+  const maxTokensPerChunk = Math.floor(totalMaxTokens / maxChunks);
+
+  return {
+    maxChunks,
+    maxTokensPerChunk,
+    totalMaxTokens,
+  };
+}
+
+/**
+ * Compress text by removing extra whitespace
+ */
+export function compressText(text: string): string {
+  return text
+    .replace(/ {2,}/g, ' ')
+    .replace(/\n{3,}/g, '\n\n')
+    .replace(/\t/g, ' ')
+    .split('\n')
+    .map((line) => line.trim())
+    .join('\n')
+    .trim();
+}
+
+/**
+ * Truncate text to a token limit
+ */
+export function truncateToTokenLimit(
+  text: string,
+  maxTokens: number,
+  addEllipsis = true,
+): {
+  content: string;
+  truncated: boolean;
+  originalTokens: number;
+  finalTokens: number;
+} {
+  const originalTokens = estimateTokens(text);
+
+  if (originalTokens <= maxTokens) {
+    return {
+      content: text,
+      truncated: false,
+      originalTokens,
+      finalTokens: originalTokens,
+    };
+  }
+
+  const maxChars = maxTokens * 4;
+  const truncated = text.slice(0, maxChars);
+  const ellipsis = addEllipsis ? '\n\n...[truncated]' : '';
+  const finalContent = truncated + ellipsis;
+  const finalTokens = estimateTokens(finalContent);
+
+  return {
+    content: finalContent,
+    truncated: true,
+    originalTokens,
+    finalTokens,
+  };
+}
+
+/**
+ * Get a human-readable file size category
+ */
+export function getFileSizeCategory(sizeBytes: number): string {
+  if (sizeBytes < 10 * 1024) return 'tiny (<10KB)';
+  if (sizeBytes < 50 * 1024) return 'small (<50KB)';
+  if (sizeBytes < 200 * 1024) return 'medium (<200KB)';
+  if (sizeBytes < 500 * 1024) return 'large (<500KB)';
+  if (sizeBytes < 1024 * 1024) return 'very large (<1MB)';
+  return 'huge (>1MB)';
+}
+
+/**
+ * Format token count for display
+ */
+export function formatTokenCount(tokens: number): string {
+  if (tokens < 1000) return `${tokens} tokens`;
+  if (tokens < 1000000) return `${(tokens / 1000).toFixed(1)}K tokens`;
+  return `${(tokens / 1000000).toFixed(2)}M tokens`;
+}
+
+/**
+ * Calculate token savings from optimization
+ */
+export function calculateSavings(
+  originalTokens: number,
+  optimizedTokens: number,
+): {
+  savedTokens: number;
+  savingsPercent: number;
+  savingsRatio: string;
+} {
+  const savedTokens = originalTokens - optimizedTokens;
+  const savingsPercent = (savedTokens / originalTokens) * 100;
+  const savingsRatio = `${optimizedTokens.toLocaleString()}/${originalTokens.toLocaleString()}`;
+
+  return {
+    savedTokens,
+    savingsPercent,
+    savingsRatio,
+  };
+}
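For orientation, here is a minimal sketch of how these new utilities might be composed by a node. The model id and file size are illustrative placeholders, and note that the current implementation assumes a fixed 128,000-token context regardless of the model passed in.

import { selectFileProcessingMode, getOptimalChunkSettings, formatTokenCount } from './FileOptimizationUtils';

// Hypothetical input: a 100 KB binary attachment processed in 'auto' mode.
const decision = selectFileProcessingMode({
  mode: 'auto',
  model: 'gpt-4o',          // illustrative; the mode selection currently ignores the model id
  fileSize: 100 * 1024,
  maxContextUsage: 0.5,      // allow the file to use at most half of the 128k context
});

// For this size the estimate lands between 30% and 80% of the budget, so 'chunking' is chosen.
console.log(decision.mode, formatTokenCount(decision.estimatedTokens));

if (decision.mode === 'chunking') {
  // With the defaults above: maxChunks = 10, maxTokensPerChunk = 6,400, totalMaxTokens = 64,000.
  const { maxChunks, maxTokensPerChunk } = getOptimalChunkSettings('gpt-4o', 0.5);
  console.log(`Split into up to ${maxChunks} chunks of ~${maxTokensPerChunk} tokens each`);
}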
@@ -0,0 +1,407 @@ package/shared/utils/GitHubCopilotApiUtils.ts
+import { IExecuteFunctions } from "n8n-workflow";
+import { GITHUB_COPILOT_API } from "./GitHubCopilotEndpoints";
+import { OAuthTokenManager } from "./OAuthTokenManager";
+import { DynamicModelsManager } from "./DynamicModelsManager";
+import { getMinVSCodeVersion, getAdditionalHeaders } from "../models/ModelVersionRequirements";
+
+// Interface for OAuth2 credentials
+interface OAuth2Credentials {
+  accessToken?: string;
+  access_token?: string;
+  token?: string;
+  oauthTokenData?: {
+    access_token?: string;
+  };
+  [key: string]: unknown;
+}
+
+// GitHub Copilot API Response interface
+export interface CopilotResponse {
+  id: string;
+  object: string;
+  created: number;
+  model: string;
+  choices: Array<{
+    index: number;
+    message: {
+      role: string;
+      content: string;
+      tool_calls?: Array<{
+        id: string;
+        type: string;
+        function: {
+          name: string;
+          arguments: string;
+        };
+      }>;
+      [key: string]: unknown; // Allow additional fields from GitHub Copilot (like 'padding')
+    };
+    finish_reason: string;
+  }>;
+  usage: {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+  };
+  // Retry metadata (added by wrapper)
+  _retryMetadata?: {
+    attempts: number;
+    retries: number;
+    succeeded: boolean;
+  };
+}
+
+/**
+ * Retry configuration for GitHub Copilot API requests
+ */
+export interface RetryConfig {
+  /** Maximum number of retry attempts (default: 3) */
+  maxRetries?: number;
+  /** Base delay in milliseconds between retries (default: 500ms, uses exponential backoff) */
+  baseDelay?: number;
+  /** Whether to retry on 403 errors (default: true) */
+  retryOn403?: boolean;
+}
+
+/**
+ * Makes an API request to GitHub Copilot API
+ * Works with both OAuth2 and manual token credentials
+ * @param context - n8n execution context
+ * @param endpoint - API endpoint path
+ * @param body - Request body
+ * @param hasMedia - Whether request contains media (images/audio)
+ * @param retryConfig - Optional retry configuration for handling intermittent failures
+ */
+export async function makeGitHubCopilotRequest(
+  context: IExecuteFunctions,
+  endpoint: string,
+  body: Record<string, unknown>,
+  hasMedia = false,
+  retryConfig?: RetryConfig
+): Promise<CopilotResponse> {
+  // Default retry configuration
+  const MAX_RETRIES = retryConfig?.maxRetries ?? 3;
+  const BASE_DELAY = retryConfig?.baseDelay ?? 500;
+  const RETRY_ON_403 = retryConfig?.retryOn403 ?? true;
+
+  // Extract model from request body for version-specific headers
+  const model = body.model as string | undefined;
+
+  // Determine credential type dynamically
+  let credentialType = "githubCopilotApi"; // default
+  try {
+    credentialType = context.getNodeParameter("credentialType", 0, "githubCopilotApi") as string;
+  } catch {
+    // If credentialType parameter doesn't exist, use default
+    console.log("🔍 No credentialType parameter found, using default: githubCopilotApi");
+  }
+
+  // Get credentials based on type
+  const credentials = await context.getCredentials(credentialType) as OAuth2Credentials;
+
+  // Debug: Log credential structure for OAuth2
+  console.log(`🔍 ${credentialType} Credentials Debug:`, Object.keys(credentials));
+
+  // Get GitHub token and auto-generate OAuth token
+  const githubToken = credentials.token as string;
+
+  if (!githubToken) {
+    throw new Error("GitHub token not found in credentials");
+  }
+
+  // Validate GitHub token format (ghu_*, github_pat_*, or gho_*)
+  if (!githubToken.startsWith("ghu_") && !githubToken.startsWith("github_pat_") && !githubToken.startsWith("gho_")) {
+    throw new Error("Invalid GitHub token format. Must start with ghu_, github_pat_, or gho_");
+  }
+
+  console.log(`🔄 Using GitHub token to generate OAuth token...`);
+
+  let token: string;
+  try {
+    // Auto-generate OAuth token (uses cache if still valid)
+    token = await OAuthTokenManager.getValidOAuthToken(githubToken);
+    console.log(`✅ OAuth token ready (auto-generated from GitHub token)`);
+
+    // Fetch available models in background (don't block the request)
+    DynamicModelsManager.getAvailableModels(token)
+      .then((models) => {
+        console.log(`✅ Models list updated: ${models.length} models available`);
+      })
+      .catch((error) => {
+        console.warn(`⚠️ Failed to update models list: ${error instanceof Error ? error.message : String(error)}`);
+      });
+  } catch (error) {
+    throw new Error(
+      `Failed to generate OAuth token: ${error instanceof Error ? error.message : String(error)}`
+    );
+  }
+
+  // Validate OAuth2 token exists
+  if (!token) {
+    console.error(`❌ Available ${credentialType} credential properties:`, Object.keys(credentials));
+    console.error(`❌ Full ${credentialType} credential object:`, JSON.stringify(credentials, null, 2));
+    throw new Error(`GitHub Copilot: No access token found in ${credentialType} credentials. Available properties: ` + Object.keys(credentials).join(", "));
+  }
+
+  // Debug: Show token info for troubleshooting
+  const tokenPrefix = token.substring(0, Math.min(4, token.indexOf("_") + 1)) || token.substring(0, 4);
+  const tokenSuffix = token.substring(Math.max(0, token.length - 5));
+  console.log(`🔍 GitHub Copilot ${credentialType} Debug: Using token ${tokenPrefix}...${tokenSuffix}`);
+
+  // Note: GitHub Copilot accepts different token formats
+  if (!token.startsWith("gho_") && !token.startsWith("ghu_") && !token.startsWith("github_pat_")) {
+    console.warn(`⚠️ Unexpected token format: ${tokenPrefix}...${tokenSuffix}. Trying API call anyway.`);
+  }
+
+  // Get model-specific version requirements
+  const minVSCodeVersion = model ? getMinVSCodeVersion(model) : "1.95.0";
+  const additionalHeaders = model ? getAdditionalHeaders(model) : {};
+
+  if (model) {
+    console.log(`🔧 Model: ${model} requires VS Code version: ${minVSCodeVersion}`);
+  }
+
+  // Prepare headers using centralized configuration
+  // CRITICAL: X-GitHub-Api-Version: 2025-05-01 and Copilot-Integration-Id are REQUIRED
+  // for newer models like Raptor Mini (oswe-vscode-prime), Gemini 3, etc.
+  // Source: microsoft/vscode-copilot-chat networking.ts
+  const headers: Record<string, string> = {
+    ...GITHUB_COPILOT_API.HEADERS.WITH_AUTH(token),
+    // VS Code client headers for compatibility
+    "User-Agent": "GitHubCopilotChat/0.35.0",
+    "Editor-Version": `vscode/${minVSCodeVersion}`,
+    "Editor-Plugin-Version": "copilot-chat/0.35.0",
+    // CRITICAL: These headers are required for newer models (Raptor Mini, Gemini 3, etc.)
+    "X-GitHub-Api-Version": "2025-05-01",
+    "X-Interaction-Type": "copilot-chat",
+    "OpenAI-Intent": "conversation-panel",
+    "Copilot-Integration-Id": "vscode-chat",
+    ...additionalHeaders,
+  };
+
+  // Add required headers for vision/audio requests
+  if (hasMedia) {
+    headers["Copilot-Vision-Request"] = "true";
+    headers["Copilot-Media-Request"] = "true";
+  }
+
+  const options = {
+    method: "POST",
+    headers,
+    body: JSON.stringify(body),
+  };
+
+  // Use centralized endpoint construction
+  const fullUrl = `${GITHUB_COPILOT_API.BASE_URL}${endpoint}`;
+
+  // Retry logic for 403 errors (GitHub Copilot intermittent issues)
+  let lastError: Error | null = null;
+
+  for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
+    try {
+      const response = await fetch(fullUrl, options);
+
+      // If we get a 403, retry (unless it's the last attempt)
+      if (response.status === 403 && RETRY_ON_403 && attempt < MAX_RETRIES) {
+        const delayMs = BASE_DELAY * Math.pow(2, attempt - 1);
+        // Add small random jitter (0-20%) to avoid thundering herd
+        const jitter = Math.random() * delayMs * 0.2;
+        const totalDelay = Math.floor(delayMs + jitter);
+        console.warn(`⚠️ GitHub Copilot API 403 error on attempt ${attempt}/${MAX_RETRIES}. Retrying in ${totalDelay}ms...`);
+        // Wait before retrying (exponential backoff with jitter)
+        await new Promise(resolve => setTimeout(resolve, totalDelay));
+        continue;
+      }
+
+      if (!response.ok) {
+        const errorText = await response.text();
+
+        // Check for 400 Bad Request - should NOT retry
+        if (response.status === 400) {
+          console.log(`🚫 400 Bad Request detected - not retrying`);
+          const enhancedError = `GitHub Copilot API error: ${response.status} ${response.statusText}. ${errorText}`;
+          throw new Error(enhancedError);
+        }
+
+        // Secure token display - show only prefix and last 5 characters
+        const tokenPrefix = token.substring(0, 4);
+        const tokenSuffix = token.substring(token.length - 5);
+        const tokenInfo = `${tokenPrefix}...${tokenSuffix}`;
+
+        console.error(`❌ GitHub Copilot API Error: ${response.status} ${response.statusText}`);
+        console.error(`❌ Error details: ${errorText}`);
+        console.error(`❌ Used credential type: ${credentialType}`);
+        console.error(`❌ Token format used: ${tokenInfo}`);
+        console.error(`❌ Attempt: ${attempt}/${MAX_RETRIES}`);
+
+        // Enhanced error message with secure token info
+        const enhancedError = `GitHub Copilot API error: ${response.status} ${response.statusText}. ${errorText} [Token used: ${tokenInfo}] [Attempt: ${attempt}/${MAX_RETRIES}]`;
+
+        throw new Error(enhancedError);
+      }
+
+      // Success! Return the response with retry metadata
+      if (attempt > 1) {
+        console.log(`✅ GitHub Copilot API succeeded on attempt ${attempt}/${MAX_RETRIES}`);
+      }
+
+      const responseData = await response.json() as CopilotResponse;
+
+      // Add retry metadata to response
+      responseData._retryMetadata = {
+        attempts: attempt,
+        retries: attempt - 1,
+        succeeded: true
+      };
+
+      return responseData;
+
+    } catch (error) {
+      lastError = error instanceof Error ? error : new Error(String(error));
+
+      // If it's not the last attempt and it's a network/timeout error, retry
+      if (attempt < MAX_RETRIES) {
+        const delayMs = BASE_DELAY * Math.pow(2, attempt - 1);
+        // Add small random jitter (0-20%) to avoid thundering herd
+        const jitter = Math.random() * delayMs * 0.2;
+        const totalDelay = Math.floor(delayMs + jitter);
+        console.warn(`⚠️ GitHub Copilot API error on attempt ${attempt}/${MAX_RETRIES}: ${lastError.message}. Retrying in ${totalDelay}ms...`);
+        await new Promise(resolve => setTimeout(resolve, totalDelay));
+        continue;
+      }
+
+      // Last attempt failed, throw the error
+      throw lastError;
+    }
+  }
+
+  // Should never reach here, but just in case
+  throw lastError || new Error("GitHub Copilot API request failed after all retries");
+}
+
+/**
+ * Utility functions for file handling (shared between nodes)
+ */
+export async function downloadFileFromUrl(url: string): Promise<Buffer> {
+  const response = await fetch(url);
+  if (!response.ok) {
+    throw new Error(`Failed to download file from URL: ${response.status} ${response.statusText}`);
+  }
+  return Buffer.from(await response.arrayBuffer());
+}
+
+export async function getFileFromBinary(
+  context: IExecuteFunctions,
+  itemIndex: number,
+  propertyName: string
+): Promise<Buffer> {
+  const items = context.getInputData();
+  const item = items[itemIndex];
+
+  if (!item.binary || !item.binary[propertyName]) {
+    throw new Error(`No binary data found in property "${propertyName}"`);
+  }
+
+  const binaryData = item.binary[propertyName];
+
+  if (binaryData.data) {
+    // Data is base64 encoded
+    return Buffer.from(binaryData.data, "base64");
+  } else if (binaryData.id) {
+    // Data is in binary data manager
+    return await context.helpers.getBinaryDataBuffer(itemIndex, propertyName);
+  } else {
+    throw new Error(`Invalid binary data format in property "${propertyName}"`);
+  }
+}
+
+export function getImageMimeType(filename: string): string {
+  const ext = filename.toLowerCase().split(".").pop();
+  switch (ext) {
+    case "jpg":
+    case "jpeg":
+      return "image/jpeg";
+    case "png":
+      return "image/png";
+    case "webp":
+      return "image/webp";
+    case "gif":
+      return "image/gif";
+    default:
+      return "image/jpeg";
+  }
+}
+
+export function getAudioMimeType(filename: string): string {
+  const ext = filename.toLowerCase().split(".").pop();
+  switch (ext) {
+    case "mp3":
+      return "audio/mpeg";
+    case "wav":
+      return "audio/wav";
+    case "m4a":
+      return "audio/mp4";
+    case "flac":
+      return "audio/flac";
+    case "ogg":
+      return "audio/ogg";
+    case "aac":
+      return "audio/aac";
+    default:
+      return "audio/mpeg";
+  }
+}
+
+export function validateFileSize(buffer: Buffer, maxSizeKB = 1024): void {
+  const sizeKB = buffer.length / 1024;
+  if (sizeKB > maxSizeKB) {
+    throw new Error(`File size ${sizeKB.toFixed(2)}KB exceeds limit of ${maxSizeKB}KB`);
+  }
+}
+
+export function estimateTokens(base64String: string): number {
+  // Rough estimation: base64 characters / 4 * 3 for bytes, then / 4 for tokens
+  return Math.ceil((base64String.length / 4 * 3) / 4);
+}
+
+export function validateTokenLimit(estimatedTokens: number, maxTokens = 128000): {
+  valid: boolean;
+  message?: string
+} {
+  if (estimatedTokens <= maxTokens) {
+    return { valid: true };
+  }
+
+  return {
+    valid: false,
+    message: `Content too large: ${estimatedTokens} tokens exceeds limit of ${maxTokens}. Consider using smaller files or text.`
+  };
+}
+
+export function truncateToTokenLimit(content: string, maxTokens = 100000): {
+  content: string;
+  truncated: boolean;
+  originalTokens: number;
+  finalTokens: number
+} {
+  const originalTokens = Math.ceil(content.length / 4); // Rough estimate for text
+
+  if (originalTokens <= maxTokens) {
+    return {
+      content,
+      truncated: false,
+      originalTokens,
+      finalTokens: originalTokens
+    };
+  }
+
+  const targetLength = Math.floor(content.length * (maxTokens / originalTokens));
+  const truncatedContent = content.slice(0, targetLength) + "...[truncated]";
+
+  return {
+    content: truncatedContent,
+    truncated: true,
+    originalTokens,
+    finalTokens: Math.ceil(truncatedContent.length / 4)
+  };
+}
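For orientation, a minimal caller sketch for the new request helper. The endpoint path, model id, and message content are illustrative placeholders rather than values defined by the package, and the retry settings simply restate the documented defaults.

import type { IExecuteFunctions } from "n8n-workflow";
import { makeGitHubCopilotRequest, type RetryConfig } from "./GitHubCopilotApiUtils";

// Hypothetical call from inside a node's execute() implementation.
async function askCopilot(context: IExecuteFunctions): Promise<string> {
  const retry: RetryConfig = { maxRetries: 3, baseDelay: 500, retryOn403: true };

  const response = await makeGitHubCopilotRequest(
    context,
    "/chat/completions",              // illustrative path, appended to GITHUB_COPILOT_API.BASE_URL
    {
      model: "gpt-4o",                // illustrative model id
      messages: [{ role: "user", content: "Summarize this workflow run." }],
    },
    false,                            // hasMedia: no image/audio parts in this request
    retry,
  );

  // The wrapper attaches _retryMetadata so callers can see how many attempts were needed.
  console.log(response._retryMetadata);
  return response.choices[0]?.message?.content ?? "";
}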