n8n-nodes-github-copilot 4.2.1 → 4.4.0

This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (37)
  1. package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.d.ts +1 -0
  2. package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js +47 -15
  3. package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.js +37 -0
  4. package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.d.ts +1 -0
  5. package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js +174 -10
  6. package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.d.ts +1 -0
  7. package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.js +65 -4
  8. package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.js +64 -27
  9. package/dist/nodes/GitHubCopilotPGVector/GitHubCopilotPGVector.node.d.ts +10 -0
  10. package/dist/nodes/GitHubCopilotPGVector/GitHubCopilotPGVector.node.js +421 -0
  11. package/dist/package.json +3 -4
  12. package/dist/shared/models/DynamicModelLoader.d.ts +1 -0
  13. package/dist/shared/models/DynamicModelLoader.js +12 -0
  14. package/dist/shared/models/GitHubCopilotModels.d.ts +14 -8
  15. package/dist/shared/models/GitHubCopilotModels.js +255 -74
  16. package/dist/shared/utils/DynamicModelsManager.d.ts +11 -0
  17. package/dist/shared/utils/DynamicModelsManager.js +50 -0
  18. package/dist/shared/utils/GitHubCopilotApiUtils.d.ts +1 -0
  19. package/dist/shared/utils/GitHubCopilotApiUtils.js +85 -6
  20. package/package.json +3 -4
  21. package/shared/icons/copilot.svg +0 -34
  22. package/shared/index.ts +0 -27
  23. package/shared/models/DynamicModelLoader.ts +0 -124
  24. package/shared/models/GitHubCopilotModels.ts +0 -420
  25. package/shared/models/ModelVersionRequirements.ts +0 -165
  26. package/shared/properties/ModelProperties.ts +0 -52
  27. package/shared/properties/ModelSelectionProperty.ts +0 -68
  28. package/shared/utils/DynamicModelsManager.ts +0 -355
  29. package/shared/utils/EmbeddingsApiUtils.ts +0 -135
  30. package/shared/utils/FileChunkingApiUtils.ts +0 -176
  31. package/shared/utils/FileOptimizationUtils.ts +0 -210
  32. package/shared/utils/GitHubCopilotApiUtils.ts +0 -407
  33. package/shared/utils/GitHubCopilotEndpoints.ts +0 -212
  34. package/shared/utils/GitHubDeviceFlowHandler.ts +0 -276
  35. package/shared/utils/OAuthTokenManager.ts +0 -196
  36. package/shared/utils/provider-injection.ts +0 -277
  37. package/shared/utils/version-detection.ts +0 -145
@@ -14,7 +14,7 @@ exports.GITHUB_COPILOT_MODELS = [
  maxOutputTokens: 16384,
  streaming: true,
  provider: "OpenAI",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
  status: "stable"
@@ -22,41 +22,151 @@ exports.GITHUB_COPILOT_MODELS = [
  {
  value: "gpt-5",
  name: "GPT-5",
- description: "Latest generation GPT model with enhanced capabilities",
+ description: "Latest generation GPT model with vision (400K context, 128K output) - PREMIUM",
  capabilities: {
  toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 128000,
- maxOutputTokens: 64000,
+ maxContextTokens: 400000,
+ maxOutputTokens: 128000,
  streaming: true,
  provider: "OpenAI",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
- status: "stable"
+ status: "stable",
+ isPremium: true
  },
  {
  value: "gpt-5-mini",
  name: "GPT-5 Mini",
- description: "Faster and more efficient GPT-5 model",
+ description: "Faster GPT-5 model with vision (264K context, 64K output) - PREMIUM",
  capabilities: {
  toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 128000,
+ maxContextTokens: 264000,
  maxOutputTokens: 64000,
  streaming: true,
  provider: "OpenAI",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
- status: "stable"
+ status: "stable",
+ isPremium: true
+ },
+ {
+ value: "gpt-5.1",
+ name: "GPT-5.1",
+ description: "Enhanced GPT-5 model with vision (264K context, 64K output) - PREMIUM",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 264000,
+ maxOutputTokens: 64000,
+ streaming: true,
+ provider: "OpenAI",
+ category: "versatile"
+ },
+ recommended: true,
+ status: "stable",
+ isPremium: true
+ },
+ {
+ value: "gpt-5.2",
+ name: "GPT-5.2",
+ description: "Latest GPT-5.2 model with vision (264K context, 64K output) - PREMIUM",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 264000,
+ maxOutputTokens: 64000,
+ streaming: true,
+ provider: "OpenAI",
+ category: "versatile"
+ },
+ recommended: true,
+ status: "stable",
+ isPremium: true
+ },
+ {
+ value: "gpt-5-codex",
+ name: "GPT-5-Codex (Preview)",
+ description: "GPT-5 optimized for coding with vision (400K context, 128K output) - PREMIUM",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 400000,
+ maxOutputTokens: 128000,
+ streaming: true,
+ provider: "OpenAI",
+ category: "powerful"
+ },
+ recommended: true,
+ status: "preview",
+ isPremium: true
+ },
+ {
+ value: "gpt-5.1-codex",
+ name: "GPT-5.1-Codex",
+ description: "GPT-5.1 optimized for coding with vision (400K context, 128K output) - PREMIUM",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 400000,
+ maxOutputTokens: 128000,
+ streaming: true,
+ provider: "OpenAI",
+ category: "powerful"
+ },
+ recommended: true,
+ status: "stable",
+ isPremium: true
+ },
+ {
+ value: "gpt-5.1-codex-mini",
+ name: "GPT-5.1-Codex-Mini",
+ description: "Smaller GPT-5.1-Codex with vision (400K context, 128K output) - PREMIUM (0.33x)",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 400000,
+ maxOutputTokens: 128000,
+ streaming: true,
+ provider: "OpenAI",
+ category: "powerful"
+ },
+ recommended: true,
+ status: "preview",
+ isPremium: true
+ },
+ {
+ value: "gpt-5.1-codex-max",
+ name: "GPT-5.1-Codex-Max",
+ description: "Largest GPT-5.1-Codex with vision (400K context, 128K output) - PREMIUM",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 400000,
+ maxOutputTokens: 128000,
+ streaming: true,
+ provider: "OpenAI",
+ category: "powerful"
+ },
+ recommended: true,
+ status: "stable",
+ isPremium: true
  },
  {
  value: "gpt-4.1",
  name: "GPT-4.1",
- description: "Enhanced GPT-4 model with improved capabilities",
+ description: "Enhanced GPT-4 with vision (128K context, 16K output) - Chat Fallback Model",
  capabilities: {
  toolsCalling: true,
  vision: true,
@@ -65,7 +175,7 @@ exports.GITHUB_COPILOT_MODELS = [
  maxOutputTokens: 16384,
  streaming: true,
  provider: "OpenAI",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
  status: "stable"
@@ -73,7 +183,7 @@ exports.GITHUB_COPILOT_MODELS = [
  {
  value: "gpt-4o",
  name: "GPT-4o",
- description: "Most capable GPT-4 model with vision, optimized for chat and complex reasoning",
+ description: "GPT-4 Omni with vision (128K context, 4K output)",
  capabilities: {
  toolsCalling: true,
  vision: true,
@@ -82,7 +192,7 @@ exports.GITHUB_COPILOT_MODELS = [
  maxOutputTokens: 4096,
  streaming: true,
  provider: "OpenAI",
- category: "multimodal"
+ category: "versatile"
  },
  recommended: true,
  status: "stable"
@@ -90,24 +200,41 @@ exports.GITHUB_COPILOT_MODELS = [
  {
  value: "gpt-4o-mini",
  name: "GPT-4o Mini",
- description: "Faster and more cost-effective GPT-4o - VERIFIED WORKING",
+ description: "Faster GPT-4o - ⚠️ NO VISION SUPPORT (128K context, 4K output)",
  capabilities: {
  toolsCalling: true,
- vision: true,
- multimodal: true,
+ vision: false,
+ multimodal: false,
  maxContextTokens: 128000,
  maxOutputTokens: 4096,
  streaming: true,
  provider: "OpenAI",
- category: "chat"
+ category: "lightweight"
  },
  recommended: true,
  status: "stable"
  },
+ {
+ value: "gpt-41-copilot",
+ name: "GPT-4.1 Copilot",
+ description: "GPT-4.1 fine-tuned for Copilot (completion type, not chat)",
+ capabilities: {
+ toolsCalling: false,
+ vision: false,
+ multimodal: false,
+ maxContextTokens: 128000,
+ maxOutputTokens: 16384,
+ streaming: true,
+ provider: "OpenAI",
+ category: "versatile"
+ },
+ recommended: false,
+ status: "stable"
+ },
  {
  value: "o3-mini",
  name: "o3 Mini",
- description: "New reasoning model optimized for coding and complex tasks",
+ description: "Reasoning model optimized for coding (200K context, 100K output) - PREMIUM",
  capabilities: {
  toolsCalling: true,
  vision: false,
@@ -119,12 +246,13 @@ exports.GITHUB_COPILOT_MODELS = [
  category: "reasoning"
  },
  recommended: true,
- status: "stable"
+ status: "stable",
+ isPremium: true
  },
  {
  value: "oswe-vscode-prime",
  name: "Raptor mini (Preview)",
- description: "Fast and versatile model optimized for VS Code by Microsoft (Azure OpenAI)",
+ description: "Microsoft model optimized for VS Code with vision (264K context, 64K output)",
  capabilities: {
  toolsCalling: true,
  vision: true,
@@ -133,100 +261,140 @@ exports.GITHUB_COPILOT_MODELS = [
  maxOutputTokens: 64000,
  streaming: true,
  provider: "Microsoft",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
  status: "preview"
  },
+ {
+ value: "grok-code-fast-1",
+ name: "Grok Code Fast 1",
+ description: "xAI Grok model for fast coding (128K context, 64K output) - ⚠️ NO vision",
+ capabilities: {
+ toolsCalling: true,
+ vision: false,
+ multimodal: false,
+ maxContextTokens: 128000,
+ maxOutputTokens: 64000,
+ streaming: true,
+ provider: "xAI",
+ category: "lightweight"
+ },
+ recommended: true,
+ status: "stable"
+ },
  {
  value: "claude-sonnet-4",
  name: "Claude Sonnet 4",
- description: "Latest Claude model with advanced reasoning capabilities",
+ description: "Claude Sonnet 4 with vision (216K context, 16K output) - PREMIUM",
  capabilities: {
  toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 128000,
+ maxContextTokens: 216000,
  maxOutputTokens: 16000,
  streaming: true,
  provider: "Anthropic",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
- status: "stable"
+ status: "stable",
+ isPremium: true
  },
  {
- value: "claude-opus-4",
- name: "Claude Opus 4",
- description: "Most powerful Claude model for complex reasoning (may have performance issues)",
+ value: "claude-sonnet-4.5",
+ name: "Claude Sonnet 4.5",
+ description: "Claude Sonnet 4.5 with vision (144K context, 16K output) - PREMIUM",
  capabilities: {
- toolsCalling: false,
+ toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 80000,
+ maxContextTokens: 144000,
  maxOutputTokens: 16000,
  streaming: true,
  provider: "Anthropic",
- category: "reasoning"
+ category: "versatile"
  },
- recommended: false,
- status: "stable"
+ recommended: true,
+ status: "stable",
+ isPremium: true
  },
  {
- value: "claude-3.7-sonnet",
- name: "Claude 3.7 Sonnet",
- description: "Enhanced Claude 3.5 with improved capabilities",
+ value: "claude-haiku-4.5",
+ name: "Claude Haiku 4.5",
+ description: "Fast Claude model with vision (144K context, 16K output) - PREMIUM (0.33x)",
  capabilities: {
  toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 200000,
- maxOutputTokens: 16384,
+ maxContextTokens: 144000,
+ maxOutputTokens: 16000,
  streaming: true,
  provider: "Anthropic",
- category: "chat"
+ category: "versatile"
  },
  recommended: true,
- status: "stable"
+ status: "stable",
+ isPremium: true
+ },
+ {
+ value: "claude-opus-4.5",
+ name: "Claude Opus 4.5",
+ description: "Most powerful Claude with vision (144K context, 16K output) - PREMIUM (3x)",
+ capabilities: {
+ toolsCalling: true,
+ vision: true,
+ multimodal: true,
+ maxContextTokens: 144000,
+ maxOutputTokens: 16000,
+ streaming: true,
+ provider: "Anthropic",
+ category: "powerful"
+ },
+ recommended: true,
+ status: "stable",
+ isPremium: true
  },
  {
- value: "claude-3.7-sonnet-thought",
- name: "Claude 3.7 Sonnet Thinking",
- description: "Claude with visible reasoning process",
+ value: "claude-opus-41",
+ name: "Claude Opus 4.1",
+ description: "Claude Opus 4.1 with vision (80K context, 16K output) - PREMIUM (10x) - Limited",
  capabilities: {
  toolsCalling: false,
  vision: true,
  multimodal: true,
- maxContextTokens: 200000,
- maxOutputTokens: 16384,
+ maxContextTokens: 80000,
+ maxOutputTokens: 16000,
  streaming: true,
  provider: "Anthropic",
- category: "reasoning"
+ category: "powerful"
  },
  recommended: false,
- status: "stable"
+ status: "stable",
+ isPremium: true
  },
  {
- value: "claude-3.5-sonnet",
- name: "Claude 3.5 Sonnet",
- description: "Anthropic's balanced model with excellent reasoning and creativity",
+ value: "gemini-2.5-pro",
+ name: "Gemini 2.5 Pro",
+ description: "Gemini 2.5 Pro with vision (128K context, 64K output) - PREMIUM",
  capabilities: {
  toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 90000,
- maxOutputTokens: 8192,
+ maxContextTokens: 128000,
+ maxOutputTokens: 64000,
  streaming: true,
- provider: "Anthropic",
- category: "chat"
+ provider: "Google",
+ category: "powerful"
  },
  recommended: true,
- status: "stable"
+ status: "stable",
+ isPremium: true
  },
  {
- value: "gemini-2.5-pro",
- name: "Gemini 2.5 Pro",
- description: "Most advanced Gemini model with reasoning capabilities",
+ value: "gemini-3-pro-preview",
+ name: "Gemini 3 Pro (Preview)",
+ description: "Gemini 3 Pro with vision (128K context, 64K output) - PREMIUM",
  capabilities: {
  toolsCalling: true,
  vision: true,
@@ -235,27 +403,29 @@ exports.GITHUB_COPILOT_MODELS = [
  maxOutputTokens: 64000,
  streaming: true,
  provider: "Google",
- category: "reasoning"
+ category: "powerful"
  },
  recommended: true,
- status: "stable"
+ status: "preview",
+ isPremium: true
  },
  {
- value: "gemini-2.0-flash-001",
- name: "Gemini 2.0 Flash",
- description: "Fast and efficient Gemini model with large context window",
+ value: "gemini-3-flash-preview",
+ name: "Gemini 3 Flash (Preview)",
+ description: "Fast Gemini 3 with vision (128K context, 64K output) - PREMIUM (0.33x)",
  capabilities: {
  toolsCalling: true,
  vision: true,
  multimodal: true,
- maxContextTokens: 1000000,
- maxOutputTokens: 8192,
+ maxContextTokens: 128000,
+ maxOutputTokens: 64000,
  streaming: true,
  provider: "Google",
- category: "chat"
+ category: "lightweight"
  },
  recommended: true,
- status: "stable"
+ status: "preview",
+ isPremium: true
  }
  ];
  class GitHubCopilotModelsManager {
@@ -280,6 +450,12 @@ class GitHubCopilotModelsManager {
  static getStableModels() {
  return exports.GITHUB_COPILOT_MODELS.filter(model => model.status === "stable");
  }
+ static getFreeModels() {
+ return exports.GITHUB_COPILOT_MODELS.filter(model => !model.isPremium);
+ }
+ static getPremiumModels() {
+ return exports.GITHUB_COPILOT_MODELS.filter(model => model.isPremium);
+ }
  static getModelByValue(value) {
  return exports.GITHUB_COPILOT_MODELS.find(model => model.value === value);
  }
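The new getFreeModels()/getPremiumModels() helpers split the static catalog on the isPremium flag introduced throughout this release. A minimal sketch of building grouped dropdown options from them, assuming the import path and an n8n-style { name, value } option shape (neither is shown in this diff):

// Partition the catalog for a model-picker UI. Only the `name`,
// `value`, and `isPremium` fields are confirmed by this diff.
import { GitHubCopilotModelsManager } from "./dist/shared/models/GitHubCopilotModels";

const freeOptions = GitHubCopilotModelsManager.getFreeModels()
  .map((m) => ({ name: m.name, value: m.value }));
const premiumOptions = GitHubCopilotModelsManager.getPremiumModels()
  .map((m) => ({ name: `${m.name} (premium)`, value: m.value }));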
@@ -297,11 +473,13 @@ class GitHubCopilotModelsManager {
  return this.getRecommendedModels();
  case "coding":
  return exports.GITHUB_COPILOT_MODELS.filter(model => model.capabilities.category === "coding" ||
+ model.capabilities.category === "powerful" ||
  model.capabilities.toolsCalling);
  case "vision":
  return this.getVisionCapableModels();
  case "reasoning":
- return exports.GITHUB_COPILOT_MODELS.filter(model => model.capabilities.category === "reasoning");
+ return exports.GITHUB_COPILOT_MODELS.filter(model => model.capabilities.category === "reasoning" ||
+ model.capabilities.category === "powerful");
  case "tools":
  return this.getToolsCapableModels();
  default:
@@ -311,10 +489,13 @@ class GitHubCopilotModelsManager {
  }
  exports.GitHubCopilotModelsManager = GitHubCopilotModelsManager;
  exports.DEFAULT_MODELS = {
- GENERAL: "gpt-4o-mini",
- CODING: "o3-mini",
+ GENERAL: "gpt-4.1",
+ CODING: "gpt-5-codex",
  VISION: "gpt-4o",
- REASONING: "claude-sonnet-4",
- TOOLS: "gpt-5",
- MULTIMODAL: "gemini-2.5-pro"
+ VISION_FALLBACK: "gpt-4.1",
+ REASONING: "o3-mini",
+ TOOLS: "gpt-4.1",
+ MULTIMODAL: "gemini-2.5-pro",
+ FREE: "gpt-4.1",
+ PREMIUM: "gpt-5.2"
  };
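DEFAULT_MODELS now distinguishes a FREE default (gpt-4.1, the non-premium chat fallback per the descriptions above) from a PREMIUM default (gpt-5.2), and CODING moves from o3-mini to gpt-5-codex. A sketch of how a caller might honor that split; the usePremium flag is a hypothetical caller-side option, not part of the package:

// Pick a default model depending on whether premium requests
// (which consume Copilot premium-request quota) are allowed.
import { DEFAULT_MODELS } from "./dist/shared/models/GitHubCopilotModels";

function defaultModel(usePremium: boolean): string {
  // FREE and PREMIUM are new keys in 4.4.0.
  return usePremium ? DEFAULT_MODELS.PREMIUM : DEFAULT_MODELS.FREE;
}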
@@ -32,6 +32,17 @@ export declare class DynamicModelsManager {
  }>;
  static clearCache(oauthToken: string): void;
  static clearAllCache(): void;
+ static getModelFromCache(oauthToken: string, modelId: string): CopilotModel | null;
+ static modelSupportsVision(oauthToken: string, modelId: string): boolean | null;
+ static modelSupportsTools(oauthToken: string, modelId: string): boolean | null;
+ static getModelCapabilities(oauthToken: string, modelId: string): {
+ vision: boolean;
+ tools: boolean;
+ streaming: boolean;
+ maxContextTokens: number;
+ maxOutputTokens: number;
+ isPremium: boolean;
+ } | null;
  static getCacheInfo(oauthToken: string): {
  cached: boolean;
  modelsCount: number;
@@ -181,6 +181,56 @@ class DynamicModelsManager {
  this.cache.clear();
  console.log("🗑️ Cleared all models cache");
  }
+ static getModelFromCache(oauthToken, modelId) {
+ const tokenHash = this.hashToken(oauthToken);
+ const cached = this.cache.get(tokenHash);
+ if (!cached) {
+ return null;
+ }
+ return cached.models.find(m => m.id === modelId) || null;
+ }
+ static modelSupportsVision(oauthToken, modelId) {
+ var _a, _b;
+ const model = this.getModelFromCache(oauthToken, modelId);
+ if (!model) {
+ return null;
+ }
+ const supports = ((_a = model.capabilities) === null || _a === void 0 ? void 0 : _a.supports) || {};
+ if (supports.vision === true) {
+ return true;
+ }
+ const limits = ((_b = model.capabilities) === null || _b === void 0 ? void 0 : _b.limits) || {};
+ if (limits.vision) {
+ return true;
+ }
+ return false;
+ }
+ static modelSupportsTools(oauthToken, modelId) {
+ var _a;
+ const model = this.getModelFromCache(oauthToken, modelId);
+ if (!model) {
+ return null;
+ }
+ const supports = ((_a = model.capabilities) === null || _a === void 0 ? void 0 : _a.supports) || {};
+ return supports.tool_calls === true;
+ }
+ static getModelCapabilities(oauthToken, modelId) {
+ var _a, _b, _c;
+ const model = this.getModelFromCache(oauthToken, modelId);
+ if (!model) {
+ return null;
+ }
+ const supports = ((_a = model.capabilities) === null || _a === void 0 ? void 0 : _a.supports) || {};
+ const limits = ((_b = model.capabilities) === null || _b === void 0 ? void 0 : _b.limits) || {};
+ return {
+ vision: supports.vision === true || !!limits.vision,
+ tools: supports.tool_calls === true,
+ streaming: supports.streaming === true,
+ maxContextTokens: limits.max_context_window_tokens || 128000,
+ maxOutputTokens: limits.max_output_tokens || 4096,
+ isPremium: ((_c = model.billing) === null || _c === void 0 ? void 0 : _c.is_premium) === true,
+ };
+ }
  static getCacheInfo(oauthToken) {
  const tokenHash = this.hashToken(oauthToken);
  const cached = this.cache.get(tokenHash);
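These new cache helpers let callers gate requests on capability data fetched from the live models endpoint instead of the static table; per the code above they return null on a cold cache. A usage sketch (the guard function is illustrative, not part of the package):

// Only attach image content if the dynamically fetched model entry
// reports vision support; treat an unfetched cache (null) as "no".
import { DynamicModelsManager } from "./dist/shared/utils/DynamicModelsManager";

function canSendImages(oauthToken: string, modelId: string): boolean {
  const caps = DynamicModelsManager.getModelCapabilities(oauthToken, modelId);
  return caps !== null && caps.vision;
}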
@@ -38,6 +38,7 @@ export interface RetryConfig {
  retryOn403?: boolean;
  }
  export declare function makeGitHubCopilotRequest(context: IExecuteFunctions, endpoint: string, body: Record<string, unknown>, hasMedia?: boolean, retryConfig?: RetryConfig): Promise<CopilotResponse>;
+ export declare function uploadFileToCopilot(context: IExecuteFunctions, buffer: Buffer, filename: string, mimeType?: string): Promise<any>;
  export declare function downloadFileFromUrl(url: string): Promise<Buffer>;
  export declare function getFileFromBinary(context: IExecuteFunctions, itemIndex: number, propertyName: string): Promise<Buffer>;
  export declare function getImageMimeType(filename: string): string;
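uploadFileToCopilot is the only new export in this file; its declared signature takes the node's execution context, a file Buffer, a filename, and an optional MIME type. A hedged call-site sketch from inside an n8n execute() method, where `this` is the IExecuteFunctions context; the binary property name and the shape of the resolved value (typed any above) are assumptions:

// Pull the first item's binary data via the existing helper,
// then hand it to the new upload function.
const buffer = await getFileFromBinary(this, 0, "data");
const uploaded = await uploadFileToCopilot(this, buffer, "report.pdf", "application/pdf");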