n8n-nodes-github-copilot 4.1.2 → 4.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,420 @@
1
+ /**
2
+ * Centralized model definitions for GitHub Copilot integration
3
+ * This file maintains consistency across all nodes and provides
4
+ * a single source of truth for available models and their capabilities
5
+ */
6
+
7
/**
 * Capability flags and token limits for a single GitHub Copilot model.
 * Values mirror what the Copilot models API reports per model.
 */
export interface ModelCapability {
  /** Supports function/tools calling */
  toolsCalling: boolean;
  /** Supports vision/image processing */
  vision: boolean;
  /** Supports multimodal input (text + images) */
  multimodal: boolean;
  /** Maximum context window in tokens */
  maxContextTokens: number;
  /** Maximum output tokens */
  maxOutputTokens: number;
  /** Supports streaming responses */
  streaming: boolean;
  /** Provider of the model */
  provider: "OpenAI" | "Anthropic" | "Google" | "Microsoft";
  /** Model category (primary intended use) */
  category: "chat" | "reasoning" | "coding" | "vision" | "multimodal";
}
25
+
26
/**
 * One entry in the GitHub Copilot model catalog.
 * Combines the API identifier, UI metadata, and capability data.
 */
export interface GitHubCopilotModel {
  /** Model identifier for API calls (e.g. "gpt-4o-mini") */
  value: string;
  /** Display name for UI */
  name: string;
  /** Detailed description shown to the user */
  description: string;
  /** Model capabilities and limits */
  capabilities: ModelCapability;
  /** Whether model is recommended for general use */
  recommended: boolean;
  /** Release status */
  status: "stable" | "preview" | "experimental";
}
40
+
41
/**
 * Complete list of GitHub Copilot available models.
 * Updated from REAL API response - September 2025.
 *
 * NOTE(review): capability values (context/output token limits, tools/vision
 * flags) are hand-copied from an API snapshot — re-verify against the live
 * /models endpoint when bumping versions.
 */
export const GITHUB_COPILOT_MODELS: GitHubCopilotModel[] = [
  // Auto selection (important!)
  {
    value: "auto",
    name: "Auto (Recommended)",
    description: "Automatically selects the best model for your task",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 16384,
      streaming: true,
      // "auto" has no single provider; "OpenAI" is a placeholder — TODO confirm
      provider: "OpenAI",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },

  // OpenAI GPT Models - VERIFIED via API
  {
    value: "gpt-5",
    name: "GPT-5",
    description: "Latest generation GPT model with enhanced capabilities",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 64000,
      streaming: true,
      provider: "OpenAI",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "gpt-5-mini",
    name: "GPT-5 Mini",
    description: "Faster and more efficient GPT-5 model",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 64000,
      streaming: true,
      provider: "OpenAI",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "gpt-4.1",
    name: "GPT-4.1",
    description: "Enhanced GPT-4 model with improved capabilities",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 16384,
      streaming: true,
      provider: "OpenAI",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "gpt-4o",
    name: "GPT-4o",
    description: "Most capable GPT-4 model with vision, optimized for chat and complex reasoning",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 4096,
      streaming: true,
      provider: "OpenAI",
      category: "multimodal"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "gpt-4o-mini",
    name: "GPT-4o Mini",
    description: "Faster and more cost-effective GPT-4o - VERIFIED WORKING",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 4096,
      streaming: true,
      provider: "OpenAI",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "o3-mini",
    name: "o3 Mini",
    description: "New reasoning model optimized for coding and complex tasks",
    capabilities: {
      toolsCalling: true,
      vision: false,
      multimodal: false,
      maxContextTokens: 200000,
      maxOutputTokens: 100000,
      streaming: true,
      provider: "OpenAI",
      category: "reasoning"
    },
    recommended: true,
    status: "stable"
  },

  // Microsoft Models
  {
    value: "oswe-vscode-prime",
    name: "Raptor mini (Preview)",
    description: "Fast and versatile model optimized for VS Code by Microsoft (Azure OpenAI)",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 264000,
      maxOutputTokens: 64000,
      streaming: true,
      provider: "Microsoft",
      category: "chat"
    },
    recommended: true,
    status: "preview"
  },

  // Anthropic Claude Models - VERIFIED via API
  {
    value: "claude-sonnet-4",
    name: "Claude Sonnet 4",
    description: "Latest Claude model with advanced reasoning capabilities",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 16000,
      streaming: true,
      provider: "Anthropic",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "claude-opus-4",
    name: "Claude Opus 4",
    description: "Most powerful Claude model for complex reasoning (may have performance issues)",
    capabilities: {
      toolsCalling: false, // Based on API response
      vision: true,
      multimodal: true,
      maxContextTokens: 80000,
      maxOutputTokens: 16000,
      streaming: true,
      provider: "Anthropic",
      category: "reasoning"
    },
    recommended: false,
    status: "stable"
  },
  {
    value: "claude-3.7-sonnet",
    name: "Claude 3.7 Sonnet",
    description: "Enhanced Claude 3.5 with improved capabilities",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 200000,
      maxOutputTokens: 16384,
      streaming: true,
      provider: "Anthropic",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "claude-3.7-sonnet-thought",
    name: "Claude 3.7 Sonnet Thinking",
    description: "Claude with visible reasoning process",
    capabilities: {
      toolsCalling: false, // Based on API response
      vision: true,
      multimodal: true,
      maxContextTokens: 200000,
      maxOutputTokens: 16384,
      streaming: true,
      provider: "Anthropic",
      category: "reasoning"
    },
    recommended: false,
    status: "stable"
  },
  {
    value: "claude-3.5-sonnet",
    name: "Claude 3.5 Sonnet",
    description: "Anthropic's balanced model with excellent reasoning and creativity",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 90000,
      maxOutputTokens: 8192,
      streaming: true,
      provider: "Anthropic",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  },

  // Google Gemini Models - VERIFIED via API
  {
    value: "gemini-2.5-pro",
    name: "Gemini 2.5 Pro",
    description: "Most advanced Gemini model with reasoning capabilities",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 128000,
      maxOutputTokens: 64000,
      streaming: true,
      provider: "Google",
      category: "reasoning"
    },
    recommended: true,
    status: "stable"
  },
  {
    value: "gemini-2.0-flash-001",
    name: "Gemini 2.0 Flash",
    description: "Fast and efficient Gemini model with large context window",
    capabilities: {
      toolsCalling: true,
      vision: true,
      multimodal: true,
      maxContextTokens: 1000000,
      maxOutputTokens: 8192,
      streaming: true,
      provider: "Google",
      category: "chat"
    },
    recommended: true,
    status: "stable"
  }
];
311
+
312
+ /**
313
+ * Get models filtered by capability
314
+ */
315
+ export class GitHubCopilotModelsManager {
316
+ /**
317
+ * Get all available models
318
+ */
319
+ static getAllModels(): GitHubCopilotModel[] {
320
+ return GITHUB_COPILOT_MODELS;
321
+ }
322
+
323
+ /**
324
+ * Get models that support tools calling
325
+ */
326
+ static getToolsCapableModels(): GitHubCopilotModel[] {
327
+ return GITHUB_COPILOT_MODELS.filter(model => model.capabilities.toolsCalling);
328
+ }
329
+
330
+ /**
331
+ * Get models that support vision
332
+ */
333
+ static getVisionCapableModels(): GitHubCopilotModel[] {
334
+ return GITHUB_COPILOT_MODELS.filter(model => model.capabilities.vision);
335
+ }
336
+
337
+ /**
338
+ * Get models by provider
339
+ */
340
+ static getModelsByProvider(provider: "OpenAI" | "Anthropic" | "Google" | "Microsoft"): GitHubCopilotModel[] {
341
+ return GITHUB_COPILOT_MODELS.filter(model => model.capabilities.provider === provider);
342
+ }
343
+
344
+ /**
345
+ * Get models by category
346
+ */
347
+ static getModelsByCategory(category: "chat" | "reasoning" | "coding" | "vision" | "multimodal"): GitHubCopilotModel[] {
348
+ return GITHUB_COPILOT_MODELS.filter(model => model.capabilities.category === category);
349
+ }
350
+
351
+ /**
352
+ * Get recommended models only
353
+ */
354
+ static getRecommendedModels(): GitHubCopilotModel[] {
355
+ return GITHUB_COPILOT_MODELS.filter(model => model.recommended);
356
+ }
357
+
358
+ /**
359
+ * Get stable models only
360
+ */
361
+ static getStableModels(): GitHubCopilotModel[] {
362
+ return GITHUB_COPILOT_MODELS.filter(model => model.status === "stable");
363
+ }
364
+
365
+ /**
366
+ * Get model by value
367
+ */
368
+ static getModelByValue(value: string): GitHubCopilotModel | undefined {
369
+ return GITHUB_COPILOT_MODELS.find(model => model.value === value);
370
+ }
371
+
372
+ /**
373
+ * Convert models to n8n options format
374
+ */
375
+ static toN8nOptions(models?: GitHubCopilotModel[]): Array<{name: string, value: string, description: string}> {
376
+ const modelsToUse = models || GITHUB_COPILOT_MODELS;
377
+ return modelsToUse.map(model => ({
378
+ name: model.name,
379
+ value: model.value,
380
+ description: model.description
381
+ }));
382
+ }
383
+
384
+ /**
385
+ * Get models suitable for specific use cases
386
+ */
387
+ static getModelsForUseCase(useCase: "general" | "coding" | "vision" | "reasoning" | "tools"): GitHubCopilotModel[] {
388
+ switch (useCase) {
389
+ case "general":
390
+ return this.getRecommendedModels();
391
+ case "coding":
392
+ return GITHUB_COPILOT_MODELS.filter(model =>
393
+ model.capabilities.category === "coding" ||
394
+ model.capabilities.toolsCalling
395
+ );
396
+ case "vision":
397
+ return this.getVisionCapableModels();
398
+ case "reasoning":
399
+ return GITHUB_COPILOT_MODELS.filter(model =>
400
+ model.capabilities.category === "reasoning"
401
+ );
402
+ case "tools":
403
+ return this.getToolsCapableModels();
404
+ default:
405
+ return this.getAllModels();
406
+ }
407
+ }
408
+ }
409
+
410
/**
 * Default model for different scenarios.
 * Each value must match a `value` entry in GITHUB_COPILOT_MODELS.
 */
export const DEFAULT_MODELS = {
  GENERAL: "gpt-4o-mini", // VERIFIED working in tests
  CODING: "o3-mini",
  VISION: "gpt-4o",
  // NOTE(review): claude-sonnet-4 is categorized "chat" (not "reasoning")
  // in GITHUB_COPILOT_MODELS — confirm this default is intentional.
  REASONING: "claude-sonnet-4",
  TOOLS: "gpt-5",
  MULTIMODAL: "gemini-2.5-pro"
} as const;
@@ -0,0 +1,165 @@
1
+ /**
2
+ * Model Version Requirements Configuration
3
+ *
4
+ * Maps GitHub Copilot models to their specific version requirements,
5
+ * supported endpoints, and any additional headers needed.
6
+ *
7
+ * This is extracted from models.json and API error responses.
8
+ */
9
+
10
/**
 * Version/endpoint requirements for one model.
 * See MODEL_VERSION_REQUIREMENTS for per-model entries and
 * DEFAULT_MODEL_REQUIREMENTS for the fallback.
 */
export interface ModelRequirements {
  /** Minimum VS Code version required (e.g., "1.104.1") */
  minVSCodeVersion: string;

  /** Supported API endpoints for this model (e.g., "/chat/completions") */
  supportedEndpoints: string[];

  /** Whether model is in preview mode; treated as false when absent */
  preview?: boolean;

  /** Additional HTTP headers required for this model, if any */
  additionalHeaders?: Record<string, string>;

  /** Special notes or warnings for maintainers/users */
  notes?: string;
}
26
+
27
/**
 * Model-specific requirements configuration.
 *
 * Models not listed here use default configuration:
 * - minVSCodeVersion: "1.95.0"
 * - supportedEndpoints: ["/chat/completions", "/responses"]
 *
 * NOTE(review): values are extracted from models.json and API error
 * responses — re-verify when the upstream API changes.
 */
export const MODEL_VERSION_REQUIREMENTS: Record<string, ModelRequirements> = {
  // GPT-5-Codex: Requires newer VS Code version and /responses endpoint only
  "gpt-5-codex": {
    minVSCodeVersion: "1.104.1",
    supportedEndpoints: ["/responses"],
    preview: true,
    notes: "Preview model requiring VS Code 1.104.1 or newer. Only supports /responses endpoint."
  },

  // GPT-5: Supports both endpoints
  "gpt-5": {
    minVSCodeVersion: "1.95.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: false,
  },

  // GPT-5 Mini: Supports both endpoints
  "gpt-5-mini": {
    minVSCodeVersion: "1.95.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: false,
  },

  // o3: Preview model but works with standard version
  "o3": {
    minVSCodeVersion: "1.95.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: true,
  },

  // o3-2025-04-16: Specific version
  "o3-2025-04-16": {
    minVSCodeVersion: "1.95.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: true,
  },

  // o4-mini: Preview model
  "o4-mini": {
    minVSCodeVersion: "1.95.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: true,
  },

  // o4-mini-2025-04-16: Specific version
  "o4-mini-2025-04-16": {
    minVSCodeVersion: "1.95.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: true,
  },

  // Raptor Mini (oswe-vscode-prime)
  "oswe-vscode-prime": {
    minVSCodeVersion: "1.96.0",
    supportedEndpoints: ["/chat/completions", "/responses"],
    preview: true,
  },

  // Add more models with special requirements here as needed
};
94
+
95
/**
 * Default requirements for models not explicitly configured in
 * MODEL_VERSION_REQUIREMENTS.
 */
export const DEFAULT_MODEL_REQUIREMENTS: ModelRequirements = {
  minVSCodeVersion: "1.95.0",
  supportedEndpoints: ["/chat/completions", "/responses"],
  preview: false,
};
103
+
104
+ /**
105
+ * Get requirements for a specific model
106
+ * Returns model-specific requirements or defaults if not configured
107
+ */
108
+ export function getModelRequirements(model: string): ModelRequirements {
109
+ return MODEL_VERSION_REQUIREMENTS[model] || DEFAULT_MODEL_REQUIREMENTS;
110
+ }
111
+
112
+ /**
113
+ * Check if model supports a specific endpoint
114
+ */
115
+ export function modelSupportsEndpoint(model: string, endpoint: string): boolean {
116
+ const requirements = getModelRequirements(model);
117
+ return requirements.supportedEndpoints.includes(endpoint);
118
+ }
119
+
120
+ /**
121
+ * Get recommended endpoint for a model
122
+ * Returns the first supported endpoint (usually preferred one)
123
+ */
124
+ export function getRecommendedEndpoint(model: string): string {
125
+ const requirements = getModelRequirements(model);
126
+ return requirements.supportedEndpoints[0] || "/chat/completions";
127
+ }
128
+
129
+ /**
130
+ * Validate model and endpoint combination
131
+ * Throws error if combination is not supported
132
+ */
133
+ export function validateModelEndpoint(model: string, endpoint: string): void {
134
+ if (!modelSupportsEndpoint(model, endpoint)) {
135
+ const requirements = getModelRequirements(model);
136
+ throw new Error(
137
+ `Model "${model}" does not support endpoint "${endpoint}". ` +
138
+ `Supported endpoints: ${requirements.supportedEndpoints.join(", ")}`
139
+ );
140
+ }
141
+ }
142
+
143
+ /**
144
+ * Get minimum VS Code version for a model
145
+ */
146
+ export function getMinVSCodeVersion(model: string): string {
147
+ const requirements = getModelRequirements(model);
148
+ return requirements.minVSCodeVersion;
149
+ }
150
+
151
+ /**
152
+ * Check if model is in preview mode
153
+ */
154
+ export function isPreviewModel(model: string): boolean {
155
+ const requirements = getModelRequirements(model);
156
+ return requirements.preview || false;
157
+ }
158
+
159
+ /**
160
+ * Get all additional headers required for a model
161
+ */
162
+ export function getAdditionalHeaders(model: string): Record<string, string> {
163
+ const requirements = getModelRequirements(model);
164
+ return requirements.additionalHeaders || {};
165
+ }
@@ -0,0 +1,52 @@
1
+ /**
2
+ * Shared Model Properties for GitHub Copilot Nodes
3
+ *
4
+ * Provides consistent model selection properties across all GitHub Copilot nodes.
5
+ * All nodes now use the simplified CHAT_MODEL_PROPERTIES for uniform interface.
6
+ */
7
+
8
+ import { INodeProperties } from "n8n-workflow";
9
+ import { DEFAULT_MODELS } from "../models/GitHubCopilotModels";
10
+
11
/**
 * Chat Model Property (for all nodes).
 * Simplified version that allows both list selection and manual entry.
 * Options are populated at runtime via the node's `getAvailableModels`
 * loadOptions method.
 */
export const CHAT_MODEL_PROPERTY: INodeProperties = {
  displayName: "Model",
  name: "model",
  type: "options",
  typeOptions: {
    // Name of the loadOptions method implemented on the node class.
    loadOptionsMethod: "getAvailableModels",
  },
  default: DEFAULT_MODELS.GENERAL,
  description: "Select the GitHub Copilot model to use (loaded dynamically based on your subscription)",
};
25
+
26
/**
 * Manual Model Entry Property.
 * Appears when user selects "__manual__" from the dynamic dropdown.
 * NOTE(review): no "__manual__" entry exists in the static model list —
 * presumably the getAvailableModels loader appends it; confirm.
 */
export const MANUAL_MODEL_PROPERTY: INodeProperties = {
  displayName: "Custom Model Name",
  name: "customModel",
  type: "string",
  default: "",
  placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
  description: "Enter the model name manually. This is useful for new/beta models not yet in the list.",
  hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
  displayOptions: {
    show: {
      // Only visible when the "model" dropdown is set to the manual sentinel.
      model: ["__manual__"],
    },
  },
};
44
+
45
/**
 * Chat Model Properties Set (used by all nodes).
 * Simplified and consistent interface for all GitHub Copilot nodes:
 * the dropdown plus the conditional manual-entry field.
 */
export const CHAT_MODEL_PROPERTIES: INodeProperties[] = [
  CHAT_MODEL_PROPERTY,
  MANUAL_MODEL_PROPERTY,
];
@@ -0,0 +1,68 @@
1
+ /**
2
+ * Model Selection Property
3
+ *
4
+ * Reusable property for model selection across all GitHub Copilot nodes.
5
+ * Supports both dynamic model loading and manual input.
6
+ */
7
+
8
+ import { INodeProperties } from "n8n-workflow";
9
+ import { DEFAULT_MODELS } from "../models/GitHubCopilotModels";
10
+
11
/**
 * Model selection property with dynamic loading and manual input.
 *
 * Features:
 * - Dynamic model loading from user's subscription
 * - Manual input for custom/new models
 * - Refresh button to update model list
 * - Fallback to default models if loading fails
 */
export const modelSelectionProperty: INodeProperties = {
  displayName: "Model",
  name: "model",
  type: "options",
  typeOptions: {
    loadOptionsMethod: "getAvailableModels",
    loadOptionsDependsOn: ["refresh"], // Triggers reload when refresh changes
  },
  default: DEFAULT_MODELS.GENERAL,
  required: true,
  description: "Select a model from your subscription or enter a custom model name",
  placeholder: "Select model or type custom name (e.g., gpt-4o, claude-3.5-sonnet)",
  hint: "Models are loaded based on your GitHub Copilot subscription. Use the refresh button to update the list.",
  // Allow manual input - user can type any model name
  // NOTE(review): validateType on an "options"-typed property — confirm
  // the n8n version in use actually honors free-text entry here.
  validateType: "string",
  ignoreValidationDuringExecution: false,
};
37
+
38
/**
 * Optional refresh button property.
 * Place this after the model property to add a refresh button.
 * NOTE(review): type "button" with a typeOptions.action — confirm this
 * property type is supported by the targeted n8n version.
 */
export const modelRefreshProperty: INodeProperties = {
  displayName: "Refresh Models",
  name: "refreshModels",
  type: "button",
  typeOptions: {
    action: "refreshModels",
  },
  default: "",
  description: "Click to refresh the list of available models from your subscription",
  displayOptions: {
    show: {
      operation: ["chat"], // Show only in chat operation
    },
  },
};
57
+
58
/**
 * Helper property to trigger model list refresh.
 * This is a hidden field that changes value when the refresh button is
 * clicked; modelSelectionProperty depends on it via loadOptionsDependsOn.
 */
export const refreshTriggerProperty: INodeProperties = {
  displayName: "Refresh Trigger",
  name: "refresh",
  type: "hidden",
  default: 0,
  description: "Internal field to trigger model list refresh",
};