n8n-nodes-github-copilot 3.38.11 → 3.38.13
This diff reflects the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.js +2 -64
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js +2 -24
- package/dist/nodes/GitHubCopilotEmbeddings/GitHubCopilotEmbeddings.node.js +1 -18
- package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.js +242 -248
- package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.js +2 -78
- package/dist/package.json +1 -1
- package/dist/shared/properties/ModelProperties.d.ts +4 -0
- package/dist/shared/properties/ModelProperties.js +32 -0
- package/package.json +1 -1
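The common thread across these files: the Model and Custom Model Name properties that each node previously declared inline are now exported once from dist/shared/properties/ModelProperties.js and spread into each node's properties array. A minimal sketch of the pattern (illustrative only; the deep import path is an assumption, while the real nodes use require("../../shared/properties/ModelProperties") relative to their own location, as the diffs below show):

    // Illustrative sketch, not code from the package: spread the shared definitions
    // instead of repeating the Model / Custom Model Name objects in every node.
    // The deep import path assumes the published dist/ files are directly requirable.
    const { CHAT_MODEL_PROPERTIES } = require("n8n-nodes-github-copilot/dist/shared/properties/ModelProperties");

    const nodeProperties = [
        // node-specific properties (Operation, Message, ...) would go here
        ...CHAT_MODEL_PROPERTIES, // adds the "model" dropdown and the "__manual__" customModel field
    ];
    console.log(nodeProperties.map((p) => p.name)); // -> [ 'model', 'customModel' ]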

package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.js
CHANGED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.nodeProperties = void 0;
-const
+const ModelProperties_1 = require("../../shared/properties/ModelProperties");
 exports.nodeProperties = [
 {
 displayName: "Operation",
@@ -17,69 +17,7 @@ exports.nodeProperties = [
 ],
 default: "chat",
 },
-
-displayName: "Model Source",
-name: "modelSource",
-type: "options",
-options: [
-{
-name: "From List (Auto-Discovered)",
-value: "fromList",
-description: "Select from available models based on your subscription",
-},
-{
-name: "Custom (Manual Entry)",
-value: "custom",
-description: "Enter model name manually (use at your own risk)",
-},
-],
-default: "fromList",
-description: "Choose how to specify the model",
-},
-{
-displayName: "Model",
-name: "model",
-type: "options",
-typeOptions: {
-loadOptionsMethod: "getAvailableModels",
-},
-default: GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL,
-description: "Select the GitHub Copilot model to use (loaded dynamically based on your subscription)",
-displayOptions: {
-show: {
-modelSource: ["fromList"],
-},
-},
-},
-{
-displayName: "Custom Model Name",
-name: "customModel",
-type: "string",
-default: "",
-placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
-description: "Enter the model name manually. Use at your own risk if the model is not available in your subscription.",
-hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
-displayOptions: {
-show: {
-modelSource: ["custom"],
-},
-},
-},
-{
-displayName: "Custom Model Name",
-name: "customModel",
-type: "string",
-default: "",
-placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
-description: "Enter the model name manually. This is useful for new/beta models not yet in the list.",
-hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
-displayOptions: {
-show: {
-modelSource: ["fromList"],
-model: ["__manual__"],
-},
-},
-},
+...ModelProperties_1.CHAT_MODEL_PROPERTIES,
 {
 displayName: "Message",
 name: "message",

package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js
CHANGED
@@ -5,6 +5,7 @@ const openai_1 = require("@langchain/openai");
 const GitHubCopilotModels_1 = require("../../shared/models/GitHubCopilotModels");
 const GitHubCopilotEndpoints_1 = require("../../shared/utils/GitHubCopilotEndpoints");
 const DynamicModelLoader_1 = require("../../shared/models/DynamicModelLoader");
+const ModelProperties_1 = require("../../shared/properties/ModelProperties");
 class GitHubCopilotChatModel {
 constructor() {
 this.description = {
@@ -41,30 +42,7 @@ class GitHubCopilotChatModel {
 },
 ],
 properties: [
-
-displayName: "Model",
-name: "model",
-type: "options",
-typeOptions: {
-loadOptionsMethod: "getAvailableModels",
-},
-default: GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL,
-description: "Select the GitHub Copilot model to use (loaded dynamically based on your subscription)",
-},
-{
-displayName: "Custom Model Name",
-name: "customModel",
-type: "string",
-default: "",
-placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
-description: "Enter the model name manually. This is useful for new/beta models not yet in the list.",
-hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
-displayOptions: {
-show: {
-model: ["__manual__"],
-},
-},
-},
+...ModelProperties_1.CHAT_MODEL_PROPERTIES,
 {
 displayName: "Options",
 name: "options",

package/dist/nodes/GitHubCopilotEmbeddings/GitHubCopilotEmbeddings.node.js
CHANGED
@@ -14,7 +14,7 @@ class GitHubCopilotEmbeddings {
 icon: "file:../../shared/icons/copilot.svg",
 group: ["transform"],
 version: 1,
-subtitle: '={{$parameter["
+subtitle: '={{$parameter["model"]}}',
 description: "Generate text embeddings using GitHub Copilot API",
 defaults: {
 name: "GitHub Copilot Embeddings",
@@ -28,21 +28,6 @@ class GitHubCopilotEmbeddings {
 },
 ],
 properties: [
-{
-displayName: "Operation",
-name: "operation",
-type: "options",
-noDataExpression: true,
-options: [
-{
-name: "Generate Embeddings",
-value: "generate",
-description: "Generate vector embeddings for text input",
-action: "Generate embeddings for text",
-},
-],
-default: "generate",
-},
 {
 displayName: "Model",
 name: "model",
@@ -231,7 +216,6 @@ class GitHubCopilotEmbeddings {
 const returnData = [];
 for (let i = 0; i < items.length; i++) {
 try {
-const operation = this.getNodeParameter("operation", i);
 const selectedModel = this.getNodeParameter("model", i);
 let model;
 if (selectedModel === "__manual__") {
@@ -322,7 +306,6 @@ class GitHubCopilotEmbeddings {
 returnData.push({
 json: {
 error: error instanceof Error ? error.message : "Unknown error occurred",
-operation: this.getNodeParameter("operation", i),
 },
 pairedItem: { item: i },
 });

package/dist/nodes/GitHubCopilotOpenAI/GitHubCopilotOpenAI.node.js
CHANGED
@@ -14,7 +14,7 @@ class GitHubCopilotOpenAI {
 icon: "file:../../shared/icons/copilot.svg",
 group: ["transform"],
 version: 1,
-subtitle: "={{$parameter[\"
+subtitle: "={{$parameter[\"model\"]}}",
 description: "OpenAI-compatible GitHub Copilot Chat API with full support for messages, tools, and all OpenAI parameters",
 defaults: {
 name: "GitHub Copilot OpenAI",
@@ -43,286 +43,280 @@ class GitHubCopilotOpenAI {
 const returnData = [];
 for (let i = 0; i < items.length; i++) {
 try {
-const
-
-
-
-if (
+const modelSource = this.getNodeParameter("modelSource", i, "fromList");
+let model;
+if (modelSource === "custom") {
+model = this.getNodeParameter("customModel", i);
+if (!model || model.trim() === "") {
+throw new Error("Custom model name is required when using 'Custom (Manual Entry)' mode");
+}
+console.log(`🔧 Using custom model: ${model}`);
+}
+else {
+const selectedModel = this.getNodeParameter("model", i);
+if (selectedModel === "__manual__") {
 model = this.getNodeParameter("customModel", i);
 if (!model || model.trim() === "") {
-throw new Error("Custom model name is required when
+throw new Error("Custom model name is required when selecting '✏️ Enter Custom Model Name'");
 }
-console.log(
+console.log(`✏️ Using manually entered model: ${model}`);
 }
 else {
-
-
-
-
-
-
-
+model = selectedModel;
+console.log(`📋 Using model from list: ${model}`);
+}
+}
+const messagesInputMode = this.getNodeParameter("messagesInputMode", i, "manual");
+let messages = [];
+let requestBodyFromJson = undefined;
+if (messagesInputMode === "json") {
+const messagesJson = this.getNodeParameter("messagesJson", i, "[]");
+try {
+let parsed;
+if (typeof messagesJson === 'object') {
+parsed = messagesJson;
+console.log('📥 Received messages as direct object/array (no parsing needed)');
 }
 else {
-
-console.log(
+parsed = JSON.parse(messagesJson);
+console.log('📥 Parsed messages from JSON string');
 }
-
-
-let messages = [];
-let requestBodyFromJson = undefined;
-if (messagesInputMode === "json") {
-const messagesJson = this.getNodeParameter("messagesJson", i, "[]");
-try {
-let parsed;
-if (typeof messagesJson === 'object') {
-parsed = messagesJson;
-console.log('📥 Received messages as direct object/array (no parsing needed)');
-}
-else {
-parsed = JSON.parse(messagesJson);
-console.log('📥 Parsed messages from JSON string');
-}
-if (Array.isArray(parsed)) {
-messages = parsed;
-}
-else if (parsed.messages && Array.isArray(parsed.messages)) {
-messages = parsed.messages;
-requestBodyFromJson = parsed;
-console.log('📥 Full OpenAI request body received:', JSON.stringify(parsed, null, 2));
-}
-else {
-messages = parsed;
-}
+if (Array.isArray(parsed)) {
+messages = parsed;
 }
-
-
+else if (parsed.messages && Array.isArray(parsed.messages)) {
+messages = parsed.messages;
+requestBodyFromJson = parsed;
+console.log('📥 Full OpenAI request body received:', JSON.stringify(parsed, null, 2));
 }
-
-
-const messagesParam = this.getNodeParameter("messages", i, {
-message: [],
-});
-console.log('📥 Manual mode - messagesParam:', JSON.stringify(messagesParam, null, 2));
-if (messagesParam.message && Array.isArray(messagesParam.message)) {
-for (const msg of messagesParam.message) {
-const message = {
-role: msg.role,
-content: msg.content,
-};
-if (msg.type && msg.type !== 'text') {
-message.type = msg.type;
-}
-messages.push(message);
-}
+else {
+messages = parsed;
 }
-console.log('📥 Manual mode - parsed messages:', JSON.stringify(messages, null, 2));
 }
-
-messages.
-role: "user",
-content: "Hello! How can you help me?",
-});
+catch (error) {
+throw new Error(`Failed to parse messages JSON: ${error instanceof Error ? error.message : "Unknown error"}`);
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
+}
+else {
+const messagesParam = this.getNodeParameter("messages", i, {
+message: [],
+});
+console.log('📥 Manual mode - messagesParam:', JSON.stringify(messagesParam, null, 2));
+if (messagesParam.message && Array.isArray(messagesParam.message)) {
+for (const msg of messagesParam.message) {
+const message = {
+role: msg.role,
+content: msg.content,
+};
+if (msg.type && msg.type !== 'text') {
+message.type = msg.type;
 }
+messages.push(message);
 }
-catch (error) {
-throw new Error(`Failed to parse tools JSON: ${error instanceof Error ? error.message : "Unknown error"}`);
-}
-}
-let max_tokens = advancedOptions.max_tokens || 4096;
-if (!max_tokens || max_tokens <= 0 || isNaN(max_tokens)) {
-max_tokens = 4096;
-console.log('⚠️ Invalid max_tokens value, using default: 4096');
-}
-const temperature = (_a = advancedOptions.temperature) !== null && _a !== void 0 ? _a : 1;
-const top_p = (_b = advancedOptions.top_p) !== null && _b !== void 0 ? _b : 1;
-const frequency_penalty = (_c = advancedOptions.frequency_penalty) !== null && _c !== void 0 ? _c : 0;
-const presence_penalty = (_d = advancedOptions.presence_penalty) !== null && _d !== void 0 ? _d : 0;
-const seed = advancedOptions.seed || 0;
-const stream = (_e = advancedOptions.stream) !== null && _e !== void 0 ? _e : false;
-const user = advancedOptions.user || undefined;
-const stop = advancedOptions.stop || undefined;
-const response_format_ui = advancedOptions.response_format || "text";
-let response_format = undefined;
-if (requestBodyFromJson === null || requestBodyFromJson === void 0 ? void 0 : requestBodyFromJson.response_format) {
-response_format = requestBodyFromJson.response_format;
-console.log('📋 response_format from JSON request body:', JSON.stringify(response_format));
 }
-
-
-
-
-
-
-
-
+console.log('📥 Manual mode - parsed messages:', JSON.stringify(messages, null, 2));
+}
+if (messages.length === 0) {
+messages.push({
+role: "user",
+content: "Hello! How can you help me?",
+});
+}
+console.log('📤 Final messages being sent to API:', JSON.stringify(messages, null, 2));
+const advancedOptions = this.getNodeParameter("advancedOptions", i, {});
+let parsedTools = [];
+const tools = advancedOptions.tools;
+if (tools) {
+try {
+if (typeof tools === 'object' && Array.isArray(tools)) {
+parsedTools = tools;
+console.log('📥 Received tools as direct array (no parsing needed)');
 }
-
-
+else if (typeof tools === 'string' && tools.trim()) {
+parsedTools = JSON.parse(tools);
+console.log('📥 Parsed tools from JSON string');
 }
 }
-
-
-console.log('🔍 response_format.type:', response_format.type);
+catch (error) {
+throw new Error(`Failed to parse tools JSON: ${error instanceof Error ? error.message : "Unknown error"}`);
 }
-
-
+}
+let max_tokens = advancedOptions.max_tokens || 4096;
+if (!max_tokens || max_tokens <= 0 || isNaN(max_tokens)) {
+max_tokens = 4096;
+console.log('⚠️ Invalid max_tokens value, using default: 4096');
+}
+const temperature = (_a = advancedOptions.temperature) !== null && _a !== void 0 ? _a : 1;
+const top_p = (_b = advancedOptions.top_p) !== null && _b !== void 0 ? _b : 1;
+const frequency_penalty = (_c = advancedOptions.frequency_penalty) !== null && _c !== void 0 ? _c : 0;
+const presence_penalty = (_d = advancedOptions.presence_penalty) !== null && _d !== void 0 ? _d : 0;
+const seed = advancedOptions.seed || 0;
+const stream = (_e = advancedOptions.stream) !== null && _e !== void 0 ? _e : false;
+const user = advancedOptions.user || undefined;
+const stop = advancedOptions.stop || undefined;
+const response_format_ui = advancedOptions.response_format || "text";
+let response_format = undefined;
+if (requestBodyFromJson === null || requestBodyFromJson === void 0 ? void 0 : requestBodyFromJson.response_format) {
+response_format = requestBodyFromJson.response_format;
+console.log('📋 response_format from JSON request body:', JSON.stringify(response_format));
+}
+else if (response_format_ui && response_format_ui !== 'text') {
+response_format = { type: response_format_ui };
+console.log('📋 response_format from UI field:', JSON.stringify(response_format));
+}
+else if (advancedOptions.response_format && typeof advancedOptions.response_format === 'string') {
+try {
+response_format = JSON.parse(advancedOptions.response_format);
+console.log('📋 response_format from advancedOptions:', JSON.stringify(response_format));
 }
-
-
-"gpt-4o": "gpt-4o",
-"gpt-4o-mini": "gpt-4o-mini",
-"gpt-4-turbo": "gpt-4o",
-"claude-3-5-sonnet": "claude-3.5-sonnet",
-"claude-3.5-sonnet-20241022": "claude-3.5-sonnet",
-"o1": "o1",
-"o1-preview": "o1-preview",
-"o1-mini": "o1-mini",
-};
-const copilotModel = modelMapping[model] || model;
-const requestBody = {
-model: copilotModel,
-messages,
-stream,
-temperature,
-max_tokens,
-};
-if (top_p !== 1) {
-requestBody.top_p = top_p;
+catch {
+console.log('⚠️ Failed to parse response_format from advancedOptions');
 }
-
-
+}
+if (response_format) {
+console.log('✅ Final response_format:', JSON.stringify(response_format));
+console.log('🔍 response_format.type:', response_format.type);
+}
+else {
+console.log('ℹ️ No response_format specified - using default text format');
+}
+const modelMapping = {
+"gpt-4": "gpt-4o",
+"gpt-4o": "gpt-4o",
+"gpt-4o-mini": "gpt-4o-mini",
+"gpt-4-turbo": "gpt-4o",
+"claude-3-5-sonnet": "claude-3.5-sonnet",
+"claude-3.5-sonnet-20241022": "claude-3.5-sonnet",
+"o1": "o1",
+"o1-preview": "o1-preview",
+"o1-mini": "o1-mini",
+};
+const copilotModel = modelMapping[model] || model;
+const requestBody = {
+model: copilotModel,
+messages,
+stream,
+temperature,
+max_tokens,
+};
+if (top_p !== 1) {
+requestBody.top_p = top_p;
+}
+if (frequency_penalty !== 0) {
+requestBody.frequency_penalty = frequency_penalty;
+}
+if (presence_penalty !== 0) {
+requestBody.presence_penalty = presence_penalty;
+}
+if (user) {
+requestBody.user = user;
+}
+if (stop) {
+try {
+requestBody.stop = JSON.parse(stop);
 }
-
-requestBody.
+catch {
+requestBody.stop = stop;
 }
-
-
+}
+if (parsedTools.length > 0) {
+requestBody.tools = parsedTools;
+const tool_choice = advancedOptions.tool_choice || "auto";
+if (tool_choice !== "auto") {
+requestBody.tool_choice = tool_choice;
 }
-
-
-
-
-
-
-
+}
+if (response_format) {
+requestBody.response_format = response_format;
+}
+if (seed > 0) {
+requestBody.seed = seed;
+}
+console.log('🚀 Sending request to GitHub Copilot API:');
+console.log(' Model:', copilotModel);
+console.log(' Messages count:', messages.length);
+console.log(' Request body:', JSON.stringify(requestBody, null, 2));
+const response = await (0, utils_1.makeApiRequest)(this, GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.ENDPOINTS.CHAT_COMPLETIONS, requestBody, false);
+const retriesUsed = ((_f = response._retryMetadata) === null || _f === void 0 ? void 0 : _f.retries) || 0;
+if (retriesUsed > 0) {
+console.log(`ℹ️ Request completed with ${retriesUsed} retry(ies)`);
+}
+const cleanJsonFromMarkdown = (content) => {
+if (!content || typeof content !== 'string') {
+return content;
 }
-
-
-
-
-
+try {
+const trimmed = content.trim();
+console.log('🧹 cleanJsonFromMarkdown - Input length:', trimmed.length);
+const jsonBlockRegex = /^```(?:json)?\s*\n([\s\S]*?)\n```\s*$/;
+const match = trimmed.match(jsonBlockRegex);
+if (match && match[1]) {
+const extracted = match[1].trim();
+console.log('✅ cleanJsonFromMarkdown - Extracted from markdown block');
+return extracted;
 }
+console.log('ℹ️ cleanJsonFromMarkdown - No markdown block found, returning as is');
+return trimmed;
 }
-
-
-
-if (seed > 0) {
-requestBody.seed = seed;
-}
-console.log('🚀 Sending request to GitHub Copilot API:');
-console.log(' Model:', copilotModel);
-console.log(' Messages count:', messages.length);
-console.log(' Request body:', JSON.stringify(requestBody, null, 2));
-const response = await (0, utils_1.makeApiRequest)(this, GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.ENDPOINTS.CHAT_COMPLETIONS, requestBody, false);
-const retriesUsed = ((_f = response._retryMetadata) === null || _f === void 0 ? void 0 : _f.retries) || 0;
-if (retriesUsed > 0) {
-console.log(`ℹ️ Request completed with ${retriesUsed} retry(ies)`);
+catch (error) {
+console.error('❌ cleanJsonFromMarkdown - Error:', error);
+return content;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
+};
+console.log('🔨 Building OpenAI response...');
+console.log('🔍 response_format check:', (response_format === null || response_format === void 0 ? void 0 : response_format.type) === 'json_object' ? 'WILL CLEAN MARKDOWN' : 'WILL KEEP AS IS');
+const openAIResponse = {
+id: response.id || `chatcmpl-${Date.now()}`,
+object: response.object || "chat.completion",
+created: response.created || Math.floor(Date.now() / 1000),
+model: model,
+choices: response.choices.map((choice, choiceIndex) => {
+var _a;
+console.log(`\n📝 Processing choice ${choiceIndex}:`);
+console.log(' - role:', choice.message.role);
+console.log(' - content type:', typeof choice.message.content);
+console.log(' - content length:', ((_a = choice.message.content) === null || _a === void 0 ? void 0 : _a.length) || 0);
+console.log(' - has tool_calls:', !!choice.message.tool_calls);
+let processedContent = choice.message.content;
+if (choice.message.content !== null && choice.message.content !== undefined) {
+if ((response_format === null || response_format === void 0 ? void 0 : response_format.type) === 'json_object') {
+console.log(' 🧹 Applying cleanJsonFromMarkdown (keeping as string)...');
+processedContent = cleanJsonFromMarkdown(choice.message.content);
+console.log(' ✅ Processed content type:', typeof processedContent);
+}
+else {
+console.log(' ℹ️ Keeping content as is');
 }
-console.log('ℹ️ cleanJsonFromMarkdown - No markdown block found, returning as is');
-return trimmed;
 }
-
-
-
+const choiceObj = {
+index: choice.index,
+message: {
+role: choice.message.role,
+content: processedContent,
+refusal: choice.message.refusal || null,
+annotations: choice.message.annotations || [],
+},
+logprobs: choice.logprobs || null,
+finish_reason: choice.finish_reason,
+};
+if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
+choiceObj.message.tool_calls = choice.message.tool_calls;
 }
-
-
-
-
-
-
-
-
-
-
-console.log(`\n📝 Processing choice ${choiceIndex}:`);
-console.log(' - role:', choice.message.role);
-console.log(' - content type:', typeof choice.message.content);
-console.log(' - content length:', ((_a = choice.message.content) === null || _a === void 0 ? void 0 : _a.length) || 0);
-console.log(' - has tool_calls:', !!choice.message.tool_calls);
-let processedContent = choice.message.content;
-if (choice.message.content !== null && choice.message.content !== undefined) {
-if ((response_format === null || response_format === void 0 ? void 0 : response_format.type) === 'json_object') {
-console.log(' 🧹 Applying cleanJsonFromMarkdown (keeping as string)...');
-processedContent = cleanJsonFromMarkdown(choice.message.content);
-console.log(' ✅ Processed content type:', typeof processedContent);
-}
-else {
-console.log(' ℹ️ Keeping content as is');
-}
-}
-const choiceObj = {
-index: choice.index,
-message: {
-role: choice.message.role,
-content: processedContent,
-refusal: choice.message.refusal || null,
-annotations: choice.message.annotations || [],
-},
-logprobs: choice.logprobs || null,
-finish_reason: choice.finish_reason,
-};
-if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
-choiceObj.message.tool_calls = choice.message.tool_calls;
-}
-return choiceObj;
-}),
-usage: response.usage || {
-prompt_tokens: 0,
-completion_tokens: 0,
-total_tokens: 0,
-},
-};
-if (response.system_fingerprint) {
-openAIResponse.system_fingerprint = response.system_fingerprint;
-}
-returnData.push({
-json: openAIResponse,
-pairedItem: { item: i },
-});
-}
-else {
-throw new Error(`Unknown operation: ${operation}`);
+return choiceObj;
+}),
+usage: response.usage || {
+prompt_tokens: 0,
+completion_tokens: 0,
+total_tokens: 0,
+},
+};
+if (response.system_fingerprint) {
+openAIResponse.system_fingerprint = response.system_fingerprint;
 }
+returnData.push({
+json: openAIResponse,
+pairedItem: { item: i },
+});
 }
 catch (error) {
 if (this.continueOnFail()) {
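Two behavioural notes on the rewritten execute() above: the model is now resolved from the modelSource parameter first (custom entry vs. list, with the "__manual__" sentinel falling back to customModel), and when response_format.type is json_object the returned content is passed through cleanJsonFromMarkdown, which strips a single fenced ```json block with the regex shown in the diff. A small standalone check of that regex (same pattern as above; the sample content is invented):

    // Same fence-stripping regex as cleanJsonFromMarkdown above; the sample input is made up.
    const jsonBlockRegex = /^```(?:json)?\s*\n([\s\S]*?)\n```\s*$/;
    const raw = '```json\n{"answer": 42}\n```';
    const match = raw.trim().match(jsonBlockRegex);
    const cleaned = match && match[1] ? match[1].trim() : raw.trim();
    console.log(cleaned); // -> {"answer": 42}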

package/dist/nodes/GitHubCopilotOpenAI/nodeProperties.js
CHANGED
@@ -1,85 +1,9 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.nodeProperties = void 0;
-const
+const ModelProperties_1 = require("../../shared/properties/ModelProperties");
 exports.nodeProperties = [
-
-displayName: "Operation",
-name: "operation",
-type: "options",
-noDataExpression: true,
-options: [
-{
-name: "Chat Completion",
-value: "chat",
-description: "Send messages to GitHub Copilot Chat API with full OpenAI compatibility",
-},
-],
-default: "chat",
-},
-{
-displayName: "Model Source",
-name: "modelSource",
-type: "options",
-options: [
-{
-name: "From List (Auto-Discovered)",
-value: "fromList",
-description: "Select from available models based on your subscription",
-},
-{
-name: "Custom (Manual Entry)",
-value: "custom",
-description: "Enter model name manually (use at your own risk)",
-},
-],
-default: "fromList",
-description: "Choose how to specify the model",
-},
-{
-displayName: "Model",
-name: "model",
-type: "options",
-typeOptions: {
-loadOptionsMethod: "getAvailableModels",
-},
-default: GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL,
-description: "Select the GitHub Copilot model to use (loaded dynamically based on your subscription)",
-displayOptions: {
-show: {
-modelSource: ["fromList"],
-},
-},
-},
-{
-displayName: "Custom Model Name",
-name: "customModel",
-type: "string",
-default: "",
-placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
-description: "Enter the model name manually. Use at your own risk if the model is not available in your subscription.",
-hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
-displayOptions: {
-show: {
-modelSource: ["custom"],
-},
-},
-},
-{
-displayName: "Custom Model Name",
-name: "customModel",
-type: "string",
-default: "",
-placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
-description: "Enter the model name manually. This is useful for new/beta models not yet in the list.",
-hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
-displayOptions: {
-show: {
-modelSource: ["fromList"],
-model: ["__manual__"],
-},
-},
-},
+...ModelProperties_1.CHAT_MODEL_PROPERTIES,
 {
 displayName: "Messages Input Mode",
 name: "messagesInputMode",

package/dist/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "n8n-nodes-github-copilot",
-"version": "3.38.11",
+"version": "3.38.13",
 "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription",
 "license": "MIT",
 "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",

package/dist/shared/properties/ModelProperties.js
ADDED
@@ -0,0 +1,32 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CHAT_MODEL_PROPERTIES = exports.MANUAL_MODEL_PROPERTY = exports.CHAT_MODEL_PROPERTY = void 0;
+const GitHubCopilotModels_1 = require("../models/GitHubCopilotModels");
+exports.CHAT_MODEL_PROPERTY = {
+displayName: "Model",
+name: "model",
+type: "options",
+typeOptions: {
+loadOptionsMethod: "getAvailableModels",
+},
+default: GitHubCopilotModels_1.DEFAULT_MODELS.GENERAL,
+description: "Select the GitHub Copilot model to use (loaded dynamically based on your subscription)",
+};
+exports.MANUAL_MODEL_PROPERTY = {
+displayName: "Custom Model Name",
+name: "customModel",
+type: "string",
+default: "",
+placeholder: "gpt-4o, claude-3.5-sonnet, grok-code-fast-1, etc.",
+description: "Enter the model name manually. This is useful for new/beta models not yet in the list.",
+hint: "Examples: gpt-4o, gpt-4o-mini, claude-3.5-sonnet, gemini-2.0-flash-exp, grok-code-fast-1",
+displayOptions: {
+show: {
+model: ["__manual__"],
+},
+},
+};
+exports.CHAT_MODEL_PROPERTIES = [
+exports.CHAT_MODEL_PROPERTY,
+exports.MANUAL_MODEL_PROPERTY,
+];
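MANUAL_MODEL_PROPERTY is only shown when the Model dropdown is set to the "__manual__" value that the execute() code above checks for, so the getAvailableModels loader presumably returns an entry with that value alongside the real model IDs; the loader itself is not part of this diff. A hypothetical shape of its result:

    // Hypothetical getAvailableModels result: only the "__manual__" value and the
    // "✏️ Enter Custom Model Name" label appear in this diff; the model entries are illustrative.
    const availableModels = [
        { name: "gpt-4o", value: "gpt-4o" },
        { name: "claude-3.5-sonnet", value: "claude-3.5-sonnet" },
        { name: "✏️ Enter Custom Model Name", value: "__manual__" },
    ];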

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "n8n-nodes-github-copilot",
-"version": "3.38.11",
+"version": "3.38.13",
 "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription",
 "license": "MIT",
 "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",