n8n-nodes-github-copilot 3.38.4 → 3.38.6

@@ -38,7 +38,7 @@ class GitHubCopilotOpenAI {
  };
  }
  async execute() {
- var _a;
+ var _a, _b, _c, _d, _e, _f;
  const items = this.getInputData();
  const returnData = [];
  for (let i = 0; i < items.length; i++) {
@@ -69,8 +69,6 @@ class GitHubCopilotOpenAI {
  }
  }
  const messagesInputMode = this.getNodeParameter("messagesInputMode", i, "manual");
- const temperature = this.getNodeParameter("temperature", i, 1);
- const tools = this.getNodeParameter("tools", i, "");
  let messages = [];
  let requestBodyFromJson = undefined;
  if (messagesInputMode === "json") {
@@ -123,7 +121,9 @@ class GitHubCopilotOpenAI {
  });
  }
  console.log('📤 Final messages being sent to API:', JSON.stringify(messages, null, 2));
+ const advancedOptions = this.getNodeParameter("advancedOptions", i, {});
  let parsedTools = [];
+ const tools = advancedOptions.tools;
  if (tools) {
  try {
  if (typeof tools === 'object' && Array.isArray(tools)) {
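Note on this hunk: tools now comes from the advancedOptions collection and may be either an already-parsed array (e.g. set via an n8n expression) or a JSON string typed into the field, which is why the code checks Array.isArray before parsing. A minimal sketch of that normalization under the same assumption (the helper name normalizeTools is ours, not the package's):

    // Accepts the two shapes the node tolerates for the "tools" option.
    function normalizeTools(tools) {
        if (!tools) return [];                  // option not set: no tools
        if (Array.isArray(tools)) return tools; // already an array of tool objects
        return JSON.parse(tools);               // JSON text from the string field
    }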
@@ -139,14 +139,20 @@ class GitHubCopilotOpenAI {
  throw new Error(`Failed to parse tools JSON: ${error instanceof Error ? error.message : "Unknown error"}`);
  }
  }
- let max_tokens = this.getNodeParameter("max_tokens", i, 4096);
+ let max_tokens = advancedOptions.max_tokens || 4096;
  if (!max_tokens || max_tokens <= 0 || isNaN(max_tokens)) {
  max_tokens = 4096;
  console.log('⚠️ Invalid max_tokens value, using default: 4096');
  }
- const seed = this.getNodeParameter("seed", i, 0);
- const response_format_ui = this.getNodeParameter("response_format", i, "text");
- const advancedOptions = this.getNodeParameter("advancedOptions", i, {});
+ const temperature = (_a = advancedOptions.temperature) !== null && _a !== void 0 ? _a : 1;
+ const top_p = (_b = advancedOptions.top_p) !== null && _b !== void 0 ? _b : 1;
+ const frequency_penalty = (_c = advancedOptions.frequency_penalty) !== null && _c !== void 0 ? _c : 0;
+ const presence_penalty = (_d = advancedOptions.presence_penalty) !== null && _d !== void 0 ? _d : 0;
+ const seed = advancedOptions.seed || 0;
+ const stream = (_e = advancedOptions.stream) !== null && _e !== void 0 ? _e : false;
+ const user = advancedOptions.user || undefined;
+ const stop = advancedOptions.stop || undefined;
+ const response_format_ui = advancedOptions.response_format || "text";
  let response_format = undefined;
  if (requestBodyFromJson === null || requestBodyFromJson === void 0 ? void 0 : requestBodyFromJson.response_format) {
  response_format = requestBodyFromJson.response_format;
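The ternary chains above ((_a = x) !== null && _a !== void 0 ? _a : fallback) are what the TypeScript compiler emits for the nullish-coalescing operator, so the probable source looks like the sketch below (our reconstruction, not the package's actual .ts files). Note the deliberate mix of ?? and ||:

    const temperature = advancedOptions.temperature ?? 1; // ?? lets an explicit 0 through
    const top_p = advancedOptions.top_p ?? 1;
    const seed = advancedOptions.seed || 0;                // || maps 0 back to the default
    let max_tokens = advancedOptions.max_tokens || 4096;   // ditto: 0 falls back to 4096

For seed that is intentional (the UI documents 0 as "disabled"), while temperature and top_p must preserve a user-set 0, hence ??.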
@@ -187,20 +193,54 @@ class GitHubCopilotOpenAI {
  const requestBody = {
  model: copilotModel,
  messages,
- stream: false,
+ stream,
  temperature,
  max_tokens,
  };
+ if (top_p !== 1) {
+ requestBody.top_p = top_p;
+ }
+ if (frequency_penalty !== 0) {
+ requestBody.frequency_penalty = frequency_penalty;
+ }
+ if (presence_penalty !== 0) {
+ requestBody.presence_penalty = presence_penalty;
+ }
+ if (user) {
+ requestBody.user = user;
+ }
+ if (stop) {
+ try {
+ requestBody.stop = JSON.parse(stop);
+ }
+ catch {
+ requestBody.stop = stop;
+ }
+ }
  if (parsedTools.length > 0) {
  requestBody.tools = parsedTools;
+ const tool_choice = advancedOptions.tool_choice || "auto";
+ if (tool_choice !== "auto") {
+ requestBody.tool_choice = tool_choice;
+ }
  }
  if (response_format) {
  requestBody.response_format = response_format;
  if (response_format.type === 'json_object') {
  const allMessagesText = messages.map(m => m.content).join(' ').toLowerCase();
  if (!allMessagesText.includes('json')) {
- throw new Error('When using response_format "json_object", you must include the word "json" in your messages. ' +
- 'Add instructions like "Respond in JSON format" to your system message or user prompt.');
+ const systemMessageIndex = messages.findIndex(m => m.role === 'system');
+ if (systemMessageIndex !== -1) {
+ messages[systemMessageIndex].content += '\n\nResponse format: json';
+ console.log('ℹ️ Auto-injected "json" keyword into existing system message for json_object format');
+ }
+ else {
+ messages.unshift({
+ role: 'system',
+ content: 'Response format: json'
+ });
+ console.log('ℹ️ Auto-injected system message with "json" keyword for json_object format');
+ }
  }
  }
  }
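Two behaviors worth noting in this hunk: optional sampling fields are attached to requestBody only when they differ from their defaults, and the stop option accepts either a JSON array or a bare string. A self-contained sketch of that stop fallback (parseStop is our name for it):

    // Mirrors the try/catch above: JSON first, raw string second.
    function parseStop(stop) {
        if (!stop) return undefined;
        try {
            return JSON.parse(stop); // e.g. '["\\n", "Human:"]' -> ["\n", "Human:"]
        }
        catch {
            return stop;             // plain text becomes a single stop sequence
        }
    }

The json_object branch also changes from throwing to auto-injecting a "json" keyword into the system message, which the API requires somewhere in the conversation before it will honor JSON mode.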
@@ -212,7 +252,7 @@ class GitHubCopilotOpenAI {
  console.log(' Messages count:', messages.length);
  console.log(' Request body:', JSON.stringify(requestBody, null, 2));
  const response = await (0, utils_1.makeApiRequest)(this, GitHubCopilotEndpoints_1.GITHUB_COPILOT_API.ENDPOINTS.CHAT_COMPLETIONS, requestBody, false);
- const retriesUsed = ((_a = response._retryMetadata) === null || _a === void 0 ? void 0 : _a.retries) || 0;
+ const retriesUsed = ((_f = response._retryMetadata) === null || _f === void 0 ? void 0 : _f.retries) || 0;
  if (retriesUsed > 0) {
  console.log(`ℹ️ Request completed with ${retriesUsed} retry(ies)`);
  }
@@ -263,32 +303,34 @@ class GitHubCopilotOpenAI {
  console.log(' ℹ️ Keeping content as is');
  }
  }
- return {
+ const choiceObj = {
  index: choice.index,
  message: {
  role: choice.message.role,
- ...(choice.message.content !== null && choice.message.content !== undefined && {
- content: processedContent
- }),
- refusal: choice.message.refusal || null,
- annotations: choice.message.annotations || [],
- ...(choice.message.tool_calls && { tool_calls: choice.message.tool_calls }),
+ content: processedContent,
  },
- logprobs: choice.logprobs || null,
  finish_reason: choice.finish_reason,
  };
+ if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
+ choiceObj.message.tool_calls = choice.message.tool_calls;
+ }
+ if (choice.message.refusal) {
+ choiceObj.message.refusal = choice.message.refusal;
+ }
+ if (choice.logprobs) {
+ choiceObj.logprobs = choice.logprobs;
+ }
+ return choiceObj;
  }),
  usage: response.usage || {
  prompt_tokens: 0,
  completion_tokens: 0,
  total_tokens: 0,
  },
- ...(retriesUsed > 0 && {
- _retry_info: {
- retries: retriesUsed,
- }
- }),
  };
+ if (response.system_fingerprint) {
+ openAIResponse.system_fingerprint = response.system_fingerprint;
+ }
  returnData.push({
  json: openAIResponse,
  pairedItem: { item: i },
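The mapping above drops the spread-based conditional fields in favor of a mutable choiceObj, so tool_calls, refusal, and logprobs appear only when the upstream response set them (instead of refusal: null / annotations: [] placeholders); the per-item _retry_info block is removed and system_fingerprint is now forwarded when present. Condensed, the pattern is:

    // Build the required fields, then attach optional ones only if present.
    const choiceObj = {
        index: choice.index,
        message: { role: choice.message.role, content: processedContent },
        finish_reason: choice.finish_reason,
    };
    if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
        choiceObj.message.tool_calls = choice.message.tool_calls;
    }
    if (choice.logprobs) {
        choiceObj.logprobs = choice.logprobs;
    }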
@@ -187,14 +187,132 @@ exports.nodeProperties = [
  description: "Array of messages for the conversation",
  },
  {
- displayName: "Tools (Optional)",
- name: "tools",
- type: "string",
- default: "",
- typeOptions: {
- rows: 10,
- },
- placeholder: `[
+ displayName: "Advanced Options",
+ name: "advancedOptions",
+ type: "collection",
+ placeholder: "Add Advanced Option",
+ default: {},
+ options: [
+ {
+ displayName: "Response Format",
+ name: "response_format",
+ type: "options",
+ options: [
+ {
+ name: "Text",
+ value: "text",
+ description: "Return response as plain text",
+ },
+ {
+ name: "JSON Object",
+ value: "json_object",
+ description: "Return response as JSON object",
+ },
+ ],
+ default: "text",
+ description: "The format of the response",
+ },
+ {
+ displayName: "Temperature",
+ name: "temperature",
+ type: "number",
+ typeOptions: {
+ minValue: 0,
+ maxValue: 2,
+ numberPrecision: 2,
+ },
+ default: 1,
+ description: "Controls randomness in the response. Lower values make responses more focused and deterministic.",
+ },
+ {
+ displayName: "Max Tokens",
+ name: "max_tokens",
+ type: "number",
+ typeOptions: {
+ minValue: 1,
+ maxValue: 16384,
+ },
+ default: 4096,
+ placeholder: "4096",
+ description: "Maximum number of tokens to generate in the response",
+ hint: "Default: 4096 tokens. Increase for longer responses, decrease for shorter ones.",
+ },
+ {
+ displayName: "Top P",
+ name: "top_p",
+ type: "number",
+ typeOptions: {
+ minValue: 0,
+ maxValue: 1,
+ numberPrecision: 2,
+ },
+ default: 1,
+ description: "Controls diversity via nucleus sampling",
+ },
+ {
+ displayName: "Frequency Penalty",
+ name: "frequency_penalty",
+ type: "number",
+ typeOptions: {
+ minValue: -2,
+ maxValue: 2,
+ numberPrecision: 2,
+ },
+ default: 0,
+ description: "Penalty for repeated tokens based on their frequency",
+ },
+ {
+ displayName: "Presence Penalty",
+ name: "presence_penalty",
+ type: "number",
+ typeOptions: {
+ minValue: -2,
+ maxValue: 2,
+ numberPrecision: 2,
+ },
+ default: 0,
+ description: "Penalty for repeated tokens based on their presence",
+ },
+ {
+ displayName: "Stop Sequences",
+ name: "stop",
+ type: "string",
+ default: "",
+ placeholder: "[\"\\n\", \"Human:\", \"AI:\"]",
+ description: "JSON array of strings where the API will stop generating tokens",
+ },
+ {
+ displayName: "Stream",
+ name: "stream",
+ type: "boolean",
+ default: false,
+ description: "Whether to stream the response",
+ },
+ {
+ displayName: "Seed",
+ name: "seed",
+ type: "number",
+ default: 0,
+ placeholder: "12345",
+ description: "Seed for deterministic sampling (0 = disabled)",
+ },
+ {
+ displayName: "User ID",
+ name: "user",
+ type: "string",
+ default: "",
+ placeholder: "user-123",
+ description: "Unique identifier for the end-user",
+ },
+ {
+ displayName: "Tools (Function Calling)",
+ name: "tools",
+ type: "string",
+ default: "",
+ typeOptions: {
+ rows: 10,
+ },
+ placeholder: `[
  {
  "type": "function",
  "function": {
@@ -213,156 +331,38 @@ exports.nodeProperties = [
  }
  }
  ]`,
- description: "Optional: Array of tools/functions available to the model (OpenAI format). Leave empty if not using function calling.",
- hint: "JSON array of tool definitions in OpenAI format. Leave this field empty if you don't need function calling.",
- },
- {
- displayName: "Tool Choice",
- name: "tool_choice",
- type: "options",
- options: [
- {
- name: "Auto",
- value: "auto",
- description: "Let the model decide whether to call functions",
+ description: "Optional: Array of tools/functions available to the model (OpenAI format). Leave empty if not using function calling.",
+ hint: "JSON array of tool definitions in OpenAI format. Leave this field empty if you don't need function calling.",
  },
  {
- name: "None",
- value: "none",
- description: "Force the model to not call any functions",
- },
- {
- name: "Required",
- value: "required",
- description: "Force the model to call at least one function",
- },
- ],
- default: "auto",
- description: "Control how the model uses tools",
- displayOptions: {
- show: {
- tools: ["/.+/"],
- },
- },
- },
- {
- displayName: "Response Format",
- name: "response_format",
- type: "options",
- options: [
- {
- name: "Text",
- value: "text",
- description: "Return response as plain text",
- },
- {
- name: "JSON Object",
- value: "json_object",
- description: "Return response as JSON object",
+ displayName: "Tool Choice",
+ name: "tool_choice",
+ type: "options",
+ options: [
+ {
+ name: "Auto",
+ value: "auto",
+ description: "Let the model decide whether to call functions",
+ },
+ {
+ name: "None",
+ value: "none",
+ description: "Force the model to not call any functions",
+ },
+ {
+ name: "Required",
+ value: "required",
+ description: "Force the model to call at least one function",
+ },
+ ],
+ default: "auto",
+ description: "Control how the model uses tools",
+ displayOptions: {
+ show: {
+ tools: ["/.+/"],
+ },
+ },
  },
- ],
- default: "text",
- description: "The format of the response",
- },
- {
- displayName: "Temperature",
- name: "temperature",
- type: "number",
- typeOptions: {
- minValue: 0,
- maxValue: 2,
- numberPrecision: 2,
- },
- default: 1,
- description: "Controls randomness in the response. Lower values make responses more focused and deterministic.",
- },
- {
- displayName: "Max Tokens",
- name: "max_tokens",
- type: "number",
- typeOptions: {
- minValue: 1,
- maxValue: 16384,
- },
- default: 4096,
- placeholder: "4096",
- description: "Maximum number of tokens to generate in the response",
- hint: "Default: 4096 tokens. Increase for longer responses, decrease for shorter ones.",
- },
- {
- displayName: "Top P",
- name: "top_p",
- type: "number",
- typeOptions: {
- minValue: 0,
- maxValue: 1,
- numberPrecision: 2,
- },
- default: 1,
- description: "Controls diversity via nucleus sampling",
- },
- {
- displayName: "Frequency Penalty",
- name: "frequency_penalty",
- type: "number",
- typeOptions: {
- minValue: -2,
- maxValue: 2,
- numberPrecision: 2,
- },
- default: 0,
- description: "Penalty for repeated tokens based on their frequency",
- },
- {
- displayName: "Presence Penalty",
- name: "presence_penalty",
- type: "number",
- typeOptions: {
- minValue: -2,
- maxValue: 2,
- numberPrecision: 2,
- },
- default: 0,
- description: "Penalty for repeated tokens based on their presence",
- },
- {
- displayName: "Stop Sequences",
- name: "stop",
- type: "string",
- default: "",
- placeholder: "[\"\\n\", \"Human:\", \"AI:\"]",
- description: "JSON array of strings where the API will stop generating tokens",
- },
- {
- displayName: "Stream",
- name: "stream",
- type: "boolean",
- default: false,
- description: "Whether to stream the response",
- },
- {
- displayName: "Seed",
- name: "seed",
- type: "number",
- default: "",
- placeholder: "12345",
- description: "Seed for deterministic sampling",
- },
- {
- displayName: "User ID",
- name: "user",
- type: "string",
- default: "",
- placeholder: "user-123",
- description: "Unique identifier for the end-user",
- },
- {
- displayName: "Advanced Options",
- name: "advancedOptions",
- type: "collection",
- placeholder: "Add Advanced Option",
- default: {},
- options: [
  {
  displayName: "Enable Retry",
  name: "enableRetry",
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "n8n-nodes-github-copilot",
- "version": "3.38.4",
+ "version": "3.38.6",
  "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription",
  "license": "MIT",
  "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "n8n-nodes-github-copilot",
- "version": "3.38.4",
+ "version": "3.38.6",
  "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription",
  "license": "MIT",
  "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",