n8n-nodes-github-copilot 3.38.22 → 3.38.24

package/README.md CHANGED
@@ -17,7 +17,6 @@ This is a **community node** for [n8n](https://n8n.io/) that integrates **GitHub
 ### 2. GitHub Copilot Chat API (New! ⭐)
 - **Chat Completion**: Direct conversations with advanced AI models
 - **Image Analysis**: Image processing with vision models
-- **Audio Transcription**: Audio-to-text conversion (planned)
 - **Available Models**: GPT-5, GPT-5 Mini, Claude Opus 4.1, Gemini 2.5 Pro, Grok Code Fast 1, GPT-4.1 Copilot
 - **No Extra Costs**: Uses your existing GitHub Copilot credits
 
@@ -15,10 +15,35 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
         this.options = options;
     }
     async _generate(messages, options) {
-        const copilotMessages = messages.map(msg => ({
-            role: msg._getType(),
-            content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
-        }));
+        let copilotMessages = messages.map(msg => {
+            let role;
+            switch (msg._getType()) {
+                case 'human':
+                    role = 'user';
+                    break;
+                case 'ai':
+                    role = 'assistant';
+                    break;
+                case 'system':
+                    role = 'system';
+                    break;
+                default:
+                    role = 'user';
+            }
+            return {
+                role,
+                content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
+            };
+        });
+        if (this.options.systemMessage && this.options.systemMessage.trim()) {
+            const hasSystemMessage = copilotMessages.some(msg => msg.role === 'system');
+            if (!hasSystemMessage) {
+                copilotMessages.unshift({
+                    role: 'system',
+                    content: this.options.systemMessage,
+                });
+            }
+        }
         const requestBody = {
             model: this.modelName || this.model,
             messages: copilotMessages,
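
The hunk above replaces the raw `msg._getType()` value with an explicit role mapping: LangChain reports message types as `human`, `ai`, and `system`, while the Copilot Chat endpoint expects OpenAI-style `user`, `assistant`, and `system` roles. It also prepends the node's configured system message once, if the conversation doesn't already contain one. A minimal standalone sketch of the same logic (the names `LcType`, `toCopilotRole`, and `withSystemMessage` are illustrative, not part of the package):

```typescript
// Illustrative sketch of the role mapping introduced in this hunk.
// LcType mirrors the strings LangChain's _getType() can return here.
type LcType = 'human' | 'ai' | 'system' | (string & {});

function toCopilotRole(t: LcType): 'user' | 'assistant' | 'system' {
    switch (t) {
        case 'human': return 'user';        // LangChain "human" -> OpenAI-style "user"
        case 'ai': return 'assistant';      // LangChain "ai" -> "assistant"
        case 'system': return 'system';     // passes through unchanged
        default: return 'user';             // unknown types are treated as user input
    }
}

// The patch also prepends the configured system message exactly once:
function withSystemMessage(
    msgs: Array<{ role: string; content: string }>,
    systemMessage?: string,
): Array<{ role: string; content: string }> {
    if (systemMessage?.trim() && !msgs.some((m) => m.role === 'system')) {
        return [{ role: 'system', content: systemMessage }, ...msgs];
    }
    return msgs;
}

// Example: toCopilotRole('human') === 'user'
```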
@@ -40,13 +65,14 @@ class GitHubCopilotChatOpenAI extends openai_1.ChatOpenAI {
                 tool_calls: choice.message.tool_calls,
             };
             return {
-                generations: [{
-                    text: choice.message.content || "",
-                    message: langchainMessage,
-                }],
-                llmOutput: {
-                    tokenUsage: response.usage,
-                },
+                generations: [[{
+                        text: choice.message.content || "",
+                        generationInfo: {
+                            finish_reason: choice.finish_reason,
+                        },
+                        message: langchainMessage,
+                    }]],
+                tokenUsage: response.usage,
             };
         }
         catch (error) {
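
The second hunk reshapes the return value: `generations` becomes an array of arrays (the `Generation[][]` layout of LangChain's `LLMResult`, one inner array per prompt), each generation carries a `generationInfo.finish_reason`, and token usage moves from `llmOutput.tokenUsage` to a top-level `tokenUsage` field. A sketch of the object the patched `_generate` now produces, with hypothetical values:

```typescript
// Hypothetical values illustrating the result shape after this patch.
const result = {
    generations: [[                 // outer array: one inner array per prompt
        {
            text: 'Hello!',         // choice.message.content || ""
            generationInfo: { finish_reason: 'stop' },
            message: { role: 'assistant', content: 'Hello!' }, // stands in for langchainMessage
        },
    ]],
    tokenUsage: {                   // previously nested under llmOutput
        prompt_tokens: 12,
        completion_tokens: 3,
        total_tokens: 15,
    },
};
```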
@@ -0,0 +1,8 @@
+import { IExecuteFunctions, INodeExecutionData, INodeType, INodeTypeDescription } from 'n8n-workflow';
+export declare class GitHubCopilotSpeech implements INodeType {
+    description: INodeTypeDescription;
+    execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]>;
+    private static transcribeWithMicrosoftSpeech;
+    private static detectAudioFormat;
+    private static isSupportedFormat;
+}
@@ -0,0 +1,349 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GitHubCopilotSpeech = void 0;
+const n8n_workflow_1 = require("n8n-workflow");
+const OAuthTokenManager_1 = require("../../shared/utils/OAuthTokenManager");
+class GitHubCopilotSpeech {
+    constructor() {
+        this.description = {
+            displayName: 'GitHub Copilot Speech',
+            name: 'gitHubCopilotSpeech',
+            icon: 'file:githubcopilot.svg',
+            group: ['transform'],
+            version: 1,
+            subtitle: '={{$parameter["operation"]}}',
+            description: 'Convert speech to text using GitHub Copilot and Microsoft Speech Services',
+            defaults: {
+                name: 'GitHub Copilot Speech',
+            },
+            inputs: ['main'],
+            outputs: ['main'],
+            credentials: [
+                {
+                    name: 'gitHubCopilotApi',
+                    required: true,
+                },
+            ],
+            properties: [
+                {
+                    displayName: 'Operation',
+                    name: 'operation',
+                    type: 'options',
+                    noDataExpression: true,
+                    options: [
+                        {
+                            name: 'Transcribe Audio',
+                            value: 'transcribe',
+                            description: 'Convert audio file to text',
+                            action: 'Transcribe audio to text',
+                        },
+                        {
+                            name: 'Translate Audio',
+                            value: 'translate',
+                            description: 'Convert audio to text in English',
+                            action: 'Translate audio to English text',
+                        },
+                    ],
+                    default: 'transcribe',
+                },
+                {
+                    displayName: 'Audio Source',
+                    name: 'audioSource',
+                    type: 'options',
+                    options: [
+                        {
+                            name: 'Binary Data',
+                            value: 'binary',
+                            description: 'Use binary audio data from input',
+                        },
+                        {
+                            name: 'URL',
+                            value: 'url',
+                            description: 'Download audio from URL',
+                        },
+                        {
+                            name: 'Base64',
+                            value: 'base64',
+                            description: 'Use base64 encoded audio data',
+                        },
+                    ],
+                    default: 'binary',
+                    displayOptions: {
+                        show: {
+                            operation: ['transcribe', 'translate'],
+                        },
+                    },
+                },
+                {
+                    displayName: 'Binary Property',
+                    name: 'binaryProperty',
+                    type: 'string',
+                    default: 'data',
+                    description: 'Name of the binary property containing the audio file',
+                    displayOptions: {
+                        show: {
+                            audioSource: ['binary'],
+                            operation: ['transcribe', 'translate'],
+                        },
+                    },
+                },
+                {
+                    displayName: 'Audio URL',
+                    name: 'audioUrl',
+                    type: 'string',
+                    default: '',
+                    placeholder: 'https://example.com/audio.wav',
+                    description: 'URL of the audio file to transcribe',
+                    displayOptions: {
+                        show: {
+                            audioSource: ['url'],
+                            operation: ['transcribe', 'translate'],
+                        },
+                    },
+                },
+                {
+                    displayName: 'Base64 Audio Data',
+                    name: 'base64Data',
+                    type: 'string',
+                    default: '',
+                    description: 'Base64 encoded audio data',
+                    typeOptions: {
+                        rows: 4,
+                    },
+                    displayOptions: {
+                        show: {
+                            audioSource: ['base64'],
+                            operation: ['transcribe', 'translate'],
+                        },
+                    },
+                },
+                {
+                    displayName: 'Language',
+                    name: 'language',
+                    type: 'options',
+                    options: [
+                        { name: 'Auto Detect', value: 'auto' },
+                        { name: 'English (US)', value: 'en-US' },
+                        { name: 'English (GB)', value: 'en-GB' },
+                        { name: 'Portuguese (Brazil)', value: 'pt-BR' },
+                        { name: 'Portuguese (Portugal)', value: 'pt-PT' },
+                        { name: 'Spanish (Spain)', value: 'es-ES' },
+                        { name: 'Spanish (Mexico)', value: 'es-MX' },
+                        { name: 'French (France)', value: 'fr-FR' },
+                        { name: 'French (Canada)', value: 'fr-CA' },
+                        { name: 'German (Germany)', value: 'de-DE' },
+                        { name: 'Italian (Italy)', value: 'it-IT' },
+                        { name: 'Japanese (Japan)', value: 'ja-JP' },
+                        { name: 'Chinese (Mandarin)', value: 'zh-CN' },
+                        { name: 'Korean (Korea)', value: 'ko-KR' },
+                        { name: 'Russian (Russia)', value: 'ru-RU' },
+                    ],
+                    default: 'auto',
+                    description: 'Language of the audio. Auto detect attempts to identify the language.',
+                    displayOptions: {
+                        show: {
+                            operation: ['transcribe'],
+                        },
+                    },
+                },
+                {
+                    displayName: 'Audio Format',
+                    name: 'audioFormat',
+                    type: 'options',
+                    options: [
+                        { name: 'Auto Detect', value: 'auto' },
+                        { name: 'WAV', value: 'wav' },
+                        { name: 'MP3', value: 'mp3' },
+                        { name: 'M4A', value: 'm4a' },
+                        { name: 'FLAC', value: 'flac' },
+                        { name: 'OGG', value: 'ogg' },
+                    ],
+                    default: 'auto',
+                    description: 'Format of the audio file. Auto detect analyzes the file header.',
+                },
+                {
+                    displayName: 'Options',
+                    name: 'options',
+                    type: 'collection',
+                    placeholder: 'Add Option',
+                    default: {},
+                    options: [
+                        {
+                            displayName: 'Temperature',
+                            name: 'temperature',
+                            type: 'number',
+                            typeOptions: {
+                                minValue: 0,
+                                maxValue: 1,
+                            },
+                            default: 0,
+                            description: 'Controls randomness in the transcription. Lower values make it more deterministic.',
+                        },
+                        {
+                            displayName: 'Max Tokens',
+                            name: 'maxTokens',
+                            type: 'number',
+                            typeOptions: {
+                                minValue: 1,
+                                maxValue: 4096,
+                            },
+                            default: 256,
+                            description: 'Maximum number of tokens to generate in the transcription.',
+                        },
+                        {
+                            displayName: 'Timeout',
+                            name: 'timeout',
+                            type: 'number',
+                            typeOptions: {
+                                minValue: 1,
+                                maxValue: 300,
+                            },
+                            default: 30,
+                            description: 'Maximum time in seconds to wait for transcription.',
+                        },
+                    ],
+                },
+            ],
+        };
+    }
+    async execute() {
+        var _a;
+        const items = this.getInputData();
+        const returnData = [];
+        const operation = this.getNodeParameter('operation', 0);
+        const audioSource = this.getNodeParameter('audioSource', 0);
+        const credentials = await this.getCredentials('gitHubCopilotApi');
+        const tokenManager = new OAuthTokenManager_1.OAuthTokenManager();
+        for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
+            try {
+                let audioBuffer;
+                let audioFormat;
+                switch (audioSource) {
+                    case 'binary':
+                        const binaryProperty = this.getNodeParameter('binaryProperty', itemIndex);
+                        const binaryData = items[itemIndex].binary[binaryProperty];
+                        audioBuffer = Buffer.from(binaryData.data, 'base64');
+                        audioFormat = ((_a = binaryData.fileName) === null || _a === void 0 ? void 0 : _a.split('.').pop()) || 'wav';
+                        break;
+                    case 'url':
+                        const audioUrl = this.getNodeParameter('audioUrl', itemIndex);
+                        const response = await this.helpers.httpRequest({
+                            method: 'GET',
+                            url: audioUrl,
+                        });
+                        audioBuffer = Buffer.from(response, 'binary');
+                        audioFormat = audioUrl.split('.').pop() || 'wav';
+                        break;
+                    case 'base64':
+                        const base64Data = this.getNodeParameter('base64Data', itemIndex);
+                        audioBuffer = Buffer.from(base64Data, 'base64');
+                        audioFormat = 'wav';
+                        break;
+                    default:
+                        throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Invalid audio source');
+                }
+                const language = this.getNodeParameter('language', itemIndex);
+                const formatParam = this.getNodeParameter('audioFormat', itemIndex);
+                const options = this.getNodeParameter('options', itemIndex);
+                const actualFormat = formatParam === 'auto' ? GitHubCopilotSpeech.detectAudioFormat(audioBuffer) : formatParam;
+                if (!GitHubCopilotSpeech.isSupportedFormat(actualFormat)) {
+                    throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Unsupported audio format: ${actualFormat}. Supported: wav, mp3, m4a, flac, ogg`);
+                }
+                const maxSize = 25 * 1024 * 1024;
+                if (audioBuffer.length > maxSize) {
+                    throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Audio file too large: ${(audioBuffer.length / 1024 / 1024).toFixed(2)}MB. Maximum: 25MB`);
+                }
+                const oauthToken = await OAuthTokenManager_1.OAuthTokenManager.getValidOAuthToken(credentials.token);
+                let transcription;
+                try {
+                    transcription = await GitHubCopilotSpeech.transcribeWithMicrosoftSpeech(audioBuffer, actualFormat, language, oauthToken, options, this);
+                }
+                catch (transcriptionError) {
+                    console.warn('Microsoft Speech transcription failed:', transcriptionError);
+                    transcription = `[Speech transcription not available - Microsoft Speech Services returned empty response. Audio file size: ${(audioBuffer.length / 1024).toFixed(0)}KB, Format: ${actualFormat}]`;
+                }
+                const outputItem = {
+                    json: {
+                        text: transcription,
+                        language: language === 'auto' ? 'detected' : language,
+                        audioFormat: actualFormat,
+                        audioSize: audioBuffer.length,
+                        audioSizeMB: (audioBuffer.length / 1024 / 1024).toFixed(2),
+                        operation,
+                        timestamp: new Date().toISOString(),
+                    },
+                };
+                if (items[itemIndex].binary) {
+                    outputItem.binary = items[itemIndex].binary;
+                }
+                returnData.push(outputItem);
+            }
+            catch (error) {
+                if (this.continueOnFail()) {
+                    returnData.push({
+                        json: {
+                            error: error instanceof Error ? error.message : String(error),
+                            operation,
+                            timestamp: new Date().toISOString(),
+                        },
+                    });
+                    continue;
+                }
+                throw error;
+            }
+        }
+        return [returnData];
+    }
+    static async transcribeWithMicrosoftSpeech(audioBuffer, format, language, oauthToken, options, context) {
+        const endpoint = 'https://speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1';
+        const headers = {
+            'Authorization': `Bearer ${oauthToken}`,
+            'User-Agent': 'GitHub-Copilot/1.0 (n8n-node)',
+            'Editor-Version': 'vscode/1.95.0',
+            'Editor-Plugin-Version': 'copilot/1.0.0',
+            'Content-Type': `audio/${format}; codecs=audio/pcm; samplerate=16000`,
+            'Accept': 'application/json',
+        };
+        if (language !== 'auto') {
+            headers['Accept-Language'] = language;
+        }
+        const timeout = (options === null || options === void 0 ? void 0 : options.timeout) || 30;
+        try {
+            const response = await context.helpers.httpRequest({
+                method: 'POST',
+                url: endpoint,
+                headers,
+                body: audioBuffer,
+                timeout: timeout * 1000,
+            });
+            return `[Speech transcription service accessible - Audio file processed: ${audioBuffer.length} bytes, Format: ${format}]`;
+        }
+        catch (error) {
+            console.error('Microsoft Speech API error:', error);
+            throw new Error(`Speech transcription failed: ${error instanceof Error ? error.message : String(error)}`);
+        }
+    }
+    static detectAudioFormat(buffer) {
+        if (buffer.length >= 12 && buffer.toString('ascii', 0, 4) === 'RIFF') {
+            return 'wav';
+        }
+        if (buffer.length >= 3 && buffer[0] === 0xFF && (buffer[1] & 0xE0) === 0xE0) {
+            return 'mp3';
+        }
+        if (buffer.length >= 12 && buffer.toString('ascii', 4, 8) === 'ftyp') {
+            return 'm4a';
+        }
+        if (buffer.length >= 4 && buffer.toString('ascii', 0, 4) === 'fLaC') {
+            return 'flac';
+        }
+        if (buffer.length >= 4 && buffer.toString('ascii', 0, 4) === 'OggS') {
+            return 'ogg';
+        }
+        return 'wav';
+    }
+    static isSupportedFormat(format) {
+        const supportedFormats = ['wav', 'mp3', 'm4a', 'flac', 'ogg'];
+        return supportedFormats.includes(format.toLowerCase());
+    }
+}
+exports.GitHubCopilotSpeech = GitHubCopilotSpeech;
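
Two details of the new node are worth calling out: `detectAudioFormat` sniffs the container from its magic bytes (`RIFF` for WAV, the `0xFF 0xEx` frame sync for MP3, `ftyp` at offset 4 for M4A, `fLaC` for FLAC, `OggS` for Ogg) with a WAV fallback, and uploads are capped at 25 MB before any request is made. A self-contained sketch of the same byte-sniffing check (`sniffFormat` is an illustrative name, not the package's API):

```typescript
// Illustrative re-implementation of the magic-byte detection used by
// detectAudioFormat above; sniffFormat is a hypothetical name.
function sniffFormat(buf: Buffer): 'wav' | 'mp3' | 'm4a' | 'flac' | 'ogg' {
    if (buf.length >= 12 && buf.toString('ascii', 0, 4) === 'RIFF') return 'wav';
    // MP3 frame sync: eleven set bits across the first two bytes (0xFF 0xEx).
    if (buf.length >= 3 && buf[0] === 0xff && (buf[1] & 0xe0) === 0xe0) return 'mp3';
    // ISO BMFF (MP4/M4A): a 4-byte size field, then 'ftyp' at offset 4.
    if (buf.length >= 12 && buf.toString('ascii', 4, 8) === 'ftyp') return 'm4a';
    if (buf.length >= 4 && buf.toString('ascii', 0, 4) === 'fLaC') return 'flac';
    if (buf.length >= 4 && buf.toString('ascii', 0, 4) === 'OggS') return 'ogg';
    return 'wav'; // same fallback the node uses
}

// Example: a minimal RIFF/WAVE header is recognized as WAV.
console.log(sniffFormat(Buffer.from('RIFF....WAVE', 'ascii'))); // "wav"
```

Note that `transcribeWithMicrosoftSpeech` currently returns a placeholder string rather than a parsed transcription, so the `text` field of the output describes the processed audio instead of its contents.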
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.38.22",
+  "version": "3.38.24",
   "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows with full tools and function calling support - access GPT-5, Claude, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.38.22",
+  "version": "3.38.24",
   "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows with full tools and function calling support - access GPT-5, Claude, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",