n8n-nodes-github-copilot 3.4.0 → 3.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/credentials/GitHubApi.credentials.d.ts +8 -8
- package/dist/credentials/GitHubApi.credentials.js +50 -50
- package/dist/credentials/GitHubApiManual.credentials.d.ts +7 -7
- package/dist/credentials/GitHubApiManual.credentials.js +33 -33
- package/dist/nodes/GitHubCopilot/GitHubCopilot.node.d.ts +5 -5
- package/dist/nodes/GitHubCopilot/GitHubCopilot.node.js +324 -324
- package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.d.ts +5 -5
- package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js +141 -146
- package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.d.ts +2 -2
- package/dist/nodes/GitHubCopilotChatAPI/nodeProperties.js +172 -202
- package/dist/nodes/GitHubCopilotChatAPI/utils/helpers.d.ts +19 -21
- package/dist/nodes/GitHubCopilotChatAPI/utils/helpers.js +130 -131
- package/dist/nodes/GitHubCopilotChatAPI/utils/imageProcessor.d.ts +8 -8
- package/dist/nodes/GitHubCopilotChatAPI/utils/imageProcessor.js +100 -101
- package/dist/nodes/GitHubCopilotChatAPI/utils/index.d.ts +3 -3
- package/dist/nodes/GitHubCopilotChatAPI/utils/index.js +19 -19
- package/dist/nodes/GitHubCopilotChatAPI/utils/mediaDetection.d.ts +14 -14
- package/dist/nodes/GitHubCopilotChatAPI/utils/mediaDetection.js +70 -71
- package/dist/nodes/GitHubCopilotChatAPI/utils/modelCapabilities.d.ts +5 -5
- package/dist/nodes/GitHubCopilotChatAPI/utils/modelCapabilities.js +113 -113
- package/dist/nodes/GitHubCopilotChatAPI/utils/types.d.ts +57 -57
- package/dist/nodes/GitHubCopilotChatAPI/utils/types.js +2 -2
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.d.ts +5 -5
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js +140 -211
- package/dist/shared/models/GitHubCopilotModels.d.ts +43 -0
- package/dist/shared/models/GitHubCopilotModels.js +218 -0
- package/package.json +5 -4
- package/dist/credentials/N8nApi.credentials.d.ts +0 -7
- package/dist/credentials/N8nApi.credentials.js +0 -31
- package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.backup.d.ts +0 -5
- package/dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.backup.js +0 -651
- package/dist/nodes/GitHubCopilotChatAPI/utils/audioProcessor.d.ts +0 -11
- package/dist/nodes/GitHubCopilotChatAPI/utils/audioProcessor.js +0 -86
- package/dist/nodes/N8nAiAgent/N8nAiAgent.node.d.ts +0 -5
- package/dist/nodes/N8nAiAgent/N8nAiAgent.node.js +0 -152
package/dist/nodes/GitHubCopilotChatAPI/utils/audioProcessor.d.ts (removed)

```diff
@@ -1,11 +0,0 @@
-import { IExecuteFunctions } from 'n8n-workflow';
-import { ProcessedFileResult } from './types';
-export declare function processAudioFile(context: IExecuteFunctions, itemIndex: number, audioSource: string, audioFile?: string, audioUrl?: string, audioProperty?: string): Promise<ProcessedFileResult>;
-export declare function chunkAudioData(base64Data: string, maxChunkSize?: number): string[];
-export declare function optimizeAudioForTokens(base64Data: string, maxTokens?: number): {
-    data: string;
-    truncated: boolean;
-    originalTokens: number;
-    finalTokens: number;
-};
-export declare function createAudioSummary(filename: string, size: number, duration?: number): string;
```
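For orientation, a minimal usage sketch of the removed audio API, inferred only from the declarations above. The wrapper function and the binary property name `'data'` are hypothetical; only the two imported helpers and their signatures come from the deleted file:

```ts
// Hypothetical sketch of how the removed helpers composed, based solely on
// the deleted declarations. 'data' is an assumed binary property name.
import { IExecuteFunctions } from 'n8n-workflow';
import { processAudioFile, optimizeAudioForTokens } from './audioProcessor';

async function attachAudio(context: IExecuteFunctions, itemIndex: number) {
    // Resolve the audio from the item's binary data and base64-encode it.
    const file = await processAudioFile(context, itemIndex, 'binary', undefined, undefined, 'data');
    // Trim the payload to the helper's default token budget before use.
    const optimized = optimizeAudioForTokens(file.data);
    return { ...file, data: optimized.data, truncated: optimized.truncated };
}
```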
package/dist/nodes/GitHubCopilotChatAPI/utils/audioProcessor.js (removed)

```diff
@@ -1,86 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.createAudioSummary = exports.optimizeAudioForTokens = exports.chunkAudioData = exports.processAudioFile = void 0;
-const helpers_1 = require("./helpers");
-async function processAudioFile(context, itemIndex, audioSource, audioFile, audioUrl, audioProperty) {
-    var _a, _b;
-    let audioBuffer;
-    let filename;
-    switch (audioSource) {
-        case 'file':
-            if (!audioFile) {
-                throw new Error('Audio file content is required when source is "file"');
-            }
-            audioBuffer = Buffer.from(audioFile, 'base64');
-            filename = 'uploaded_audio.mp3';
-            break;
-        case 'url':
-            if (!audioUrl) {
-                throw new Error('Audio URL is required when source is "url"');
-            }
-            audioBuffer = await (0, helpers_1.downloadFileFromUrl)(audioUrl);
-            filename = audioUrl.split('/').pop() || 'downloaded_audio.mp3';
-            break;
-        case 'binary':
-            if (!audioProperty) {
-                throw new Error('Audio property name is required when source is "binary"');
-            }
-            audioBuffer = await (0, helpers_1.getFileFromBinary)(context, itemIndex, audioProperty);
-            const items = context.getInputData();
-            const item = items[itemIndex];
-            filename = ((_b = (_a = item.binary) === null || _a === void 0 ? void 0 : _a[audioProperty]) === null || _b === void 0 ? void 0 : _b.fileName) || 'binary_audio.mp3';
-            break;
-        default:
-            throw new Error(`Invalid audio source: ${audioSource}`);
-    }
-    (0, helpers_1.validateFileSize)(audioBuffer, 25600);
-    const base64Audio = audioBuffer.toString('base64');
-    const estimatedTokens = (0, helpers_1.estimateTokens)(base64Audio);
-    if (estimatedTokens > 100000) {
-        throw new Error(`Audio file too large (${estimatedTokens} estimated tokens). Consider using a smaller file.`);
-    }
-    const mimeType = (0, helpers_1.getAudioMimeType)(filename);
-    return {
-        data: base64Audio,
-        mimeType,
-        filename,
-        size: audioBuffer.length,
-        estimatedTokens
-    };
-}
-exports.processAudioFile = processAudioFile;
-function chunkAudioData(base64Data, maxChunkSize = 50000) {
-    const chunks = [];
-    for (let i = 0; i < base64Data.length; i += maxChunkSize) {
-        chunks.push(base64Data.slice(i, i + maxChunkSize));
-    }
-    return chunks;
-}
-exports.chunkAudioData = chunkAudioData;
-function optimizeAudioForTokens(base64Data, maxTokens = 100000) {
-    const originalTokens = (0, helpers_1.estimateTokens)(base64Data);
-    if (originalTokens <= maxTokens) {
-        return {
-            data: base64Data,
-            truncated: false,
-            originalTokens,
-            finalTokens: originalTokens
-        };
-    }
-    const compressionRatio = maxTokens / originalTokens;
-    const targetLength = Math.floor(base64Data.length * compressionRatio);
-    const compressedData = base64Data.slice(0, Math.max(targetLength, 1000));
-    return {
-        data: compressedData,
-        truncated: true,
-        originalTokens,
-        finalTokens: (0, helpers_1.estimateTokens)(compressedData)
-    };
-}
-exports.optimizeAudioForTokens = optimizeAudioForTokens;
-function createAudioSummary(filename, size, duration) {
-    const sizeKB = Math.round(size / 1024);
-    const durationText = duration ? ` (${Math.round(duration)}s)` : '';
-    return `Audio file: ${filename} - ${sizeKB}KB${durationText}. File too large for direct processing, providing description instead.`;
-}
-exports.createAudioSummary = createAudioSummary;
```
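The core of the removed `optimizeAudioForTokens` is a proportional cut: when the estimated token count exceeds the budget, the base64 string is sliced to `length × (maxTokens / originalTokens)` characters, never below 1000. A self-contained sketch of that arithmetic, with an assumed 4-characters-per-token estimator standing in for the real `estimateTokens` from `./helpers`:

```ts
// Sketch of the deleted truncation logic. The estimator below is an
// assumption; the actual estimateTokens implementation lived in ./helpers.
const estimateTokens = (s: string): number => Math.ceil(s.length / 4); // assumed heuristic

function truncateToBudget(base64Data: string, maxTokens = 100000): string {
    const originalTokens = estimateTokens(base64Data);
    if (originalTokens <= maxTokens) return base64Data; // already within budget
    const compressionRatio = maxTokens / originalTokens;
    const targetLength = Math.floor(base64Data.length * compressionRatio);
    // Keep at least 1000 characters, mirroring the deleted implementation.
    return base64Data.slice(0, Math.max(targetLength, 1000));
}
```

Note that this keeps only a prefix of the encoded audio: the "compressed" payload is a leading sample of the file, not a re-encoded, shorter clip.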
package/dist/nodes/N8nAiAgent/N8nAiAgent.node.js (removed)

```diff
@@ -1,152 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.N8nAiAgent = void 0;
-class N8nAiAgent {
-    constructor() {
-        this.description = {
-            displayName: 'N8N AI Agent Model',
-            name: 'n8nAiAgent',
-            icon: 'file:n8n-ai.svg',
-            group: ['transform'],
-            version: 1,
-            description: 'N8N AI Agent chat model for advanced AI capabilities with tool calling and memory',
-            defaults: {
-                name: 'N8N AI Agent Model',
-            },
-            codex: {
-                categories: ['AI'],
-                subcategories: {
-                    AI: ['Language Models', 'Root Nodes'],
-                    'Language Models': ['Chat Models (Recommended)'],
-                },
-                resources: {
-                    primaryDocumentation: [
-                        {
-                            url: 'https://docs.n8n.io/',
-                        },
-                    ],
-                },
-            },
-            inputs: [],
-            outputs: ["ai_languageModel"],
-            outputNames: ['Model'],
-            credentials: [
-                {
-                    name: 'n8nApi',
-                    required: true,
-                },
-            ],
-            properties: [
-                {
-                    displayName: 'Model',
-                    name: 'model',
-                    type: 'options',
-                    default: 'n8n-ai-agent',
-                    description: 'The N8N AI Agent model to use',
-                    options: [
-                        {
-                            name: 'N8N AI Agent',
-                            value: 'n8n-ai-agent',
-                            description: 'N8N AI Agent with tool calling and memory capabilities',
-                        },
-                        {
-                            name: 'N8N AI Agent Pro',
-                            value: 'n8n-ai-agent-pro',
-                            description: 'Enhanced version with advanced features',
-                        },
-                    ],
-                },
-                {
-                    displayName: 'Options',
-                    name: 'options',
-                    placeholder: 'Add Option',
-                    description: 'Additional options for the AI model',
-                    type: 'collection',
-                    default: {},
-                    options: [
-                        {
-                            displayName: 'Temperature',
-                            name: 'temperature',
-                            default: 0.7,
-                            typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
-                            description: 'Controls randomness in output. Lower values make responses more focused.',
-                            type: 'number',
-                        },
-                        {
-                            displayName: 'Maximum Number of Tokens',
-                            name: 'maxTokens',
-                            default: 1000,
-                            description: 'The maximum number of tokens to generate',
-                            type: 'number',
-                            typeOptions: {
-                                maxValue: 32768,
-                            },
-                        },
-                        {
-                            displayName: 'Enable Tools',
-                            name: 'enableTools',
-                            type: 'boolean',
-                            default: false,
-                            description: 'Whether to enable tool calling capabilities',
-                        },
-                        {
-                            displayName: 'Enable Memory',
-                            name: 'enableMemory',
-                            type: 'boolean',
-                            default: false,
-                            description: 'Whether to enable conversation memory',
-                        },
-                    ],
-                },
-            ],
-        };
-    }
-    async supplyData(itemIndex) {
-        const credentials = await this.getCredentials('n8nApi');
-        const model = this.getNodeParameter('model', itemIndex);
-        const options = this.getNodeParameter('options', itemIndex, {});
-        const mockModel = {
-            _call: async (messages, options) => {
-                const apiKey = credentials.apiKey;
-                const baseUrl = credentials.baseUrl || 'http://localhost:5678';
-                const requestBody = {
-                    model: model,
-                    messages: messages,
-                    temperature: (options === null || options === void 0 ? void 0 : options.temperature) || 0.7,
-                    max_tokens: (options === null || options === void 0 ? void 0 : options.maxTokens) || 1000,
-                    tools: (options === null || options === void 0 ? void 0 : options.enableTools) || false,
-                    memory: (options === null || options === void 0 ? void 0 : options.enableMemory) || false,
-                };
-                try {
-                    const response = await fetch(`${baseUrl}/api/v1/ai-agent/chat`, {
-                        method: 'POST',
-                        headers: {
-                            'Content-Type': 'application/json',
-                            'Authorization': `Bearer ${apiKey}`,
-                        },
-                        body: JSON.stringify(requestBody),
-                    });
-                    if (!response.ok) {
-                        throw new Error(`API request failed: ${response.statusText}`);
-                    }
-                    const result = await response.json();
-                    return result.response || result.message || 'No response from AI agent';
-                }
-                catch (error) {
-                    return `N8N AI Agent response using model: ${model} (${error instanceof Error ? error.message : 'Error'})`;
-                }
-            },
-            _modelType: 'chat_model',
-            _llmType: 'n8n-ai-agent',
-            modelName: model,
-            temperature: options.temperature || 0.7,
-            maxTokens: options.maxTokens || 1000,
-            enableTools: options.enableTools || false,
-            enableMemory: options.enableMemory || false,
-        };
-        return {
-            response: mockModel,
-        };
-    }
-}
-exports.N8nAiAgent = N8nAiAgent;
```
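Stripped of the node scaffolding, the removed `_call` handler reduced to a single authenticated POST. A standalone sketch of that request follows; `baseUrl`, the credential source, and the message shape are placeholders taken from the deleted code's defaults, and nothing in this diff establishes that `/api/v1/ai-agent/chat` exists on a given n8n instance (the deleted code swallowed every error into a canned fallback string):

```ts
// Sketch of the HTTP call the removed _call handler built. The endpoint and
// body fields mirror the deleted code; the credential source is hypothetical.
const baseUrl = 'http://localhost:5678'; // the removed code's fallback default
const apiKey = process.env.N8N_API_KEY ?? ''; // hypothetical credential source

async function chat(messages: Array<{ role: string; content: string }>) {
    const response = await fetch(`${baseUrl}/api/v1/ai-agent/chat`, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
            model: 'n8n-ai-agent',
            messages,
            temperature: 0.7,
            max_tokens: 1000,
            tools: false,
            memory: false,
        }),
    });
    if (!response.ok) throw new Error(`API request failed: ${response.statusText}`);
    const result = await response.json();
    return result.response ?? result.message;
}
```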