n8n-nodes-github-copilot 3.3.0 → 3.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/credentials/N8nApi.credentials.d.ts +7 -0
- package/dist/credentials/N8nApi.credentials.js +31 -0
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.d.ts +5 -0
- package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js +211 -0
- package/dist/nodes/GitHubCopilotChatModel/copilot.svg +34 -0
- package/dist/nodes/N8nAiAgent/N8nAiAgent.node.d.ts +2 -2
- package/dist/nodes/N8nAiAgent/N8nAiAgent.node.js +125 -187
- package/package.json +3 -3
- package/dist/nodes/N8nAiAgent/n8n-ai.svg +0 -35
- package/dist/nodes/N8nAiAgent/nodeProperties.d.ts +0 -2
- package/dist/nodes/N8nAiAgent/nodeProperties.js +0 -432
package/dist/credentials/N8nApi.credentials.js
ADDED
@@ -0,0 +1,31 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.N8nApi = void 0;
+class N8nApi {
+    constructor() {
+        this.name = 'n8nApi';
+        this.displayName = 'N8N API';
+        this.documentationUrl = 'https://docs.n8n.io/api/';
+        this.properties = [
+            {
+                displayName: 'API Key',
+                name: 'apiKey',
+                type: 'string',
+                typeOptions: { password: true },
+                default: '',
+                required: true,
+                description: 'The API key for N8N API access',
+            },
+            {
+                displayName: 'Base URL',
+                name: 'baseUrl',
+                type: 'string',
+                default: 'https://app.n8n.cloud/api/v1',
+                required: true,
+                description: 'The base URL for your N8N instance API',
+                placeholder: 'https://your-instance.app.n8n.cloud/api/v1',
+            },
+        ];
+    }
+}
+exports.N8nApi = N8nApi;
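The +7-line companion declaration file (N8nApi.credentials.d.ts) is not expanded in this diff view. For orientation only, a minimal TypeScript sketch of a credential source that would compile to the JavaScript above could look as follows, assuming the usual ICredentialType and INodeProperties interfaces from n8n-workflow; it is reconstructed from the emitted code, not taken from the package's sources.

```ts
import { ICredentialType, INodeProperties } from 'n8n-workflow';

// Sketch only: reconstructed from the compiled output above, not the package's actual source.
export class N8nApi implements ICredentialType {
	name = 'n8nApi';
	displayName = 'N8N API';
	documentationUrl = 'https://docs.n8n.io/api/';
	properties: INodeProperties[] = [
		{
			displayName: 'API Key',
			name: 'apiKey',
			type: 'string',
			typeOptions: { password: true }, // tells n8n to mask the value in the credentials UI
			default: '',
			required: true,
			description: 'The API key for N8N API access',
		},
		{
			displayName: 'Base URL',
			name: 'baseUrl',
			type: 'string',
			default: 'https://app.n8n.cloud/api/v1',
			required: true,
			description: 'The base URL for your N8N instance API',
			placeholder: 'https://your-instance.app.n8n.cloud/api/v1',
		},
	];
}
```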
package/dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js
ADDED
@@ -0,0 +1,211 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.GitHubCopilotChatModel = void 0;
+class GitHubCopilotChatModel {
+    constructor() {
+        this.description = {
+            displayName: 'GitHub Copilot Chat Model',
+            name: 'gitHubCopilotChatModel',
+            icon: 'file:copilot.svg',
+            group: ['transform'],
+            version: 1,
+            description: 'GitHub Copilot chat model for AI workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription',
+            defaults: {
+                name: 'GitHub Copilot Chat Model',
+            },
+            codex: {
+                categories: ['AI'],
+                subcategories: {
+                    AI: ['Language Models', 'Root Nodes'],
+                    'Language Models': ['Chat Models (Recommended)'],
+                },
+                resources: {
+                    primaryDocumentation: [
+                        {
+                            url: 'https://docs.github.com/copilot/using-github-copilot/using-github-copilot-chat',
+                        },
+                    ],
+                },
+            },
+            inputs: [],
+            outputs: ["ai_languageModel"],
+            outputNames: ['Model'],
+            credentials: [
+                {
+                    name: 'gitHubApiManual',
+                    required: true,
+                },
+            ],
+            properties: [
+                {
+                    displayName: 'Model',
+                    name: 'model',
+                    type: 'options',
+                    default: 'gpt-4o',
+                    description: 'The GitHub Copilot model to use',
+                    options: [
+                        {
+                            name: 'GPT-4o (Recommended)',
+                            value: 'gpt-4o',
+                            description: 'Latest GPT-4 model with vision and reasoning capabilities',
+                        },
+                        {
+                            name: 'GPT-5 (Preview)',
+                            value: 'gpt-5',
+                            description: 'Next generation GPT model (requires access)',
+                        },
+                        {
+                            name: 'Claude 3.5 Sonnet',
+                            value: 'claude-3-5-sonnet-20241022',
+                            description: 'Anthropic Claude with excellent reasoning',
+                        },
+                        {
+                            name: 'Claude 3.5 Haiku',
+                            value: 'claude-3-5-haiku-20241022',
+                            description: 'Faster Claude model for simple tasks',
+                        },
+                        {
+                            name: 'Gemini 2.0 Flash',
+                            value: 'gemini-2.0-flash-exp',
+                            description: 'Google\'s latest Gemini model',
+                        },
+                        {
+                            name: 'Gemini 1.5 Pro',
+                            value: 'gemini-1.5-pro-002',
+                            description: 'Google Gemini Pro with large context',
+                        },
+                        {
+                            name: 'o1 Preview',
+                            value: 'o1-preview',
+                            description: 'OpenAI o1 with advanced reasoning',
+                        },
+                        {
+                            name: 'o1 Mini',
+                            value: 'o1-mini',
+                            description: 'Faster o1 model for coding tasks',
+                        },
+                    ],
+                },
+                {
+                    displayName: 'Options',
+                    name: 'options',
+                    placeholder: 'Add Option',
+                    description: 'Additional options for the GitHub Copilot model',
+                    type: 'collection',
+                    default: {},
+                    options: [
+                        {
+                            displayName: 'Temperature',
+                            name: 'temperature',
+                            default: 0.7,
+                            typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
+                            description: 'Controls randomness in output. Lower values make responses more focused.',
+                            type: 'number',
+                        },
+                        {
+                            displayName: 'Maximum Number of Tokens',
+                            name: 'maxTokens',
+                            default: 1000,
+                            description: 'The maximum number of tokens to generate',
+                            type: 'number',
+                            typeOptions: {
+                                maxValue: 32768,
+                            },
+                        },
+                        {
+                            displayName: 'Top P',
+                            name: 'topP',
+                            default: 1,
+                            typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 2 },
+                            description: 'Controls diversity of output by nucleus sampling',
+                            type: 'number',
+                        },
+                        {
+                            displayName: 'Enable Vision',
+                            name: 'enableVision',
+                            type: 'boolean',
+                            default: true,
+                            description: 'Whether to enable image processing capabilities',
+                        },
+                        {
+                            displayName: 'System Message',
+                            name: 'systemMessage',
+                            type: 'string',
+                            default: '',
+                            description: 'System message to set the behavior of the assistant',
+                            typeOptions: {
+                                rows: 3,
+                            },
+                        },
+                    ],
+                },
+            ],
+        };
+    }
+    async supplyData(itemIndex) {
+        const model = this.getNodeParameter('model', itemIndex);
+        const options = this.getNodeParameter('options', itemIndex, {});
+        const copilotModel = {
+            _call: async (messages) => {
+                var _a;
+                const requestBody = {
+                    intent: true,
+                    model: model,
+                    messages: messages,
+                    n: 1,
+                    stream: false,
+                    temperature: options.temperature || 0.7,
+                    max_tokens: options.maxTokens || 1000,
+                    top_p: options.topP || 1,
+                };
+                if (options.systemMessage) {
+                    const systemMessage = {
+                        role: 'system',
+                        content: options.systemMessage,
+                    };
+                    requestBody.messages = [systemMessage, ...messages];
+                }
+                try {
+                    const credentials = await this.getCredentials('gitHubApiManual');
+                    const response = await fetch('https://api.githubcopilot.com/chat/completions', {
+                        method: 'POST',
+                        headers: {
+                            'Authorization': `Bearer ${credentials.accessToken}`,
+                            'Content-Type': 'application/json',
+                            'User-Agent': 'n8n-github-copilot-chat-model',
+                            ...(options.enableVision && {
+                                'Copilot-Vision-Request': 'true',
+                                'Copilot-Media-Request': 'true'
+                            }),
+                        },
+                        body: JSON.stringify(requestBody),
+                    });
+                    if (!response.ok) {
+                        throw new Error(`GitHub Copilot API error: ${response.status} ${response.statusText}`);
+                    }
+                    const result = await response.json();
+                    if (result.choices && result.choices.length > 0) {
+                        const choice = result.choices[0];
+                        return ((_a = choice.message) === null || _a === void 0 ? void 0 : _a.content) || 'No response from GitHub Copilot';
+                    }
+                    return 'No response from GitHub Copilot';
+                }
+                catch (error) {
+                    throw new Error(`GitHub Copilot API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
+                }
+            },
+            _modelType: 'chat_model',
+            _llmType: 'github-copilot',
+            modelName: model,
+            temperature: options.temperature || 0.7,
+            maxTokens: options.maxTokens || 1000,
+            topP: options.topP || 1,
+            enableVision: options.enableVision || true,
+            systemMessage: options.systemMessage || '',
+        };
+        return {
+            response: copilotModel,
+        };
+    }
+}
+exports.GitHubCopilotChatModel = GitHubCopilotChatModel;
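The node advertises a single ai_languageModel output and hands back the model object from supplyData as { response: copilotModel }. As a rough sketch of the calling contract implied by the code above (the wrapper function and type names here are illustrative, not part of the package), a downstream consumer would pass OpenAI-style chat messages to _call and get a plain string back:

```ts
type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string };

// Hypothetical consumer of the value returned by supplyData() above; `supplied`
// stands for the { response: copilotModel } object, and the names are illustrative.
async function askCopilot(
	supplied: { response: { _call: (messages: ChatMessage[]) => Promise<string> } },
): Promise<string> {
	const answer = await supplied.response._call([
		{ role: 'user', content: 'Summarize the open issues in this repository.' },
	]);
	// _call resolves to plain text, or to the fallback 'No response from GitHub Copilot'.
	return answer;
}
```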
package/dist/nodes/GitHubCopilotChatModel/copilot.svg
ADDED
@@ -0,0 +1,34 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24">
+  <defs>
+    <linearGradient id="copilotGradient" x1="0%" y1="0%" x2="100%" y2="100%">
+      <stop offset="0%" style="stop-color:#1f6feb;stop-opacity:1" />
+      <stop offset="100%" style="stop-color:#0969da;stop-opacity:1" />
+    </linearGradient>
+  </defs>
+
+  <!-- GitHub Copilot inspired icon -->
+  <circle cx="12" cy="12" r="11" fill="url(#copilotGradient)" stroke="#ffffff" stroke-width="1"/>
+
+  <!-- Copilot face -->
+  <ellipse cx="12" cy="10" rx="8" ry="6" fill="#ffffff" opacity="0.9"/>
+
+  <!-- Eyes -->
+  <circle cx="9" cy="9" r="1.5" fill="#1f6feb"/>
+  <circle cx="15" cy="9" r="1.5" fill="#1f6feb"/>
+
+  <!-- Light reflection in eyes -->
+  <circle cx="9.5" cy="8.5" r="0.5" fill="#ffffff"/>
+  <circle cx="15.5" cy="8.5" r="0.5" fill="#ffffff"/>
+
+  <!-- Mouth/Interface line -->
+  <path d="M8 12 L16 12" stroke="#1f6feb" stroke-width="1.5" stroke-linecap="round"/>
+
+  <!-- Code brackets -->
+  <path d="M6 15 L8 17 L6 19" stroke="#ffffff" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" fill="none"/>
+  <path d="M18 15 L16 17 L18 19" stroke="#ffffff" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" fill="none"/>
+
+  <!-- AI indicator dots -->
+  <circle cx="10" cy="17" r="0.5" fill="#ffffff" opacity="0.8"/>
+  <circle cx="12" cy="17" r="0.5" fill="#ffffff" opacity="0.6"/>
+  <circle cx="14" cy="17" r="0.5" fill="#ffffff" opacity="0.4"/>
+</svg>
package/dist/nodes/N8nAiAgent/N8nAiAgent.node.d.ts
CHANGED
@@ -1,5 +1,5 @@
-import {
+import { INodeType, INodeTypeDescription } from 'n8n-workflow';
 export declare class N8nAiAgent implements INodeType {
     description: INodeTypeDescription;
-
+    supplyData(this: any, itemIndex: number): Promise<any>;
 }
package/dist/nodes/N8nAiAgent/N8nAiAgent.node.js
CHANGED
@@ -1,214 +1,152 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.N8nAiAgent = void 0;
-const n8n_workflow_1 = require("n8n-workflow");
-const nodeProperties_1 = require("./nodeProperties");
-const mediaDetection_1 = require("../GitHubCopilotChatAPI/utils/mediaDetection");
 class N8nAiAgent {
     constructor() {
         this.description = {
-            displayName: 'N8N AI Agent',
+            displayName: 'N8N AI Agent Model',
             name: 'n8nAiAgent',
             icon: 'file:n8n-ai.svg',
-            group: ['
+            group: ['transform'],
             version: 1,
-
-            description: 'Connect to N8N AI Agent service for advanced AI capabilities with tool calling and memory',
+            description: 'N8N AI Agent chat model for advanced AI capabilities with tool calling and memory',
             defaults: {
-                name: 'N8N AI Agent',
+                name: 'N8N AI Agent Model',
            },
-
-
+            codex: {
+                categories: ['AI'],
+                subcategories: {
+                    AI: ['Language Models', 'Root Nodes'],
+                    'Language Models': ['Chat Models (Recommended)'],
+                },
+                resources: {
+                    primaryDocumentation: [
+                        {
+                            url: 'https://docs.n8n.io/',
+                        },
+                    ],
+                },
+            },
+            inputs: [],
+            outputs: ["ai_languageModel"],
+            outputNames: ['Model'],
             credentials: [
                 {
                     name: 'n8nApi',
                     required: true,
                 },
             ],
-            properties:
+            properties: [
+                {
+                    displayName: 'Model',
+                    name: 'model',
+                    type: 'options',
+                    default: 'n8n-ai-agent',
+                    description: 'The N8N AI Agent model to use',
+                    options: [
+                        {
+                            name: 'N8N AI Agent',
+                            value: 'n8n-ai-agent',
+                            description: 'N8N AI Agent with tool calling and memory capabilities',
+                        },
+                        {
+                            name: 'N8N AI Agent Pro',
+                            value: 'n8n-ai-agent-pro',
+                            description: 'Enhanced version with advanced features',
+                        },
+                    ],
+                },
+                {
+                    displayName: 'Options',
+                    name: 'options',
+                    placeholder: 'Add Option',
+                    description: 'Additional options for the AI model',
+                    type: 'collection',
+                    default: {},
+                    options: [
+                        {
+                            displayName: 'Temperature',
+                            name: 'temperature',
+                            default: 0.7,
+                            typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
+                            description: 'Controls randomness in output. Lower values make responses more focused.',
+                            type: 'number',
+                        },
+                        {
+                            displayName: 'Maximum Number of Tokens',
+                            name: 'maxTokens',
+                            default: 1000,
+                            description: 'The maximum number of tokens to generate',
+                            type: 'number',
+                            typeOptions: {
+                                maxValue: 32768,
+                            },
+                        },
+                        {
+                            displayName: 'Enable Tools',
+                            name: 'enableTools',
+                            type: 'boolean',
+                            default: false,
+                            description: 'Whether to enable tool calling capabilities',
+                        },
+                        {
+                            displayName: 'Enable Memory',
+                            name: 'enableMemory',
+                            type: 'boolean',
+                            default: false,
+                            description: 'Whether to enable conversation memory',
+                        },
+                    ],
+                },
+            ],
         };
     }
-    async
-        const
-        const
-
-
-
-        const
-
-
-
-
-
-
-
-
-
-
-        }
-
-        {
-
-
+    async supplyData(itemIndex) {
+        const credentials = await this.getCredentials('n8nApi');
+        const model = this.getNodeParameter('model', itemIndex);
+        const options = this.getNodeParameter('options', itemIndex, {});
+        const mockModel = {
+            _call: async (messages, options) => {
+                const apiKey = credentials.apiKey;
+                const baseUrl = credentials.baseUrl || 'http://localhost:5678';
+                const requestBody = {
+                    model: model,
+                    messages: messages,
+                    temperature: (options === null || options === void 0 ? void 0 : options.temperature) || 0.7,
+                    max_tokens: (options === null || options === void 0 ? void 0 : options.maxTokens) || 1000,
+                    tools: (options === null || options === void 0 ? void 0 : options.enableTools) || false,
+                    memory: (options === null || options === void 0 ? void 0 : options.enableMemory) || false,
+                };
+                try {
+                    const response = await fetch(`${baseUrl}/api/v1/ai-agent/chat`, {
+                        method: 'POST',
+                        headers: {
+                            'Content-Type': 'application/json',
+                            'Authorization': `Bearer ${apiKey}`,
                         },
-
-                if (includeMedia) {
-                    const mediaSource = this.getNodeParameter('mediaSource', i);
-                    const processedMedia = await (0, mediaDetection_1.processMediaFile)(this, i, mediaSource, this.getNodeParameter('mediaFile', i, ''), this.getNodeParameter('mediaUrl', i, ''), this.getNodeParameter('mediaProperty', i, ''));
-                    if (processedMedia && processedMedia.dataUrl) {
-                        messageContent.push({
-                            type: 'image_url',
-                            image_url: {
-                                url: processedMedia.dataUrl,
-                            },
-                        });
-                    }
-                }
-                messages.push({
-                    role: 'user',
-                    content: messageContent,
+                        body: JSON.stringify(requestBody),
                     });
-
-
-                const historyMessages = this.getNodeParameter('conversationHistory', i, []);
-                messages.splice(-1, 0, ...historyMessages);
+                    if (!response.ok) {
+                        throw new Error(`API request failed: ${response.statusText}`);
                     }
-                const
-
-                const temperature = this.getNodeParameter('temperature', i, 0.7);
-                const enableTools = this.getNodeParameter('enableTools', i, false);
-                if (maxTokens > 0) {
-                    advancedOptions.max_tokens = maxTokens;
-                }
-                advancedOptions.temperature = temperature;
-                if (enableTools) {
-                    const toolsConfig = this.getNodeParameter('toolsConfig', i, {});
-                    if (toolsConfig.tools && Array.isArray(toolsConfig.tools)) {
-                        advancedOptions.tools = toolsConfig.tools;
-                    }
-                }
-                const requestBody = {
-                    model,
-                    messages,
-                    stream: false,
-                    ...advancedOptions,
-                };
-                const response = await makeN8nAiAgentRequest(this, '/chat', requestBody, includeMedia);
-                const result = {
-                    message: response.response || response.message || '',
-                    model,
-                    operation,
-                    usage: response.usage || null,
-                    tool_calls: response.tool_calls || null,
-                    memory: response.memory || null,
-                    finish_reason: response.finish_reason || 'completed',
-                };
-                returnData.push({
-                    json: result,
-                    pairedItem: { item: i },
-                });
+                    const result = await response.json();
+                    return result.response || result.message || 'No response from AI agent';
                 }
-
-
-                const toolArguments = this.getNodeParameter('toolArguments', i, {});
-                const context = this.getNodeParameter('context', i, '');
-                const requestBody = {
-                    tool: toolName,
-                    arguments: toolArguments,
-                    context,
-                    model,
-                };
-                const response = await makeN8nAiAgentRequest(this, '/tools', requestBody, false);
-                const result = {
-                    tool_name: toolName,
-                    result: response.result || response.response,
-                    execution_time: response.execution_time || null,
-                    operation,
-                    model,
-                };
-                returnData.push({
-                    json: result,
-                    pairedItem: { item: i },
-                });
+                catch (error) {
+                    return `N8N AI Agent response using model: ${model} (${error instanceof Error ? error.message : 'Error'})`;
                 }
-
-
-
-
-
-
-
-
-
-
-
-
-                const result = {
-                    action: memoryAction,
-                    session_id: sessionId,
-                    data: response.data || response.memory,
-                    operation,
-                };
-                returnData.push({
-                    json: result,
-                    pairedItem: { item: i },
-                });
-            }
-        }
-        catch (error) {
-            if (this.continueOnFail()) {
-                const errorMessage = error instanceof Error ? error.message : 'Unknown error';
-                returnData.push({
-                    json: {
-                        error: errorMessage,
-                        operation: this.getNodeParameter('operation', i),
-                        model: this.getNodeParameter('model', i),
-                    },
-                    pairedItem: { item: i },
-                });
-            }
-            else {
-                throw error;
-            }
-        }
-        }
-        return [returnData];
+            },
+            _modelType: 'chat_model',
+            _llmType: 'n8n-ai-agent',
+            modelName: model,
+            temperature: options.temperature || 0.7,
+            maxTokens: options.maxTokens || 1000,
+            enableTools: options.enableTools || false,
+            enableMemory: options.enableMemory || false,
+        };
+        return {
+            response: mockModel,
+        };
     }
 }
 exports.N8nAiAgent = N8nAiAgent;
-async function makeN8nAiAgentRequest(context, endpoint, requestBody, hasMedia) {
-    const credentials = await context.getCredentials('n8nApi');
-    const baseUrl = credentials.baseUrl || 'http://localhost:5678';
-    const apiKey = credentials.apiKey;
-    if (!apiKey) {
-        throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'N8N API key is required');
-    }
-    const options = {
-        method: 'POST',
-        headers: {
-            'Content-Type': 'application/json',
-            'Authorization': `Bearer ${apiKey}`,
-            'User-Agent': 'N8nAiAgentNode/1.0',
-            ...(hasMedia && { 'AI-Vision-Request': 'true' }),
-        },
-        body: JSON.stringify(requestBody),
-        uri: `${baseUrl}/api/v1/ai-agent${endpoint}`,
-        json: true,
-    };
-    try {
-        const response = await context.helpers.request(options);
-        return response;
-    }
-    catch (error) {
-        const apiError = error;
-        if (apiError.statusCode === 401) {
-            throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'Invalid N8N API key');
-        }
-        else if (apiError.statusCode === 404) {
-            throw new n8n_workflow_1.NodeOperationError(context.getNode(), 'N8N AI Agent endpoint not found. Make sure AI Agent is enabled in your N8N instance.');
-        }
-        else {
-            throw new n8n_workflow_1.NodeOperationError(context.getNode(), `N8N AI Agent API error: ${apiError.message || String(error)}`);
-        }
-    }
-}
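In the compiled _call above, expressions such as (options === null || options === void 0 ? void 0 : options.temperature) are TypeScript's down-leveled form of optional chaining. A small sketch of the source-level equivalent of the request-body construction (the interface and helper name are illustrative, not from the package):

```ts
// Sketch of the source-level equivalent of the compiled requestBody construction above.
interface AgentCallOptions {
	temperature?: number;
	maxTokens?: number;
	enableTools?: boolean;
	enableMemory?: boolean;
}

function buildAgentRequestBody(model: string, messages: unknown[], options?: AgentCallOptions) {
	return {
		model,
		messages,
		temperature: options?.temperature || 0.7, // emitted as (options === null || options === void 0 ? ...)
		max_tokens: options?.maxTokens || 1000,
		tools: options?.enableTools || false,
		memory: options?.enableMemory || false,
	};
}
```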
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "n8n-nodes-github-copilot",
-  "version": "3.
-  "description": "n8n community node for GitHub Copilot
+  "version": "3.4.0",
+  "description": "n8n community node for GitHub Copilot with CLI integration, Chat API access, and AI Chat Model for workflows - access GPT-5, Claude, Gemini and more using your Copilot subscription",
   "license": "MIT",
   "homepage": "https://github.com/sufficit/n8n-nodes-github-copilot",
   "author": {
@@ -33,7 +33,7 @@
     "nodes": [
       "dist/nodes/GitHubCopilot/GitHubCopilot.node.js",
       "dist/nodes/GitHubCopilotChatAPI/GitHubCopilotChatAPI.node.js",
-      "dist/nodes/
+      "dist/nodes/GitHubCopilotChatModel/GitHubCopilotChatModel.node.js"
     ]
   },
   "keywords": [
package/dist/nodes/N8nAiAgent/n8n-ai.svg
DELETED
@@ -1,35 +0,0 @@
-<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" width="100" height="100">
-  <!-- Background circle -->
-  <circle cx="50" cy="50" r="45" fill="#ff6d5a" stroke="#e55a4a" stroke-width="2"/>
-
-  <!-- N8N Logo style -->
-  <g transform="translate(50,50)" fill="white">
-    <!-- Brain/Neural network pattern -->
-    <circle cx="-15" cy="-15" r="3" opacity="0.9"/>
-    <circle cx="0" cy="-20" r="3" opacity="0.9"/>
-    <circle cx="15" cy="-15" r="3" opacity="0.9"/>
-    <circle cx="-20" cy="0" r="3" opacity="0.9"/>
-    <circle cx="0" cy="0" r="4" opacity="1"/>
-    <circle cx="20" cy="0" r="3" opacity="0.9"/>
-    <circle cx="-15" cy="15" r="3" opacity="0.9"/>
-    <circle cx="0" cy="20" r="3" opacity="0.9"/>
-    <circle cx="15" cy="15" r="3" opacity="0.9"/>
-
-    <!-- Connections -->
-    <line x1="-15" y1="-15" x2="0" y2="-20" stroke="white" stroke-width="1" opacity="0.6"/>
-    <line x1="0" y1="-20" x2="15" y2="-15" stroke="white" stroke-width="1" opacity="0.6"/>
-    <line x1="-20" y1="0" x2="-15" y2="-15" stroke="white" stroke-width="1" opacity="0.6"/>
-    <line x1="-20" y1="0" x2="0" y2="0" stroke="white" stroke-width="2" opacity="0.8"/>
-    <line x1="0" y1="0" x2="20" y2="0" stroke="white" stroke-width="2" opacity="0.8"/>
-    <line x1="20" y1="0" x2="15" y2="15" stroke="white" stroke-width="1" opacity="0.6"/>
-    <line x1="0" y1="0" x2="0" y2="20" stroke="white" stroke-width="2" opacity="0.8"/>
-    <line x1="-15" y1="15" x2="0" y2="20" stroke="white" stroke-width="1" opacity="0.6"/>
-    <line x1="0" y1="20" x2="15" y2="15" stroke="white" stroke-width="1" opacity="0.6"/>
-
-    <!-- AI indicator -->
-    <text x="0" y="35" text-anchor="middle" font-family="Arial, sans-serif" font-size="12" font-weight="bold" fill="white">AI</text>
-  </g>
-
-  <!-- N8N text at bottom -->
-  <text x="50" y="85" text-anchor="middle" font-family="Arial, sans-serif" font-size="10" font-weight="bold" fill="white">N8N</text>
-</svg>
package/dist/nodes/N8nAiAgent/nodeProperties.js
DELETED
@@ -1,432 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.nodeProperties = void 0;
-exports.nodeProperties = [
-    {
-        displayName: 'Operation',
-        name: 'operation',
-        type: 'options',
-        noDataExpression: true,
-        options: [
-            {
-                name: 'Chat',
-                value: 'chat',
-                description: 'Send a chat message to N8N AI Agent',
-                action: 'Send a chat message',
-            },
-            {
-                name: 'Use Tools',
-                value: 'tools',
-                description: 'Execute specific tools through AI Agent',
-                action: 'Execute tools',
-            },
-            {
-                name: 'Memory',
-                value: 'memory',
-                description: 'Manage AI Agent memory/context',
-                action: 'Manage memory',
-            },
-        ],
-        default: 'chat',
-    },
-    {
-        displayName: 'AI Model',
-        name: 'model',
-        type: 'options',
-        options: [
-            {
-                name: 'GPT-4 Turbo',
-                value: 'gpt-4-turbo',
-                description: 'OpenAI GPT-4 Turbo - Latest and most capable',
-            },
-            {
-                name: 'GPT-4',
-                value: 'gpt-4',
-                description: 'OpenAI GPT-4 - High quality reasoning',
-            },
-            {
-                name: 'GPT-3.5 Turbo',
-                value: 'gpt-3.5-turbo',
-                description: 'OpenAI GPT-3.5 Turbo - Fast and efficient',
-            },
-            {
-                name: 'Claude 3 Opus',
-                value: 'claude-3-opus',
-                description: 'Anthropic Claude 3 Opus - Superior reasoning',
-            },
-            {
-                name: 'Claude 3 Sonnet',
-                value: 'claude-3-sonnet',
-                description: 'Anthropic Claude 3 Sonnet - Balanced performance',
-            },
-            {
-                name: 'Claude 3 Haiku',
-                value: 'claude-3-haiku',
-                description: 'Anthropic Claude 3 Haiku - Fast responses',
-            },
-            {
-                name: 'Gemini Pro',
-                value: 'gemini-pro',
-                description: 'Google Gemini Pro - Multimodal capabilities',
-            },
-        ],
-        default: 'gpt-4-turbo',
-        description: 'Select the AI model to use',
-    },
-    {
-        displayName: 'Message',
-        name: 'message',
-        type: 'string',
-        typeOptions: {
-            rows: 3,
-        },
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-            },
-        },
-        default: '',
-        placeholder: 'Enter your message here...',
-        description: 'The message to send to the AI Agent',
-        required: true,
-    },
-    {
-        displayName: 'System Message',
-        name: 'systemMessage',
-        type: 'string',
-        typeOptions: {
-            rows: 2,
-        },
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-            },
-        },
-        default: '',
-        placeholder: 'You are a helpful AI assistant...',
-        description: 'System prompt to set the AI behavior and context',
-    },
-    {
-        displayName: 'Include Media',
-        name: 'includeMedia',
-        type: 'boolean',
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-            },
-        },
-        default: false,
-        description: 'Whether to include images in the chat message',
-    },
-    {
-        displayName: 'Media Source',
-        name: 'mediaSource',
-        type: 'options',
-        options: [
-            {
-                name: 'Upload File',
-                value: 'manual',
-                description: 'Upload an image file directly',
-            },
-            {
-                name: 'From URL',
-                value: 'url',
-                description: 'Use an image from URL',
-            },
-            {
-                name: 'From Binary Data',
-                value: 'binary',
-                description: 'Use image from previous node binary data',
-            },
-        ],
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-                includeMedia: [true],
-            },
-        },
-        default: 'manual',
-        description: 'Source of the media file',
-    },
-    {
-        displayName: 'Image File',
-        name: 'mediaFile',
-        type: 'string',
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-                includeMedia: [true],
-                mediaSource: ['manual'],
-            },
-        },
-        default: '',
-        placeholder: 'Paste base64 image data...',
-        description: 'Base64 encoded image data',
-    },
-    {
-        displayName: 'Image URL',
-        name: 'mediaUrl',
-        type: 'string',
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-                includeMedia: [true],
-                mediaSource: ['url'],
-            },
-        },
-        default: '',
-        placeholder: 'https://example.com/image.jpg',
-        description: 'URL of the image to analyze',
-    },
-    {
-        displayName: 'Binary Property',
-        name: 'mediaProperty',
-        type: 'string',
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-                includeMedia: [true],
-                mediaSource: ['binary'],
-            },
-        },
-        default: 'data',
-        placeholder: 'data',
-        description: 'Name of the binary property containing the image',
-    },
-    {
-        displayName: 'Include Conversation History',
-        name: 'includeHistory',
-        type: 'boolean',
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-            },
-        },
-        default: false,
-        description: 'Include previous messages for context',
-    },
-    {
-        displayName: 'Conversation History',
-        name: 'conversationHistory',
-        type: 'json',
-        typeOptions: {
-            rows: 4,
-        },
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-                includeHistory: [true],
-            },
-        },
-        default: '[]',
-        placeholder: '[{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi there!"}]',
-        description: 'Previous messages in OpenAI chat format',
-    },
-    {
-        displayName: 'Enable Tools',
-        name: 'enableTools',
-        type: 'boolean',
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-            },
-        },
-        default: false,
-        description: 'Allow AI to use tools and function calling',
-    },
-    {
-        displayName: 'Tools Configuration',
-        name: 'toolsConfig',
-        type: 'json',
-        typeOptions: {
-            rows: 6,
-        },
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-                enableTools: [true],
-            },
-        },
-        default: '{"tools": []}',
-        placeholder: '{"tools": [{"type": "function", "function": {"name": "get_weather", "description": "Get weather info"}}]}',
-        description: 'Tools available to the AI Agent',
-    },
-    {
-        displayName: 'Tool Name',
-        name: 'toolName',
-        type: 'string',
-        displayOptions: {
-            show: {
-                operation: ['tools'],
-            },
-        },
-        default: '',
-        placeholder: 'get_weather',
-        description: 'Name of the tool to execute',
-        required: true,
-    },
-    {
-        displayName: 'Tool Arguments',
-        name: 'toolArguments',
-        type: 'json',
-        typeOptions: {
-            rows: 3,
-        },
-        displayOptions: {
-            show: {
-                operation: ['tools'],
-            },
-        },
-        default: '{}',
-        placeholder: '{"location": "São Paulo", "units": "celsius"}',
-        description: 'Arguments to pass to the tool',
-    },
-    {
-        displayName: 'Context',
-        name: 'context',
-        type: 'string',
-        typeOptions: {
-            rows: 2,
-        },
-        displayOptions: {
-            show: {
-                operation: ['tools'],
-            },
-        },
-        default: '',
-        placeholder: 'User is asking about weather in their city...',
-        description: 'Context for tool execution',
-    },
-    {
-        displayName: 'Memory Action',
-        name: 'memoryAction',
-        type: 'options',
-        options: [
-            {
-                name: 'Store',
-                value: 'store',
-                description: 'Store data in AI Agent memory',
-            },
-            {
-                name: 'Retrieve',
-                value: 'retrieve',
-                description: 'Retrieve data from AI Agent memory',
-            },
-            {
-                name: 'Clear',
-                value: 'clear',
-                description: 'Clear AI Agent memory',
-            },
-        ],
-        displayOptions: {
-            show: {
-                operation: ['memory'],
-            },
-        },
-        default: 'retrieve',
-        description: 'Action to perform on AI Agent memory',
-    },
-    {
-        displayName: 'Session ID',
-        name: 'sessionId',
-        type: 'string',
-        displayOptions: {
-            show: {
-                operation: ['memory'],
-            },
-        },
-        default: '',
-        placeholder: 'user-123',
-        description: 'Unique session identifier for memory isolation',
-    },
-    {
-        displayName: 'Memory Data',
-        name: 'memoryData',
-        type: 'json',
-        typeOptions: {
-            rows: 3,
-        },
-        displayOptions: {
-            show: {
-                operation: ['memory'],
-                memoryAction: ['store'],
-            },
-        },
-        default: '{}',
-        placeholder: '{"user_preferences": {"language": "pt-BR", "timezone": "America/Sao_Paulo"}}',
-        description: 'Data to store in memory',
-    },
-    {
-        displayName: 'Advanced Options',
-        name: 'advancedOptions',
-        type: 'collection',
-        placeholder: 'Add Option',
-        default: {},
-        displayOptions: {
-            show: {
-                operation: ['chat'],
-            },
-        },
-        options: [
-            {
-                displayName: 'Max Tokens',
-                name: 'maxTokens',
-                type: 'number',
-                default: 1000,
-                description: 'Maximum number of tokens to generate (0 for model default)',
-                typeOptions: {
-                    minValue: 0,
-                    maxValue: 4000,
-                },
-            },
-            {
-                displayName: 'Temperature',
-                name: 'temperature',
-                type: 'number',
-                default: 0.7,
-                description: 'Controls randomness (0.0 = deterministic, 1.0 = very random)',
-                typeOptions: {
-                    minValue: 0,
-                    maxValue: 1,
-                    numberPrecision: 2,
-                },
-            },
-            {
-                displayName: 'Top P',
-                name: 'topP',
-                type: 'number',
-                default: 1,
-                description: 'Controls diversity via nucleus sampling',
-                typeOptions: {
-                    minValue: 0.01,
-                    maxValue: 1,
-                    numberPrecision: 2,
-                },
-            },
-            {
-                displayName: 'Frequency Penalty',
-                name: 'frequencyPenalty',
-                type: 'number',
-                default: 0,
-                description: 'Reduces repetition of words',
-                typeOptions: {
-                    minValue: -2,
-                    maxValue: 2,
-                    numberPrecision: 2,
-                },
-            },
-            {
-                displayName: 'Presence Penalty',
-                name: 'presencePenalty',
-                type: 'number',
-                default: 0,
-                description: 'Encourages new topics',
-                typeOptions: {
-                    minValue: -2,
-                    maxValue: 2,
-                    numberPrecision: 2,
-                },
-            },
-        ],
-    },
-];