node-red-contrib-ai-agent 0.0.3 → 0.0.4

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/agent/ai-agent.js CHANGED
@@ -1,216 +1,326 @@
  const axios = require('axios');
 
+ /**
+ * Helper functions for AI Agent Node
+ */
+
+ // Validate AI configuration
+ function validateAIConfig(aiagent) {
+ if (!aiagent) return 'AI configuration missing. Ensure an AI Model node is connected.';
+ if (!aiagent.model) return 'AI model not specified. Please configure the AI Model node with a valid model.';
+ if (!aiagent.apiKey) return 'API key not found. Please configure the AI Model node with a valid API key.';
+ return null; // No errors
+ }
+
+ /**
+ * Creates and returns a message object
+ * @param {string} role - The role of the message (e.g., 'user', 'assistant', 'system')
+ * @param {string} content - The content of the message
+ * @returns {Object} - The message object
+ */
+ function createMessage(role, content) {
+ return {
+ role: role,
+ content: content,
+ timestamp: new Date().toISOString(),
+ type: 'conversation'
+ };
+ }
+
+ /**
+ * Prepares the prompt with context if memory is available
+ * @param {Object} node - The AI Agent node
+ * @param {Object} msg - The input message
+ * @param {string} inputText - The input text
+ * @returns {Object} - The prepared prompt object
+ */
+ function preparePrompt(node, msg, inputText) {
+ const messages = [{ role: 'system', content: node.systemPrompt }];
+ let userMessage = null;
+
+ // Add context if using memory
+ if (msg.aimemory) {
+ if (!msg.aimemory.context) {
+ throw new Error('Memory not properly initialized. Ensure a memory node is connected.');
+ }
+
+ // Ensure memory has required structure
+ msg.aimemory.context = msg.aimemory.context || [];
+ msg.aimemory.maxItems = msg.aimemory.maxItems || 1000;
+
+ // Add conversation context
+ messages.push(...msg.aimemory.context);
+
+ // Create and store user message for later context update
+ userMessage = createMessage('user', inputText);
+ }
+
+ // Add current user input
+ messages.push({ role: 'user', content: inputText });
+
+ return { messages, userMessage };
+ }
+
+ /**
+ * Updates the conversation context with new messages
+ * @param {Object} msg - The input message
+ * @param {Object} userMessage - The user message
+ * @param {string} assistantResponse - The assistant response
+ */
+ function updateContext(msg, userMessage, assistantResponse) {
+ if (!msg.aimemory?.context) return;
+
+ const assistantMessage = createMessage('assistant', assistantResponse);
+ const newContext = [...msg.aimemory.context, userMessage, assistantMessage];
+ const maxItems = msg.aimemory.maxItems || 1000;
+
+ msg.aimemory.context = newContext.slice(-maxItems);
+ }
+
+ /**
+ * Handles errors consistently
+ * @param {Object} node - The AI Agent node
+ * @param {Object} msg - The input message
+ * @param {Error} error - The error object
+ */
+ function handleError(node, msg, error) {
+ const errorMsg = error.response?.data?.error?.message || error.message || 'Unknown error';
+ node.status({ fill: 'red', shape: 'ring', text: 'Error' });
+ node.error('AI Agent Error: ' + errorMsg, msg);
+ }
+
+ /**
+ * Formats tools for the OpenAI/OpenRouter API
+ * @param {Array} tools - Array of tool definitions
+ * @returns {Array} - Formatted tools for the API
+ */
+ function formatToolsForAPI(tools) {
+ return tools.map(tool => {
+ const type = tool.type || 'function';
+ const fn = tool.function || {};
+ const name = fn.name || 'function';
+ const description = fn.description || 'function';
+ const parameters = fn.parameters || {};
+ return {
+ type: type,
+ function: {
+ name: name,
+ description: description,
+ parameters: parameters || {
+ type: 'object',
+ properties: {},
+ required: []
+ }
+ }
+ };
+ });
+ }
+
+ /**
+ * Calls the AI with proper error handling
+ * @param {Object} node - The AI Agent node
+ * @param {Object} aiConfig - The AI configuration object
+ * @param {Array} messages - The messages to send to the AI
+ * @returns {Promise<string>} - The AI response
+ */
+ async function callAI(node, aiConfig, messages) {
+ const hasTools = aiConfig.tools && Array.isArray(aiConfig.tools) && aiConfig.tools.length > 0;
+ const tools = hasTools ? aiConfig.tools : [];
+ const toolChoice = hasTools ? 'auto' : 'none';
+
+ node.warn(`Calling ${aiConfig.model} with ${tools.length} tools and ${toolChoice} tool choice`);
+
+ try {
+ node.status({ fill: 'blue', shape: 'dot', text: `Calling ${aiConfig.model}...` });
+
+ // Prepare request payload
+ const requestPayload = {
+ model: aiConfig.model,
+ temperature: aiConfig.temperature,
+ // max_tokens: aiConfig.maxTokens,
+ messages: messages,
+ };
+
+ // Add tools if available
+ if (hasTools) {
+ node.warn('Adding tools: ' + JSON.stringify(tools, null, 2));
+ requestPayload.tools = formatToolsForAPI(tools);
+ requestPayload.tool_choice = toolChoice;
+ }
+
+ node.warn(JSON.stringify(requestPayload, null, 2));
+
+ const response = await axios.post(
+ 'https://openrouter.ai/api/v1/chat/completions',
+ requestPayload,
+ {
+ headers: {
+ 'Authorization': `Bearer ${aiConfig.apiKey}`,
+ 'Content-Type': 'application/json',
+ 'HTTP-Referer': 'https://nodered.org/',
+ 'X-Title': 'Node-RED AI Agent'
+ }
+ }
+ );
+
+ // Check if the response contains tool calls
+ const responseMessage = response.data.choices[0]?.message;
+
+ node.warn(JSON.stringify(responseMessage, null, 2));
+
+ if (responseMessage?.tool_calls && aiConfig.tools) {
+ // Process tool calls
+ if (node.warn) node.warn('Processing tool calls');
+ return await processToolCalls(node, responseMessage, aiConfig.tools, messages, aiConfig);
+ }
+
+ node.warn('Processing response');
+ return responseMessage?.content?.trim() || '';
+
+ } catch (error) {
+ const errorMsg = error.response?.data?.error?.message || error.message;
+ throw new Error(`AI API Error: ${errorMsg}`);
+ }
+ }
+
+ /**
+ * Helper function to process tool calls from AI response
+ * @param {Object} node - The Node-RED node instance
+ * @param {Object} responseMessage - The AI response message containing tool calls
+ * @param {Array} tools - Array of available tools
+ * @param {Array} messages - Conversation messages
+ * @returns {Promise<string>} - Result of tool executions
+ */
+ async function processToolCalls(node, responseMessage, tools, messages, aiConfig) {
+ try {
+ const toolCalls = responseMessage.tool_calls || [];
+ let toolResults = [];
+ if (node && node.warn) {
+ node.warn('Processing tool calls: ' + JSON.stringify(toolCalls, null, 2));
+ }
+
+ // Process each tool call
+ for (const toolCall of toolCalls) {
+ const { id, function: fn } = toolCall;
+ const { name, arguments: args } = fn;
+
+ // Find the matching tool
+ const tool = tools.find(t => t.function?.name === name);
+ if (!tool) {
+ toolResults.push({
+ tool_call_id: id,
+ role: 'tool',
+ name,
+ content: JSON.stringify({ error: `Tool '${name}' not found` })
+ });
+ continue;
+ }
+
+ // Execute the tool
+ try {
+ const parsedArgs = typeof args === 'string' ? JSON.parse(args) : args;
+ const result = await tool.execute(parsedArgs);
+
+ toolResults.push({
+ tool_call_id: id,
+ role: 'tool',
+ name,
+ content: typeof result === 'string' ? result : JSON.stringify(result)
+ });
+ } catch (error) {
+ toolResults.push({
+ tool_call_id: id,
+ role: 'tool',
+ name,
+ content: JSON.stringify({ error: error.message })
+ });
+ }
+ }
+
+ // Add tool results to the messages array
+ const updatedMessages = [...messages, responseMessage, ...toolResults];
+
+ // Make a new API call to let the AI process the tool results
+ const aiResponse = await callAI(node, { ...aiConfig, tools: null }, updatedMessages);
+
+ // Return the final AI response
+ return aiResponse;
+ } catch (error) {
+ return `Error processing tool calls: ${error.message}`;
+ }
+ }
+
  module.exports = function (RED) {
+ /**
+ * AI Agent Node
+ * @param {Object} config - The node configuration object
+ */
  function AiAgentNode(config) {
- RED.nodes.createNode(this, config)
- var node = this
+ RED.nodes.createNode(this, config);
+ const node = this;
 
  // Configuration
- this.agentName = config.name || 'AI Agent'
- this.systemPrompt = config.systemPrompt || 'You are a helpful AI assistant.'
- this.responseType = config.responseType || 'text'
-
- // Initialize agent state
- this.context = {
- conversation: [],
- lastInteraction: null
- }
+ this.agentName = config.name || 'AI Agent';
+ this.systemPrompt = config.systemPrompt || 'You are a helpful AI assistant.';
+ this.responseType = config.responseType || 'text';
 
- // Default responses
- this.defaultResponse = 'I understand you said: '
-
  // Handle node cleanup
  node.on('close', function (done) {
- // Clean up any resources here
- node.status({})
- if (done) done()
- })
-
+ node.status({});
+ if (done) done();
+ });
+
  // Process incoming messages
  node.on('input', async function (msg, send, done) {
- // Set status to processing
  node.status({ fill: 'blue', shape: 'dot', text: 'processing...' });
-
- // Validate AI configuration
- const validateAIConfig = () => {
- if (!msg.aiagent || !msg.aiagent.model || !msg.aiagent.apiKey) {
- const errorMsg = 'Missing required AI configuration. Ensure an AI Model node is properly connected and configured.';
- node.status({ fill: 'red', shape: 'ring', text: 'Error: Missing AI config' });
- node.error(errorMsg, msg);
- throw new Error(errorMsg);
- }
- };
-
+
  try {
- validateAIConfig();
+ // 1. Validate AI configuration
+ const validationError = validateAIConfig(msg.aiagent);
+ if (validationError) {
+ throw new Error(validationError);
+ }
 
- // Get input from message or use default
+ // 2. Get input
  const input = msg.payload || {};
  const inputText = typeof input === 'string' ? input : JSON.stringify(input);
 
- // Update conversation context
- node.context.lastInteraction = new Date();
- node.context.conversation.push({
- role: 'user',
- content: inputText,
- timestamp: node.context.lastInteraction
- });
+ // 3. Prepare prompt with context
+ const { messages, userMessage } = preparePrompt(node, msg, inputText);
 
- let response;
+ // 4. Execute the prompt and get response
+ const response = await callAI(node, msg.aiagent, messages);
 
- try {
- // Use OpenRouter for AI responses
- response = await generateAIResponse.call(node, inputText, msg.aiagent);
- } catch (error) {
- const errorMsg = error.response?.data?.error?.message || error.message;
- node.status({fill:"red", shape:"ring", text:"API Error: " + (errorMsg || 'Unknown error').substring(0, 30)});
- node.error('OpenRouter API Error: ' + errorMsg, msg);
- if (done) done(error);
- return;
+ // 5. Update context if using memory
+ if (msg.aimemory && userMessage) {
+ updateContext(msg, userMessage, response);
  }
 
- // Update context with AI response
- node.context.conversation.push({ role: 'assistant', content: response, timestamp: new Date() })
-
- // Format response based on configuration
- if (node.responseType === 'object') {
- msg.payload = {
- agent: node.agentName,
- type: 'ai',
- input: input,
- response: response,
- timestamp: new Date().toISOString(),
- context: {
- conversationLength: node.context.conversation.length,
- lastInteraction: node.context.lastInteraction
- }
+ // 6. Format and send response
+ msg.payload = node.responseType === 'object' ? {
+ agent: node.agentName,
+ type: 'ai',
+ input: input,
+ response: response,
+ timestamp: new Date().toISOString(),
+ context: {
+ conversationLength: msg.aimemory?.context?.length || 0,
+ lastInteraction: new Date().toISOString(),
+ ...(msg.aimemory && { aimemory: msg.aimemory })
  }
- } else {
- msg.payload = response
- }
-
- // Send the message
- send(msg)
+ } : response;
 
- // Update status
- node.status({ fill: 'green', shape: 'dot', text: 'ready' })
+ send(msg);
+ node.status({ fill: 'green', shape: 'dot', text: 'ready' });
 
- // Complete processing
- if (done) {
- done()
- }
  } catch (error) {
- // Handle errors
- const errorMsg = error.message || 'Unknown error occurred'
- node.status({ fill: 'red', shape: 'ring', text: 'error' })
- node.error('Error in AI Agent: ' + errorMsg, msg)
- if (done) {
- done(error)
- }
+ handleError(node, msg, error);
+ } finally {
+ if (done) done();
  }
- })
-
- // Generate AI response using OpenRouter API
- async function generateAIResponse(input, aiConfig) {
- const messages = [
- {
- role: 'system',
- content: node.systemPrompt
- },
- {
- role: 'user',
- content: input
- }
- ];
-
- node.status({fill:"blue", shape:"dot", text:"Calling " + aiConfig.model + "..."});
-
- // Prepare tools array if available
- const tools = aiConfig.tools ? aiConfig.tools.map(tool => ({
- type: 'function',
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters || {}
- }
- })) : [];
-
- // Initial API call
- const response = await axios.post(
- 'https://openrouter.ai/api/v1/chat/completions',
- {
- model: aiConfig.model,
- messages: messages,
- temperature: aiConfig.temperature || 0.7,
- max_tokens: aiConfig.maxTokens || 1000,
- tools: tools.length > 0 ? tools : undefined,
- tool_choice: tools.length > 0 ? 'auto' : 'none'
- },
- {
- headers: {
- 'Authorization': `Bearer ${aiConfig.apiKey}`,
- 'Content-Type': 'application/json',
- 'HTTP-Referer': 'https://nodered.org/',
- 'X-Title': 'Node-RED AI Agent'
- }
- }
- );
-
- const responseMessage = response.data.choices[0].message;
-
- // Check if the model wants to call a tool
- const toolCalls = responseMessage.tool_calls;
- if (toolCalls && toolCalls.length > 0) {
- // Process each tool call
- for (const toolCall of toolCalls) {
- const functionName = toolCall.function.name;
- const functionArgs = JSON.parse(toolCall.function.arguments || '{}');
-
- // Find the tool
- const tool = aiConfig.tools?.find(t => t.name === functionName);
- if (!tool) {
- throw new Error(`Tool ${functionName} not found`);
- }
-
- // Execute the tool
- const toolResponse = await tool.execute(functionArgs);
-
- // Add the tool response to the messages
- messages.push({
- role: 'tool',
- content: JSON.stringify(toolResponse),
- tool_call_id: toolCall.id
- });
- }
-
- // Make a second request with the tool responses
- const secondResponse = await axios.post(
- 'https://openrouter.ai/api/v1/chat/completions',
- {
- model: aiConfig.model,
- messages: messages,
- temperature: aiConfig.temperature || 0.7,
- max_tokens: aiConfig.maxTokens || 1000,
- tools: tools,
- tool_choice: 'none' // Force the model to respond normally
- },
- {
- headers: {
- 'Authorization': `Bearer ${aiConfig.apiKey}`,
- 'Content-Type': 'application/json',
- 'HTTP-Referer': 'https://nodered.org/',
- 'X-Title': 'Node-RED AI Agent'
- }
- }
- );
-
- return secondResponse.data.choices[0].message.content.trim();
- }
-
- return responseMessage.content.trim();
- }
+ });
  }
 
- // Registering the node-red type
- RED.nodes.registerType('ai-agent', AiAgentNode)
- }
+ // Register the node type
+ RED.nodes.registerType('ai-agent', AiAgentNode);
+ };
(new file: editor HTML for the ai-memory-file node)
@@ -0,0 +1,63 @@
+ <script type="text/javascript">
+ RED.nodes.registerType('ai-memory-file', {
+ category: 'AI Agent',
+ paletteLabel: 'AI Memory (File)',
+ color: '#a6bbcf',
+ defaults: {
+ name: { value: "" },
+ filename: {
+ value: "ai-memories.json",
+ required: true,
+ validate: function(v) {
+ return v.length > 0;
+ }
+ }
+ },
+ inputs: 1,
+ outputs: 1,
+ icon: "file.png",
+ label: function() {
+ return this.name || "AI Memory (File)";
+ },
+ labelStyle: function() {
+ return this.name ? "node_label_italic" : "";
+ },
+ oneditprepare: function() {
+ // Initialize any UI components here
+ },
+ oneditsave: function() {
+ // Handle save if needed
+ },
+ oneditcancel: function() {
+ // Cleanup if needed
+ }
+ });
+ </script>
+
+ <script type="text/html" data-template-name="ai-memory-file">
+ <div class="form-row">
+ <label for="node-input-name"><i class="fa fa-tag"></i> Name</label>
+ <input type="text" id="node-input-name" placeholder="Name">
+ </div>
+ <div class="form-row">
+ <label for="node-input-filename"><i class="fa fa-file"></i> Filename</label>
+ <input type="text" id="node-input-filename" placeholder="ai-memories.json">
+ </div>
+ <div class="form-tips">
+ <p>Memories will be stored in Node-RED's user directory.</p>
+ </div>
+ </script>
+
+ <script type="text/html" data-help-name="ai-memory-file">
+ <p>Stores AI memories in a JSON file.</p>
+ <h3>Inputs</h3>
+ <dl class="message-properties">
+ <dt>payload <span>object|string</span></dt>
+ <dd>The message to store or retrieve from memory.</dd>
+ </dl>
+ <h3>Outputs</h3>
+ <dl class="message-properties">
+ <dt>payload <span>object|string</span></dt>
+ <dd>The processed message with memory operations applied.</dd>
+ </dl>
+ </script>
(new file: runtime module for the ai-memory-file node)
@@ -0,0 +1,62 @@
+ module.exports = function(RED) {
+ 'use strict';
+
+ function MemoryFileNode(config) {
+ RED.nodes.createNode(this, config);
+ const node = this;
+
+ // Configuration
+ node.name = config.name || 'AI Memory (File)';
+ node.filename = config.filename || 'ai-memories.json';
+
+ // Initialize empty memories array
+ node.memories = [];
+
+ // Load existing memories from file if they exist
+ const fs = require('fs');
+ const path = require('path');
+ const filePath = path.join(RED.settings.userDir, node.filename);
+
+ try {
+ if (fs.existsSync(filePath)) {
+ const data = fs.readFileSync(filePath, 'utf8');
+ node.memories = JSON.parse(data);
+ node.status({fill:"green",shape:"dot",text:"Ready"});
+ } else {
+ node.status({fill:"blue",shape:"ring",text:"New file will be created"});
+ }
+ } catch (err) {
+ node.error("Error loading memory file: " + err.message);
+ node.status({fill:"red",shape:"ring",text:"Error loading"});
+ }
+
+ // Handle incoming messages
+ node.on('input', function(msg) {
+ try {
+ // For now, just pass through the message
+ // We'll add memory operations in the next iteration
+ node.send(msg);
+
+ // Update status
+ node.status({fill:"green",shape:"dot",text:node.memories.length + " memories"});
+ } catch (err) {
+ node.error("Error in memory node: " + err.message, msg);
+ node.status({fill:"red",shape:"ring",text:"Error"});
+ }
+ });
+
+ // Cleanup on node removal
+ node.on('close', function() {
+ // Save memories to file
+ try {
+ fs.writeFileSync(filePath, JSON.stringify(node.memories, null, 2));
+ } catch (err) {
+ node.error("Error saving memory file: " + err.message);
+ }
+ node.status({});
+ });
+ }
+
+ // Register the node type
+ RED.nodes.registerType("ai-memory-file", MemoryFileNode);
+ };