llmjs2 1.3.8 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +31 -476
- package/chain/AGENT_STEP_README.md +102 -0
- package/chain/README.md +257 -0
- package/chain/WORKFLOW_README.md +85 -0
- package/chain/agent-step-example.js +232 -0
- package/chain/docs/AGENT.md +126 -0
- package/chain/docs/GRAPH.md +490 -0
- package/chain/examples.js +314 -0
- package/chain/index.js +31 -0
- package/chain/lib/agent.js +338 -0
- package/chain/lib/flow/agent-step.js +119 -0
- package/chain/lib/flow/edge.js +24 -0
- package/chain/lib/flow/flow.js +76 -0
- package/chain/lib/flow/graph.js +331 -0
- package/chain/lib/flow/index.js +7 -0
- package/chain/lib/flow/step.js +63 -0
- package/chain/lib/memory/in-memory.js +117 -0
- package/chain/lib/memory/index.js +36 -0
- package/chain/lib/memory/lance-memory.js +225 -0
- package/chain/lib/memory/sqlite-memory.js +309 -0
- package/chain/simple-agent-step-example.js +168 -0
- package/chain/workflow-example-usage.js +70 -0
- package/chain/workflow-example.json +59 -0
- package/core/README.md +485 -0
- package/core/cli.js +275 -0
- package/core/docs/BASIC_USAGE.md +62 -0
- package/core/docs/CLI.md +104 -0
- package/{docs → core/docs}/GET_STARTED.md +129 -129
- package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
- package/{docs → core/docs}/README.md +47 -47
- package/core/docs/ROUTER_GUIDE.md +199 -0
- package/{docs → core/docs}/SERVER_MODE.md +358 -350
- package/core/index.js +115 -0
- package/{providers → core/providers}/ollama.js +14 -6
- package/{providers → core/providers}/openai.js +14 -6
- package/{providers → core/providers}/openrouter.js +14 -6
- package/core/router.js +252 -0
- package/{server.js → core/server.js} +15 -5
- package/package.json +43 -27
- package/cli.js +0 -195
- package/docs/BASIC_USAGE.md +0 -296
- package/docs/CLI.md +0 -455
- package/docs/ROUTER_GUIDE.md +0 -402
- package/index.js +0 -265
- package/router.js +0 -273
- package/test-completion.js +0 -99
- package/test.js +0 -246
- /package/{config.yaml → core/config.yaml} +0 -0
- /package/{logger.js → core/logger.js} +0 -0
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Advanced examples showing llmjs-chain capabilities
|
|
3
|
+
*/
|
|
4
|
+
const { Step, Flow, Agent, Graph, InMemory, completion } = require('./index');
|
|
5
|
+
|
|
6
|
+
/**
 * Example 1: Simple LLM Chain
 * Chain multiple LLM calls together
 */
async function example1_SimpleChain() {
  console.log('\n=== Example 1: Simple LLM Chain ===');

  // Small helper: one-shot user prompt against the demo model.
  const ask = (content) =>
    completion({ messages: [{ role: 'user', content }], model: 'openai/gpt-3.5-turbo' });

  // Step 1: come up with a premise for the requested genre.
  const premise = new Step('premise', {
    processor: async (context, inputs) => ({
      premise: await ask(`Generate a creative premise for a ${inputs.genre} story.`)
    })
  });

  // Step 2: expand the premise from step 1 into a short story.
  const story = new Step('story', {
    processor: async (context, inputs) => ({
      story: await ask(`Write a short story based on this premise: ${inputs.premise.premise}`)
    })
  });

  // Wire both steps into a flow and run it end to end.
  const pipeline = new Flow('story-generator');
  pipeline.addStep(premise);
  pipeline.addStep(story);

  const output = await pipeline.execute({ genre: 'science fiction' });
  console.log('Generated story:', output.story);
}
|
|
45
|
+
|
|
46
|
+
/**
 * Example 2: Agent with Tools
 * Create an agent that can perform calculations and get current time
 */
async function example2_AgentWithTools() {
  console.log('\n=== Example 2: Agent with Tools ===');

  // Tool 1: evaluate a math expression supplied by the model.
  const calculateTool = {
    name: 'calculate',
    description: 'Perform mathematical calculations',
    parameters: {
      type: 'object',
      properties: {
        expression: { type: 'string', description: 'Mathematical expression to evaluate' }
      },
      required: ['expression']
    },
    execute: async ({ expression }) => {
      try {
        // SECURITY NOTE: eval() on model-provided input is for demo purposes
        // only — in production use a safe math-expression library instead.
        const result = eval(expression);
        return JSON.stringify({ result });
      } catch (error) {
        return JSON.stringify({ error: error.message });
      }
    }
  };

  // Tool 2: report the current timestamp.
  const clockTool = {
    name: 'get_current_time',
    description: 'Get the current date and time',
    parameters: {
      type: 'object',
      properties: {}
    },
    execute: async () => JSON.stringify({ time: new Date().toISOString() })
  };

  const mathAgent = new Agent({
    model: 'openai/gpt-4',
    instruction: 'You are a helpful math assistant. Use tools when needed to solve problems.',
    tools: [calculateTool, clockTool]
  });

  const reply = await mathAgent.generate('Calculate the area of a circle with radius 5, and tell me what time it is.');
  console.log('Agent response:', reply);
}
|
|
94
|
+
|
|
95
|
+
/**
 * Example 3: Complex Graph with Branching
 * A graph that processes data differently based on conditions
 */
async function example3_ComplexGraph() {
  console.log('\n=== Example 3: Complex Graph with Branching ===');

  const g = new Graph('data-processor');

  // Entry node: inspect the payload before routing it.
  const validator = new Step('validate', {
    processor: async (context, inputs) => {
      const { data } = inputs;
      return { data, isValid: Array.isArray(data) && data.length > 0 };
    }
  });

  // Branch A: aggregate an array of numbers.
  const arrayBranch = new Step('process-array', {
    processor: async (context, inputs) => {
      const values = inputs.validate.data;
      const sum = values.reduce((acc, n) => acc + n, 0);
      return { sum, average: sum / values.length, processed: true };
    }
  });

  // Branch B: handle a lone scalar value.
  const scalarBranch = new Step('process-single', {
    processor: async (context, inputs) => {
      const value = inputs.validate.data;
      return { original: value, doubled: value * 2, processed: true };
    }
  });

  // Terminal node: render whichever branch ran into a message.
  const formatter = new Step('format', {
    processor: async (context, inputs) => {
      let result = 'Processing failed';
      const arrayOut = inputs['process-array'];
      const singleOut = inputs['process-single'];

      if (arrayOut) {
        result = `Array processed - Sum: ${arrayOut.sum}, Average: ${arrayOut.average}`;
      } else if (singleOut) {
        result = `Single value processed - Original: ${singleOut.original}, Doubled: ${singleOut.doubled}`;
      }

      return { result };
    }
  });

  for (const node of [validator, arrayBranch, scalarBranch, formatter]) {
    g.addNode(node);
  }

  // Conditional routing out of the validator.
  g.addEdge('validate', 'process-array', (r) => r.isValid && Array.isArray(r.data));
  g.addEdge('validate', 'process-single', (r) => r.isValid && !Array.isArray(r.data));
  g.addEdge('validate', 'format', (r) => !r.isValid); // Invalid data goes directly to format
  g.addEdge('process-array', 'format');
  g.addEdge('process-single', 'format');

  console.log('Testing with array data:');
  const arrayResult = await g.execute(['validate'], { data: [1, 2, 3, 4, 5] });
  console.log('Array result:', arrayResult.format.result);

  console.log('\nTesting with single value:');
  const singleResult = await g.execute(['validate'], { data: 10 });
  console.log('Single value result:', singleResult.format.result);

  console.log('\nTesting with invalid data:');
  const invalidResult = await g.execute(['validate'], { data: null });
  console.log('Invalid data result:', invalidResult.format?.result || 'No result');
}
|
|
178
|
+
|
|
179
|
+
/**
 * Example 4: Multi-Agent Collaboration
 * Agents working together through a flow
 */
async function example4_MultiAgentCollaboration() {
  console.log('\n=== Example 4: Multi-Agent Collaboration ===');

  // All three specialists share the same model; only the instruction differs.
  const makeSpecialist = (instruction) =>
    new Agent({ model: 'openai/gpt-3.5-turbo', instruction });

  const researcher = makeSpecialist('You are a research specialist. Provide detailed information on requested topics.');
  const writer = makeSpecialist('You are a skilled writer. Create engaging content based on provided information.');
  const editor = makeSpecialist('You are an editor. Review and improve the quality of written content.');

  // Step 1: gather facts on the topic.
  const researchStep = new Step('research', {
    processor: async (context, inputs) => ({
      research: await researcher.generate(`Research key facts about ${inputs.topic}`)
    })
  });

  // Step 2: draft an article from the research output.
  const writeStep = new Step('write', {
    processor: async (context, inputs) => ({
      article: await writer.generate(
        `Write a 300-word article about ${inputs.topic} using this research: ${inputs.research.research}`
      )
    })
  });

  // Step 3: polish the draft.
  const editStep = new Step('edit', {
    processor: async (context, inputs) => ({
      finalArticle: await editor.generate(
        `Edit and improve this article: ${inputs.write.article}`
      )
    })
  });

  // Chain research -> write -> edit and execute.
  const collaborationFlow = new Flow('article-creation');
  collaborationFlow.addStep(researchStep);
  collaborationFlow.addStep(writeStep);
  collaborationFlow.addStep(editStep);

  const result = await collaborationFlow.execute({ topic: 'artificial intelligence' });
  console.log('Final article:', result.finalArticle);
}
|
|
239
|
+
|
|
240
|
+
/**
 * Example 5: Agent with Memory
 * Demonstrates how agents can use memory for conversation persistence
 */
async function example5_AgentWithMemory() {
  console.log('\n=== Example 5: Agent with Memory ===');

  // Backing store for the conversation.
  const store = new InMemory();

  const assistant = new Agent({
    model: 'openai/gpt-3.5-turbo',
    instruction: 'You are a helpful assistant with conversation memory.',
    memory: store
  });

  console.log('Starting conversation...');

  // Turn 1: introduce some facts (no prior session history to load).
  const firstReply = await assistant.generate({
    userPrompt: 'My name is Alice and I love programming.',
    memory: { resourceId: 'alice-session', threadId: 'conversation-1', session: false }
  });
  console.log('Agent response 1:', firstReply);

  // Turn 2: session history is loaded, so the name should be remembered.
  const secondReply = await assistant.generate({
    userPrompt: 'What programming language do you recommend for beginners?',
    memory: { resourceId: 'alice-session', threadId: 'conversation-1', session: true }
  });
  console.log('Agent response 2:', secondReply);

  // Query the store directly: keyword search over saved messages.
  const searchResults = await store.search('programming', 'alice-session', 5);
  console.log('Found', searchResults.length, 'messages mentioning programming');

  // Fetch the raw session transcript.
  const history = await store.getSessionHistory('alice-session', 'conversation-1', 10);
  console.log('Session has', history.length, 'total messages');
}
|
|
282
|
+
|
|
283
|
+
/**
|
|
284
|
+
* Run all examples
|
|
285
|
+
*/
|
|
286
|
+
async function runExamples() {
|
|
287
|
+
console.log('Running llmjs-chain advanced examples...');
|
|
288
|
+
|
|
289
|
+
try {
|
|
290
|
+
await example1_SimpleChain();
|
|
291
|
+
await example2_AgentWithTools();
|
|
292
|
+
await example3_ComplexGraph();
|
|
293
|
+
await example4_MultiAgentCollaboration();
|
|
294
|
+
await example5_AgentWithMemory();
|
|
295
|
+
} catch (error) {
|
|
296
|
+
console.error('Example failed:', error.message);
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
console.log('\n=== All examples completed ===');
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
// Run examples if this file is executed directly
|
|
303
|
+
if (require.main === module) {
|
|
304
|
+
runExamples().catch(console.error);
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
module.exports = {
|
|
308
|
+
example1_SimpleChain,
|
|
309
|
+
example2_AgentWithTools,
|
|
310
|
+
example3_ComplexGraph,
|
|
311
|
+
example4_MultiAgentCollaboration,
|
|
312
|
+
example5_AgentWithMemory,
|
|
313
|
+
runExamples
|
|
314
|
+
};
|
package/chain/index.js
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
// Re-export everything from core runtime
|
|
2
|
+
const llmjs2 = require('../core');
|
|
3
|
+
|
|
4
|
+
// Import chain components
|
|
5
|
+
const { Step, Flow, Graph, Edge, AgentStep } = require('./lib/flow/');
|
|
6
|
+
const { Agent } = require('./lib/agent');
|
|
7
|
+
const { memory, InMemory, LanceMemory, SQLiteMemory } = require('./lib/memory/');
|
|
8
|
+
|
|
9
|
+
// Export everything
|
|
10
|
+
module.exports = {
|
|
11
|
+
// llmjs2 exports
|
|
12
|
+
...llmjs2,
|
|
13
|
+
|
|
14
|
+
// Chain components
|
|
15
|
+
Step,
|
|
16
|
+
Flow,
|
|
17
|
+
Graph,
|
|
18
|
+
Edge,
|
|
19
|
+
AgentStep,
|
|
20
|
+
Agent,
|
|
21
|
+
InMemory,
|
|
22
|
+
LanceMemory,
|
|
23
|
+
SQLiteMemory,
|
|
24
|
+
memory,
|
|
25
|
+
|
|
26
|
+
// Convenience functions
|
|
27
|
+
createStep: (name, config) => new Step(name, config),
|
|
28
|
+
createFlow: (name, steps) => new Flow(name, steps || []),
|
|
29
|
+
createGraph: (name) => new Graph(name),
|
|
30
|
+
createAgent: (name, config) => new Agent(name, config)
|
|
31
|
+
};
|
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
const { completion } = require('llmjs2');
|
|
2
|
+
const crypto = require('crypto');
|
|
3
|
+
const { InMemory } = require('./memory/in-memory');
|
|
4
|
+
|
|
5
|
+
/**
 * Agent class for AI interactions with tool support.
 *
 * Wraps llmjs2's `completion` with: system-instruction handling, automatic
 * tool-call execution (recursing until the model stops requesting tools),
 * and optional conversation memory (session history plus relevance search).
 */
class Agent {
  /**
   * Create a new Agent instance
   * @param {Object} options - Agent configuration
   * @param {string} [options.model] - Model name to use (auto-detected by llmjs2 if not provided)
   * @param {string} [options.instruction] - System instruction
   * @param {string} [options.apikey] - API key for the LLM (auto-detected from environment if not provided)
   * @param {Array} [options.tools] - Array of tool definitions (full OpenAI format or simplified format)
   * @param {Object} [options.memory] - Memory instance for conversation history
   */
  constructor({ model, instruction, apikey, tools, memory }) {
    this.model = model; // Can be undefined, let llmjs2 auto-detect
    this.instruction = instruction;
    this.apikey = apikey; // Can be undefined, let llmjs2 auto-detect
    this.tools = this.normalizeTools(tools || []);
    this.memory = memory;
  }

  /**
   * Normalize tool definitions to a consistent OpenAI-style shape and wrap
   * each `execute` with error handling.
   *
   * Accepts either the full format ({ parameters: { type: 'object',
   * properties, required } }) or a simplified format where `parameters` maps
   * param name -> { type, description, required }.
   * @param {Array} tools - Array of tool objects
   * @returns {Array} Normalized tools array
   */
  normalizeTools(tools) {
    return tools.map(tool => {
      // Already in the full format: keep as-is, just wrap execute.
      if (tool.parameters && tool.parameters.type === 'object' && tool.parameters.properties) {
        return {
          ...tool,
          execute: this.wrapExecute(tool.execute)
        };
      }

      // Convert simplified format to full OpenAI format.
      const properties = {};
      const required = [];

      for (const [paramName, paramDef] of Object.entries(tool.parameters || {})) {
        properties[paramName] = {
          type: paramDef.type,
          description: paramDef.description
        };
        if (paramDef.required) {
          required.push(paramName);
        }
      }

      return {
        name: tool.name,
        description: tool.description,
        parameters: {
          type: 'object',
          properties,
          required
        },
        execute: this.wrapExecute(tool.execute)
      };
    });
  }

  /**
   * Wrap an execute function so a thrown error is returned to the model as a
   * JSON string instead of propagating.
   * @param {Function} executeFn - The original execute function
   * @returns {Function} Wrapped execute function
   */
  wrapExecute(executeFn) {
    return async (args) => {
      try {
        return await executeFn(args);
      } catch (error) {
        return JSON.stringify({
          error: error.message,
          stack: error.stack
        });
      }
    };
  }

  /**
   * Convert tool definitions to the format expected by llmjs2
   * (OpenAI "function" tool entries; `execute` is intentionally dropped).
   * @param {Array} tools - Array of tool objects with execute method
   * @returns {Array} Formatted tools array
   */
  static formatTools(tools) {
    return tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters
      }
    }));
  }

  /**
   * Generate a random unique ID (16 hex chars) for memory records.
   * @returns {string} Random unique string
   */
  #generateId() {
    return crypto.randomBytes(8).toString('hex');
  }

  /**
   * Execute a tool call requested by the model.
   * @param {Object} toolCall - The tool call object from the AI
   * @returns {Promise<string>} The result of the tool execution (or an error message string)
   */
  async executeToolCall(toolCall) {
    const functionName = toolCall.function.name;
    const args = toolCall.function.arguments;

    console.log(`[Tool Call] ${functionName}(${JSON.stringify(args)})`);

    const tool = this.tools.find(t => t.name === functionName);

    if (!tool) {
      return `Unknown tool: ${functionName}`;
    }

    try {
      const result = await tool.execute(args);
      return result;
    } catch (error) {
      return `Error executing tool: ${error.message}`;
    }
  }

  /**
   * Process a single chat turn with automatic tool execution. Recurses while
   * the model keeps returning tool calls; mutates `messages` in place to
   * accumulate the conversation.
   * @param {Array} messages - Conversation history (mutated)
   * @param {string} [apikey] - Optional API key override
   * @param {Object} memoryOptions - Memory options (resourceId, threadId)
   * @returns {Promise<Object>} { response, messages } with the final reply and full history
   */
  async processChat(messages, apikey, memoryOptions = {}) {
    const apiKey = apikey || this.apikey;

    try {
      const result = await completion({
        model: this.model,
        messages: messages,
        apiKey: apiKey,
        tools: Agent.formatTools(this.tools),
        final: false // Get full result object including tool_calls
      });

      if (result.tool_calls && result.tool_calls.length > 0) {
        console.log(`[tool_calls] ${result.tool_calls.length} tool call(s) detected.`);

        // BUG FIX: push the assistant message ONCE before the loop — it was
        // previously pushed inside the loop, duplicating it per tool call.
        messages.push({
          role: 'assistant',
          content: result.content || 'Tool calls executed.'
        });

        // Execute each tool call and append its result to the history.
        for (const toolCall of result.tool_calls) {
          const toolResult = await this.executeToolCall(toolCall);

          // Append the tool result with role 'tool'.
          messages.push({
            role: 'tool',
            content: `Tool result for ${toolCall.function.name}: ${toolResult}`
          });

          // Save tool result to memory if memoryOptions provided (non-blocking)
          if (memoryOptions.resourceId && memoryOptions.threadId && this.memory) {
            this.memory.save(
              toolResult,
              memoryOptions.resourceId,
              memoryOptions.threadId,
              'tool',
              'tool_result',
              this.#generateId(),
              toolCall.function.name
            );
          }
        }

        // Recursively process to get the model's next response.
        return this.processChat(messages, apiKey, memoryOptions);
      } else {
        // Final response - no tool calls.
        messages.push({
          role: 'assistant',
          content: result.content
        });

        // Save assistant message to memory if memoryOptions provided (non-blocking)
        if (memoryOptions.resourceId && memoryOptions.threadId && this.memory) {
          this.memory.save(
            result.content,
            memoryOptions.resourceId,
            memoryOptions.threadId,
            'assistant',
            'text',
            this.#generateId(),
            null
          );
        }

        return {
          response: result.content,
          messages: messages
        };
      }
    } catch (error) {
      return {
        response: `Error: ${error.message}`,
        messages: messages
      };
    }
  }

  /**
   * Generate a response from the AI.
   * @param {string|Object} input - User prompt string or object with userPrompt, memory options, and optional apikey
   * @returns {Promise<string>} The AI response (or an error message string for invalid input)
   */
  async generate(input) {
    let userPrompt;
    let apiKey;
    let memoryOptions = {};

    if (typeof input === 'string') {
      // Simple string prompt
      userPrompt = input;
      apiKey = this.apikey;
    } else if (input !== null && typeof input === 'object') {
      // BUG FIX: `typeof null === 'object'` — without the null guard a null
      // input crashed reading `input.userPrompt` instead of returning the
      // error string below.
      userPrompt = input.userPrompt;
      apiKey = input.apikey || this.apikey;
      memoryOptions = input.memory || {};
    } else {
      return 'Invalid input format. Expected string or object with userPrompt.';
    }

    // Build messages array with system instruction and user prompt.
    const messages = [];

    if (this.instruction) {
      messages.push({
        role: 'system',
        content: this.instruction
      });
    }

    // Load conversation history from memory if memoryOptions provided.
    if (memoryOptions.resourceId && memoryOptions.threadId && this.memory) {
      const loadSession = memoryOptions.session !== undefined ? memoryOptions.session : false;
      const loadRelevance = memoryOptions.relevance !== undefined ? memoryOptions.relevance : false;

      if (loadSession) {
        const history = await this.memory.getSessionHistory(
          memoryOptions.resourceId,
          memoryOptions.threadId,
          10
        );

        // Add history messages (tool messages are excluded — presumably they
        // would confuse providers that don't accept the 'tool' role here).
        for (const msg of history) {
          if (msg.role !== 'tool') {
            messages.push({
              role: msg.role,
              content: msg.content
            });
          }
        }
      }

      if (loadRelevance) {
        // Search for relevant past history across the whole resource.
        const relevant = await this.memory.search(
          userPrompt,
          memoryOptions.resourceId,
          5
        );

        // Combine relevant history into a string and append to user prompt.
        let referenceContent = '';
        for (const msg of relevant) {
          referenceContent += `[${msg.role.toUpperCase()}]: ${msg.content}\n\n`;
        }

        if (referenceContent) {
          userPrompt = `${userPrompt}\n\n--- Reference History ---\n${referenceContent}`;
        }
      }
    }

    // Add user message.
    messages.push({
      role: 'user',
      content: userPrompt
    });

    // Save user message to memory if memoryOptions provided (non-blocking)
    if (memoryOptions.resourceId && memoryOptions.threadId && this.memory) {
      this.memory.save(
        userPrompt,
        memoryOptions.resourceId,
        memoryOptions.threadId,
        'user',
        'text',
        this.#generateId(),
        null
      );
    }

    const result = await this.processChat(messages, apiKey, memoryOptions);

    return result.response;
  }
}
|
|
337
|
+
|
|
338
|
+
// Public API of this module: the Agent class.
module.exports = { Agent };
|