llmjs2 1.3.9 → 1.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +31 -476
- package/chain/AGENT_STEP_README.md +102 -0
- package/chain/README.md +257 -0
- package/chain/WORKFLOW_README.md +85 -0
- package/chain/agent-step-example.js +232 -0
- package/chain/docs/AGENT.md +126 -0
- package/chain/docs/GRAPH.md +490 -0
- package/chain/examples.js +314 -0
- package/chain/index.js +31 -0
- package/chain/lib/agent.js +338 -0
- package/chain/lib/flow/agent-step.js +119 -0
- package/chain/lib/flow/edge.js +24 -0
- package/chain/lib/flow/flow.js +76 -0
- package/chain/lib/flow/graph.js +331 -0
- package/chain/lib/flow/index.js +7 -0
- package/chain/lib/flow/step.js +63 -0
- package/chain/lib/memory/in-memory.js +117 -0
- package/chain/lib/memory/index.js +36 -0
- package/chain/lib/memory/lance-memory.js +225 -0
- package/chain/lib/memory/sqlite-memory.js +309 -0
- package/chain/simple-agent-step-example.js +168 -0
- package/chain/workflow-example-usage.js +70 -0
- package/chain/workflow-example.json +59 -0
- package/core/README.md +485 -0
- package/core/cli.js +275 -0
- package/core/docs/BASIC_USAGE.md +62 -0
- package/core/docs/CLI.md +104 -0
- package/{docs → core/docs}/GET_STARTED.md +129 -129
- package/{docs → core/docs}/GUARDRAILS_GUIDE.md +734 -734
- package/{docs → core/docs}/README.md +47 -47
- package/core/docs/ROUTER_GUIDE.md +199 -0
- package/{docs → core/docs}/SERVER_MODE.md +358 -350
- package/core/index.js +115 -0
- package/{providers → core/providers}/ollama.js +14 -6
- package/{providers → core/providers}/openai.js +14 -6
- package/{providers → core/providers}/openrouter.js +14 -6
- package/core/router.js +252 -0
- package/{server.js → core/server.js} +15 -5
- package/package.json +43 -27
- package/cli.js +0 -195
- package/docs/BASIC_USAGE.md +0 -296
- package/docs/CLI.md +0 -455
- package/docs/ROUTER_GUIDE.md +0 -402
- package/index.js +0 -267
- package/router.js +0 -273
- package/test-completion.js +0 -99
- package/test.js +0 -246
- /package/{config.yaml → core/config.yaml} +0 -0
- /package/{logger.js → core/logger.js} +0 -0
package/chain/README.md
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
# llmjs-chain
|
|
2
|
+
|
|
3
|
+
An extension of llmjs2 that provides chaining capabilities for building complex LLM workflows with Agents, Graphs, Steps, and Flows.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Step**: Basic unit of work that can execute LLM calls or custom processing
|
|
8
|
+
- **Flow**: Linear sequence of steps that execute in order
|
|
9
|
+
- **Agent**: Intelligent entity that can make decisions and use tools
|
|
10
|
+
- **Graph**: Complex execution graph with branching and parallelism
|
|
11
|
+
- **Full llmjs2 compatibility**: All llmjs2 features are available
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
npm install llmjs-chain
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
This will automatically install llmjs2 as a dependency.
|
|
20
|
+
|
|
21
|
+
## Quick Start
|
|
22
|
+
|
|
23
|
+
```javascript
|
|
24
|
+
import { Step, Flow, Agent, Graph, completion } from 'llmjs-chain';
|
|
25
|
+
|
|
26
|
+
// Set API keys (same as llmjs2)
|
|
27
|
+
process.env.OPENAI_API_KEY = 'your-openai-key';
|
|
28
|
+
|
|
29
|
+
// Basic completion (from llmjs2)
|
|
30
|
+
const response = await completion('Hello, how are you?');
|
|
31
|
+
console.log(response);
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Terminal Chat App
|
|
35
|
+
|
|
36
|
+
Experience llmjs-chain with an interactive terminal chat interface that automatically detects available API keys and models:
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
npm run chat
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
Features:
|
|
43
|
+
- Conversational chat with memory persistence
|
|
44
|
+
- Built-in tools: `get_current_time`, `calculate`
|
|
45
|
+
- Commands: `/help`, `/history`, `/memory`, `/clear`, `/exit`
|
|
46
|
+
- Automatic API key and model detection (no configuration needed)
|
|
47
|
+
- Automatic model routing and tool execution
|
|
48
|
+
|
|
49
|
+
The chat app will automatically use whichever API keys are available in your environment variables, following the same auto-detection logic as llmjs2.
|
|
50
|
+
|
|
51
|
+
## Components
|
|
52
|
+
|
|
53
|
+
### Step
|
|
54
|
+
|
|
55
|
+
A Step is the basic unit of work in a chain. It can execute an LLM call or custom processing logic.
|
|
56
|
+
|
|
57
|
+
```javascript
|
|
58
|
+
import { Step } from 'llmjs-chain';
|
|
59
|
+
|
|
60
|
+
const step = new Step('my-step', {
|
|
61
|
+
processor: async (context, inputs) => {
|
|
62
|
+
// Custom processing logic
|
|
63
|
+
return { result: `Processed: ${inputs.data}` };
|
|
64
|
+
}
|
|
65
|
+
});
|
|
66
|
+
|
|
67
|
+
const result = await step.execute({}, { data: 'Hello World' });
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
### Flow
|
|
71
|
+
|
|
72
|
+
A Flow executes steps in a linear sequence.
|
|
73
|
+
|
|
74
|
+
```javascript
|
|
75
|
+
import { Step, Flow } from 'llmjs-chain';
|
|
76
|
+
|
|
77
|
+
const step1 = new Step('step1', {
|
|
78
|
+
processor: async (context, inputs) => {
|
|
79
|
+
return { data: inputs.input.toUpperCase() };
|
|
80
|
+
}
|
|
81
|
+
});
|
|
82
|
+
|
|
83
|
+
const step2 = new Step('step2', {
|
|
84
|
+
processor: async (context, inputs) => {
|
|
85
|
+
return { result: `Final: ${inputs.step1.data}` };
|
|
86
|
+
}
|
|
87
|
+
});
|
|
88
|
+
|
|
89
|
+
const flow = new Flow('my-flow');
|
|
90
|
+
flow.addStep(step1);
|
|
91
|
+
flow.addStep(step2);
|
|
92
|
+
|
|
93
|
+
const result = await flow.execute({ input: 'hello' });
|
|
94
|
+
console.log(result); // { result: 'Final: HELLO' }
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
### Agent
|
|
98
|
+
|
|
99
|
+
An Agent is an intelligent entity that can make decisions and use tools.
|
|
100
|
+
|
|
101
|
+
```javascript
|
|
102
|
+
import { Agent } from 'llmjs-chain';
|
|
103
|
+
|
|
104
|
+
const agent = new Agent({
|
|
105
|
+
model: 'openai/gpt-4',
|
|
106
|
+
instruction: 'You are a helpful coding assistant.',
|
|
107
|
+
tools: [
|
|
108
|
+
{
|
|
109
|
+
name: 'run_code',
|
|
110
|
+
description: 'Execute JavaScript code',
|
|
111
|
+
parameters: {
|
|
112
|
+
type: 'object',
|
|
113
|
+
properties: {
|
|
114
|
+
code: { type: 'string' }
|
|
115
|
+
}
|
|
116
|
+
},
|
|
117
|
+
execute: async ({ code }) => {
|
|
118
|
+
return eval(code);
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
]
|
|
122
|
+
});
|
|
123
|
+
|
|
124
|
+
// Execute task
|
|
125
|
+
const result = await agent.generate('Calculate 2 + 2 and tell me the result');
|
|
126
|
+
```
|
|
127
|
+
|
|
128
|
+
### Memory Factory
|
|
129
|
+
|
|
130
|
+
llmjs-chain provides a memory factory for creating different types of memory storage:
|
|
131
|
+
|
|
132
|
+
```javascript
|
|
133
|
+
import { memory } from 'llmjs-chain';
|
|
134
|
+
|
|
135
|
+
// In-memory storage (default)
|
|
136
|
+
const inMemory = memory.create();
|
|
137
|
+
|
|
138
|
+
// LanceDB vector storage
|
|
139
|
+
const lanceMemory = memory.create({ type: 'lancedb', config: { db: 'my_db', dir: './data' } });
|
|
140
|
+
|
|
141
|
+
// SQLite storage with vector support
|
|
142
|
+
const sqliteMemory = memory.create({ type: 'sqlite', config: { db: './memory.db' } });
|
|
143
|
+
|
|
144
|
+
// Direct class instantiation (alternative)
|
|
145
|
+
import { InMemory, LanceMemory, SQLiteMemory } from 'llmjs-chain';
|
|
146
|
+
const directMemory = new InMemory();
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
### Memory Classes
|
|
150
|
+
|
|
151
|
+
#### InMemory
|
|
152
|
+
Basic in-memory storage for conversation history:
|
|
153
|
+
|
|
154
|
+
```javascript
|
|
155
|
+
const memory = new InMemory();
|
|
156
|
+
|
|
157
|
+
// Save messages
|
|
158
|
+
await memory.save('Hello!', 'user123', 'thread456', 'user', 'text');
|
|
159
|
+
await memory.save('Hi there!', 'user123', 'thread456', 'assistant', 'text');
|
|
160
|
+
|
|
161
|
+
// Get session history
|
|
162
|
+
const history = await memory.getSessionHistory('user123', 'thread456', 10);
|
|
163
|
+
|
|
164
|
+
// Search messages
|
|
165
|
+
const results = await memory.search('hello', 'user123', 5);
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
### Graph
|
|
169
|
+
|
|
170
|
+
A Graph allows complex execution with branching and parallelism.
|
|
171
|
+
|
|
172
|
+
```javascript
|
|
173
|
+
import { Step, Graph } from 'llmjs-chain';
|
|
174
|
+
|
|
175
|
+
const graph = new Graph('my-graph');
|
|
176
|
+
|
|
177
|
+
// Create nodes
|
|
178
|
+
const start = new Step('start', {
|
|
179
|
+
processor: async (context, inputs) => ({ value: inputs.number })
|
|
180
|
+
});
|
|
181
|
+
|
|
182
|
+
const double = new Step('double', {
|
|
183
|
+
processor: async (context, inputs) => ({ result: inputs.start.value * 2 })
|
|
184
|
+
});
|
|
185
|
+
|
|
186
|
+
const square = new Step('square', {
|
|
187
|
+
processor: async (context, inputs) => ({ result: inputs.start.value ** 2 })
|
|
188
|
+
});
|
|
189
|
+
|
|
190
|
+
// Add nodes and edges
|
|
191
|
+
graph.addNode(start);
|
|
192
|
+
graph.addNode(double);
|
|
193
|
+
graph.addNode(square);
|
|
194
|
+
|
|
195
|
+
graph.addEdge('start', 'double');
|
|
196
|
+
graph.addEdge('start', 'square');
|
|
197
|
+
|
|
198
|
+
// Execute graph
|
|
199
|
+
const results = await graph.execute(['start'], { number: 5 });
|
|
200
|
+
console.log(results);
|
|
201
|
+
// {
|
|
202
|
+
// start: { value: 5 },
|
|
203
|
+
// double: { result: 10 },
|
|
204
|
+
// square: { result: 25 }
|
|
205
|
+
// }
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
## Advanced Usage
|
|
209
|
+
|
|
210
|
+
### Conditional Graph Execution
|
|
211
|
+
|
|
212
|
+
```javascript
|
|
213
|
+
graph.addEdge('start', 'path-a', (result, context) => result.value > 10);
|
|
214
|
+
graph.addEdge('start', 'path-b', (result, context) => result.value <= 10);
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
### Agent with Memory
|
|
218
|
+
|
|
219
|
+
```javascript
|
|
220
|
+
agent.remember({ role: 'user', content: 'Remember that I like coffee' });
|
|
221
|
+
agent.remember({ role: 'assistant', content: 'Got it, you like coffee!' });
|
|
222
|
+
|
|
223
|
+
// Later conversations will include this memory
|
|
224
|
+
const response = await agent.generate('What do I like to drink?');
|
|
225
|
+
```
|
|
226
|
+
|
|
227
|
+
### Step Dependencies
|
|
228
|
+
|
|
229
|
+
```javascript
|
|
230
|
+
const step = new Step('dependent-step', {
|
|
231
|
+
dependencies: ['previous-step'],
|
|
232
|
+
processor: async (context, inputs) => {
|
|
233
|
+
// This step will only execute after 'previous-step' has completed
|
|
234
|
+
return { result: inputs['previous-step'].data };
|
|
235
|
+
}
|
|
236
|
+
});
|
|
237
|
+
```
|
|
238
|
+
|
|
239
|
+
## API Keys Setup
|
|
240
|
+
|
|
241
|
+
Same as llmjs2:
|
|
242
|
+
|
|
243
|
+
```bash
|
|
244
|
+
export OPENAI_API_KEY=your_openai_api_key
|
|
245
|
+
export OLLAMA_API_KEY=your_ollama_api_key
|
|
246
|
+
export OPEN_ROUTER_API_KEY=your_openrouter_api_key
|
|
247
|
+
```
|
|
248
|
+
|
|
249
|
+
## Testing
|
|
250
|
+
|
|
251
|
+
```bash
|
|
252
|
+
npm test
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
## License
|
|
256
|
+
|
|
257
|
+
MIT
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
# Workflow Example
|
|
2
|
+
|
|
3
|
+
This directory contains a complete example of defining and running workflows using llmjs-chain's Graph system.
|
|
4
|
+
|
|
5
|
+
## Files
|
|
6
|
+
|
|
7
|
+
- **`workflow-example.json`** - JSON definition of a sample workflow
|
|
8
|
+
- **`workflow-example-usage.js`** - Usage example showing how to load and run the workflow
|
|
9
|
+
|
|
10
|
+
## Running the Example
|
|
11
|
+
|
|
12
|
+
```bash
|
|
13
|
+
node workflow-example-usage.js
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
## Workflow Description
|
|
17
|
+
|
|
18
|
+
The example workflow demonstrates:
|
|
19
|
+
|
|
20
|
+
1. **Input Validation** - Validates that input data exists and is valid
|
|
21
|
+
2. **Parallel Processing** - Two different processing paths run simultaneously
|
|
22
|
+
- Path A: Multiplies each value by 2
|
|
23
|
+
- Path B: Adds 10 to each value
|
|
24
|
+
3. **Result Checking** - Sums all processed values and checks if total exceeds threshold
|
|
25
|
+
4. **Conditional Branching** - Routes to success or warning handler based on result size
|
|
26
|
+
|
|
27
|
+
## Workflow Structure
|
|
28
|
+
|
|
29
|
+
```
|
|
30
|
+
validate → [process-a, process-b] → check-result → [success-handler | warning-handler]
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## JSON Configuration Format
|
|
34
|
+
|
|
35
|
+
```json
|
|
36
|
+
{
|
|
37
|
+
"name": "workflow-name",
|
|
38
|
+
"description": "Workflow description",
|
|
39
|
+
"steps": [
|
|
40
|
+
{
|
|
41
|
+
"name": "step-name",
|
|
42
|
+
"description": "Step description",
|
|
43
|
+
"execute": "async (context) => { /* step logic as string */ }"
|
|
44
|
+
}
|
|
45
|
+
],
|
|
46
|
+
"edges": [
|
|
47
|
+
{ "from": "step1", "to": "step2" },
|
|
48
|
+
{ "from": "step2", "to": "step3", "condition": "(result) => result.success" }
|
|
49
|
+
]
|
|
50
|
+
}
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## Key Features Demonstrated
|
|
54
|
+
|
|
55
|
+
- ✅ **JSON Workflow Definition** - Define complete workflows in JSON
|
|
56
|
+
- ✅ **Parallel Execution** - Multiple steps can run simultaneously
|
|
57
|
+
- ✅ **Conditional Edges** - Branch workflow based on step results
|
|
58
|
+
- ✅ **Context Passing** - Steps can access results from previous steps
|
|
59
|
+
- ✅ **Error Handling** - Validation and error propagation
|
|
60
|
+
- ✅ **Dynamic Loading** - Load workflows from files at runtime
|
|
61
|
+
|
|
62
|
+
## Customizing the Example
|
|
63
|
+
|
|
64
|
+
1. **Modify Steps**: Edit the `execute` functions in `workflow-example.json`
|
|
65
|
+
2. **Change Logic**: Update conditions and processing logic
|
|
66
|
+
3. **Add Steps**: Extend the workflow with additional processing steps
|
|
67
|
+
4. **Create New Workflows**: Use this as a template for your own workflows
|
|
68
|
+
|
|
69
|
+
## Integration with Agents
|
|
70
|
+
|
|
71
|
+
This workflow system can be combined with AI agents for intelligent processing:
|
|
72
|
+
|
|
73
|
+
```javascript
|
|
74
|
+
// Agent step that uses AI for decision making (illustrative only — in real
// JSON the "execute" value must be a single escaped string, not a raw multi-line block)
|
|
75
|
+
{
|
|
76
|
+
"name": "analyze-data",
|
|
77
|
+
"execute": "async (context) => {
|
|
78
|
+
const agent = new Agent({ instruction: 'Analyze this data...' });
|
|
79
|
+
const result = await agent.generate(context.previousStep.data);
|
|
80
|
+
return { analysis: result };
|
|
81
|
+
}"
|
|
82
|
+
}
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
The JSON-based workflow system makes it easy to define, version, and deploy complex processing pipelines with conditional logic and parallel execution.
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AgentStep Example Program
|
|
3
|
+
*
|
|
4
|
+
* This example demonstrates how to use AgentStep in llmjs-chain workflows.
|
|
5
|
+
* It creates a multi-agent workflow for content creation and analysis.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
const { Agent, AgentStep, Graph } = require('./index');
|
|
9
|
+
|
|
10
|
+
async function runAgentStepExample() {
|
|
11
|
+
console.log('🚀 Starting AgentStep Workflow Example\n');
|
|
12
|
+
|
|
13
|
+
// ============================================================================
|
|
14
|
+
// 1. Create Specialized AI Agents
|
|
15
|
+
// ============================================================================
|
|
16
|
+
|
|
17
|
+
console.log('📝 Creating specialized AI agents...');
|
|
18
|
+
|
|
19
|
+
// Content Creator Agent - generates creative content
|
|
20
|
+
const contentCreator = new Agent({
|
|
21
|
+
instruction: `You are a creative content writer. Generate engaging, well-structured content based on the given topic.
|
|
22
|
+
Focus on being informative yet entertaining. Keep responses concise but comprehensive.`,
|
|
23
|
+
tools: [
|
|
24
|
+
{
|
|
25
|
+
name: 'get_word_count',
|
|
26
|
+
description: 'Count words in a text',
|
|
27
|
+
parameters: { text: { type: 'string', required: true } },
|
|
28
|
+
execute: async ({ text }) => `Word count: ${text.split(/\s+/).length}`
|
|
29
|
+
}
|
|
30
|
+
]
|
|
31
|
+
});
|
|
32
|
+
|
|
33
|
+
// Content Analyzer Agent - analyzes and provides feedback
|
|
34
|
+
const contentAnalyzer = new Agent({
|
|
35
|
+
instruction: `You are a content quality analyst. Analyze the provided content for:
|
|
36
|
+
- Engagement and readability
|
|
37
|
+
- Structure and flow
|
|
38
|
+
- Key strengths and areas for improvement
|
|
39
|
+
- SEO considerations
|
|
40
|
+
Provide specific, actionable feedback.`,
|
|
41
|
+
tools: [
|
|
42
|
+
{
|
|
43
|
+
name: 'analyze_sentiment',
|
|
44
|
+
description: 'Analyze sentiment of text',
|
|
45
|
+
parameters: { text: { type: 'string', required: true } },
|
|
46
|
+
execute: async ({ text }) => {
|
|
47
|
+
// Simple sentiment analysis simulation
|
|
48
|
+
const positiveWords = ['good', 'great', 'excellent', 'amazing', 'wonderful'];
|
|
49
|
+
const negativeWords = ['bad', 'poor', 'terrible', 'awful', 'horrible'];
|
|
50
|
+
|
|
51
|
+
const words = text.toLowerCase().split(/\s+/);
|
|
52
|
+
const positiveCount = words.filter(w => positiveWords.includes(w)).length;
|
|
53
|
+
const negativeCount = words.filter(w => negativeWords.includes(w)).length;
|
|
54
|
+
|
|
55
|
+
if (positiveCount > negativeCount) return 'Positive sentiment';
|
|
56
|
+
if (negativeCount > positiveCount) return 'Negative sentiment';
|
|
57
|
+
return 'Neutral sentiment';
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
]
|
|
61
|
+
});
|
|
62
|
+
|
|
63
|
+
// Content Editor Agent - refines and improves content
|
|
64
|
+
const contentEditor = new Agent({
|
|
65
|
+
instruction: `You are an expert content editor. Review and improve the provided content based on:
|
|
66
|
+
- Clarity and conciseness
|
|
67
|
+
- Grammar and style
|
|
68
|
+
- Engagement improvements
|
|
69
|
+
- Call-to-action additions
|
|
70
|
+
Provide the improved version along with change explanations.`,
|
|
71
|
+
tools: [
|
|
72
|
+
{
|
|
73
|
+
name: 'check_grammar',
|
|
74
|
+
description: 'Check grammar in text',
|
|
75
|
+
parameters: { text: { type: 'string', required: true } },
|
|
76
|
+
execute: async ({ text }) => 'Grammar check completed - no major issues found'
|
|
77
|
+
}
|
|
78
|
+
]
|
|
79
|
+
});
|
|
80
|
+
|
|
81
|
+
// ============================================================================
|
|
82
|
+
// 2. Create AgentSteps with Input/Output Mapping
|
|
83
|
+
// ============================================================================
|
|
84
|
+
|
|
85
|
+
console.log('🔧 Creating AgentSteps with specialized mappings...');
|
|
86
|
+
|
|
87
|
+
// Step 1: Content Creation
|
|
88
|
+
const createContentStep = new AgentStep({
|
|
89
|
+
name: 'create-content',
|
|
90
|
+
agent: contentCreator,
|
|
91
|
+
inputMapper: (context) => {
|
|
92
|
+
const topic = context.topic || 'Artificial Intelligence';
|
|
93
|
+
const audience = context.audience || 'general public';
|
|
94
|
+
const wordCount = context.targetWordCount || 300;
|
|
95
|
+
|
|
96
|
+
return `Create an engaging article about "${topic}" for a ${audience} audience.
|
|
97
|
+
Target word count: approximately ${wordCount} words.
|
|
98
|
+
Make it informative, entertaining, and well-structured with a clear introduction, body, and conclusion.`;
|
|
99
|
+
},
|
|
100
|
+
outputMapper: (response, context) => ({
|
|
101
|
+
originalContent: response,
|
|
102
|
+
createdAt: new Date().toISOString(),
|
|
103
|
+
topic: context.topic,
|
|
104
|
+
stage: 'created'
|
|
105
|
+
})
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
// Step 2: Content Analysis
|
|
109
|
+
const analyzeContentStep = new AgentStep({
|
|
110
|
+
name: 'analyze-content',
|
|
111
|
+
agent: contentAnalyzer,
|
|
112
|
+
inputMapper: (context) => {
|
|
113
|
+
const content = context['create-content'].originalContent;
|
|
114
|
+
return `Please analyze this content for quality, engagement, and improvement opportunities:
|
|
115
|
+
|
|
116
|
+
${content}
|
|
117
|
+
|
|
118
|
+
Provide specific feedback on strengths, weaknesses, and suggestions for enhancement.`;
|
|
119
|
+
},
|
|
120
|
+
outputMapper: (response, context) => ({
|
|
121
|
+
analysis: response,
|
|
122
|
+
analyzedAt: new Date().toISOString(),
|
|
123
|
+
stage: 'analyzed'
|
|
124
|
+
})
|
|
125
|
+
});
|
|
126
|
+
|
|
127
|
+
// Step 3: Content Editing
|
|
128
|
+
const editContentStep = new AgentStep({
|
|
129
|
+
name: 'edit-content',
|
|
130
|
+
agent: contentEditor,
|
|
131
|
+
inputMapper: (context) => {
|
|
132
|
+
const originalContent = context['create-content'].originalContent;
|
|
133
|
+
const analysis = context['analyze-content'].analysis;
|
|
134
|
+
|
|
135
|
+
return `Please edit and improve this content based on the analysis provided:
|
|
136
|
+
|
|
137
|
+
ORIGINAL CONTENT:
|
|
138
|
+
${originalContent}
|
|
139
|
+
|
|
140
|
+
ANALYSIS/FEEDBACK:
|
|
141
|
+
${analysis}
|
|
142
|
+
|
|
143
|
+
Provide an improved version that addresses the feedback, along with explanations of your changes.`;
|
|
144
|
+
},
|
|
145
|
+
outputMapper: (response, context) => ({
|
|
146
|
+
finalContent: response,
|
|
147
|
+
editedAt: new Date().toISOString(),
|
|
148
|
+
stage: 'final'
|
|
149
|
+
})
|
|
150
|
+
});
|
|
151
|
+
|
|
152
|
+
// ============================================================================
|
|
153
|
+
// 3. Build and Execute the Multi-Agent Workflow
|
|
154
|
+
// ============================================================================
|
|
155
|
+
|
|
156
|
+
console.log('🏗️ Building multi-agent workflow...');
|
|
157
|
+
|
|
158
|
+
const contentWorkflow = new Graph({
|
|
159
|
+
name: 'content-creation-pipeline'
|
|
160
|
+
})
|
|
161
|
+
.step(createContentStep, analyzeContentStep, editContentStep)
|
|
162
|
+
.edge(createContentStep, analyzeContentStep)
|
|
163
|
+
.edge(analyzeContentStep, editContentStep)
|
|
164
|
+
.compile();
|
|
165
|
+
|
|
166
|
+
console.log('▶️ Executing content creation workflow...\n');
|
|
167
|
+
|
|
168
|
+
// Execute with different topics to show flexibility
|
|
169
|
+
const topics = [
|
|
170
|
+
{
|
|
171
|
+
topic: 'The Future of Renewable Energy',
|
|
172
|
+
audience: 'tech-savvy professionals',
|
|
173
|
+
targetWordCount: 400
|
|
174
|
+
},
|
|
175
|
+
{
|
|
176
|
+
topic: 'Healthy Eating Habits',
|
|
177
|
+
audience: 'busy parents',
|
|
178
|
+
targetWordCount: 350
|
|
179
|
+
}
|
|
180
|
+
];
|
|
181
|
+
|
|
182
|
+
for (let i = 0; i < topics.length; i++) {
|
|
183
|
+
const topicConfig = topics[i];
|
|
184
|
+
console.log(`📝 Processing Topic ${i + 1}: "${topicConfig.topic}"`);
|
|
185
|
+
console.log('═'.repeat(60));
|
|
186
|
+
|
|
187
|
+
try {
|
|
188
|
+
const result = await contentWorkflow.run(topicConfig);
|
|
189
|
+
|
|
190
|
+
// Display results
|
|
191
|
+
console.log('\n📄 ORIGINAL CONTENT:');
|
|
192
|
+
console.log(result['create-content'].originalContent.substring(0, 200) + '...');
|
|
193
|
+
|
|
194
|
+
console.log('\n🔍 ANALYSIS:');
|
|
195
|
+
console.log(result['analyze-content'].analysis.substring(0, 150) + '...');
|
|
196
|
+
|
|
197
|
+
console.log('\n✨ FINAL EDITED CONTENT:');
|
|
198
|
+
console.log(result['edit-content'].finalContent.substring(0, 200) + '...');
|
|
199
|
+
|
|
200
|
+
console.log('\n✅ Workflow completed successfully!');
|
|
201
|
+
console.log('═'.repeat(60));
|
|
202
|
+
console.log();
|
|
203
|
+
|
|
204
|
+
} catch (error) {
|
|
205
|
+
console.error(`❌ Error processing topic ${i + 1}:`, error.message);
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
// ============================================================================
|
|
210
|
+
// 4. Demonstrate Error Handling
|
|
211
|
+
// ============================================================================
|
|
212
|
+
|
|
213
|
+
console.log('🧪 Testing error handling...');
|
|
214
|
+
|
|
215
|
+
try {
|
|
216
|
+
// Note: the inputMapper supplies default topic/audience values, so an empty
// context may still succeed; pass invalid input (e.g. null) to force a failure
|
|
217
|
+
await contentWorkflow.run({});
|
|
218
|
+
} catch (error) {
|
|
219
|
+
console.log('✅ Error handling works:', error.message);
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
console.log('\n🎉 AgentStep example completed successfully!');
|
|
223
|
+
console.log('\n💡 Key Features Demonstrated:');
|
|
224
|
+
console.log(' • Multiple specialized AI agents in one workflow');
|
|
225
|
+
console.log(' • Custom input/output mapping for each agent');
|
|
226
|
+
console.log(' • Sequential processing with context passing');
|
|
227
|
+
console.log(' • Real-world content creation pipeline');
|
|
228
|
+
console.log(' • Error handling and validation');
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
// Run the example
|
|
232
|
+
runAgentStepExample().catch(console.error);
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
# Agent Usage Guide
|
|
2
|
+
|
|
3
|
+
The Agent class provides AI interactions with tool support and conversation memory.
|
|
4
|
+
|
|
5
|
+
## Table of Contents
|
|
6
|
+
- [Basic Usage](#basic-agent-usage)
|
|
7
|
+
- [Tool Definition](#tool-definition)
|
|
8
|
+
- [Memory Integration](#memory-integration)
|
|
9
|
+
- [Advanced Configuration](#advanced-agent-configuration)
|
|
10
|
+
|
|
11
|
+
## Basic Agent Usage
|
|
12
|
+
|
|
13
|
+
```javascript
|
|
14
|
+
const { Agent } = require('llmjs-chain');
|
|
15
|
+
|
|
16
|
+
// Create a simple agent
|
|
17
|
+
const agent = new Agent({
|
|
18
|
+
instruction: 'You are a helpful AI assistant.',
|
|
19
|
+
model: 'openai/gpt-4' // Optional - auto-detected if not provided
|
|
20
|
+
});
|
|
21
|
+
|
|
22
|
+
// Generate a response
|
|
23
|
+
async function chat() {
|
|
24
|
+
const response = await agent.generate('Hello, how are you?');
|
|
25
|
+
console.log(response);
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
chat();
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## Tool Definition
|
|
32
|
+
|
|
33
|
+
Agents can use tools to perform actions. Tools can be defined in a simplified format:
|
|
34
|
+
|
|
35
|
+
```javascript
|
|
36
|
+
const agent = new Agent({
|
|
37
|
+
instruction: 'You are a helpful assistant with access to tools.',
|
|
38
|
+
tools: [
|
|
39
|
+
{
|
|
40
|
+
name: 'calculate',
|
|
41
|
+
description: 'Perform mathematical calculations',
|
|
42
|
+
parameters: {
|
|
43
|
+
expression: { type: 'string', description: 'Math expression to evaluate', required: true }
|
|
44
|
+
},
|
|
45
|
+
execute: async ({ expression }) => {
|
|
46
|
+
const result = eval(expression);
|
|
47
|
+
return JSON.stringify({ result, expression });
|
|
48
|
+
}
|
|
49
|
+
},
|
|
50
|
+
{
|
|
51
|
+
name: 'get_weather',
|
|
52
|
+
description: 'Get current weather for a location',
|
|
53
|
+
parameters: {
|
|
54
|
+
location: { type: 'string', description: 'City name', required: true }
|
|
55
|
+
},
|
|
56
|
+
execute: async ({ location }) => {
|
|
57
|
+
// Implement weather API call
|
|
58
|
+
return JSON.stringify({ location, temperature: 22, condition: 'sunny' });
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
]
|
|
62
|
+
});
|
|
63
|
+
|
|
64
|
+
// The agent will automatically use tools when needed
|
|
65
|
+
agent.generate('What is 15 + 27?').then(console.log);
|
|
66
|
+
agent.generate('What is the weather in Tokyo?').then(console.log);
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## Memory Integration
|
|
70
|
+
|
|
71
|
+
Agents can maintain conversation history using the InMemory class:
|
|
72
|
+
|
|
73
|
+
```javascript
|
|
74
|
+
const { Agent, InMemory } = require('llmjs-chain');
|
|
75
|
+
|
|
76
|
+
const memory = new InMemory();
|
|
77
|
+
const agent = new Agent({
|
|
78
|
+
instruction: 'You are a helpful assistant with memory.',
|
|
79
|
+
memory: memory
|
|
80
|
+
});
|
|
81
|
+
|
|
82
|
+
// Start a conversation session
|
|
83
|
+
const response1 = await agent.generate({
|
|
84
|
+
userPrompt: 'My name is Alice.',
|
|
85
|
+
memory: {
|
|
86
|
+
resourceId: 'user_alice',
|
|
87
|
+
threadId: 'conversation_1',
|
|
88
|
+
session: true // Include session history
|
|
89
|
+
}
|
|
90
|
+
});
|
|
91
|
+
|
|
92
|
+
const response2 = await agent.generate({
|
|
93
|
+
userPrompt: 'What is my name?',
|
|
94
|
+
memory: {
|
|
95
|
+
resourceId: 'user_alice',
|
|
96
|
+
threadId: 'conversation_1',
|
|
97
|
+
session: true
|
|
98
|
+
}
|
|
99
|
+
});
|
|
100
|
+
// Agent will remember your name from the previous message
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
## Advanced Agent Configuration
|
|
104
|
+
|
|
105
|
+
```javascript
|
|
106
|
+
const agent = new Agent({
|
|
107
|
+
model: 'ollama/llama2', // Specific model
|
|
108
|
+
apikey: 'your-api-key', // Override auto-detected API key
|
|
109
|
+
instruction: `You are an expert software developer.
|
|
110
|
+
Always provide detailed explanations and code examples.`,
|
|
111
|
+
tools: [/* tool definitions */],
|
|
112
|
+
memory: new InMemory()
|
|
113
|
+
});
|
|
114
|
+
|
|
115
|
+
// Advanced generation with memory options
|
|
116
|
+
const response = await agent.generate({
|
|
117
|
+
userPrompt: 'Help me debug this JavaScript code...',
|
|
118
|
+
apikey: 'override-key', // Override API key for this call
|
|
119
|
+
memory: {
|
|
120
|
+
resourceId: 'debug_session',
|
|
121
|
+
threadId: 'js_debug_001',
|
|
122
|
+
session: true, // Include conversation history
|
|
123
|
+
relevance: true // Also search for relevant past conversations
|
|
124
|
+
}
|
|
125
|
+
});
|
|
126
|
+
```
|