llmjs2 0.0.1 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +486 -1
- package/dist/agent.d.ts +80 -0
- package/dist/agent.d.ts.map +1 -0
- package/dist/agent.js +189 -0
- package/dist/agent.js.map +1 -0
- package/dist/index.d.ts +74 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +191 -0
- package/dist/index.js.map +1 -0
- package/dist/providers/base.d.ts +58 -0
- package/dist/providers/base.d.ts.map +1 -0
- package/dist/providers/base.js +149 -0
- package/dist/providers/base.js.map +1 -0
- package/dist/providers/index.d.ts +8 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +7 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/ollama.d.ts +42 -0
- package/dist/providers/ollama.d.ts.map +1 -0
- package/dist/providers/ollama.js +260 -0
- package/dist/providers/ollama.js.map +1 -0
- package/dist/providers/openai.d.ts +38 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +289 -0
- package/dist/providers/openai.js.map +1 -0
- package/dist/types.d.ts +182 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +6 -0
- package/dist/types.js.map +1 -0
- package/package.json +45 -10
- package/src/agent.ts +285 -0
- package/src/index.ts +268 -0
- package/src/providers/base.ts +216 -0
- package/src/providers/index.ts +8 -0
- package/src/providers/ollama.ts +429 -0
- package/src/providers/openai.ts +485 -0
- package/src/types.ts +231 -0
- package/llmjs.js +0 -61
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 littlellmjs
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
CHANGED
|
@@ -1 +1,486 @@
|
|
|
1
|
-
|
|
1
|
+
# llmjs2
|
|
2
|
+
|
|
3
|
+
LLM abstraction layer for Node.js. Unified API for multiple LLM providers with error handling and automatic retry logic.
|
|
4
|
+
|
|
5
|
+
**Supported Providers**: OpenAI, Ollama
|
|
6
|
+
|
|
7
|
+
## Features
|
|
8
|
+
|
|
9
|
+
- 🚀 **Unified API**: Single interface for multiple LLM providers
|
|
10
|
+
- 🔄 **Automatic Retries**: Exponential backoff retry logic with configurable parameters
|
|
11
|
+
- ⚙️ **Type-Safe**: Full TypeScript support with comprehensive type definitions
|
|
12
|
+
- 🛡️ **Robust Error Handling**: Custom error types with retryability information
|
|
13
|
+
- 🔍 **Debugging**: Built-in logging and debug mode for troubleshooting
|
|
14
|
+
- 📦 **Zero Dependencies**: Pure Node.js with no external dependencies
|
|
15
|
+
- ✅ **Production-Ready**: Enterprise-grade error handling and validation
|
|
16
|
+
|
|
17
|
+
## Installation
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
npm install llmjs2
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## Quick Start
|
|
24
|
+
|
|
25
|
+
### Basic Completion
|
|
26
|
+
|
|
27
|
+
```javascript
|
|
28
|
+
import { completion } from 'llmjs2';
|
|
29
|
+
|
|
30
|
+
const result = await completion({
|
|
31
|
+
model: 'openai/gpt-4',
|
|
32
|
+
apiKey: 'sk-...', // Or use OPENAI_API_KEY env var
|
|
33
|
+
messages: [
|
|
34
|
+
{ role: 'system', content: 'You are a helpful assistant.' },
|
|
35
|
+
{ role: 'user', content: 'What is TypeScript?' }
|
|
36
|
+
]
|
|
37
|
+
});
|
|
38
|
+
|
|
39
|
+
console.log(result.content);
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
### Using Ollama Locally
|
|
43
|
+
|
|
44
|
+
```javascript
|
|
45
|
+
import { completion } from 'llmjs2';
|
|
46
|
+
|
|
47
|
+
const result = await completion({
|
|
48
|
+
model: 'ollama/mistral',
|
|
49
|
+
baseUrl: 'http://localhost:11434', // Default Ollama URL
|
|
50
|
+
messages: [
|
|
51
|
+
{ role: 'user', content: 'Explain quantum computing' }
|
|
52
|
+
]
|
|
53
|
+
});
|
|
54
|
+
|
|
55
|
+
console.log(result.content);
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Using Agent for Stateful Conversations
|
|
59
|
+
|
|
60
|
+
```javascript
|
|
61
|
+
import { Agent } from 'llmjs2';
|
|
62
|
+
|
|
63
|
+
const agent = new Agent({
|
|
64
|
+
model: 'openai/gpt-4',
|
|
65
|
+
apiKey: 'sk-...',
|
|
66
|
+
instruction: 'You are a helpful assistant that explains technical concepts.',
|
|
67
|
+
tools: [], // Optional function calling
|
|
68
|
+
maxTokens: 500,
|
|
69
|
+
temperature: 0.7
|
|
70
|
+
});
|
|
71
|
+
|
|
72
|
+
// Generate response (maintains conversation history)
|
|
73
|
+
const result = await agent.generate({
|
|
74
|
+
userPrompt: 'What is TypeScript?',
|
|
75
|
+
images: [],
|
|
76
|
+
references: [],
|
|
77
|
+
context: { role: 'student', level: 'beginner' }
|
|
78
|
+
});
|
|
79
|
+
|
|
80
|
+
console.log(result.response);
|
|
81
|
+
|
|
82
|
+
// Continue conversation (history automatically maintained)
|
|
83
|
+
const followUp = await agent.generate({
|
|
84
|
+
userPrompt: 'Can you give me an example?'
|
|
85
|
+
});
|
|
86
|
+
|
|
87
|
+
console.log(followUp.response);
|
|
88
|
+
|
|
89
|
+
// Stream the response
|
|
90
|
+
const stream = agent.generateStream({
|
|
91
|
+
userPrompt: 'Explain decorators in TypeScript'
|
|
92
|
+
});
|
|
93
|
+
|
|
94
|
+
for await (const chunk of stream) {
|
|
95
|
+
process.stdout.write(chunk.delta);
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
// Manage conversation history
|
|
99
|
+
console.log(agent.getHistory()); // Get all messages
|
|
100
|
+
agent.clearHistory(); // Clear history (keeps system instruction)
|
|
101
|
+
agent.addMessage('system', 'New instruction');
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
## API Reference
|
|
105
|
+
|
|
106
|
+
### `completion(request: CompletionRequest): Promise<CompletionResponse>`
|
|
107
|
+
|
|
108
|
+
Create a completion request. Supports both OpenAI and Ollama.
|
|
109
|
+
|
|
110
|
+
**Parameters:**
|
|
111
|
+
|
|
112
|
+
- `model` (string, required): Model identifier
|
|
113
|
+
- OpenAI: `openai/gpt-4`, `openai/gpt-3.5-turbo`, etc.
|
|
114
|
+
- Ollama: `ollama/mistral`, `ollama/neural-chat`, etc.
|
|
115
|
+
|
|
116
|
+
- `messages` (Message[], required): Array of messages with `role` and `content`
|
|
117
|
+
|
|
118
|
+
- `apiKey` (string, optional): API key for OpenAI (required for OpenAI models)
|
|
119
|
+
|
|
120
|
+
- `baseUrl` (string, optional): Custom API endpoint (mainly for Ollama)
|
|
121
|
+
|
|
122
|
+
- `maxTokens` (number, optional): Maximum tokens to generate
|
|
123
|
+
|
|
124
|
+
- `temperature` (number, optional): Sampling temperature (0-2). Higher = more random
|
|
125
|
+
|
|
126
|
+
- `topP` (number, optional): Nucleus sampling parameter (0-1)
|
|
127
|
+
|
|
128
|
+
- `topK` (number, optional): Top-k sampling (Ollama)
|
|
129
|
+
|
|
130
|
+
- `frequencyPenalty` (number, optional): Frequency penalty (-2 to 2)
|
|
131
|
+
|
|
132
|
+
- `presencePenalty` (number, optional): Presence penalty (-2 to 2)
|
|
133
|
+
|
|
134
|
+
- `stop` (string[], optional): Stop sequences
|
|
135
|
+
|
|
136
|
+
- `timeout` (number, optional): Request timeout in milliseconds
|
|
137
|
+
|
|
138
|
+
- `retry` (object, optional): Retry configuration
|
|
139
|
+
- `maxRetries` (number): Maximum retry attempts
|
|
140
|
+
- `backoffMultiplier` (number): Exponential backoff multiplier
|
|
141
|
+
- `initialDelayMs` (number): Initial retry delay
|
|
142
|
+
|
|
143
|
+
**Returns:** `CompletionResponse`
|
|
144
|
+
|
|
145
|
+
```typescript
|
|
146
|
+
{
|
|
147
|
+
content: string; // Generated text
|
|
148
|
+
model: string; // Model used
|
|
149
|
+
stopReason?: string; // Stop reason
|
|
150
|
+
usage?: { // Token usage (if available)
|
|
151
|
+
promptTokens?: number;
|
|
152
|
+
completionTokens?: number;
|
|
153
|
+
totalTokens?: number;
|
|
154
|
+
};
|
|
155
|
+
raw?: unknown; // Raw provider response
|
|
156
|
+
toolCalls?: Array<{ // Function calls (if any)
|
|
157
|
+
id?: string;
|
|
158
|
+
name: string;
|
|
159
|
+
arguments: Record<string, unknown>;
|
|
160
|
+
}>;
|
|
161
|
+
}
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
### `configure(options: CompletionOptions): void`
|
|
165
|
+
|
|
166
|
+
Configure global settings for all completions.
|
|
167
|
+
|
|
168
|
+
```javascript
|
|
169
|
+
import { configure } from 'llmjs2';
|
|
170
|
+
|
|
171
|
+
configure({
|
|
172
|
+
debug: true, // Enable debug logging
|
|
173
|
+
globalTimeout: 60000, // 60 second default timeout
|
|
174
|
+
globalRetry: {
|
|
175
|
+
maxRetries: 5,
|
|
176
|
+
backoffMultiplier: 2,
|
|
177
|
+
initialDelayMs: 1000
|
|
178
|
+
},
|
|
179
|
+
logger: (level, message, data) => {
|
|
180
|
+
console.log(`[${level}] ${message}`, data);
|
|
181
|
+
}
|
|
182
|
+
});
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
### `validateProvider(model: string, apiKey?: string, baseUrl?: string): Promise<void>`
|
|
186
|
+
|
|
187
|
+
Validate that a provider is configured correctly and accessible.
|
|
188
|
+
|
|
189
|
+
```javascript
|
|
190
|
+
import { validateProvider } from 'llmjs2';
|
|
191
|
+
|
|
192
|
+
try {
|
|
193
|
+
await validateProvider('openai/gpt-4', 'sk-...');
|
|
194
|
+
console.log('OpenAI provider is valid');
|
|
195
|
+
} catch (error) {
|
|
196
|
+
console.error('Provider validation failed:', error.message);
|
|
197
|
+
}
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
## Agent - Stateful Conversations
|
|
201
|
+
|
|
202
|
+
### `new Agent(config: AgentConfig): Agent`
|
|
203
|
+
|
|
204
|
+
Create a stateful agent for managing conversations with automatic history tracking.
|
|
205
|
+
|
|
206
|
+
**Configuration:**
|
|
207
|
+
|
|
208
|
+
```typescript
|
|
209
|
+
interface AgentConfig {
|
|
210
|
+
model: string; // Model identifier (required)
|
|
211
|
+
apiKey?: string; // API key (if needed)
|
|
212
|
+
baseUrl?: string; // Custom endpoint
|
|
213
|
+
instruction?: string; // System instruction/role
|
|
214
|
+
tools?: Tool[]; // Available functions
|
|
215
|
+
maxTokens?: number; // Max response tokens
|
|
216
|
+
temperature?: number; // Sampling temperature
|
|
217
|
+
timeout?: number; // Request timeout
|
|
218
|
+
}
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
**Methods:**
|
|
222
|
+
|
|
223
|
+
### `agent.generate(request: AgentGenerateRequest): Promise<AgentGenerateResponse>`
|
|
224
|
+
|
|
225
|
+
Generate a response while maintaining conversation history.
|
|
226
|
+
|
|
227
|
+
**Parameters:**
|
|
228
|
+
- `userPrompt` (string, required): User message
|
|
229
|
+
- `images` (string[], optional): Image data/URLs
|
|
230
|
+
- `references` (string[], optional): Reference documents
|
|
231
|
+
- `context` (Record&lt;string, unknown&gt;, optional): Additional context variables
|
|
232
|
+
|
|
233
|
+
**Returns:**
|
|
234
|
+
```typescript
|
|
235
|
+
{
|
|
236
|
+
response: string; // Generated text
|
|
237
|
+
completion: CompletionResponse; // Full provider response
|
|
238
|
+
toolCalls?: Array<{ // Function calls if any
|
|
239
|
+
name: string;
|
|
240
|
+
arguments: Record<string, unknown>;
|
|
241
|
+
}>;
|
|
242
|
+
}
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
**Example:**
|
|
246
|
+
```javascript
|
|
247
|
+
const agent = new Agent({
|
|
248
|
+
model: 'openai/gpt-4',
|
|
249
|
+
apiKey: 'sk-...',
|
|
250
|
+
instruction: 'You are a coding expert.'
|
|
251
|
+
});
|
|
252
|
+
|
|
253
|
+
const result = await agent.generate({
|
|
254
|
+
userPrompt: 'How do I use async/await?',
|
|
255
|
+
context: { language: 'JavaScript' }
|
|
256
|
+
});
|
|
257
|
+
|
|
258
|
+
console.log(result.response);
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
### `agent.getHistory(): Message[]`
|
|
262
|
+
|
|
263
|
+
Get the current conversation history.
|
|
264
|
+
|
|
265
|
+
```javascript
|
|
266
|
+
const messages = agent.getHistory();
|
|
267
|
+
console.log(messages);
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
### `agent.clearHistory(): void`
|
|
271
|
+
|
|
272
|
+
Clear conversation history (system instruction is preserved).
|
|
273
|
+
|
|
274
|
+
```javascript
|
|
275
|
+
agent.clearHistory();
|
|
276
|
+
```
|
|
277
|
+
|
|
278
|
+
### `agent.addMessage(role, content): void`
|
|
279
|
+
|
|
280
|
+
Manually add a message to the history.
|
|
281
|
+
|
|
282
|
+
```javascript
|
|
283
|
+
agent.addMessage('assistant', 'Custom response');
|
|
284
|
+
agent.addMessage('user', 'Follow-up question');
|
|
285
|
+
```
|
|
286
|
+
|
|
287
|
+
### `agent.getConfig(): AgentConfig`
|
|
288
|
+
|
|
289
|
+
Get the current agent configuration.
|
|
290
|
+
|
|
291
|
+
```javascript
|
|
292
|
+
const config = agent.getConfig();
|
|
293
|
+
```
|
|
294
|
+
|
|
295
|
+
## Error Handling
|
|
296
|
+
|
|
297
|
+
All errors are instances of `LLMError` with additional properties:
|
|
298
|
+
|
|
299
|
+
```typescript
|
|
300
|
+
interface LLMError extends Error {
|
|
301
|
+
code?: string; // Error code
|
|
302
|
+
statusCode?: number; // HTTP status code
|
|
303
|
+
details?: unknown; // Additional error details
|
|
304
|
+
retryable?: boolean; // Whether to retry
|
|
305
|
+
}
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
**Example:**
|
|
309
|
+
|
|
310
|
+
```javascript
|
|
311
|
+
import { completion, LLMError } from 'llmjs2';
|
|
312
|
+
|
|
313
|
+
try {
|
|
314
|
+
const result = await completion({
|
|
315
|
+
model: 'openai/gpt-4',
|
|
316
|
+
apiKey: 'sk-...',
|
|
317
|
+
messages: [{ role: 'user', content: 'Hello' }]
|
|
318
|
+
});
|
|
319
|
+
} catch (error) {
|
|
320
|
+
if (error instanceof LLMError) {
|
|
321
|
+
console.error(`Error [${error.code}]:`, error.message);
|
|
322
|
+
|
|
323
|
+
if (error.retryable) {
|
|
324
|
+
console.log('Error is retryable, will retry...');
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
}
|
|
328
|
+
```
|
|
329
|
+
|
|
330
|
+
## Advanced Usage
|
|
331
|
+
|
|
332
|
+
### Function Calling
|
|
333
|
+
|
|
334
|
+
```javascript
|
|
335
|
+
import { completion } from 'llmjs2';
|
|
336
|
+
|
|
337
|
+
const result = await completion({
|
|
338
|
+
model: 'openai/gpt-4',
|
|
339
|
+
apiKey: 'sk-...',
|
|
340
|
+
messages: [
|
|
341
|
+
{ role: 'user', content: 'What is the weather in San Francisco?' }
|
|
342
|
+
],
|
|
343
|
+
tools: [
|
|
344
|
+
{
|
|
345
|
+
type: 'function',
|
|
346
|
+
function: {
|
|
347
|
+
name: 'get_weather',
|
|
348
|
+
description: 'Get weather for a location',
|
|
349
|
+
parameters: {
|
|
350
|
+
type: 'object',
|
|
351
|
+
properties: {
|
|
352
|
+
location: { type: 'string' },
|
|
353
|
+
unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
|
|
354
|
+
},
|
|
355
|
+
required: ['location']
|
|
356
|
+
}
|
|
357
|
+
}
|
|
358
|
+
}
|
|
359
|
+
]
|
|
360
|
+
});
|
|
361
|
+
|
|
362
|
+
if (result.toolCalls) {
|
|
363
|
+
for (const call of result.toolCalls) {
|
|
364
|
+
console.log(`Function: ${call.name}`);
|
|
365
|
+
console.log(`Arguments:`, call.arguments);
|
|
366
|
+
}
|
|
367
|
+
}
|
|
368
|
+
```
|
|
369
|
+
|
|
370
|
+
### Custom Request Headers
|
|
371
|
+
|
|
372
|
+
```javascript
|
|
373
|
+
import { completion } from 'llmjs2';
|
|
374
|
+
|
|
375
|
+
const result = await completion({
|
|
376
|
+
model: 'openai/gpt-4',
|
|
377
|
+
apiKey: 'sk-...',
|
|
378
|
+
messages: [{ role: 'user', content: 'Hello' }],
|
|
379
|
+
headers: {
|
|
380
|
+
'X-Custom-Header': 'custom-value'
|
|
381
|
+
}
|
|
382
|
+
});
|
|
383
|
+
```
|
|
384
|
+
|
|
385
|
+
### Provider-Specific Configuration
|
|
386
|
+
|
|
387
|
+
For Ollama with custom settings:
|
|
388
|
+
|
|
389
|
+
```javascript
|
|
390
|
+
import { completion } from 'llmjs2';
|
|
391
|
+
|
|
392
|
+
const result = await completion({
|
|
393
|
+
model: 'ollama/mistral',
|
|
394
|
+
baseUrl: 'http://192.168.1.100:11434',
|
|
395
|
+
messages: [
|
|
396
|
+
{ role: 'user', content: 'Explain AI' }
|
|
397
|
+
],
|
|
398
|
+
temperature: 0.7,
|
|
399
|
+
topK: 40,
|
|
400
|
+
topP: 0.9,
|
|
401
|
+
maxTokens: 2048
|
|
402
|
+
});
|
|
403
|
+
```
|
|
404
|
+
|
|
405
|
+
## Environment Variables
|
|
406
|
+
|
|
407
|
+
**OpenAI:**
|
|
408
|
+
- `OPENAI_API_KEY`: Your OpenAI API key (alternative to passing `apiKey`)
|
|
409
|
+
|
|
410
|
+
**Ollama:**
|
|
411
|
+
- Ollama is served locally at `http://localhost:11434` by default
|
|
412
|
+
- Override with `baseUrl` parameter in request
|
|
413
|
+
|
|
414
|
+
## Type Definitions
|
|
415
|
+
|
|
416
|
+
Full TypeScript support with comprehensive types:
|
|
417
|
+
|
|
418
|
+
```typescript
|
|
419
|
+
import type {
|
|
420
|
+
CompletionRequest,
|
|
421
|
+
CompletionResponse,
|
|
422
|
+
CompletionChunk,
|
|
423
|
+
Message,
|
|
424
|
+
MessageRole,
|
|
425
|
+
Tool,
|
|
426
|
+
ProviderType,
|
|
427
|
+
ProviderConfig,
|
|
428
|
+
ProviderError,
|
|
429
|
+
CompletionOptions,
|
|
430
|
+
AgentConfig,
|
|
431
|
+
AgentGenerateRequest,
|
|
432
|
+
AgentGenerateResponse
|
|
433
|
+
} from 'llmjs2';
|
|
434
|
+
|
|
435
|
+
import { Agent } from 'llmjs2';
|
|
436
|
+
```
|
|
437
|
+
|
|
438
|
+
## Performance Considerations
|
|
439
|
+
|
|
440
|
+
1. **Batching**: Batch multiple requests to reduce API calls
|
|
441
|
+
2. **Caching**: Implement caching for common queries
|
|
442
|
+
3. **Timeouts**: Configure appropriate timeouts for your use case
|
|
443
|
+
4. **Retry Logic**: Automatic exponential backoff is built-in and configurable
|
|
444
|
+
|
|
445
|
+
## Testing
|
|
446
|
+
|
|
447
|
+
```bash
|
|
448
|
+
# Run tests
|
|
449
|
+
npm test
|
|
450
|
+
|
|
451
|
+
# Run tests in watch mode
|
|
452
|
+
npm run test:watch
|
|
453
|
+
```
|
|
454
|
+
|
|
455
|
+
## Building
|
|
456
|
+
|
|
457
|
+
```bash
|
|
458
|
+
# Build TypeScript to JavaScript
|
|
459
|
+
npm run build
|
|
460
|
+
|
|
461
|
+
# Build in watch mode
|
|
462
|
+
npm run build:watch
|
|
463
|
+
|
|
464
|
+
# Clean build artifacts
|
|
465
|
+
npm run clean
|
|
466
|
+
```
|
|
467
|
+
|
|
468
|
+
## License
|
|
469
|
+
|
|
470
|
+
MIT - See LICENSE file for details
|
|
471
|
+
|
|
472
|
+
## Support
|
|
473
|
+
|
|
474
|
+
- GitHub Issues: [github.com/littlellmjs/llmjs2/issues](https://github.com/littlellmjs/llmjs2/issues)
|
|
475
|
+
- Documentation: Full API reference above
|
|
476
|
+
|
|
477
|
+
## Changelog
|
|
478
|
+
|
|
479
|
+
### 1.0.0
|
|
480
|
+
- Initial production release
|
|
481
|
+
- Full OpenAI and Ollama support
|
|
482
|
+
- Streaming API with async generators
|
|
483
|
+
- Automatic retry with exponential backoff
|
|
484
|
+
- Comprehensive error handling
|
|
485
|
+
- TypeScript 5+ support
|
|
486
|
+
- Zero external dependencies
|
package/dist/agent.d.ts
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent - Stateful conversation manager with tool support
|
|
3
|
+
*/
|
|
4
|
+
import { CompletionResponse, CompletionChunk, Message, Tool } from './index.js';
|
|
5
|
+
/**
|
|
6
|
+
* Agent configuration
|
|
7
|
+
*/
|
|
8
|
+
export interface AgentConfig {
|
|
9
|
+
/** Model identifier (e.g., 'ollama/qwen3.5:397b-cloud') */
|
|
10
|
+
model: string;
|
|
11
|
+
/** API key for the provider (optional, reads from OLLAMA_CLOUD_API_KEY env by default) */
|
|
12
|
+
apiKey?: string;
|
|
13
|
+
/** Base URL for the API (optional, defaults to https://ollama.com) */
|
|
14
|
+
baseUrl?: string;
|
|
15
|
+
/** System instruction for the agent */
|
|
16
|
+
instruction?: string;
|
|
17
|
+
/** Available tools/functions */
|
|
18
|
+
tools?: Tool[];
|
|
19
|
+
/** Tool executor function - receives tool name and arguments, returns result string */
|
|
20
|
+
toolExecutor?: (toolName: string, args: Record<string, unknown>) => string;
|
|
21
|
+
}
|
|
22
|
+
/**
|
|
23
|
+
* Agent generation request
|
|
24
|
+
*/
|
|
25
|
+
export interface AgentGenerateRequest {
|
|
26
|
+
/** User prompt/message */
|
|
27
|
+
userPrompt: string;
|
|
28
|
+
/** Optional images (base64 or URLs) */
|
|
29
|
+
images?: string[];
|
|
30
|
+
/** Optional reference documents or context */
|
|
31
|
+
references?: string[];
|
|
32
|
+
/** Additional context variables */
|
|
33
|
+
context?: Record<string, unknown>;
|
|
34
|
+
}
|
|
35
|
+
/**
|
|
36
|
+
* Agent generation response
|
|
37
|
+
*/
|
|
38
|
+
export interface AgentGenerateResponse {
|
|
39
|
+
/** Generated response */
|
|
40
|
+
response: string;
|
|
41
|
+
/** Full completion response from provider */
|
|
42
|
+
completion: CompletionResponse;
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Stateful agent for conversations and tool use
|
|
46
|
+
*/
|
|
47
|
+
export declare class Agent {
|
|
48
|
+
private config;
|
|
49
|
+
private conversationHistory;
|
|
50
|
+
constructor(config: AgentConfig);
|
|
51
|
+
/**
|
|
52
|
+
* Generate a response for a user message
|
|
53
|
+
*/
|
|
54
|
+
generate(request: AgentGenerateRequest): Promise<AgentGenerateResponse>;
|
|
55
|
+
/**
|
|
56
|
+
* Get completion from the model
|
|
57
|
+
*/
|
|
58
|
+
private getCompletion;
|
|
59
|
+
/**
|
|
60
|
+
* Stream a response for a user message
|
|
61
|
+
*/
|
|
62
|
+
generateStream(request: AgentGenerateRequest): AsyncIterable<CompletionChunk>;
|
|
63
|
+
/**
|
|
64
|
+
* Get conversation history
|
|
65
|
+
*/
|
|
66
|
+
getHistory(): Message[];
|
|
67
|
+
/**
|
|
68
|
+
* Clear conversation history (keeps system instruction if set)
|
|
69
|
+
*/
|
|
70
|
+
clearHistory(): void;
|
|
71
|
+
/**
|
|
72
|
+
* Add a message to history
|
|
73
|
+
*/
|
|
74
|
+
addMessage(role: 'system' | 'user' | 'assistant', content: string): void;
|
|
75
|
+
/**
|
|
76
|
+
* Get the current configuration
|
|
77
|
+
*/
|
|
78
|
+
getConfig(): AgentConfig;
|
|
79
|
+
}
|
|
80
|
+
//# sourceMappingURL=agent.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../src/agent.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,EAIL,kBAAkB,EAClB,eAAe,EACf,OAAO,EACP,IAAI,EAEL,MAAM,YAAY,CAAC;AAEpB;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B,2DAA2D;IAC3D,KAAK,EAAE,MAAM,CAAC;IAEd,0FAA0F;IAC1F,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB,sEAAsE;IACtE,OAAO,CAAC,EAAE,MAAM,CAAC;IAEjB,uCAAuC;IACvC,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB,gCAAgC;IAChC,KAAK,CAAC,EAAE,IAAI,EAAE,CAAC;IAEf,uFAAuF;IACvF,YAAY,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,MAAM,CAAC;CAC5E;AAED;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACnC,0BAA0B;IAC1B,UAAU,EAAE,MAAM,CAAC;IAEnB,uCAAuC;IACvC,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;IAElB,8CAA8C;IAC9C,UAAU,CAAC,EAAE,MAAM,EAAE,CAAC;IAEtB,mCAAmC;IACnC,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACnC;AAED;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC,yBAAyB;IACzB,QAAQ,EAAE,MAAM,CAAC;IAEjB,6CAA6C;IAC7C,UAAU,EAAE,kBAAkB,CAAC;CAChC;AAED;;GAEG;AACH,qBAAa,KAAK;IAChB,OAAO,CAAC,MAAM,CAAc;IAC5B,OAAO,CAAC,mBAAmB,CAAiB;gBAEhC,MAAM,EAAE,WAAW;IAgB/B;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,oBAAoB,GAAG,OAAO,CAAC,qBAAqB,CAAC;IAiF7E;;OAEG;YACW,aAAa;IAY3B;;OAEG;IACI,cAAc,CACnB,OAAO,EAAE,oBAAoB,GAC5B,aAAa,CAAC,eAAe,CAAC;IAoDjC;;OAEG;IACH,UAAU,IAAI,OAAO,EAAE;IAIvB;;OAEG;IACH,YAAY,IAAI,IAAI;IAapB;;OAEG;IACH,UAAU,CAAC,IAAI,EAAE,QAAQ,GAAG,MAAM,GAAG,WAAW,EAAE,OAAO,EAAE,MAAM,GAAG,IAAI;IAOxE;;OAEG;IACH,SAAS,IAAI,WAAW;CAGzB"}
|