llmjs2 1.0.1 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +74 -417
  2. package/grapes.jpg +0 -0
  3. package/index.d.ts +43 -0
  4. package/index.js +465 -0
  5. package/package.json +10 -42
  6. package/spec.txt +73 -0
  7. package/test-generate-tools-suite.js +100 -0
  8. package/test-generate-tools.js +57 -0
  9. package/test-generate.js +31 -0
  10. package/test.js +33 -0
  11. package/LICENSE +0 -21
  12. package/dist/agent.d.ts +0 -80
  13. package/dist/agent.d.ts.map +0 -1
  14. package/dist/agent.js +0 -199
  15. package/dist/agent.js.map +0 -1
  16. package/dist/index.d.ts +0 -74
  17. package/dist/index.d.ts.map +0 -1
  18. package/dist/index.js +0 -191
  19. package/dist/index.js.map +0 -1
  20. package/dist/providers/base.d.ts +0 -58
  21. package/dist/providers/base.d.ts.map +0 -1
  22. package/dist/providers/base.js +0 -149
  23. package/dist/providers/base.js.map +0 -1
  24. package/dist/providers/index.d.ts +0 -8
  25. package/dist/providers/index.d.ts.map +0 -1
  26. package/dist/providers/index.js +0 -7
  27. package/dist/providers/index.js.map +0 -1
  28. package/dist/providers/ollama.d.ts +0 -42
  29. package/dist/providers/ollama.d.ts.map +0 -1
  30. package/dist/providers/ollama.js +0 -260
  31. package/dist/providers/ollama.js.map +0 -1
  32. package/dist/providers/openai.d.ts +0 -38
  33. package/dist/providers/openai.d.ts.map +0 -1
  34. package/dist/providers/openai.js +0 -322
  35. package/dist/providers/openai.js.map +0 -1
  36. package/dist/types.d.ts +0 -191
  37. package/dist/types.d.ts.map +0 -1
  38. package/dist/types.js +0 -6
  39. package/dist/types.js.map +0 -1
  40. package/src/agent.ts +0 -295
  41. package/src/index.ts +0 -268
  42. package/src/providers/base.ts +0 -216
  43. package/src/providers/index.ts +0 -8
  44. package/src/providers/ollama.ts +0 -429
  45. package/src/providers/openai.ts +0 -521
  46. package/src/types.ts +0 -243
package/README.md CHANGED
@@ -1,472 +1,129 @@
  # llmjs2

- LLM abstraction layer for Node.js. Unified API for multiple LLM providers with error handling and automatic retry logic.
-
- **Supported Providers**: Ollama
+ `llmjs2` is a zero-dependency Node.js library that provides a small, robust interface for calling Ollama and Ollama Cloud from Node 18+.

  ## Features

- - 🚀 **Unified API**: Single interface for Ollama provider
- - **Automatic Retries**: Exponential backoff retry logic with configurable parameters
- - ⚙️ **Type-Safe**: Full TypeScript support with comprehensive type definitions
- - 🛡️ **Robust Error Handling**: Custom error types with retryability information
- - 🔍 **Debugging**: Built-in logging and debug mode for troubleshooting
- - 📦 **Zero Dependencies**: Pure Node.js with no external dependencies
- - ✅ **Production-Ready**: Enterprise-grade error handling and validation
+ - Zero runtime dependencies
+ - ESM-only
+ - OpenAI-compatible `messages` schema
+ - Automatic provider routing via `provider/model-name`
+ - Default fallback to `https://api.ollama.com`
+ - Clear connection and model errors

  ## Installation

+ No dependencies are required. Install or link the package as usual:
+
  ```bash
  npm install llmjs2
  ```

- ## Quick Start
+ ## Usage

- ### Basic Completion (Ollama)
-
- ```javascript
+ ```js
  import { completion } from 'llmjs2';

- const result = await completion({
- model: 'ollama/mistral',
- baseUrl: 'http://localhost:11434',
- messages: [
- { role: 'system', content: 'You are a helpful assistant.' },
- { role: 'user', content: 'What is TypeScript?' }
- ]
- });
-
- console.log(result.content);
+ const response = await completion('ollama/llama3', 'Write a short poem about Node.js.');
+ console.log(response);
  ```

- ### Using Ollama Locally
+ Or with a full options object:

- ```javascript
+ ```js
  import { completion } from 'llmjs2';

- const result = await completion({
- model: 'ollama/mistral',
- baseUrl: 'http://localhost:11434', // Default Ollama URL
+ const response = await completion({
+ model: 'ollama/llama3',
  messages: [
- { role: 'user', content: 'Explain quantum computing' }
- ]
- });
-
- console.log(result.content);
- ```
-
- ### Using Agent for Stateful Conversations
-
- ```javascript
- import { Agent } from 'llmjs2';
-
- const agent = new Agent({
- model: 'ollama/qwen3.5:397b-cloud',
- baseUrl: 'https://ollama.com',
- instruction: 'You are a helpful assistant that explains technical concepts.',
- tools: [], // Optional function calling
- });
-
- // Generate response (maintains conversation history)
- const result = await agent.generate({
- userPrompt: 'What is TypeScript?',
- images: [],
- references: [],
- context: { role: 'student', level: 'beginner' }
- });
-
- console.log(result.response);
-
- // Continue conversation (history automatically maintained)
- const followUp = await agent.generate({
- userPrompt: 'Can you give me an example?'
- });
-
- console.log(followUp.response);
-
- // Stream the response
- const stream = agent.generateStream({
- userPrompt: 'Explain decorators in TypeScript'
- });
-
- for await (const chunk of stream) {
- process.stdout.write(chunk.delta);
- }
-
- // Manage conversation history
- console.log(agent.getHistory()); // Get all messages
- agent.clearHistory(); // Clear history (keeps system instruction)
- agent.addMessage('system', 'New instruction');
- ```
-
- ## API Reference
-
- ### `completion(request: CompletionRequest): Promise<CompletionResponse>`
-
- Create a completion request. Supports Ollama.
-
- **Parameters:**
-
- - `model` (string, required): Model identifier
- - Ollama: `ollama/mistral`, `ollama/neural-chat`, `ollama/qwen3.5:397b-cloud`, etc.
-
- - `messages` (Message[], required): Array of messages with `role` and `content`
-
- - `baseUrl` (string, optional): Custom API endpoint (mainly for Ollama)
-
- - `maxTokens` (number, optional): Maximum tokens to generate
-
- - `temperature` (number, optional): Sampling temperature (0-2). Higher = more random
-
- - `topP` (number, optional): Nucleus sampling parameter (0-1)
-
- - `topK` (number, optional): Top-k sampling (Ollama)
-
- - `frequencyPenalty` (number, optional): Frequency penalty (-2 to 2)
-
- - `presencePenalty` (number, optional): Presence penalty (-2 to 2)
-
- - `stop` (string[], optional): Stop sequences
-
- - `timeout` (number, optional): Request timeout in milliseconds
-
- - `retry` (object, optional): Retry configuration
- - `maxRetries` (number): Maximum retry attempts
- - `backoffMultiplier` (number): Exponential backoff multiplier
- - `initialDelayMs` (number): Initial retry delay
-
- **Returns:** `CompletionResponse`
-
- ```typescript
- {
- content: string; // Generated text
- model: string; // Model used
- stopReason?: string; // Stop reason
- usage?: { // Token usage (if available)
- promptTokens?: number;
- completionTokens?: number;
- totalTokens?: number;
- };
- raw?: unknown; // Raw provider response
- toolCalls?: Array<{ // Function calls (if any)
- id?: string;
- name: string;
- arguments: Record<string, unknown>;
- }>;
- }
- ```
-
- ### `configure(options: CompletionOptions): void`
-
- Configure global settings for all completions.
-
- ```javascript
- import { configure } from 'llmjs2';
-
- configure({
- debug: true, // Enable debug logging
- globalTimeout: 60000, // 60 second default timeout
- globalRetry: {
- maxRetries: 5,
- backoffMultiplier: 2,
- initialDelayMs: 1000
- },
- logger: (level, message, data) => {
- console.log(`[${level}] ${message}`, data);
- }
+ { role: 'system', content: 'You are a helpful assistant.' },
+ { role: 'user', content: 'Summarize the benefits of zero dependencies.' },
+ ],
  });
- ```
-
- ### `validateProvider(model: string, apiKey?: string, baseUrl?: string): Promise<void>`
-
- Validate that a provider is configured correctly and accessible.
-
- ```javascript
- import { validateProvider } from 'llmjs2';
-
- try {
- await validateProvider('ollama/mistral', process.env.OLLAMA_CLOUD_API_KEY);
- console.log('Ollama provider is valid');
- } catch (error) {
- console.error('Provider validation failed:', error.message);
- }
- ```
-
- ## Agent - Stateful Conversations
-
- ### `new Agent(config: AgentConfig): Agent`

- Create a stateful agent for managing conversations with automatic history tracking.
-
- **Configuration:**
-
- ```typescript
- interface AgentConfig {
- model: string; // Model identifier (required)
- apiKey?: string; // API key (if needed)
- baseUrl?: string; // Custom endpoint
- instruction?: string; // System instruction/role
- tools?: Tool[]; // Available functions
- maxTokens?: number; // Max response tokens
- temperature?: number; // Sampling temperature
- timeout?: number; // Request timeout
- }
+ console.log(response);
  ```

- **Methods:**
-
- ### `agent.generate(request: AgentGenerateRequest): Promise<AgentGenerateResponse>`
-
- Generate a response while maintaining conversation history.
+ ## generate()

- **Parameters:**
- - `userPrompt` (string, required): User message
- - `images` (string[], optional): Image data/URLs
- - `references` (string[], optional): Reference documents
- - `context` (Record, optional): Additional context variables
+ The library also exposes `generate()` for prompt-based flows with optional images, references, system instructions, and tools.

- **Returns:**
- ```typescript
- {
- response: string; // Generated text
- completion: CompletionResponse; // Full provider response
- toolCalls?: Array<{ // Function calls if any
- name: string;
- arguments: Record<string, unknown>;
- }>;
- }
- ```
-
- **Example:**
- ```javascript
- const agent = new Agent({
- model: 'ollama/qwen3.5:397b-cloud',
- baseUrl: 'https://ollama.com',
- instruction: 'You are a coding expert.'
- });
+ ```js
+ import { generate } from 'llmjs2';

- const result = await agent.generate({
- userPrompt: 'How do I use async/await?',
- context: { language: 'JavaScript' }
+ const response = await generate({
+ model: 'ollama/llama3',
+ userPrompt: 'Describe this picture.',
+ images: ['https://example.com/image.png'],
+ references: ['Some reference text about the image.'],
+ systemPrompt: 'You are a visual assistant.',
+ tools: [
+ {
+ name: 'get_weather',
+ description: 'Get the current weather for a location',
+ parameters: {
+ location: { type: 'string', required: true, description: 'City and state' },
+ },
+ handler: ({ location }) => `Weather in ${location}: Sunny`,
+ },
+ ],
  });

- console.log(result.response);
- ```
-
- ### `agent.getHistory(): Message[]`
-
- Get the current conversation history.
-
- ```javascript
- const messages = agent.getHistory();
- console.log(messages);
- ```
-
- ### `agent.clearHistory(): void`
-
- Clear conversation history (system instruction is preserved).
-
- ```javascript
- agent.clearHistory();
- ```
-
- ### `agent.addMessage(role, content): void`
-
- Manually add a message to the history.
-
- ```javascript
- agent.addMessage('assistant', 'Custom response');
- agent.addMessage('user', 'Follow-up question');
- ```
-
- ### `agent.getConfig(): AgentConfig`
-
- Get the current agent configuration.
-
- ```javascript
- const config = agent.getConfig();
- ```
-
- ## Error Handling
-
- All errors are instances of `LLMError` with additional properties:
-
- ```typescript
- interface LLMError extends Error {
- code?: string; // Error code
- statusCode?: number; // HTTP status code
- details?: unknown; // Additional error details
- retryable?: boolean; // Whether to retry
- }
- ```
-
- **Example:**
-
- ```javascript
- import { completion, LLMError } from 'llmjs2';
-
- try {
- const result = await completion({
- model: 'ollama/mistral',
- baseUrl: 'http://localhost:11434',
- messages: [{ role: 'user', content: 'Hello' }]
- });
- } catch (error) {
- if (error instanceof LLMError) {
- console.error(`Error [${error.code}]:`, error.message);
-
- if (error.retryable) {
- console.log('Error is retryable, will retry...');
- }
- }
- }
+ console.log(response);
  ```

- ## Advanced Usage
+ You can also pass a message history directly:

- ### Function Calling (Ollama + embedded handlers)
-
- ```javascript
- import { completion } from 'llmjs2';
+ ```js
+ import { generate } from 'llmjs2';

- const result = await completion({
- model: 'ollama/mistral',
- baseUrl: 'http://localhost:11434',
+ const response = await generate({
+ model: 'ollama/llama3',
  messages: [
- { role: 'user', content: 'What is the weather in San Francisco?' }
+ { role: 'system', content: 'You are a helpful assistant.' },
+ { role: 'user', content: 'Use a tool if needed.' },
  ],
  tools: [
  {
  name: 'get_weather',
- description: 'Get weather for a location',
+ description: 'Get the current weather for a location',
  parameters: {
- location: { type: 'string', required: true },
- unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
+ location: { type: 'string', required: true, description: 'City and state' },
  },
- }
- }
- }
- ]
- });
-
- if (result.toolCalls) {
- for (const call of result.toolCalls) {
- console.log(`Function: ${call.name}`);
- console.log(`Arguments:`, call.arguments);
- }
- }
- ```
-
- ### Custom Request Headers
-
- ```javascript
- import { completion } from 'llmjs2';
-
- const result = await completion({
- model: 'ollama/mistral',
- baseUrl: 'http://localhost:11434',
- messages: [{ role: 'user', content: 'Hello' }],
- headers: {
- 'X-Custom-Header': 'custom-value'
- }
- });
- ```
-
- ### Provider-Specific Configuration
-
- For Ollama with custom settings:
- ```javascript
- import { completion } from 'llmjs2';
-
- const result = await completion({
- model: 'ollama/mistral',
- baseUrl: 'http://192.168.1.100:11434',
- messages: [
- { role: 'user', content: 'Explain AI' }
+ handler: ({ location }) => `Weather in ${location}: Sunny`,
+ },
  ],
- temperature: 0.7,
- topK: 40,
- topP: 0.9,
- maxTokens: 2048
  });
- ```

- ## Environment Variables
-
- **Ollama:**
- - Ollama reads from local `http://localhost:11434` by default
- - Override with `baseUrl` parameter in request
-
- ## Type Definitions
-
- Full TypeScript support with comprehensive types:
-
- ```typescript
- import type {
- CompletionRequest,
- CompletionResponse,
- CompletionChunk,
- Message,
- MessageRole,
- Tool,
- ProviderType,
- ProviderConfig,
- ProviderError,
- CompletionOptions,
- AgentConfig,
- AgentGenerateRequest,
- AgentGenerateResponse
- } from 'llmjs2';
-
- import { Agent } from 'llmjs2';
+ console.log(response);
  ```

- ## Performance Considerations
-
- 1. **Batching**: Batch multiple requests to reduce API calls
- 2. **Caching**: Implement caching for common queries
- 3. **Timeouts**: Configure appropriate timeouts for your use case
- 4. **Retry Logic**: Automatic exponential backoff is built-in and configurable
-
- ## Testing
-
- ```bash
- # Run tests
- npm test
-
- # Run tests in watch mode
- npm run test:watch
- ```
+ ## Configuration

- ## Building
+ The library resolves connection details in this order:

- ```bash
- # Build TypeScript to JavaScript
- npm run build
+ 1. Explicit config via `options.ollamaBaseUrl` / `options.ollamaApiKey`
+ 2. Environment variables `OLLAMA_BASE_URL` and `OLLAMA_API_KEY`
+ 3. Default fallback `https://api.ollama.com`

- # Build in watch mode
- npm run build:watch
+ Example:

- # Clean build artifacts
- npm run clean
+ ```js
+ const response = await completion({
+ model: 'ollama/llama3',
+ prompt: 'What is llmjs2?',
+ ollamaBaseUrl: 'https://my-ollama-proxy.local',
+ ollamaApiKey: process.env.OLLAMA_API_KEY,
+ });
  ```

- ## License
-
- MIT - See LICENSE file for details
-
- ## Support
+ ## Error Handling

- - GitHub Issues: [github.com/littlellmjs/llmjs2/issues](https://github.com/littlellmjs/llmjs2/issues)
- - Documentation: Full API reference above
+ - `llmjs2: Could not connect to [URL]. Check your OLLAMA_BASE_URL.`
+ - `llmjs2: Model "[name]" not found on provider "[provider]".`
+ - `llmjs2: Unsupported provider "[provider]".`

- ## Changelog
+ ## Notes

- ### 1.0.0
- - Initial production release
- - Ollama support
- - Streaming API with async generators
- - Automatic retry with exponential backoff
- - Comprehensive error handling
- - TypeScript 5+ support
- - Zero external dependencies
+ - Node.js 18.0.0 or later is required for native `fetch` support.
+ - No hyper-parameters such as `temperature` or `max_tokens` are exposed in the high-level API.
package/grapes.jpg ADDED
Binary file
package/index.d.ts ADDED
@@ -0,0 +1,43 @@
+ export interface Llmjs2Message {
+ role: 'system' | 'user' | 'assistant' | string;
+ content: string;
+ }
+
+ export interface CompletionOptions {
+ model: string;
+ messages?: Llmjs2Message[];
+ prompt?: string;
+ ollamaBaseUrl?: string;
+ ollamaApiKey?: string;
+ }
+
+ export interface ToolParameter {
+ type: string;
+ required?: boolean;
+ description: string;
+ enum?: string[];
+ }
+
+ export interface ToolSchema {
+ name: string;
+ description: string;
+ parameters: Record<string, ToolParameter>;
+ handler: (args: Record<string, any>) => string | Promise<string>;
+ }
+
+ export interface GenerateOptions {
+ model: string;
+ userPrompt?: string;
+ messages?: Llmjs2Message[];
+ images?: Array<string | Buffer>;
+ references?: Array<string | Buffer>;
+ tools?: ToolSchema[];
+ systemPrompt?: string;
+ ollamaBaseUrl?: string;
+ ollamaApiKey?: string;
+ }
+
+ export function completion(model: string, prompt: string): Promise<string>;
+ export function completion(options: CompletionOptions): Promise<string>;
+ export function generate(options: GenerateOptions): Promise<string>;
+ export function generate(model: string, userPrompt: string, images?: Array<string | Buffer>, references?: Array<string | Buffer>, tools?: ToolSchema[]): Promise<string>;
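For orientation, here is a minimal sketch of how the 1.0.6 surface declared in `index.d.ts` above would be called. It is illustrative only, not part of the published package contents: the model name, prompts, and environment variables are placeholders, and it assumes a reachable Ollama endpoint on Node 18+.

```js
// Illustrative sketch against the 1.0.6 declarations above; not from the package.
// Assumes Node 18+ (native fetch, ESM top-level await) and a reachable Ollama endpoint.
import { completion, generate } from 'llmjs2';

// String overload: completion(model, prompt) resolves to a plain string.
const poem = await completion('ollama/llama3', 'Write one line about diffs.');
console.log(poem);

// Options overload with an explicit message history and connection config;
// per the README above, unset values fall back to env vars, then https://api.ollama.com.
const summary = await completion({
  model: 'ollama/llama3',
  messages: [
    { role: 'system', content: 'Answer in one sentence.' },
    { role: 'user', content: 'Name one benefit of zero dependencies.' },
  ],
  ollamaBaseUrl: process.env.OLLAMA_BASE_URL,
  ollamaApiKey: process.env.OLLAMA_API_KEY,
});
console.log(summary);

// generate() accepts tools whose handlers run locally and return strings.
const answer = await generate({
  model: 'ollama/llama3',
  userPrompt: 'What is the weather in Lisbon?',
  tools: [
    {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        location: { type: 'string', required: true, description: 'City name' },
      },
      handler: ({ location }) => `Weather in ${location}: Sunny`,
    },
  ],
});
console.log(answer);
```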