llmjs2 1.0.0 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +39 -450
  2. package/grapes.jpg +0 -0
  3. package/index.d.ts +43 -0
  4. package/index.js +465 -0
  5. package/package.json +7 -47
  6. package/spec.txt +73 -0
  7. package/test-generate-tools-suite.js +100 -0
  8. package/test-generate-tools.js +57 -0
  9. package/test-generate.js +31 -0
  10. package/test.js +33 -0
  11. package/LICENSE +0 -21
  12. package/dist/agent.d.ts +0 -80
  13. package/dist/agent.d.ts.map +0 -1
  14. package/dist/agent.js +0 -189
  15. package/dist/agent.js.map +0 -1
  16. package/dist/index.d.ts +0 -74
  17. package/dist/index.d.ts.map +0 -1
  18. package/dist/index.js +0 -191
  19. package/dist/index.js.map +0 -1
  20. package/dist/providers/base.d.ts +0 -58
  21. package/dist/providers/base.d.ts.map +0 -1
  22. package/dist/providers/base.js +0 -149
  23. package/dist/providers/base.js.map +0 -1
  24. package/dist/providers/index.d.ts +0 -8
  25. package/dist/providers/index.d.ts.map +0 -1
  26. package/dist/providers/index.js +0 -7
  27. package/dist/providers/index.js.map +0 -1
  28. package/dist/providers/ollama.d.ts +0 -42
  29. package/dist/providers/ollama.d.ts.map +0 -1
  30. package/dist/providers/ollama.js +0 -260
  31. package/dist/providers/ollama.js.map +0 -1
  32. package/dist/providers/openai.d.ts +0 -38
  33. package/dist/providers/openai.d.ts.map +0 -1
  34. package/dist/providers/openai.js +0 -289
  35. package/dist/providers/openai.js.map +0 -1
  36. package/dist/types.d.ts +0 -182
  37. package/dist/types.d.ts.map +0 -1
  38. package/dist/types.js +0 -6
  39. package/dist/types.js.map +0 -1
  40. package/src/agent.ts +0 -285
  41. package/src/index.ts +0 -268
  42. package/src/providers/base.ts +0 -216
  43. package/src/providers/index.ts +0 -8
  44. package/src/providers/ollama.ts +0 -429
  45. package/src/providers/openai.ts +0 -485
  46. package/src/types.ts +0 -231
package/README.md CHANGED
@@ -1,486 +1,75 @@
1
1
  # llmjs2
2
2
 
3
- LLM abstraction layer for Node.js. Unified API for multiple LLM providers with error handling and automatic retry logic.
4
-
5
- **Supported Providers**: OpenAI, Ollama
3
+ `llmjs2` is a zero-dependency Node.js library that provides a small, robust interface for calling Ollama and Ollama Cloud from Node 18+.
6
4
 
7
5
  ## Features
8
6
 
9
- - 🚀 **Unified API**: Single interface for multiple LLM providers
10
- - **Automatic Retries**: Exponential backoff retry logic with configurable parameters
11
- - ⚙️ **Type-Safe**: Full TypeScript support with comprehensive type definitions
12
- - 🛡️ **Robust Error Handling**: Custom error types with retryability information
13
- - 🔍 **Debugging**: Built-in logging and debug mode for troubleshooting
14
- - 📦 **Zero Dependencies**: Pure Node.js with no external dependencies
15
- - ✅ **Production-Ready**: Enterprise-grade error handling and validation
7
+ - Zero runtime dependencies
8
+ - ESM-only
9
+ - OpenAI-compatible `messages` schema
10
+ - Automatic provider routing via `provider/model-name`
11
+ - Default fallback to `https://api.ollama.com`
12
+ - Clear connection and model errors
16
13
 
17
14
  ## Installation
18
15
 
16
+ No dependencies are required. Install or link the package as usual:
17
+
19
18
  ```bash
20
19
  npm install llmjs2
21
20
  ```
22
21
 
23
- ## Quick Start
24
-
25
- ### Basic Completion
22
+ ## Usage
26
23
 
27
- ```javascript
24
+ ```js
28
25
  import { completion } from 'llmjs2';
29
26
 
30
- const result = await completion({
31
- model: 'openai/gpt-4',
32
- apiKey: 'sk-...', // Or use OPENAI_API_KEY env var
33
- messages: [
34
- { role: 'system', content: 'You are a helpful assistant.' },
35
- { role: 'user', content: 'What is TypeScript?' }
36
- ]
37
- });
38
-
39
- console.log(result.content);
27
+ const response = await completion('ollama/llama3', 'Write a short poem about Node.js.');
28
+ console.log(response);
40
29
  ```
41
30
 
42
- ### Using Ollama Locally
31
+ Or with a full options object:
43
32
 
44
- ```javascript
33
+ ```js
45
34
  import { completion } from 'llmjs2';
46
35
 
47
- const result = await completion({
48
- model: 'ollama/mistral',
49
- baseUrl: 'http://localhost:11434', // Default Ollama URL
36
+ const response = await completion({
37
+ model: 'ollama/llama3',
50
38
  messages: [
51
- { role: 'user', content: 'Explain quantum computing' }
52
- ]
53
- });
54
-
55
- console.log(result.content);
56
- ```
57
-
58
- ### Using Agent for Stateful Conversations
59
-
60
- ```javascript
61
- import { Agent } from 'llmjs2';
62
-
63
- const agent = new Agent({
64
- model: 'openai/gpt-4',
65
- apiKey: 'sk-...',
66
- instruction: 'You are a helpful assistant that explains technical concepts.',
67
- tools: [], // Optional function calling
68
- maxTokens: 500,
69
- temperature: 0.7
70
- });
71
-
72
- // Generate response (maintains conversation history)
73
- const result = await agent.generate({
74
- userPrompt: 'What is TypeScript?',
75
- images: [],
76
- references: [],
77
- context: { role: 'student', level: 'beginner' }
78
- });
79
-
80
- console.log(result.response);
81
-
82
- // Continue conversation (history automatically maintained)
83
- const followUp = await agent.generate({
84
- userPrompt: 'Can you give me an example?'
85
- });
86
-
87
- console.log(followUp.response);
88
-
89
- // Stream the response
90
- const stream = agent.generateStream({
91
- userPrompt: 'Explain decorators in TypeScript'
92
- });
93
-
94
- for await (const chunk of stream) {
95
- process.stdout.write(chunk.delta);
96
- }
97
-
98
- // Manage conversation history
99
- console.log(agent.getHistory()); // Get all messages
100
- agent.clearHistory(); // Clear history (keeps system instruction)
101
- agent.addMessage('system', 'New instruction');
102
- ```
103
-
104
- ## API Reference
105
-
106
- ### `completion(request: CompletionRequest): Promise<CompletionResponse>`
107
-
108
- Create a completion request. Supports both OpenAI and Ollama.
109
-
110
- **Parameters:**
111
-
112
- - `model` (string, required): Model identifier
113
- - OpenAI: `openai/gpt-4`, `openai/gpt-3.5-turbo`, etc.
114
- - Ollama: `ollama/mistral`, `ollama/neural-chat`, etc.
115
-
116
- - `messages` (Message[], required): Array of messages with `role` and `content`
117
-
118
- - `apiKey` (string, optional): API key for OpenAI (required for OpenAI models)
119
-
120
- - `baseUrl` (string, optional): Custom API endpoint (mainly for Ollama)
121
-
122
- - `maxTokens` (number, optional): Maximum tokens to generate
123
-
124
- - `temperature` (number, optional): Sampling temperature (0-2). Higher = more random
125
-
126
- - `topP` (number, optional): Nucleus sampling parameter (0-1)
127
-
128
- - `topK` (number, optional): Top-k sampling (Ollama)
129
-
130
- - `frequencyPenalty` (number, optional): Frequency penalty (-2 to 2)
131
-
132
- - `presencePenalty` (number, optional): Presence penalty (-2 to 2)
133
-
134
- - `stop` (string[], optional): Stop sequences
135
-
136
- - `timeout` (number, optional): Request timeout in milliseconds
137
-
138
- - `retry` (object, optional): Retry configuration
139
- - `maxRetries` (number): Maximum retry attempts
140
- - `backoffMultiplier` (number): Exponential backoff multiplier
141
- - `initialDelayMs` (number): Initial retry delay
142
-
143
- **Returns:** `CompletionResponse`
144
-
145
- ```typescript
146
- {
147
- content: string; // Generated text
148
- model: string; // Model used
149
- stopReason?: string; // Stop reason
150
- usage?: { // Token usage (if available)
151
- promptTokens?: number;
152
- completionTokens?: number;
153
- totalTokens?: number;
154
- };
155
- raw?: unknown; // Raw provider response
156
- toolCalls?: Array<{ // Function calls (if any)
157
- id?: string;
158
- name: string;
159
- arguments: Record<string, unknown>;
160
- }>;
161
- }
162
- ```
163
-
164
- ### `configure(options: CompletionOptions): void`
165
-
166
- Configure global settings for all completions.
167
-
168
- ```javascript
169
- import { configure } from 'llmjs2';
170
-
171
- configure({
172
- debug: true, // Enable debug logging
173
- globalTimeout: 60000, // 60 second default timeout
174
- globalRetry: {
175
- maxRetries: 5,
176
- backoffMultiplier: 2,
177
- initialDelayMs: 1000
178
- },
179
- logger: (level, message, data) => {
180
- console.log(`[${level}] ${message}`, data);
181
- }
182
- });
183
- ```
184
-
185
- ### `validateProvider(model: string, apiKey?: string, baseUrl?: string): Promise<void>`
186
-
187
- Validate that a provider is configured correctly and accessible.
188
-
189
- ```javascript
190
- import { validateProvider } from 'llmjs2';
191
-
192
- try {
193
- await validateProvider('openai/gpt-4', 'sk-...');
194
- console.log('OpenAI provider is valid');
195
- } catch (error) {
196
- console.error('Provider validation failed:', error.message);
197
- }
198
- ```
199
-
200
- ## Agent - Stateful Conversations
201
-
202
- ### `new Agent(config: AgentConfig): Agent`
203
-
204
- Create a stateful agent for managing conversations with automatic history tracking.
205
-
206
- **Configuration:**
207
-
208
- ```typescript
209
- interface AgentConfig {
210
- model: string; // Model identifier (required)
211
- apiKey?: string; // API key (if needed)
212
- baseUrl?: string; // Custom endpoint
213
- instruction?: string; // System instruction/role
214
- tools?: Tool[]; // Available functions
215
- maxTokens?: number; // Max response tokens
216
- temperature?: number; // Sampling temperature
217
- timeout?: number; // Request timeout
218
- }
219
- ```
220
-
221
- **Methods:**
222
-
223
- ### `agent.generate(request: AgentGenerateRequest): Promise<AgentGenerateResponse>`
224
-
225
- Generate a response while maintaining conversation history.
226
-
227
- **Parameters:**
228
- - `userPrompt` (string, required): User message
229
- - `images` (string[], optional): Image data/URLs
230
- - `references` (string[], optional): Reference documents
231
- - `context` (Record, optional): Additional context variables
232
-
233
- **Returns:**
234
- ```typescript
235
- {
236
- response: string; // Generated text
237
- completion: CompletionResponse; // Full provider response
238
- toolCalls?: Array<{ // Function calls if any
239
- name: string;
240
- arguments: Record<string, unknown>;
241
- }>;
242
- }
243
- ```
244
-
245
- **Example:**
246
- ```javascript
247
- const agent = new Agent({
248
- model: 'openai/gpt-4',
249
- apiKey: 'sk-...',
250
- instruction: 'You are a coding expert.'
251
- });
252
-
253
- const result = await agent.generate({
254
- userPrompt: 'How do I use async/await?',
255
- context: { language: 'JavaScript' }
256
- });
257
-
258
- console.log(result.response);
259
- ```
260
-
261
- ### `agent.getHistory(): Message[]`
262
-
263
- Get the current conversation history.
264
-
265
- ```javascript
266
- const messages = agent.getHistory();
267
- console.log(messages);
268
- ```
269
-
270
- ### `agent.clearHistory(): void`
271
-
272
- Clear conversation history (system instruction is preserved).
273
-
274
- ```javascript
275
- agent.clearHistory();
276
- ```
277
-
278
- ### `agent.addMessage(role, content): void`
279
-
280
- Manually add a message to the history.
281
-
282
- ```javascript
283
- agent.addMessage('assistant', 'Custom response');
284
- agent.addMessage('user', 'Follow-up question');
285
- ```
286
-
287
- ### `agent.getConfig(): AgentConfig`
288
-
289
- Get the current agent configuration.
290
-
291
- ```javascript
292
- const config = agent.getConfig();
293
- ```
294
-
295
- ## Error Handling
296
-
297
- All errors are instances of `LLMError` with additional properties:
298
-
299
- ```typescript
300
- interface LLMError extends Error {
301
- code?: string; // Error code
302
- statusCode?: number; // HTTP status code
303
- details?: unknown; // Additional error details
304
- retryable?: boolean; // Whether to retry
305
- }
306
- ```
307
-
308
- **Example:**
309
-
310
- ```javascript
311
- import { completion, LLMError } from 'llmjs2';
312
-
313
- try {
314
- const result = await completion({
315
- model: 'openai/gpt-4',
316
- apiKey: 'sk-...',
317
- messages: [{ role: 'user', content: 'Hello' }]
318
- });
319
- } catch (error) {
320
- if (error instanceof LLMError) {
321
- console.error(`Error [${error.code}]:`, error.message);
322
-
323
- if (error.retryable) {
324
- console.log('Error is retryable, will retry...');
325
- }
326
- }
327
- }
328
- ```
329
-
330
- ## Advanced Usage
331
-
332
- ### Function Calling
333
-
334
- ```javascript
335
- import { completion } from 'llmjs2';
336
-
337
- const result = await completion({
338
- model: 'openai/gpt-4',
339
- apiKey: 'sk-...',
340
- messages: [
341
- { role: 'user', content: 'What is the weather in San Francisco?' }
39
+ { role: 'system', content: 'You are a helpful assistant.' },
40
+ { role: 'user', content: 'Summarize the benefits of zero dependencies.' },
342
41
  ],
343
- tools: [
344
- {
345
- type: 'function',
346
- function: {
347
- name: 'get_weather',
348
- description: 'Get weather for a location',
349
- parameters: {
350
- type: 'object',
351
- properties: {
352
- location: { type: 'string' },
353
- unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
354
- },
355
- required: ['location']
356
- }
357
- }
358
- }
359
- ]
360
42
  });
361
43
 
362
- if (result.toolCalls) {
363
- for (const call of result.toolCalls) {
364
- console.log(`Function: ${call.name}`);
365
- console.log(`Arguments:`, call.arguments);
366
- }
367
- }
44
+ console.log(response);
368
45
  ```
369
46
 
370
- ### Custom Request Headers
371
-
372
- ```javascript
373
- import { completion } from 'llmjs2';
47
+ ## Configuration
374
48
 
375
- const result = await completion({
376
- model: 'openai/gpt-4',
377
- apiKey: 'sk-...',
378
- messages: [{ role: 'user', content: 'Hello' }],
379
- headers: {
380
- 'X-Custom-Header': 'custom-value'
381
- }
382
- });
383
- ```
49
+ The library resolves connection details in this order:
384
50
 
385
- ### Provider-Specific Configuration
51
+ 1. Explicit config via `options.ollamaBaseUrl` / `options.ollamaApiKey`
52
+ 2. Environment variables `OLLAMA_BASE_URL` and `OLLAMA_API_KEY`
53
+ 3. Default fallback `https://api.ollama.com`
386
54
 
387
- For Ollama with custom settings:
55
+ Example:
388
56
 
389
- ```javascript
390
- import { completion } from 'llmjs2';
391
-
392
- const result = await completion({
393
- model: 'ollama/mistral',
394
- baseUrl: 'http://192.168.1.100:11434',
395
- messages: [
396
- { role: 'user', content: 'Explain AI' }
397
- ],
398
- temperature: 0.7,
399
- topK: 40,
400
- topP: 0.9,
401
- maxTokens: 2048
57
+ ```js
58
+ const response = await completion({
59
+ model: 'ollama/llama3',
60
+ prompt: 'What is llmjs2?',
61
+ ollamaBaseUrl: 'https://my-ollama-proxy.local',
62
+ ollamaApiKey: process.env.OLLAMA_API_KEY,
402
63
  });
403
64
  ```
404
65
 
405
- ## Environment Variables
406
-
407
- **OpenAI:**
408
- - `OPENAI_API_KEY`: Your OpenAI API key (alternative to passing `apiKey`)
409
-
410
- **Ollama:**
411
- - Ollama reads from local `http://localhost:11434` by default
412
- - Override with `baseUrl` parameter in request
413
-
414
- ## Type Definitions
415
-
416
- Full TypeScript support with comprehensive types:
417
-
418
- ```typescript
419
- import type {
420
- CompletionRequest,
421
- CompletionResponse,
422
- CompletionChunk,
423
- Message,
424
- MessageRole,
425
- Tool,
426
- ProviderType,
427
- ProviderConfig,
428
- ProviderError,
429
- CompletionOptions,
430
- AgentConfig,
431
- AgentGenerateRequest,
432
- AgentGenerateResponse
433
- } from 'llmjs2';
434
-
435
- import { Agent } from 'llmjs2';
436
- ```
437
-
438
- ## Performance Considerations
439
-
440
- 1. **Batching**: Batch multiple requests to reduce API calls
441
- 2. **Caching**: Implement caching for common queries
442
- 3. **Timeouts**: Configure appropriate timeouts for your use case
443
- 4. **Retry Logic**: Automatic exponential backoff is built-in and configurable
444
-
445
- ## Testing
446
-
447
- ```bash
448
- # Run tests
449
- npm test
450
-
451
- # Run tests in watch mode
452
- npm run test:watch
453
- ```
454
-
455
- ## Building
456
-
457
- ```bash
458
- # Build TypeScript to JavaScript
459
- npm run build
460
-
461
- # Build in watch mode
462
- npm run build:watch
463
-
464
- # Clean build artifacts
465
- npm run clean
466
- ```
467
-
468
- ## License
469
-
470
- MIT - See LICENSE file for details
471
-
472
- ## Support
66
+ ## Error Handling
473
67
 
474
- - GitHub Issues: [github.com/littlellmjs/llmjs2/issues](https://github.com/littlellmjs/llmjs2/issues)
475
- - Documentation: Full API reference above
68
+ - `llmjs2: Could not connect to [URL]. Check your OLLAMA_BASE_URL.`
69
+ - `llmjs2: Model "[name]" not found on provider "[provider]".`
70
+ - `llmjs2: Unsupported provider "[provider]".`
476
71
 
477
- ## Changelog
72
+ ## Notes
478
73
 
479
- ### 1.0.0
480
- - Initial production release
481
- - Full OpenAI and Ollama support
482
- - Streaming API with async generators
483
- - Automatic retry with exponential backoff
484
- - Comprehensive error handling
485
- - TypeScript 5+ support
486
- - Zero external dependencies
74
+ - Node.js 18.0.0 or later is required for native `fetch` support.
75
+ - No hyper-parameters such as `temperature` or `max_tokens` are exposed in the high-level API.
package/grapes.jpg ADDED
Binary file
package/index.d.ts ADDED
@@ -0,0 +1,43 @@
1
+ export interface Llmjs2Message {
2
+ role: 'system' | 'user' | 'assistant' | string;
3
+ content: string;
4
+ }
5
+
6
+ export interface CompletionOptions {
7
+ model: string;
8
+ messages?: Llmjs2Message[];
9
+ prompt?: string;
10
+ ollamaBaseUrl?: string;
11
+ ollamaApiKey?: string;
12
+ }
13
+
14
+ export interface ToolParameter {
15
+ type: string;
16
+ required?: boolean;
17
+ description: string;
18
+ enum?: string[];
19
+ }
20
+
21
+ export interface ToolSchema {
22
+ name: string;
23
+ description: string;
24
+ parameters: Record<string, ToolParameter>;
25
+ handler: (args: Record<string, any>) => string | Promise<string>;
26
+ }
27
+
28
+ export interface GenerateOptions {
29
+ model: string;
30
+ userPrompt?: string;
31
+ messages?: Llmjs2Message[];
32
+ images?: Array<string | Buffer>;
33
+ references?: Array<string | Buffer>;
34
+ tools?: ToolSchema[];
35
+ systemPrompt?: string;
36
+ ollamaBaseUrl?: string;
37
+ ollamaApiKey?: string;
38
+ }
39
+
40
+ export function completion(model: string, prompt: string): Promise<string>;
41
+ export function completion(options: CompletionOptions): Promise<string>;
42
+ export function generate(options: GenerateOptions): Promise<string>;
43
+ export function generate(model: string, userPrompt: string, images?: Array<string | Buffer>, references?: Array<string | Buffer>, tools?: ToolSchema[]): Promise<string>;