llmjs2 1.0.1 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/README.md +39 -436
  2. package/grapes.jpg +0 -0
  3. package/index.d.ts +43 -0
  4. package/index.js +465 -0
  5. package/package.json +7 -47
  6. package/spec.txt +73 -0
  7. package/test-generate-tools-suite.js +100 -0
  8. package/test-generate-tools.js +57 -0
  9. package/test-generate.js +31 -0
  10. package/test.js +33 -0
  11. package/LICENSE +0 -21
  12. package/dist/agent.d.ts +0 -80
  13. package/dist/agent.d.ts.map +0 -1
  14. package/dist/agent.js +0 -199
  15. package/dist/agent.js.map +0 -1
  16. package/dist/index.d.ts +0 -74
  17. package/dist/index.d.ts.map +0 -1
  18. package/dist/index.js +0 -191
  19. package/dist/index.js.map +0 -1
  20. package/dist/providers/base.d.ts +0 -58
  21. package/dist/providers/base.d.ts.map +0 -1
  22. package/dist/providers/base.js +0 -149
  23. package/dist/providers/base.js.map +0 -1
  24. package/dist/providers/index.d.ts +0 -8
  25. package/dist/providers/index.d.ts.map +0 -1
  26. package/dist/providers/index.js +0 -7
  27. package/dist/providers/index.js.map +0 -1
  28. package/dist/providers/ollama.d.ts +0 -42
  29. package/dist/providers/ollama.d.ts.map +0 -1
  30. package/dist/providers/ollama.js +0 -260
  31. package/dist/providers/ollama.js.map +0 -1
  32. package/dist/providers/openai.d.ts +0 -38
  33. package/dist/providers/openai.d.ts.map +0 -1
  34. package/dist/providers/openai.js +0 -322
  35. package/dist/providers/openai.js.map +0 -1
  36. package/dist/types.d.ts +0 -191
  37. package/dist/types.d.ts.map +0 -1
  38. package/dist/types.js +0 -6
  39. package/dist/types.js.map +0 -1
  40. package/src/agent.ts +0 -295
  41. package/src/index.ts +0 -268
  42. package/src/providers/base.ts +0 -216
  43. package/src/providers/index.ts +0 -8
  44. package/src/providers/ollama.ts +0 -429
  45. package/src/providers/openai.ts +0 -521
  46. package/src/types.ts +0 -243
@@ -1,521 +0,0 @@
1
- /**
2
- * OpenAI provider implementation
3
- */
4
-
5
- import https from 'https';
6
- import http from 'http';
7
- import { URL } from 'url';
8
-
9
- import {
10
- CompletionRequest,
11
- CompletionResponse,
12
- CompletionChunk,
13
- ProviderConfig,
14
- } from '../types.js';
15
- import {
16
- BaseProvider,
17
- validateCompletionRequest,
18
- withRetry,
19
- LLMError,
20
- } from './base.js';
21
-
22
/**
 * OpenAI API request format
 *
 * Wire shape for POST /chat/completions — field names are snake_case
 * because this is the literal JSON body sent to the OpenAI REST API.
 */
interface OpenAIRequest {
  model: string;
  messages: Array<{
    role: string;
    content: string;
  }>;
  max_tokens?: number;
  temperature?: number;
  top_p?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
  // Stop sequences: generation halts when any of these strings is produced.
  stop?: string[];
  // Function-calling tool declarations in OpenAI's nested "tools" format.
  tools?: Array<{
    type: 'function';
    function: {
      name: string;
      description?: string;
      // JSON Schema describing the function's arguments.
      parameters?: Record<string, unknown>;
    };
  }>;
  // 'auto' | 'required', or a provider-specific string (e.g. a forced tool name).
  tool_choice?: 'auto' | 'required' | string;
  // When true the API responds with server-sent events instead of one JSON body.
  stream?: boolean;
}
48
-
49
/**
 * OpenAI API response format
 *
 * A single completion choice. Non-streaming responses populate `message`;
 * streaming responses deliver incremental `delta` fragments instead.
 */
interface OpenAIChoice {
  // Present on non-streaming responses.
  message?: {
    content: string | null;
    tool_calls?: Array<{
      id: string;
      type: string;
      function: {
        name: string;
        // JSON-encoded argument object (must be parsed by the caller).
        arguments: string;
      };
    }>;
  };
  // Present on streaming responses: one incremental fragment per SSE chunk.
  delta?: {
    content?: string;
    tool_calls?: Array<{
      // Index identifying which tool call this fragment belongs to.
      index: number;
      id?: string;
      type?: string;
      function?: {
        name?: string;
        // Partial JSON fragment; concatenate across chunks before parsing.
        arguments?: string;
      };
    }>;
  };
  // e.g. 'stop', 'length', 'tool_calls' — set on the final chunk/choice.
  finish_reason?: string;
}
78
-
79
/**
 * Top-level response envelope shared by streaming and non-streaming
 * chat completions (streaming chunks reuse the same outer shape).
 */
interface OpenAIStreamResponse {
  id: string;
  object: string;
  // Unix timestamp (seconds) of creation.
  created: number;
  model: string;
  choices: OpenAIChoice[];
  // Token accounting; typically absent on individual stream chunks.
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
91
-
92
- /**
93
- * OpenAI Provider implementation
94
- */
95
- export class OpenAIProvider extends BaseProvider {
96
- private apiKey: string;
97
- private baseUrl: string = 'https://api.openai.com/v1';
98
-
99
- constructor(config: ProviderConfig) {
100
- super(config);
101
-
102
- if (!config.apiKey) {
103
- throw new Error('OpenAI API key is required');
104
- }
105
-
106
- this.apiKey = config.apiKey;
107
-
108
- if (config.baseUrl) {
109
- this.baseUrl = config.baseUrl;
110
- }
111
- }
112
-
113
- /**
114
- * Parse model string (e.g., 'openai/gpt-4' -> 'gpt-4')
115
- */
116
- parseModel(model: string): string {
117
- if (model.startsWith('openai/')) {
118
- return model.slice(7); // Remove 'openai/' prefix
119
- }
120
- return model;
121
- }
122
-
123
- /**
124
- * Validate configuration
125
- */
126
- async validate(): Promise<void> {
127
- try {
128
- // Make a simple request to verify API key
129
- await this.makeRequest('/models', 'GET');
130
- this.logger('info', 'OpenAI API validation successful');
131
- } catch (error) {
132
- throw new LLMError(
133
- `OpenAI validation failed: ${error instanceof Error ? error.message : String(error)}`,
134
- 'VALIDATION_FAILED'
135
- );
136
- }
137
- }
138
-
139
- /**
140
- * Create a completion
141
- */
142
- async complete(request: CompletionRequest): Promise<CompletionResponse> {
143
- validateCompletionRequest(request);
144
-
145
- const model = this.parseModel(request.model);
146
-
147
- return withRetry(
148
- async () => {
149
- const openaiRequest: OpenAIRequest = {
150
- model,
151
- messages: request.messages.map((msg) => ({
152
- role: msg.role,
153
- content: msg.content,
154
- })),
155
- };
156
-
157
- // Add optional parameters
158
- if (request.maxTokens) openaiRequest.max_tokens = request.maxTokens;
159
- if (request.temperature !== undefined)
160
- openaiRequest.temperature = request.temperature;
161
- if (request.topP !== undefined) openaiRequest.top_p = request.topP;
162
- if (request.frequencyPenalty !== undefined)
163
- openaiRequest.frequency_penalty = request.frequencyPenalty;
164
- if (request.presencePenalty !== undefined)
165
- openaiRequest.presence_penalty = request.presencePenalty;
166
- if (request.stop) openaiRequest.stop = request.stop;
167
- if (request.tools) {
168
- openaiRequest.tools = request.tools.map((tool) => {
169
- const finalName = 'function' in tool && tool.function?.name ? tool.function.name : (tool as any).name;
170
- const finalDescription = 'function' in tool && tool.function?.description ? tool.function.description : tool.description;
171
- const finalParameters = 'function' in tool && tool.function?.parameters ? tool.function.parameters : tool.parameters;
172
-
173
- return {
174
- type: 'function',
175
- function: {
176
- name: finalName,
177
- description: finalDescription,
178
- parameters: finalParameters,
179
- },
180
- };
181
- });
182
- }
183
- if (request.toolChoice) openaiRequest.tool_choice = request.toolChoice;
184
-
185
- this.logger('debug', 'OpenAI completion request', {
186
- model,
187
- messageCount: request.messages.length,
188
- });
189
-
190
- const response = await this.makeRequest<OpenAIStreamResponse>(
191
- '/chat/completions',
192
- 'POST',
193
- openaiRequest,
194
- request
195
- );
196
-
197
- if (!response.choices || response.choices.length === 0) {
198
- throw new LLMError('No choices in OpenAI response', 'NO_CHOICES');
199
- }
200
-
201
- const choice = response.choices[0];
202
- const message = choice.message;
203
-
204
- if (!message) {
205
- throw new LLMError('No message in OpenAI response choice', 'NO_MESSAGE');
206
- }
207
-
208
- const result: CompletionResponse = {
209
- content: message.content || '',
210
- model: response.model,
211
- stopReason: choice.finish_reason,
212
- raw: response,
213
- };
214
-
215
- if (response.usage) {
216
- result.usage = {
217
- promptTokens: response.usage.prompt_tokens,
218
- completionTokens: response.usage.completion_tokens,
219
- totalTokens: response.usage.total_tokens,
220
- };
221
- }
222
-
223
- if (message.tool_calls && message.tool_calls.length > 0) {
224
- result.toolCalls = message.tool_calls.map((call) => ({
225
- id: call.id,
226
- name: call.function.name,
227
- arguments: JSON.parse(call.function.arguments),
228
- }));
229
- }
230
-
231
- this.logger('debug', 'OpenAI completion response', {
232
- model: response.model,
233
- tokens: response.usage?.total_tokens,
234
- });
235
-
236
- return result;
237
- },
238
- this.getRetryConfig(request)
239
- );
240
- }
241
-
242
- /**
243
- * Stream completion
244
- */
245
- async *completeStream(
246
- request: CompletionRequest
247
- ): AsyncIterable<CompletionChunk> {
248
- validateCompletionRequest(request);
249
-
250
- const model = this.parseModel(request.model);
251
-
252
- const openaiRequest: OpenAIRequest = {
253
- model,
254
- stream: true,
255
- messages: request.messages.map((msg) => ({
256
- role: msg.role,
257
- content: msg.content,
258
- })),
259
- };
260
-
261
- if (request.maxTokens) openaiRequest.max_tokens = request.maxTokens;
262
- if (request.temperature !== undefined)
263
- openaiRequest.temperature = request.temperature;
264
- if (request.topP !== undefined) openaiRequest.top_p = request.topP;
265
- if (request.frequencyPenalty !== undefined)
266
- openaiRequest.frequency_penalty = request.frequencyPenalty;
267
- if (request.presencePenalty !== undefined)
268
- openaiRequest.presence_penalty = request.presencePenalty;
269
- if (request.stop) openaiRequest.stop = request.stop;
270
- if (request.tools) {
271
- openaiRequest.tools = request.tools.map((tool) => {
272
- if ('function' in tool && tool.function?.name) {
273
- return {
274
- type: 'function',
275
- function: {
276
- name: tool.function.name,
277
- description: tool.function.description,
278
- parameters: tool.function.parameters,
279
- },
280
- };
281
- }
282
- return {
283
- type: 'function',
284
- function: {
285
- name: (tool as any).name,
286
- description: tool.description,
287
- parameters: tool.parameters,
288
- },
289
- };
290
- });
291
- }
292
- if (request.toolChoice) openaiRequest.tool_choice = request.toolChoice;
293
-
294
- this.logger('debug', 'OpenAI stream request', { model });
295
-
296
- const stream = await this.makeStreamRequest(
297
- '/chat/completions',
298
- 'POST',
299
- openaiRequest,
300
- request
301
- );
302
-
303
- for await (const chunk of stream) {
304
- if (chunk.choices && chunk.choices.length > 0) {
305
- const choice = chunk.choices[0];
306
-
307
- if (choice.delta?.content) {
308
- yield {
309
- delta: choice.delta.content,
310
- stopReason: choice.finish_reason,
311
- };
312
- }
313
- }
314
- }
315
- }
316
-
317
- /**
318
- * Make HTTP request to OpenAI API
319
- */
320
- private makeRequest<T>(
321
- path: string,
322
- method: string = 'POST',
323
- body?: unknown,
324
- request?: CompletionRequest
325
- ): Promise<T> {
326
- return new Promise((resolve, reject) => {
327
- const url = new URL(path, this.baseUrl);
328
- const timeout = this.getTimeout(request);
329
-
330
- const requestOptions = {
331
- method,
332
- headers: {
333
- 'Content-Type': 'application/json',
334
- Authorization: `Bearer ${this.apiKey}`,
335
- ...this.getHeaders(request),
336
- },
337
- timeout,
338
- };
339
-
340
- const protocol = this.baseUrl.startsWith('https') ? https : http;
341
-
342
- const req = protocol.request(url, requestOptions, (res) => {
343
- let data = '';
344
-
345
- res.on('data', (chunk) => {
346
- data += chunk;
347
- });
348
-
349
- res.on('end', () => {
350
- if (!res.statusCode || res.statusCode >= 400) {
351
- try {
352
- const errorData = JSON.parse(data);
353
- reject(
354
- new LLMError(
355
- errorData.error?.message || 'OpenAI API error',
356
- errorData.error?.code,
357
- res.statusCode,
358
- errorData,
359
- res.statusCode === 429 ||
360
- res.statusCode === 502 ||
361
- res.statusCode === 503
362
- )
363
- );
364
- } catch {
365
- reject(
366
- new LLMError(`OpenAI API error: ${data}`, 'API_ERROR', res.statusCode, null, false)
367
- );
368
- }
369
- } else {
370
- try {
371
- resolve(JSON.parse(data) as T);
372
- } catch (error) {
373
- reject(
374
- new LLMError(
375
- 'Failed to parse OpenAI response',
376
- 'PARSE_ERROR',
377
- undefined,
378
- { data }
379
- )
380
- );
381
- }
382
- }
383
- });
384
- });
385
-
386
- req.on('error', (error) => {
387
- reject(
388
- new LLMError(
389
- `OpenAI request failed: ${error.message}`,
390
- 'REQUEST_FAILED',
391
- undefined,
392
- { error: error.message },
393
- true
394
- )
395
- );
396
- });
397
-
398
- if (body) {
399
- req.write(JSON.stringify(body));
400
- }
401
-
402
- req.end();
403
- });
404
- }
405
-
406
- /**
407
- * Stream HTTP request
408
- */
409
- private makeStreamRequest(
410
- path: string,
411
- method: string = 'POST',
412
- body?: unknown,
413
- request?: CompletionRequest
414
- ): AsyncIterable<OpenAIStreamResponse> {
415
- const self = this;
416
-
417
- return {
418
- async *[Symbol.asyncIterator]() {
419
- const url = new URL(path, self.baseUrl);
420
- const timeout = self.getTimeout(request);
421
-
422
- const requestOptions = {
423
- method,
424
- headers: {
425
- 'Content-Type': 'application/json',
426
- Authorization: `Bearer ${self.apiKey}`,
427
- ...self.getHeaders(request),
428
- },
429
- timeout,
430
- };
431
-
432
- const protocol = self.baseUrl.startsWith('https') ? https : http;
433
- const chunks: OpenAIStreamResponse[] = [];
434
-
435
- await new Promise<void>((resolve, reject) => {
436
- const req = protocol.request(url, requestOptions, (res) => {
437
- if (!res.statusCode || res.statusCode >= 400) {
438
- let errorData = '';
439
- res.on('data', (chunk) => {
440
- errorData += chunk;
441
- });
442
- res.on('end', () => {
443
- reject(
444
- new LLMError(
445
- `OpenAI stream error: ${errorData}`,
446
- 'STREAM_ERROR',
447
- res.statusCode
448
- )
449
- );
450
- });
451
- return;
452
- }
453
-
454
- let buffer = '';
455
-
456
- res.on('data', (chunk) => {
457
- buffer += chunk.toString();
458
- const lines = buffer.split('\n');
459
- buffer = lines[lines.length - 1];
460
-
461
- for (let i = 0; i < lines.length - 1; i++) {
462
- const line = lines[i].trim();
463
-
464
- if (!line || line === '[DONE]') continue;
465
-
466
- if (line.startsWith('data: ')) {
467
- try {
468
- const data = JSON.parse(line.slice(6)) as OpenAIStreamResponse;
469
- chunks.push(data);
470
- } catch (error) {
471
- // Ignore parse errors in stream
472
- }
473
- }
474
- }
475
- });
476
-
477
- res.on('end', () => {
478
- resolve();
479
- });
480
- });
481
-
482
- req.on('error', (error) => {
483
- reject(
484
- new LLMError(
485
- `OpenAI stream request failed: ${error.message}`,
486
- 'REQUEST_FAILED',
487
- undefined,
488
- { error: error.message },
489
- true
490
- )
491
- );
492
- });
493
-
494
- req.on('timeout', () => {
495
- req.destroy();
496
- reject(
497
- new LLMError(
498
- 'OpenAI stream request timeout',
499
- 'TIMEOUT',
500
- undefined,
501
- null,
502
- true
503
- )
504
- );
505
- });
506
-
507
- if (body) {
508
- req.write(JSON.stringify(body));
509
- }
510
-
511
- req.end();
512
- });
513
-
514
- // Yield collected chunks
515
- for (const chunk of chunks) {
516
- yield chunk;
517
- }
518
- },
519
- };
520
- }
521
- }