llmjs2 1.0.0 → 1.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +39 -450
- package/grapes.jpg +0 -0
- package/index.d.ts +43 -0
- package/index.js +465 -0
- package/package.json +7 -47
- package/spec.txt +73 -0
- package/test-generate-tools-suite.js +100 -0
- package/test-generate-tools.js +57 -0
- package/test-generate.js +31 -0
- package/test.js +33 -0
- package/LICENSE +0 -21
- package/dist/agent.d.ts +0 -80
- package/dist/agent.d.ts.map +0 -1
- package/dist/agent.js +0 -189
- package/dist/agent.js.map +0 -1
- package/dist/index.d.ts +0 -74
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js +0 -191
- package/dist/index.js.map +0 -1
- package/dist/providers/base.d.ts +0 -58
- package/dist/providers/base.d.ts.map +0 -1
- package/dist/providers/base.js +0 -149
- package/dist/providers/base.js.map +0 -1
- package/dist/providers/index.d.ts +0 -8
- package/dist/providers/index.d.ts.map +0 -1
- package/dist/providers/index.js +0 -7
- package/dist/providers/index.js.map +0 -1
- package/dist/providers/ollama.d.ts +0 -42
- package/dist/providers/ollama.d.ts.map +0 -1
- package/dist/providers/ollama.js +0 -260
- package/dist/providers/ollama.js.map +0 -1
- package/dist/providers/openai.d.ts +0 -38
- package/dist/providers/openai.d.ts.map +0 -1
- package/dist/providers/openai.js +0 -289
- package/dist/providers/openai.js.map +0 -1
- package/dist/types.d.ts +0 -182
- package/dist/types.d.ts.map +0 -1
- package/dist/types.js +0 -6
- package/dist/types.js.map +0 -1
- package/src/agent.ts +0 -285
- package/src/index.ts +0 -268
- package/src/providers/base.ts +0 -216
- package/src/providers/index.ts +0 -8
- package/src/providers/ollama.ts +0 -429
- package/src/providers/openai.ts +0 -485
- package/src/types.ts +0 -231
package/src/providers/ollama.ts
DELETED
@@ -1,429 +0,0 @@
-/**
- * Ollama provider implementation
- */
-
-import { URL } from 'url';
-
-import {
-  CompletionRequest,
-  CompletionResponse,
-  CompletionChunk,
-  ProviderConfig,
-} from '../types.js';
-import {
-  BaseProvider,
-  validateCompletionRequest,
-  withRetry,
-  LLMError,
-} from './base.js';
-
-/**
- * Ollama API request format
- */
-interface OllamaRequest {
-  model: string;
-  messages: Array<{
-    role: string;
-    content: string;
-  }>;
-  stream?: boolean;
-  temperature?: number;
-  top_p?: number;
-  top_k?: number;
-  num_predict?: number;
-  stop?: string[];
-}
-
-/**
- * Ollama API response format
- */
-interface OllamaResponse {
-  model: string;
-  created_at: string;
-  message?: {
-    role: string;
-    content: string;
-  };
-  done: boolean;
-  total_duration?: number;
-  load_duration?: number;
-  prompt_eval_count?: number;
-  prompt_eval_duration?: number;
-  eval_count?: number;
-  eval_duration?: number;
-}
-
-/**
- * Ollama Provider implementation
- */
-export class OllamaProvider extends BaseProvider {
-  private baseUrl: string = 'https://ollama.com';
-  private apiKey?: string;
-
-  constructor(config: ProviderConfig) {
-    super(config);
-
-    // Get API key from config or env variable
-    const apiKey = config.apiKey || process.env.OLLAMA_CLOUD_API_KEY;
-
-    if (apiKey) {
-      this.apiKey = apiKey;
-      // Default to Ollama Cloud for API key-based requests
-      if (!config.baseUrl) {
-        this.baseUrl = 'https://ollama.com';
-      } else {
-        this.baseUrl = config.baseUrl;
-      }
-    } else {
-      // Local Ollama setup
-      if (config.baseUrl) {
-        this.baseUrl = config.baseUrl;
-      }
-    }
-  }
-
-  /**
-   * Parse model string (e.g., 'ollama/mistral' -> 'mistral')
-   */
-  parseModel(model: string): string {
-    if (model.startsWith('ollama/')) {
-      return model.slice(7); // Remove 'ollama/' prefix
-    }
-    return model;
-  }
-
-  /**
-   * Validate configuration
-   */
-  async validate(): Promise<void> {
-    try {
-      // Make a simple request to verify Ollama is running
-      const response = await this.makeRequest<{ models: Array<{ name: string }> }>(
-        '/api/tags',
-        'GET'
-      );
-
-      if (!Array.isArray(response.models)) {
-        throw new Error('Invalid Ollama response format');
-      }
-
-      this.logger('info', 'Ollama API validation successful');
-    } catch (error) {
-      throw new LLMError(
-        `Ollama validation failed: ${error instanceof Error ? error.message : String(error)}`,
-        'VALIDATION_FAILED',
-        undefined,
-        null,
-        true
-      );
-    }
-  }
-
-  /**
-   * Create a completion
-   */
-  async complete(request: CompletionRequest): Promise<CompletionResponse> {
-    validateCompletionRequest(request);
-
-    const model = this.parseModel(request.model);
-
-    return withRetry(
-      async () => {
-        const ollamaRequest: OllamaRequest = {
-          model,
-          stream: false,
-          messages: request.messages.map((msg) => ({
-            role: msg.role,
-            content: msg.content,
-          })),
-        };
-
-        this.logger('debug', 'Ollama completion request', {
-          model,
-          messageCount: request.messages.length,
-        });
-
-        // Use non-streaming request for complete()
-        const response = await this.makeRequest<OllamaResponse>(
-          '/api/chat',
-          'POST',
-          ollamaRequest,
-          request
-        );
-
-        const fullResponse = response.message?.content || '';
-
-        const result: CompletionResponse = {
-          content: fullResponse,
-          model: model,
-          stopReason: 'stop_sequence',
-          raw: response,
-        };
-
-        this.logger('debug', 'Ollama completion response', {
-          model,
-          contentLength: fullResponse.length,
-        });
-
-        return result;
-      },
-      this.getRetryConfig(request)
-    );
-  }
-
-  /**
-   * Stream completion
-   */
-  async *completeStream(
-    request: CompletionRequest
-  ): AsyncIterable<CompletionChunk> {
-    validateCompletionRequest(request);
-
-    const model = this.parseModel(request.model);
-
-    const ollamaRequest: OllamaRequest = {
-      model,
-      stream: true,
-      messages: request.messages.map((msg) => ({
-        role: msg.role,
-        content: msg.content,
-      })),
-    };
-
-    this.logger('debug', 'Ollama stream request', { model });
-
-    const chunks = await this.streamCompletion(ollamaRequest, request);
-
-    for await (const chunk of chunks) {
-      if (chunk.message?.content) {
-        yield {
-          delta: chunk.message.content,
-          stopReason: chunk.done ? 'stop_sequence' : undefined,
-        };
-      }
-    }
-  }
-
-  /**
-   * Internal stream completion handler
-   */
-  private async *streamCompletion(
-    body: OllamaRequest,
-    request?: CompletionRequest
-  ): AsyncIterable<OllamaResponse> {
-    const stream = await this.makeStreamRequest(
-      '/api/chat',
-      'POST',
-      body,
-      request
-    );
-
-    for await (const chunk of stream) {
-      yield chunk;
-    }
-  }
-
-  /**
-   * Make HTTP request to Ollama API using fetch
-   */
-  private async makeRequest<T>(
-    path: string,
-    method: string = 'POST',
-    body?: unknown,
-    request?: CompletionRequest
-  ): Promise<T> {
-    const url = new URL(path, this.baseUrl).toString();
-    const timeout = this.getTimeout(request);
-
-    const headers: Record<string, string> = {
-      'Content-Type': 'application/json',
-      ...this.getHeaders(request),
-    };
-
-    if (this.apiKey) {
-      headers['Authorization'] = `Bearer ${this.apiKey}`;
-    }
-
-    this.logger('info', '=== Ollama Request ===', {
-      url,
-      method,
-      headers,
-      body: JSON.stringify(body),
-    });
-
-    try {
-      const controller = new AbortController();
-      const timeoutId = setTimeout(() => controller.abort(), timeout);
-
-      const response = await fetch(url, {
-        method,
-        headers,
-        body: body ? JSON.stringify(body) : undefined,
-        signal: controller.signal,
-      });
-
-      clearTimeout(timeoutId);
-
-      if (!response.ok) {
-        const errorText = await response.text();
-        throw new LLMError(
-          `Ollama API error: ${errorText}`,
-          'API_ERROR',
-          response.status,
-          null,
-          response.status === 503 || response.status === 502
-        );
-      }
-
-      const data = await response.json();
-      return data as T;
-    } catch (error) {
-      if (error instanceof LLMError) {
-        throw error;
-      }
-      if (
-        error instanceof Error &&
-        error.name === 'AbortError'
-      ) {
-        throw new LLMError(
-          'Ollama request timeout',
-          'TIMEOUT',
-          undefined,
-          null,
-          true
-        );
-      }
-      throw new LLMError(
-        `Ollama request failed: ${error instanceof Error ? error.message : String(error)}`,
-        'REQUEST_FAILED',
-        undefined,
-        { error: error instanceof Error ? error.message : String(error) },
-        true
-      );
-    }
-  }
-
-  /**
-   * Stream HTTP request using fetch
-   */
-  private makeStreamRequest(
-    path: string,
-    method: string = 'POST',
-    body?: unknown,
-    request?: CompletionRequest
-  ): AsyncIterable<OllamaResponse> {
-    const self = this;
-
-    return {
-      async *[Symbol.asyncIterator]() {
-        const url = new URL(path, self.baseUrl).toString();
-        const timeout = self.getTimeout(request);
-
-        const headers: Record<string, string> = {
-          'Content-Type': 'application/json',
-          ...self.getHeaders(request),
-        };
-
-        if (self.apiKey) {
-          headers['Authorization'] = `Bearer ${self.apiKey}`;
-        }
-
-        self.logger('debug', 'Ollama stream request', {
-          url,
-          method,
-          headers,
-          body: JSON.stringify(body),
-        });
-
-        try {
-          const controller = new AbortController();
-          const timeoutId = setTimeout(() => controller.abort(), timeout);
-
-          const response = await fetch(url, {
-            method,
-            headers,
-            body: body ? JSON.stringify(body) : undefined,
-            signal: controller.signal,
-          });
-
-          clearTimeout(timeoutId);
-
-          if (!response.ok) {
-            const errorText = await response.text();
-            throw new LLMError(
-              `Ollama stream error: ${errorText}`,
-              'STREAM_ERROR',
-              response.status
-            );
-          }
-
-          const reader = response.body?.getReader();
-          if (!reader) {
-            throw new LLMError(
-              'No response body',
-              'STREAM_ERROR',
-              undefined
-            );
-          }
-
-          const decoder = new TextDecoder();
-          let buffer = '';
-
-          while (true) {
-            const { done, value } = await reader.read();
-            if (done) break;
-
-            buffer += decoder.decode(value, { stream: true });
-            const lines = buffer.split('\n');
-            buffer = lines[lines.length - 1];
-
-            for (let i = 0; i < lines.length - 1; i++) {
-              const line = lines[i].trim();
-              if (!line) continue;
-
-              try {
-                const chunk = JSON.parse(line) as OllamaResponse;
-                yield chunk;
-              } catch (error) {
-                // Ignore parse errors in stream
-              }
-            }
-          }
-
-          // Process any remaining data in buffer
-          if (buffer.trim()) {
-            try {
-              const chunk = JSON.parse(buffer) as OllamaResponse;
-              yield chunk;
-            } catch (error) {
-              // Ignore parse errors
-            }
-          }
-        } catch (error) {
-          if (error instanceof LLMError) {
-            throw error;
-          }
-          if (
-            error instanceof Error &&
-            error.name === 'AbortError'
-          ) {
-            throw new LLMError(
-              'Ollama stream request timeout',
-              'TIMEOUT',
-              undefined,
-              null,
-              true
-            );
-          }
-          throw new LLMError(
-            `Ollama stream request failed: ${error instanceof Error ? error.message : String(error)}`,
-            'REQUEST_FAILED',
-            undefined,
-            { error: error instanceof Error ? error.message : String(error) },
-            true
-          );
-        }
-      },
-    };
-  }
-}
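
For context, a minimal sketch of how the removed OllamaProvider was presumably driven, inferred only from the deleted source above. The full ProviderConfig and CompletionRequest shapes lived in the also-removed types.ts, so any field beyond model, messages, apiKey, and baseUrl is an assumption, not a confirmed part of the 1.0.0 API.

// Hypothetical usage of the deleted OllamaProvider (not the author's
// documented example). Field names beyond those visible in the diff are
// assumptions. Node 18+ ESM assumed for top-level await and global fetch.
import { OllamaProvider } from './providers/ollama.js';

const provider = new OllamaProvider({
  // With an API key the constructor defaults baseUrl to https://ollama.com;
  // without one, pass baseUrl to point at a local Ollama instance.
  apiKey: process.env.OLLAMA_CLOUD_API_KEY,
});

// GET /api/tags to confirm the endpoint responds before use
await provider.validate();

// Non-streaming: POST /api/chat with stream: false
const res = await provider.complete({
  model: 'ollama/mistral', // parseModel() strips the 'ollama/' prefix
  messages: [{ role: 'user', content: 'Say hello.' }],
});
console.log(res.content);

// Streaming: NDJSON chunks surface as { delta, stopReason } objects
for await (const chunk of provider.completeStream({
  model: 'ollama/mistral',
  messages: [{ role: 'user', content: 'Say hello.' }],
})) {
  process.stdout.write(chunk.delta);
}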