llmjs2 0.0.2 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +486 -1
- package/dist/agent.d.ts +80 -0
- package/dist/agent.d.ts.map +1 -0
- package/dist/agent.js +189 -0
- package/dist/agent.js.map +1 -0
- package/dist/index.d.ts +74 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +191 -0
- package/dist/index.js.map +1 -0
- package/dist/providers/base.d.ts +58 -0
- package/dist/providers/base.d.ts.map +1 -0
- package/dist/providers/base.js +149 -0
- package/dist/providers/base.js.map +1 -0
- package/dist/providers/index.d.ts +8 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +7 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/ollama.d.ts +42 -0
- package/dist/providers/ollama.d.ts.map +1 -0
- package/dist/providers/ollama.js +260 -0
- package/dist/providers/ollama.js.map +1 -0
- package/dist/providers/openai.d.ts +38 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +289 -0
- package/dist/providers/openai.js.map +1 -0
- package/dist/types.d.ts +182 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +6 -0
- package/dist/types.js.map +1 -0
- package/package.json +44 -10
- package/src/agent.ts +285 -0
- package/src/index.ts +268 -0
- package/src/providers/base.ts +216 -0
- package/src/providers/index.ts +8 -0
- package/src/providers/ollama.ts +429 -0
- package/src/providers/openai.ts +485 -0
- package/src/types.ts +231 -0
- package/llmjs.js +0 -61
package/src/agent.ts
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent - Stateful conversation manager with tool support
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import {
|
|
6
|
+
completion,
|
|
7
|
+
streamCompletion,
|
|
8
|
+
CompletionRequest,
|
|
9
|
+
CompletionResponse,
|
|
10
|
+
CompletionChunk,
|
|
11
|
+
Message,
|
|
12
|
+
Tool,
|
|
13
|
+
LLMError,
|
|
14
|
+
} from './index.js';
|
|
15
|
+
|
|
16
|
+
/**
 * Agent configuration
 */
export interface AgentConfig {
  /** Model identifier with a provider prefix (e.g. 'openai/gpt-4' or 'ollama/qwen3.5:397b-cloud') */
  model: string;

  /** API key for the provider. Optional; when omitted the provider falls back to its
   * environment variable (e.g. OPENAI_API_KEY for the OpenAI provider). */
  apiKey?: string;

  /** Base URL for the provider API. Optional; each provider supplies its own default. */
  baseUrl?: string;

  /** System instruction; pushed into the conversation as a 'system' message at construction. */
  instruction?: string;

  /** Tool/function definitions advertised to the model on every completion request. */
  tools?: Tool[];

  /** Synchronous tool executor — receives the tool name and parsed arguments, returns the
   * result as a string. When omitted, tool calls resolve to a "not configured" message. */
  toolExecutor?: (toolName: string, args: Record<string, unknown>) => string;
}
|
|
38
|
+
|
|
39
|
+
/**
 * Agent generation request
 */
export interface AgentGenerateRequest {
  /** User prompt/message (required). */
  userPrompt: string;

  /** Optional images (base64 or URLs).
   * NOTE(review): the Agent only appends a textual "[Images: N image(s) attached]"
   * marker to the prompt — the image data itself is not forwarded to the provider. */
  images?: string[];

  /** Optional reference documents; joined with '---' separators and appended to the prompt. */
  references?: string[];

  /** Additional context variables; serialized as pretty-printed JSON and appended to the prompt. */
  context?: Record<string, unknown>;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Agent generation response
 */
export interface AgentGenerateResponse {
  /** Final assistant text, after any tool-call iterations have been resolved. */
  response: string;

  /** Full completion response from the provider for the final turn. */
  completion: CompletionResponse;
}
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* Stateful agent for conversations and tool use
|
|
69
|
+
*/
|
|
70
|
+
export class Agent {
|
|
71
|
+
private config: AgentConfig;
|
|
72
|
+
private conversationHistory: Message[] = [];
|
|
73
|
+
|
|
74
|
+
constructor(config: AgentConfig) {
|
|
75
|
+
if (!config.model) {
|
|
76
|
+
throw new LLMError('Model is required in agent config', 'MISSING_MODEL');
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
this.config = config;
|
|
80
|
+
|
|
81
|
+
// Initialize conversation with system instruction
|
|
82
|
+
if (config.instruction) {
|
|
83
|
+
this.conversationHistory.push({
|
|
84
|
+
role: 'system',
|
|
85
|
+
content: config.instruction,
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Generate a response for a user message
|
|
92
|
+
*/
|
|
93
|
+
async generate(request: AgentGenerateRequest): Promise<AgentGenerateResponse> {
|
|
94
|
+
if (!request.userPrompt) {
|
|
95
|
+
throw new LLMError('userPrompt is required', 'MISSING_USER_PROMPT');
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
// Build user message with context
|
|
99
|
+
let userMessage = request.userPrompt;
|
|
100
|
+
|
|
101
|
+
if (request.images && request.images.length > 0) {
|
|
102
|
+
userMessage += `\n\n[Images: ${request.images.length} image(s) attached]`;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
if (request.references && request.references.length > 0) {
|
|
106
|
+
userMessage += '\n\nReferences:\n' + request.references.join('\n---\n');
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
if (request.context && Object.keys(request.context).length > 0) {
|
|
110
|
+
userMessage +=
|
|
111
|
+
'\n\nContext: ' + JSON.stringify(request.context, null, 2);
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
// Add user message to history
|
|
115
|
+
this.conversationHistory.push({
|
|
116
|
+
role: 'user',
|
|
117
|
+
content: userMessage,
|
|
118
|
+
});
|
|
119
|
+
|
|
120
|
+
// Get initial completion
|
|
121
|
+
let completion_result = await this.getCompletion();
|
|
122
|
+
let toolCalls = completion_result.toolCalls;
|
|
123
|
+
let maxIterations = 10;
|
|
124
|
+
let iterations = 0;
|
|
125
|
+
|
|
126
|
+
// Handle tool calls in a loop
|
|
127
|
+
while (toolCalls && toolCalls.length > 0 && iterations < maxIterations) {
|
|
128
|
+
iterations++;
|
|
129
|
+
|
|
130
|
+
// Execute all tool calls
|
|
131
|
+
const toolResults: string[] = [];
|
|
132
|
+
|
|
133
|
+
for (const toolCall of toolCalls) {
|
|
134
|
+
try {
|
|
135
|
+
let result: string;
|
|
136
|
+
|
|
137
|
+
if (this.config.toolExecutor) {
|
|
138
|
+
// Use provided tool executor
|
|
139
|
+
result = this.config.toolExecutor(toolCall.name, toolCall.arguments);
|
|
140
|
+
} else {
|
|
141
|
+
// Fallback: return error
|
|
142
|
+
result = `Tool '${toolCall.name}' not configured`;
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
toolResults.push(`Tool: ${toolCall.name}\nResult: ${result}`);
|
|
146
|
+
} catch (error) {
|
|
147
|
+
toolResults.push(`Tool: ${toolCall.name}\nError: ${error instanceof Error ? error.message : String(error)}`);
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
// Add tool results to conversation
|
|
152
|
+
this.conversationHistory.push({
|
|
153
|
+
role: 'user',
|
|
154
|
+
content: 'Tool Results:\n' + toolResults.join('\n\n'),
|
|
155
|
+
});
|
|
156
|
+
|
|
157
|
+
// Get next completion
|
|
158
|
+
completion_result = await this.getCompletion();
|
|
159
|
+
toolCalls = completion_result.toolCalls;
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
// Add final assistant response to history
|
|
163
|
+
this.conversationHistory.push({
|
|
164
|
+
role: 'assistant',
|
|
165
|
+
content: completion_result.content,
|
|
166
|
+
});
|
|
167
|
+
|
|
168
|
+
return {
|
|
169
|
+
response: completion_result.content,
|
|
170
|
+
completion: completion_result,
|
|
171
|
+
};
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
/**
|
|
175
|
+
* Get completion from the model
|
|
176
|
+
*/
|
|
177
|
+
private async getCompletion(): Promise<CompletionResponse> {
|
|
178
|
+
const completionRequest: CompletionRequest = {
|
|
179
|
+
model: this.config.model,
|
|
180
|
+
apiKey: this.config.apiKey,
|
|
181
|
+
baseUrl: this.config.baseUrl,
|
|
182
|
+
messages: this.conversationHistory,
|
|
183
|
+
tools: this.config.tools,
|
|
184
|
+
};
|
|
185
|
+
|
|
186
|
+
return completion(completionRequest);
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
/**
|
|
190
|
+
* Stream a response for a user message
|
|
191
|
+
*/
|
|
192
|
+
async *generateStream(
|
|
193
|
+
request: AgentGenerateRequest
|
|
194
|
+
): AsyncIterable<CompletionChunk> {
|
|
195
|
+
if (!request.userPrompt) {
|
|
196
|
+
throw new LLMError('userPrompt is required', 'MISSING_USER_PROMPT');
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
// Build user message with context
|
|
200
|
+
let userMessage = request.userPrompt;
|
|
201
|
+
|
|
202
|
+
if (request.images && request.images.length > 0) {
|
|
203
|
+
userMessage += `\n\n[Images: ${request.images.length} image(s) attached]`;
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
if (request.references && request.references.length > 0) {
|
|
207
|
+
userMessage += '\n\nReferences:\n' + request.references.join('\n---\n');
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
if (request.context && Object.keys(request.context).length > 0) {
|
|
211
|
+
userMessage +=
|
|
212
|
+
'\n\nContext: ' + JSON.stringify(request.context, null, 2);
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
// Add user message to history
|
|
216
|
+
this.conversationHistory.push({
|
|
217
|
+
role: 'user',
|
|
218
|
+
content: userMessage,
|
|
219
|
+
});
|
|
220
|
+
|
|
221
|
+
// Create completion request
|
|
222
|
+
const completionRequest: CompletionRequest = {
|
|
223
|
+
model: this.config.model,
|
|
224
|
+
apiKey: this.config.apiKey,
|
|
225
|
+
baseUrl: this.config.baseUrl,
|
|
226
|
+
messages: this.conversationHistory,
|
|
227
|
+
tools: this.config.tools,
|
|
228
|
+
};
|
|
229
|
+
|
|
230
|
+
// Stream and collect response
|
|
231
|
+
let fullResponse = '';
|
|
232
|
+
|
|
233
|
+
const stream = streamCompletion(completionRequest);
|
|
234
|
+
for await (const chunk of stream) {
|
|
235
|
+
fullResponse += chunk.delta;
|
|
236
|
+
yield chunk;
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
// Add assistant response to history
|
|
240
|
+
this.conversationHistory.push({
|
|
241
|
+
role: 'assistant',
|
|
242
|
+
content: fullResponse,
|
|
243
|
+
});
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
/**
|
|
247
|
+
* Get conversation history
|
|
248
|
+
*/
|
|
249
|
+
getHistory(): Message[] {
|
|
250
|
+
return [...this.conversationHistory];
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
/**
|
|
254
|
+
* Clear conversation history (keeps system instruction if set)
|
|
255
|
+
*/
|
|
256
|
+
clearHistory(): void {
|
|
257
|
+
if (this.config.instruction) {
|
|
258
|
+
this.conversationHistory = [
|
|
259
|
+
{
|
|
260
|
+
role: 'system',
|
|
261
|
+
content: this.config.instruction,
|
|
262
|
+
},
|
|
263
|
+
];
|
|
264
|
+
} else {
|
|
265
|
+
this.conversationHistory = [];
|
|
266
|
+
}
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
/**
|
|
270
|
+
* Add a message to history
|
|
271
|
+
*/
|
|
272
|
+
addMessage(role: 'system' | 'user' | 'assistant', content: string): void {
|
|
273
|
+
this.conversationHistory.push({
|
|
274
|
+
role,
|
|
275
|
+
content,
|
|
276
|
+
});
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
/**
|
|
280
|
+
* Get the current configuration
|
|
281
|
+
*/
|
|
282
|
+
getConfig(): AgentConfig {
|
|
283
|
+
return { ...this.config };
|
|
284
|
+
}
|
|
285
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* llmjs2 - Enterprise-grade LLM abstraction layer
|
|
3
|
+
* Unified API for OpenAI and Ollama
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import {
|
|
7
|
+
CompletionRequest,
|
|
8
|
+
CompletionResponse,
|
|
9
|
+
CompletionChunk,
|
|
10
|
+
CompletionOptions,
|
|
11
|
+
ProviderType,
|
|
12
|
+
IProvider,
|
|
13
|
+
} from './types.js';
|
|
14
|
+
import { OpenAIProvider } from './providers/openai.js';
|
|
15
|
+
import { OllamaProvider } from './providers/ollama.js';
|
|
16
|
+
import { LLMError } from './providers/base.js';
|
|
17
|
+
|
|
18
|
+
/**
 * Global options applied as fallbacks to every request (globalTimeout,
 * globalRetry, debug, logger). Mutated only via configure().
 */
let globalOptions: CompletionOptions = {};

/**
 * Cache of provider instances, keyed per provider configuration, so that
 * repeated requests reuse the same instance. Cleared via clearProviderCache().
 */
const providerCache: Map<string, IProvider> = new Map();
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Configure global settings
|
|
30
|
+
*/
|
|
31
|
+
export function configure(options: CompletionOptions): void {
|
|
32
|
+
globalOptions = { ...globalOptions, ...options };
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
* Get provider type from model string
|
|
37
|
+
*/
|
|
38
|
+
function getProviderType(model: string): ProviderType {
|
|
39
|
+
if (model.startsWith('openai/')) {
|
|
40
|
+
return 'openai';
|
|
41
|
+
}
|
|
42
|
+
if (model.startsWith('ollama/')) {
|
|
43
|
+
return 'ollama';
|
|
44
|
+
}
|
|
45
|
+
// Default to openai if no prefix specified
|
|
46
|
+
return 'openai';
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Create appropriate provider instance
|
|
51
|
+
*/
|
|
52
|
+
function createProvider(
|
|
53
|
+
providerType: ProviderType,
|
|
54
|
+
request: CompletionRequest
|
|
55
|
+
): IProvider {
|
|
56
|
+
const cacheKey = `${providerType}:${request.apiKey || 'default'}:${request.baseUrl || 'default'}`;
|
|
57
|
+
|
|
58
|
+
// Return cached provider if available
|
|
59
|
+
if (providerCache.has(cacheKey)) {
|
|
60
|
+
return providerCache.get(cacheKey)!;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
let provider: IProvider;
|
|
64
|
+
|
|
65
|
+
switch (providerType) {
|
|
66
|
+
case 'openai': {
|
|
67
|
+
const apiKey = request.apiKey || process.env.OPENAI_API_KEY;
|
|
68
|
+
if (!apiKey) {
|
|
69
|
+
throw new LLMError(
|
|
70
|
+
'OpenAI API key is required. Pass it as apiKey in the request or set OPENAI_API_KEY environment variable.',
|
|
71
|
+
'MISSING_API_KEY'
|
|
72
|
+
);
|
|
73
|
+
}
|
|
74
|
+
provider = new OpenAIProvider({
|
|
75
|
+
type: 'openai',
|
|
76
|
+
apiKey: apiKey,
|
|
77
|
+
baseUrl: request.baseUrl,
|
|
78
|
+
timeout: request.timeout ?? globalOptions.globalTimeout,
|
|
79
|
+
retry: request.retry ?? globalOptions.globalRetry,
|
|
80
|
+
});
|
|
81
|
+
break;
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
case 'ollama': {
|
|
85
|
+
provider = new OllamaProvider({
|
|
86
|
+
type: 'ollama',
|
|
87
|
+
baseUrl: request.baseUrl,
|
|
88
|
+
timeout: request.timeout ?? globalOptions.globalTimeout,
|
|
89
|
+
retry: request.retry ?? globalOptions.globalRetry,
|
|
90
|
+
});
|
|
91
|
+
break;
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
default:
|
|
95
|
+
throw new LLMError(`Unsupported provider: ${providerType}`, 'UNKNOWN_PROVIDER');
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
// Apply global settings
|
|
99
|
+
if (globalOptions.debug) {
|
|
100
|
+
provider.setDebug(true);
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
if (globalOptions.logger) {
|
|
104
|
+
provider.setLogger(globalOptions.logger);
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
// Cache the provider
|
|
108
|
+
providerCache.set(cacheKey, provider);
|
|
109
|
+
|
|
110
|
+
return provider;
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
/**
|
|
114
|
+
* Create a completion request
|
|
115
|
+
*
|
|
116
|
+
* @example
|
|
117
|
+
* import { completion } from 'llmjs2';
|
|
118
|
+
*
|
|
119
|
+
* const result = await completion({
|
|
120
|
+
* model: 'openai/gpt-4',
|
|
121
|
+
* apiKey: 'sk-...',
|
|
122
|
+
* messages: [
|
|
123
|
+
* { role: 'user', content: 'Hello!' }
|
|
124
|
+
* ]
|
|
125
|
+
* });
|
|
126
|
+
*
|
|
127
|
+
* console.log(result.content);
|
|
128
|
+
*/
|
|
129
|
+
export async function completion(
|
|
130
|
+
request: CompletionRequest
|
|
131
|
+
): Promise<CompletionResponse> {
|
|
132
|
+
if (!request.model) {
|
|
133
|
+
throw new LLMError('Model is required', 'MISSING_MODEL');
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
const providerType = getProviderType(request.model);
|
|
137
|
+
const provider = createProvider(providerType, request);
|
|
138
|
+
|
|
139
|
+
try {
|
|
140
|
+
return await provider.complete(request);
|
|
141
|
+
} catch (error) {
|
|
142
|
+
if (error instanceof LLMError) {
|
|
143
|
+
throw error;
|
|
144
|
+
}
|
|
145
|
+
throw new LLMError(
|
|
146
|
+
`Completion failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
147
|
+
'COMPLETION_FAILED',
|
|
148
|
+
undefined,
|
|
149
|
+
error
|
|
150
|
+
);
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
/**
|
|
155
|
+
* Stream a completion request
|
|
156
|
+
*
|
|
157
|
+
* @example
|
|
158
|
+
* import { streamCompletion } from 'llmjs2';
|
|
159
|
+
*
|
|
160
|
+
* const stream = streamCompletion({
|
|
161
|
+
* model: 'openai/gpt-4',
|
|
162
|
+
* apiKey: 'sk-...',
|
|
163
|
+
* messages: [
|
|
164
|
+
* { role: 'user', content: 'Write a poem' }
|
|
165
|
+
* ]
|
|
166
|
+
* });
|
|
167
|
+
*
|
|
168
|
+
* for await (const chunk of stream) {
|
|
169
|
+
* process.stdout.write(chunk.delta);
|
|
170
|
+
* }
|
|
171
|
+
*/
|
|
172
|
+
export async function* streamCompletion(
|
|
173
|
+
request: CompletionRequest
|
|
174
|
+
): AsyncIterable<CompletionChunk> {
|
|
175
|
+
if (!request.model) {
|
|
176
|
+
throw new LLMError('Model is required', 'MISSING_MODEL');
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
const providerType = getProviderType(request.model);
|
|
180
|
+
const provider = createProvider(providerType, request);
|
|
181
|
+
|
|
182
|
+
try {
|
|
183
|
+
yield* provider.completeStream(request);
|
|
184
|
+
} catch (error) {
|
|
185
|
+
if (error instanceof LLMError) {
|
|
186
|
+
throw error;
|
|
187
|
+
}
|
|
188
|
+
throw new LLMError(
|
|
189
|
+
`Stream failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
190
|
+
'STREAM_FAILED',
|
|
191
|
+
undefined,
|
|
192
|
+
error
|
|
193
|
+
);
|
|
194
|
+
}
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
/**
|
|
198
|
+
* Validate provider connectivity and configuration
|
|
199
|
+
*/
|
|
200
|
+
export async function validateProvider(
|
|
201
|
+
model: string,
|
|
202
|
+
apiKey?: string,
|
|
203
|
+
baseUrl?: string
|
|
204
|
+
): Promise<void> {
|
|
205
|
+
const providerType = getProviderType(model);
|
|
206
|
+
|
|
207
|
+
const request: CompletionRequest = {
|
|
208
|
+
model,
|
|
209
|
+
apiKey,
|
|
210
|
+
baseUrl,
|
|
211
|
+
messages: [{ role: 'user', content: 'test' }],
|
|
212
|
+
};
|
|
213
|
+
|
|
214
|
+
const provider = createProvider(providerType, request);
|
|
215
|
+
|
|
216
|
+
try {
|
|
217
|
+
await provider.validate();
|
|
218
|
+
} catch (error) {
|
|
219
|
+
if (error instanceof LLMError) {
|
|
220
|
+
throw error;
|
|
221
|
+
}
|
|
222
|
+
throw new LLMError(
|
|
223
|
+
`Validation failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
224
|
+
'VALIDATION_FAILED'
|
|
225
|
+
);
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/**
|
|
230
|
+
* Clear provider cache
|
|
231
|
+
*/
|
|
232
|
+
export function clearProviderCache(): void {
|
|
233
|
+
providerCache.clear();
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
/**
 * Re-exported request/response and provider types for consumers.
 */
export type {
  CompletionRequest,
  CompletionResponse,
  CompletionChunk,
  CompletionOptions,
  Message,
  MessageRole,
  Tool,
  ProviderType,
  ProviderConfig,
  ProviderError,
} from './types.js';

/**
 * Error class thrown by all public entry points.
 */
export { LLMError };

/**
 * Provider classes for advanced use cases (direct instantiation, or
 * custom providers built on BaseProvider).
 */
export { OpenAIProvider } from './providers/openai.js';
export { OllamaProvider } from './providers/ollama.js';
export { BaseProvider } from './providers/base.js';

/**
 * Agent for stateful, multi-turn conversations with tool support.
 */
export { Agent } from './agent.js';
export type { AgentConfig, AgentGenerateRequest, AgentGenerateResponse } from './agent.js';
|