wu-framework 1.1.14 → 1.1.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +39 -39
- package/README.md +408 -408
- package/dist/wu-framework.cjs.js.map +1 -1
- package/dist/wu-framework.dev.js +15151 -15151
- package/dist/wu-framework.dev.js.map +1 -1
- package/dist/wu-framework.esm.js.map +1 -1
- package/dist/wu-framework.umd.js.map +1 -1
- package/integrations/astro/README.md +127 -127
- package/integrations/astro/WuApp.astro +63 -63
- package/integrations/astro/WuShell.astro +39 -39
- package/integrations/astro/index.js +68 -68
- package/integrations/astro/package.json +38 -38
- package/integrations/astro/types.d.ts +53 -53
- package/package.json +161 -161
- package/src/adapters/angular/ai.js +30 -30
- package/src/adapters/angular/index.d.ts +154 -154
- package/src/adapters/angular/index.js +932 -932
- package/src/adapters/angular.d.ts +3 -3
- package/src/adapters/angular.js +3 -3
- package/src/adapters/index.js +168 -168
- package/src/adapters/lit/ai.js +20 -20
- package/src/adapters/lit/index.d.ts +120 -120
- package/src/adapters/lit/index.js +721 -721
- package/src/adapters/lit.d.ts +3 -3
- package/src/adapters/lit.js +3 -3
- package/src/adapters/preact/ai.js +33 -33
- package/src/adapters/preact/index.d.ts +108 -108
- package/src/adapters/preact/index.js +661 -661
- package/src/adapters/preact.d.ts +3 -3
- package/src/adapters/preact.js +3 -3
- package/src/adapters/react/index.js +48 -54
- package/src/adapters/react.d.ts +3 -3
- package/src/adapters/react.js +3 -3
- package/src/adapters/shared.js +64 -64
- package/src/adapters/solid/ai.js +32 -32
- package/src/adapters/solid/index.d.ts +101 -101
- package/src/adapters/solid/index.js +586 -586
- package/src/adapters/solid.d.ts +3 -3
- package/src/adapters/solid.js +3 -3
- package/src/adapters/svelte/ai.js +31 -31
- package/src/adapters/svelte/index.d.ts +166 -166
- package/src/adapters/svelte/index.js +798 -798
- package/src/adapters/svelte.d.ts +3 -3
- package/src/adapters/svelte.js +3 -3
- package/src/adapters/vanilla/ai.js +30 -30
- package/src/adapters/vanilla/index.d.ts +179 -179
- package/src/adapters/vanilla/index.js +785 -785
- package/src/adapters/vanilla.d.ts +3 -3
- package/src/adapters/vanilla.js +3 -3
- package/src/adapters/vue/ai.js +52 -52
- package/src/adapters/vue/index.d.ts +299 -299
- package/src/adapters/vue/index.js +610 -610
- package/src/adapters/vue.d.ts +3 -3
- package/src/adapters/vue.js +3 -3
- package/src/ai/wu-ai-actions.js +261 -261
- package/src/ai/wu-ai-agent.js +546 -546
- package/src/ai/wu-ai-browser-primitives.js +354 -354
- package/src/ai/wu-ai-browser.js +380 -380
- package/src/ai/wu-ai-context.js +332 -332
- package/src/ai/wu-ai-conversation.js +613 -613
- package/src/ai/wu-ai-orchestrate.js +1021 -1021
- package/src/ai/wu-ai-permissions.js +381 -381
- package/src/ai/wu-ai-provider.js +700 -700
- package/src/ai/wu-ai-schema.js +225 -225
- package/src/ai/wu-ai-triggers.js +396 -396
- package/src/ai/wu-ai.js +804 -804
- package/src/core/wu-app.js +236 -236
- package/src/core/wu-cache.js +477 -477
- package/src/core/wu-core.js +1398 -1398
- package/src/core/wu-error-boundary.js +382 -382
- package/src/core/wu-event-bus.js +348 -348
- package/src/core/wu-hooks.js +350 -350
- package/src/core/wu-html-parser.js +190 -190
- package/src/core/wu-iframe-sandbox.js +328 -328
- package/src/core/wu-loader.js +272 -272
- package/src/core/wu-logger.js +134 -134
- package/src/core/wu-manifest.js +509 -509
- package/src/core/wu-mcp-bridge.js +432 -432
- package/src/core/wu-overrides.js +510 -510
- package/src/core/wu-performance.js +228 -228
- package/src/core/wu-plugin.js +348 -348
- package/src/core/wu-prefetch.js +414 -414
- package/src/core/wu-proxy-sandbox.js +476 -476
- package/src/core/wu-sandbox.js +779 -779
- package/src/core/wu-script-executor.js +113 -113
- package/src/core/wu-snapshot-sandbox.js +227 -227
- package/src/core/wu-strategies.js +256 -256
- package/src/core/wu-style-bridge.js +477 -477
- package/src/index.js +224 -224
- package/src/utils/dependency-resolver.js +327 -327
package/src/ai/wu-ai-provider.js
CHANGED
|
@@ -1,700 +1,700 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* WU-AI-PROVIDER: BYOL (Bring Your Own LLM) provider system
|
|
3
|
-
*
|
|
4
|
-
* Pure fetch(), zero dependencies. Adapters normalize request/response
|
|
5
|
-
* across OpenAI, Anthropic, Ollama, and custom providers.
|
|
6
|
-
*
|
|
7
|
-
* Internal normalized format:
|
|
8
|
-
* Request: { role, content, tool_calls?, tool_call_id? }
|
|
9
|
-
* Response: { content, tool_calls?, usage? }
|
|
10
|
-
*/
|
|
11
|
-
|
|
12
|
-
import { logger } from '../core/wu-logger.js';
|
|
13
|
-
|
|
14
|
-
// ─── Normalized types (internal) ─────────────────────────────────
|
|
15
|
-
//
|
|
16
|
-
// Message: { role: 'system'|'user'|'assistant'|'tool', content: string,
|
|
17
|
-
// tool_calls?: ToolCall[], tool_call_id?: string }
|
|
18
|
-
//
|
|
19
|
-
// ToolCall: { id: string, name: string, arguments: object }
|
|
20
|
-
//
|
|
21
|
-
// Response: { content: string, tool_calls?: ToolCall[], usage?: { prompt_tokens, completion_tokens } }
|
|
22
|
-
//
|
|
23
|
-
// StreamChunk: { type: 'text'|'tool_call'|'done'|'error', content?: string,
|
|
24
|
-
// tool_call?: ToolCall, usage?: object, error?: string }
|
|
25
|
-
|
|
26
|
-
// ─── Base Adapter ────────────────────────────────────────────────
|
|
27
|
-
|
|
28
|
-
class BaseAdapter {
|
|
29
|
-
constructor(config = {}) {
|
|
30
|
-
this.model = config.model || '';
|
|
31
|
-
}
|
|
32
|
-
|
|
33
|
-
/** Format messages + options into provider-specific request body */
|
|
34
|
-
formatRequest(/* messages, options */) {
|
|
35
|
-
throw new Error('Adapter must implement formatRequest()');
|
|
36
|
-
}
|
|
37
|
-
|
|
38
|
-
/** Parse provider response into normalized Response */
|
|
39
|
-
parseResponse(/* rawData */) {
|
|
40
|
-
throw new Error('Adapter must implement parseResponse()');
|
|
41
|
-
}
|
|
42
|
-
|
|
43
|
-
/** Parse a streaming SSE line into a StreamChunk (or null to skip) */
|
|
44
|
-
parseStreamChunk(/* line */) {
|
|
45
|
-
throw new Error('Adapter must implement parseStreamChunk()');
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
/** Get required headers for the provider */
|
|
49
|
-
getHeaders(/* config */) {
|
|
50
|
-
return { 'Content-Type': 'application/json' };
|
|
51
|
-
}
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
// ─── OpenAI Adapter ──────────────────────────────────────────────
|
|
55
|
-
|
|
56
|
-
class OpenAIAdapter extends BaseAdapter {
|
|
57
|
-
constructor(config) {
|
|
58
|
-
super(config);
|
|
59
|
-
this.model = config.model || 'gpt-4o';
|
|
60
|
-
}
|
|
61
|
-
|
|
62
|
-
getHeaders(config) {
|
|
63
|
-
const h = { 'Content-Type': 'application/json' };
|
|
64
|
-
if (config.apiKey) h['Authorization'] = `Bearer ${config.apiKey}`;
|
|
65
|
-
return h;
|
|
66
|
-
}
|
|
67
|
-
|
|
68
|
-
formatRequest(messages, options = {}) {
|
|
69
|
-
const body = {
|
|
70
|
-
model: options.model || this.model,
|
|
71
|
-
messages: messages.map(m => {
|
|
72
|
-
const msg = { role: m.role, content: m.content };
|
|
73
|
-
if (m.tool_call_id) msg.tool_call_id = m.tool_call_id;
|
|
74
|
-
if (m.tool_calls) msg.tool_calls = m.tool_calls.map(tc => ({
|
|
75
|
-
id: tc.id,
|
|
76
|
-
type: 'function',
|
|
77
|
-
function: { name: tc.name, arguments: JSON.stringify(tc.arguments) },
|
|
78
|
-
}));
|
|
79
|
-
return msg;
|
|
80
|
-
}),
|
|
81
|
-
};
|
|
82
|
-
if (options.tools?.length) {
|
|
83
|
-
body.tools = options.tools.map(t => ({
|
|
84
|
-
type: 'function',
|
|
85
|
-
function: { name: t.name, description: t.description, parameters: t.parameters },
|
|
86
|
-
}));
|
|
87
|
-
}
|
|
88
|
-
if (options.temperature !== undefined) body.temperature = options.temperature;
|
|
89
|
-
if (options.maxTokens) body.max_tokens = options.maxTokens;
|
|
90
|
-
if (options.stream) body.stream = true;
|
|
91
|
-
|
|
92
|
-
// Structured output / JSON mode
|
|
93
|
-
if (options.responseFormat) {
|
|
94
|
-
const rf = options.responseFormat;
|
|
95
|
-
if (rf === 'json' || rf?.type === 'json_object') {
|
|
96
|
-
body.response_format = { type: 'json_object' };
|
|
97
|
-
} else if (rf?.type === 'json_schema') {
|
|
98
|
-
body.response_format = {
|
|
99
|
-
type: 'json_schema',
|
|
100
|
-
json_schema: {
|
|
101
|
-
name: rf.name || 'response',
|
|
102
|
-
schema: rf.schema,
|
|
103
|
-
strict: rf.strict !== false,
|
|
104
|
-
},
|
|
105
|
-
};
|
|
106
|
-
}
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
return body;
|
|
110
|
-
}
|
|
111
|
-
|
|
112
|
-
parseResponse(data) {
|
|
113
|
-
const choice = data.choices?.[0];
|
|
114
|
-
if (!choice) return { content: '', tool_calls: [], usage: data.usage };
|
|
115
|
-
|
|
116
|
-
const msg = choice.message || {};
|
|
117
|
-
const toolCalls = (msg.tool_calls || []).map(tc => ({
|
|
118
|
-
id: tc.id,
|
|
119
|
-
name: tc.function?.name,
|
|
120
|
-
arguments: this._safeParseArgs(tc.function?.arguments),
|
|
121
|
-
}));
|
|
122
|
-
|
|
123
|
-
return {
|
|
124
|
-
content: msg.content || '',
|
|
125
|
-
tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
126
|
-
usage: data.usage ? {
|
|
127
|
-
prompt_tokens: data.usage.prompt_tokens,
|
|
128
|
-
completion_tokens: data.usage.completion_tokens,
|
|
129
|
-
} : undefined,
|
|
130
|
-
};
|
|
131
|
-
}
|
|
132
|
-
|
|
133
|
-
parseStreamChunk(line) {
|
|
134
|
-
if (!line.startsWith('data: ')) return null;
|
|
135
|
-
const raw = line.slice(6).trim();
|
|
136
|
-
if (raw === '[DONE]') return { type: 'done' };
|
|
137
|
-
|
|
138
|
-
try {
|
|
139
|
-
const data = JSON.parse(raw);
|
|
140
|
-
const delta = data.choices?.[0]?.delta;
|
|
141
|
-
if (!delta) return null;
|
|
142
|
-
|
|
143
|
-
if (delta.tool_calls?.length) {
|
|
144
|
-
const tc = delta.tool_calls[0];
|
|
145
|
-
return {
|
|
146
|
-
type: 'tool_call_delta',
|
|
147
|
-
index: tc.index,
|
|
148
|
-
id: tc.id,
|
|
149
|
-
name: tc.function?.name,
|
|
150
|
-
argumentsDelta: tc.function?.arguments || '',
|
|
151
|
-
};
|
|
152
|
-
}
|
|
153
|
-
|
|
154
|
-
if (delta.content) {
|
|
155
|
-
return { type: 'text', content: delta.content };
|
|
156
|
-
}
|
|
157
|
-
|
|
158
|
-
if (data.usage) {
|
|
159
|
-
return { type: 'usage', usage: data.usage };
|
|
160
|
-
}
|
|
161
|
-
|
|
162
|
-
return null;
|
|
163
|
-
} catch {
|
|
164
|
-
return null;
|
|
165
|
-
}
|
|
166
|
-
}
|
|
167
|
-
|
|
168
|
-
_safeParseArgs(str) {
|
|
169
|
-
if (!str) return {};
|
|
170
|
-
try { return JSON.parse(str); } catch { return {}; }
|
|
171
|
-
}
|
|
172
|
-
}
|
|
173
|
-
|
|
174
|
-
// ─── Anthropic Adapter ───────────────────────────────────────────
|
|
175
|
-
|
|
176
|
-
class AnthropicAdapter extends BaseAdapter {
|
|
177
|
-
constructor(config) {
|
|
178
|
-
super(config);
|
|
179
|
-
this.model = config.model || 'claude-sonnet-4-5-20250929';
|
|
180
|
-
}
|
|
181
|
-
|
|
182
|
-
getHeaders(config) {
|
|
183
|
-
const h = { 'Content-Type': 'application/json' };
|
|
184
|
-
if (config.apiKey) {
|
|
185
|
-
h['x-api-key'] = config.apiKey;
|
|
186
|
-
h['anthropic-version'] = '2023-06-01';
|
|
187
|
-
}
|
|
188
|
-
return h;
|
|
189
|
-
}
|
|
190
|
-
|
|
191
|
-
formatRequest(messages, options = {}) {
|
|
192
|
-
// Anthropic separates system from messages
|
|
193
|
-
const systemMsgs = messages.filter(m => m.role === 'system');
|
|
194
|
-
const otherMsgs = messages.filter(m => m.role !== 'system');
|
|
195
|
-
|
|
196
|
-
const body = {
|
|
197
|
-
model: options.model || this.model,
|
|
198
|
-
max_tokens: options.maxTokens || 4096,
|
|
199
|
-
messages: otherMsgs.map(m => {
|
|
200
|
-
if (m.role === 'tool') {
|
|
201
|
-
return {
|
|
202
|
-
role: 'user',
|
|
203
|
-
content: [{
|
|
204
|
-
type: 'tool_result',
|
|
205
|
-
tool_use_id: m.tool_call_id,
|
|
206
|
-
content: m.content,
|
|
207
|
-
}],
|
|
208
|
-
};
|
|
209
|
-
}
|
|
210
|
-
if (m.tool_calls) {
|
|
211
|
-
return {
|
|
212
|
-
role: 'assistant',
|
|
213
|
-
content: m.tool_calls.map(tc => ({
|
|
214
|
-
type: 'tool_use',
|
|
215
|
-
id: tc.id,
|
|
216
|
-
name: tc.name,
|
|
217
|
-
input: tc.arguments,
|
|
218
|
-
})),
|
|
219
|
-
};
|
|
220
|
-
}
|
|
221
|
-
return { role: m.role, content: m.content };
|
|
222
|
-
}),
|
|
223
|
-
};
|
|
224
|
-
|
|
225
|
-
if (systemMsgs.length) {
|
|
226
|
-
body.system = systemMsgs.map(m => m.content).join('\n\n');
|
|
227
|
-
}
|
|
228
|
-
|
|
229
|
-
// Structured output / JSON mode (Anthropic has no native support)
|
|
230
|
-
// Strategy: augment system prompt + prefill assistant turn with '{'
|
|
231
|
-
if (options.responseFormat) {
|
|
232
|
-
const rf = options.responseFormat;
|
|
233
|
-
const jsonInstruction = '\n\nYou MUST respond with valid JSON only. No markdown, no explanation.';
|
|
234
|
-
|
|
235
|
-
if (rf === 'json' || rf?.type === 'json_object') {
|
|
236
|
-
body.system = (body.system || '') + jsonInstruction;
|
|
237
|
-
} else if (rf?.type === 'json_schema') {
|
|
238
|
-
const schemaStr = JSON.stringify(rf.schema, null, 2);
|
|
239
|
-
body.system = (body.system || '') +
|
|
240
|
-
jsonInstruction +
|
|
241
|
-
`\n\nYour response MUST conform to this JSON schema:\n${schemaStr}`;
|
|
242
|
-
}
|
|
243
|
-
|
|
244
|
-
// Prefill assistant message with '{' to force JSON output
|
|
245
|
-
body.messages.push({ role: 'assistant', content: '{' });
|
|
246
|
-
}
|
|
247
|
-
|
|
248
|
-
if (options.tools?.length) {
|
|
249
|
-
body.tools = options.tools.map(t => ({
|
|
250
|
-
name: t.name,
|
|
251
|
-
description: t.description,
|
|
252
|
-
input_schema: t.parameters,
|
|
253
|
-
}));
|
|
254
|
-
}
|
|
255
|
-
if (options.temperature !== undefined) body.temperature = options.temperature;
|
|
256
|
-
if (options.stream) body.stream = true;
|
|
257
|
-
return body;
|
|
258
|
-
}
|
|
259
|
-
|
|
260
|
-
parseResponse(data) {
|
|
261
|
-
const textBlocks = (data.content || []).filter(b => b.type === 'text');
|
|
262
|
-
const toolBlocks = (data.content || []).filter(b => b.type === 'tool_use');
|
|
263
|
-
|
|
264
|
-
const content = textBlocks.map(b => b.text).join('');
|
|
265
|
-
const toolCalls = toolBlocks.map(b => ({
|
|
266
|
-
id: b.id,
|
|
267
|
-
name: b.name,
|
|
268
|
-
arguments: b.input || {},
|
|
269
|
-
}));
|
|
270
|
-
|
|
271
|
-
return {
|
|
272
|
-
content,
|
|
273
|
-
tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
274
|
-
usage: data.usage ? {
|
|
275
|
-
prompt_tokens: data.usage.input_tokens,
|
|
276
|
-
completion_tokens: data.usage.output_tokens,
|
|
277
|
-
} : undefined,
|
|
278
|
-
};
|
|
279
|
-
}
|
|
280
|
-
|
|
281
|
-
parseStreamChunk(line) {
|
|
282
|
-
if (!line.startsWith('data: ')) return null;
|
|
283
|
-
const raw = line.slice(6).trim();
|
|
284
|
-
|
|
285
|
-
try {
|
|
286
|
-
const data = JSON.parse(raw);
|
|
287
|
-
|
|
288
|
-
if (data.type === 'content_block_delta') {
|
|
289
|
-
if (data.delta?.type === 'text_delta') {
|
|
290
|
-
return { type: 'text', content: data.delta.text };
|
|
291
|
-
}
|
|
292
|
-
if (data.delta?.type === 'input_json_delta') {
|
|
293
|
-
return { type: 'tool_call_delta', argumentsDelta: data.delta.partial_json || '' };
|
|
294
|
-
}
|
|
295
|
-
}
|
|
296
|
-
|
|
297
|
-
if (data.type === 'content_block_start' && data.content_block?.type === 'tool_use') {
|
|
298
|
-
return {
|
|
299
|
-
type: 'tool_call_start',
|
|
300
|
-
id: data.content_block.id,
|
|
301
|
-
name: data.content_block.name,
|
|
302
|
-
};
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
if (data.type === 'message_delta' && data.usage) {
|
|
306
|
-
return {
|
|
307
|
-
type: 'usage',
|
|
308
|
-
usage: { prompt_tokens: data.usage.input_tokens, completion_tokens: data.usage.output_tokens },
|
|
309
|
-
};
|
|
310
|
-
}
|
|
311
|
-
|
|
312
|
-
if (data.type === 'message_stop') {
|
|
313
|
-
return { type: 'done' };
|
|
314
|
-
}
|
|
315
|
-
|
|
316
|
-
return null;
|
|
317
|
-
} catch {
|
|
318
|
-
return null;
|
|
319
|
-
}
|
|
320
|
-
}
|
|
321
|
-
}
|
|
322
|
-
|
|
323
|
-
// ─── Ollama Adapter ──────────────────────────────────────────────
|
|
324
|
-
|
|
325
|
-
class OllamaAdapter extends BaseAdapter {
|
|
326
|
-
constructor(config) {
|
|
327
|
-
super(config);
|
|
328
|
-
this.model = config.model || 'llama3';
|
|
329
|
-
}
|
|
330
|
-
|
|
331
|
-
getHeaders() {
|
|
332
|
-
return { 'Content-Type': 'application/json' };
|
|
333
|
-
}
|
|
334
|
-
|
|
335
|
-
formatRequest(messages, options = {}) {
|
|
336
|
-
const body = {
|
|
337
|
-
model: options.model || this.model,
|
|
338
|
-
messages: messages.map(m => ({ role: m.role, content: m.content })),
|
|
339
|
-
};
|
|
340
|
-
if (options.tools?.length) {
|
|
341
|
-
body.tools = options.tools.map(t => ({
|
|
342
|
-
type: 'function',
|
|
343
|
-
function: { name: t.name, description: t.description, parameters: t.parameters },
|
|
344
|
-
}));
|
|
345
|
-
}
|
|
346
|
-
if (options.temperature !== undefined) body.options = { temperature: options.temperature };
|
|
347
|
-
if (options.stream !== undefined) body.stream = options.stream;
|
|
348
|
-
|
|
349
|
-
// Structured output / JSON mode
|
|
350
|
-
if (options.responseFormat) {
|
|
351
|
-
const rf = options.responseFormat;
|
|
352
|
-
if (rf === 'json' || rf?.type === 'json_object') {
|
|
353
|
-
body.format = 'json';
|
|
354
|
-
} else if (rf?.type === 'json_schema') {
|
|
355
|
-
body.format = rf.schema;
|
|
356
|
-
}
|
|
357
|
-
}
|
|
358
|
-
|
|
359
|
-
return body;
|
|
360
|
-
}
|
|
361
|
-
|
|
362
|
-
parseResponse(data) {
|
|
363
|
-
const msg = data.message || {};
|
|
364
|
-
const toolCalls = (msg.tool_calls || []).map((tc, i) => ({
|
|
365
|
-
id: `ollama_${i}_${Date.now()}`,
|
|
366
|
-
name: tc.function?.name,
|
|
367
|
-
arguments: tc.function?.arguments || {},
|
|
368
|
-
}));
|
|
369
|
-
|
|
370
|
-
return {
|
|
371
|
-
content: msg.content || '',
|
|
372
|
-
tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
373
|
-
usage: data.eval_count ? {
|
|
374
|
-
prompt_tokens: data.prompt_eval_count || 0,
|
|
375
|
-
completion_tokens: data.eval_count || 0,
|
|
376
|
-
} : undefined,
|
|
377
|
-
};
|
|
378
|
-
}
|
|
379
|
-
|
|
380
|
-
parseStreamChunk(line) {
|
|
381
|
-
try {
|
|
382
|
-
const data = JSON.parse(line);
|
|
383
|
-
if (data.done) return { type: 'done' };
|
|
384
|
-
if (data.message?.content) return { type: 'text', content: data.message.content };
|
|
385
|
-
return null;
|
|
386
|
-
} catch {
|
|
387
|
-
return null;
|
|
388
|
-
}
|
|
389
|
-
}
|
|
390
|
-
}
|
|
391
|
-
|
|
392
|
-
// ─── Custom Adapter (user-provided send/stream) ──────────────────
|
|
393
|
-
|
|
394
|
-
class CustomAdapter extends BaseAdapter {
|
|
395
|
-
constructor(config) {
|
|
396
|
-
super(config);
|
|
397
|
-
this._sendFn = config.send || null;
|
|
398
|
-
this._streamFn = config.stream || null;
|
|
399
|
-
}
|
|
400
|
-
|
|
401
|
-
/** Custom adapters bypass formatRequest/parseResponse */
|
|
402
|
-
get isCustom() { return true; }
|
|
403
|
-
}
|
|
404
|
-
|
|
405
|
-
// ─── Provider Registry ───────────────────────────────────────────
|
|
406
|
-
|
|
407
|
-
const BUILTIN_ADAPTERS = {
|
|
408
|
-
openai: OpenAIAdapter,
|
|
409
|
-
anthropic: AnthropicAdapter,
|
|
410
|
-
ollama: OllamaAdapter,
|
|
411
|
-
};
|
|
412
|
-
|
|
413
|
-
// ─── Main Provider Class ─────────────────────────────────────────
|
|
414
|
-
|
|
415
|
-
export class WuAIProvider {
|
|
416
|
-
constructor() {
|
|
417
|
-
this._providers = new Map();
|
|
418
|
-
this._active = null;
|
|
419
|
-
this._activeName = null;
|
|
420
|
-
this._activeConfig = {};
|
|
421
|
-
this._retryConfig = { maxRetries: 3, baseDelayMs: 1000 };
|
|
422
|
-
}
|
|
423
|
-
|
|
424
|
-
/**
|
|
425
|
-
* Register and activate a provider.
|
|
426
|
-
*
|
|
427
|
-
* @param {string} name - Provider name or built-in adapter ('openai', 'anthropic', 'ollama', 'custom')
|
|
428
|
-
* @param {object} config - Provider configuration
|
|
429
|
-
* @param {string} [config.endpoint] - API endpoint URL
|
|
430
|
-
* @param {string} [config.adapter] - Built-in adapter name (if name is custom)
|
|
431
|
-
* @param {string} [config.apiKey] - API key (WARNING: exposed in browser)
|
|
432
|
-
* @param {string} [config.model] - Model name
|
|
433
|
-
* @param {Function} [config.send] - Custom send function
|
|
434
|
-
* @param {Function} [config.stream] - Custom stream generator function
|
|
435
|
-
*/
|
|
436
|
-
register(name, config = {}) {
|
|
437
|
-
const adapterName = config.adapter || name;
|
|
438
|
-
const AdapterClass = BUILTIN_ADAPTERS[adapterName];
|
|
439
|
-
|
|
440
|
-
let adapter;
|
|
441
|
-
if (config.send || config.stream) {
|
|
442
|
-
adapter = new CustomAdapter(config);
|
|
443
|
-
} else if (AdapterClass) {
|
|
444
|
-
adapter = new AdapterClass(config);
|
|
445
|
-
} else {
|
|
446
|
-
throw new Error(
|
|
447
|
-
`[wu-ai] Unknown adapter '${adapterName}'. ` +
|
|
448
|
-
`Available: ${Object.keys(BUILTIN_ADAPTERS).join(', ')}, or provide custom send/stream.`
|
|
449
|
-
);
|
|
450
|
-
}
|
|
451
|
-
|
|
452
|
-
this._providers.set(name, { adapter, config });
|
|
453
|
-
|
|
454
|
-
// Auto-activate if first provider or explicitly active
|
|
455
|
-
if (!this._active || config.active !== false) {
|
|
456
|
-
this._active = adapter;
|
|
457
|
-
this._activeName = name;
|
|
458
|
-
this._activeConfig = config;
|
|
459
|
-
}
|
|
460
|
-
|
|
461
|
-
logger.wuInfo(`[wu-ai] Provider registered: '${name}' (adapter: ${adapterName})`);
|
|
462
|
-
}
|
|
463
|
-
|
|
464
|
-
/**
|
|
465
|
-
* Switch active provider.
|
|
466
|
-
*/
|
|
467
|
-
use(name) {
|
|
468
|
-
const entry = this._providers.get(name);
|
|
469
|
-
if (!entry) throw new Error(`[wu-ai] Provider '${name}' not registered`);
|
|
470
|
-
this._active = entry.adapter;
|
|
471
|
-
this._activeName = name;
|
|
472
|
-
this._activeConfig = entry.config;
|
|
473
|
-
}
|
|
474
|
-
|
|
475
|
-
/**
|
|
476
|
-
* Send a non-streaming request.
|
|
477
|
-
*
|
|
478
|
-
* @param {Array} messages - Normalized messages
|
|
479
|
-
* @param {object} [options] - { tools, temperature, maxTokens, signal }
|
|
480
|
-
* @returns {Promise<{ content: string, tool_calls?: Array, usage?: object }>}
|
|
481
|
-
*/
|
|
482
|
-
async send(messages, options = {}) {
|
|
483
|
-
const { adapter, config } = this._resolveProvider(options.provider);
|
|
484
|
-
|
|
485
|
-
// Custom adapter: call user function directly
|
|
486
|
-
if (adapter.isCustom && adapter._sendFn) {
|
|
487
|
-
return adapter._sendFn(messages, options);
|
|
488
|
-
}
|
|
489
|
-
|
|
490
|
-
const endpoint = config.endpoint || config.baseUrl;
|
|
491
|
-
if (!endpoint) {
|
|
492
|
-
throw new Error('[wu-ai] No endpoint configured. Set config.endpoint or config.baseUrl.');
|
|
493
|
-
}
|
|
494
|
-
|
|
495
|
-
const url = this._resolveUrl(endpoint);
|
|
496
|
-
const body = adapter.formatRequest(messages, { ...options, stream: false });
|
|
497
|
-
const headers = adapter.getHeaders(config);
|
|
498
|
-
|
|
499
|
-
const response = await this._fetchWithRetry(url, {
|
|
500
|
-
method: 'POST',
|
|
501
|
-
headers,
|
|
502
|
-
body: JSON.stringify(body),
|
|
503
|
-
signal: options.signal,
|
|
504
|
-
});
|
|
505
|
-
|
|
506
|
-
const data = await response.json();
|
|
507
|
-
const result = adapter.parseResponse(data);
|
|
508
|
-
|
|
509
|
-
// Anthropic prefill compensation: we prepended '{' to force JSON,
|
|
510
|
-
// so the response content is the continuation — restore the full JSON
|
|
511
|
-
if (adapter instanceof AnthropicAdapter && options.responseFormat && result.content) {
|
|
512
|
-
result.content = '{' + result.content;
|
|
513
|
-
}
|
|
514
|
-
|
|
515
|
-
// Validate JSON when responseFormat was requested
|
|
516
|
-
if (options.responseFormat && result.content) {
|
|
517
|
-
try {
|
|
518
|
-
result.parsed = JSON.parse(result.content);
|
|
519
|
-
} catch {
|
|
520
|
-
result.parseError = 'Response is not valid JSON';
|
|
521
|
-
logger.wuDebug('[wu-ai] responseFormat requested but LLM returned invalid JSON');
|
|
522
|
-
}
|
|
523
|
-
}
|
|
524
|
-
|
|
525
|
-
return result;
|
|
526
|
-
}
|
|
527
|
-
|
|
528
|
-
/**
|
|
529
|
-
* Send a streaming request. Returns an async generator of chunks.
|
|
530
|
-
*
|
|
531
|
-
* @param {Array} messages - Normalized messages
|
|
532
|
-
* @param {object} [options] - { tools, temperature, maxTokens, signal }
|
|
533
|
-
* @yields {StreamChunk}
|
|
534
|
-
*/
|
|
535
|
-
async *stream(messages, options = {}) {
|
|
536
|
-
const { adapter, config } = this._resolveProvider(options.provider);
|
|
537
|
-
|
|
538
|
-
// Custom adapter: call user generator directly
|
|
539
|
-
if (adapter.isCustom && adapter._streamFn) {
|
|
540
|
-
yield* adapter._streamFn(messages, options);
|
|
541
|
-
return;
|
|
542
|
-
}
|
|
543
|
-
|
|
544
|
-
const endpoint = config.endpoint || config.baseUrl;
|
|
545
|
-
if (!endpoint) {
|
|
546
|
-
throw new Error('[wu-ai] No endpoint configured. Set config.endpoint or config.baseUrl.');
|
|
547
|
-
}
|
|
548
|
-
|
|
549
|
-
const url = this._resolveUrl(endpoint);
|
|
550
|
-
const body = adapter.formatRequest(messages, { ...options, stream: true });
|
|
551
|
-
const headers = adapter.getHeaders(config);
|
|
552
|
-
|
|
553
|
-
const response = await fetch(url, {
|
|
554
|
-
method: 'POST',
|
|
555
|
-
headers,
|
|
556
|
-
body: JSON.stringify(body),
|
|
557
|
-
signal: options.signal,
|
|
558
|
-
});
|
|
559
|
-
|
|
560
|
-
if (!response.ok) {
|
|
561
|
-
throw new Error(`[wu-ai] Stream request failed: ${response.status} ${response.statusText}`);
|
|
562
|
-
}
|
|
563
|
-
|
|
564
|
-
const reader = response.body.getReader();
|
|
565
|
-
const decoder = new TextDecoder();
|
|
566
|
-
let buffer = '';
|
|
567
|
-
|
|
568
|
-
// Anthropic prefill compensation for streaming:
|
|
569
|
-
// emit the '{' we used as prefill before the first real chunk
|
|
570
|
-
let needsPrefill = adapter instanceof AnthropicAdapter && !!options.responseFormat;
|
|
571
|
-
|
|
572
|
-
try {
|
|
573
|
-
while (true) {
|
|
574
|
-
const { done, value } = await reader.read();
|
|
575
|
-
if (done) break;
|
|
576
|
-
|
|
577
|
-
buffer += decoder.decode(value, { stream: true });
|
|
578
|
-
const lines = buffer.split('\n');
|
|
579
|
-
buffer = lines.pop() || ''; // keep incomplete last line
|
|
580
|
-
|
|
581
|
-
for (const line of lines) {
|
|
582
|
-
const trimmed = line.trim();
|
|
583
|
-
if (!trimmed) continue;
|
|
584
|
-
|
|
585
|
-
const chunk = adapter.parseStreamChunk(trimmed);
|
|
586
|
-
if (chunk) {
|
|
587
|
-
if (needsPrefill && chunk.type === 'text') {
|
|
588
|
-
chunk.content = '{' + chunk.content;
|
|
589
|
-
needsPrefill = false;
|
|
590
|
-
}
|
|
591
|
-
yield chunk;
|
|
592
|
-
}
|
|
593
|
-
if (chunk?.type === 'done') return;
|
|
594
|
-
}
|
|
595
|
-
}
|
|
596
|
-
|
|
597
|
-
// Process remaining buffer
|
|
598
|
-
if (buffer.trim()) {
|
|
599
|
-
const chunk = adapter.parseStreamChunk(buffer.trim());
|
|
600
|
-
if (chunk) yield chunk;
|
|
601
|
-
}
|
|
602
|
-
} finally {
|
|
603
|
-
reader.releaseLock();
|
|
604
|
-
}
|
|
605
|
-
}
|
|
606
|
-
|
|
607
|
-
// ── Retry logic ──
|
|
608
|
-
|
|
609
|
-
async _fetchWithRetry(url, options) {
|
|
610
|
-
let lastError;
|
|
611
|
-
for (let attempt = 0; attempt <= this._retryConfig.maxRetries; attempt++) {
|
|
612
|
-
try {
|
|
613
|
-
const response = await fetch(url, options);
|
|
614
|
-
|
|
615
|
-
// Only retry on 429 (rate limit) and 5xx
|
|
616
|
-
if (response.ok) return response;
|
|
617
|
-
|
|
618
|
-
if (response.status === 429 || response.status >= 500) {
|
|
619
|
-
lastError = new Error(`HTTP ${response.status}: ${response.statusText}`);
|
|
620
|
-
if (attempt < this._retryConfig.maxRetries) {
|
|
621
|
-
const delay = this._retryConfig.baseDelayMs * Math.pow(2, attempt);
|
|
622
|
-
logger.wuDebug(`[wu-ai] Retry ${attempt + 1}/${this._retryConfig.maxRetries} in ${delay}ms (${response.status})`);
|
|
623
|
-
await new Promise(r => setTimeout(r, delay));
|
|
624
|
-
continue;
|
|
625
|
-
}
|
|
626
|
-
}
|
|
627
|
-
|
|
628
|
-
// 4xx (except 429) — don't retry, fail immediately
|
|
629
|
-
const clientError = new Error(`[wu-ai] Request failed: ${response.status} ${response.statusText}`);
|
|
630
|
-
clientError._noRetry = true;
|
|
631
|
-
throw clientError;
|
|
632
|
-
} catch (err) {
|
|
633
|
-
if (err.name === 'AbortError') throw err;
|
|
634
|
-
if (err._noRetry) throw err; // 4xx — don't retry
|
|
635
|
-
lastError = err;
|
|
636
|
-
if (attempt < this._retryConfig.maxRetries) {
|
|
637
|
-
const delay = this._retryConfig.baseDelayMs * Math.pow(2, attempt);
|
|
638
|
-
await new Promise(r => setTimeout(r, delay));
|
|
639
|
-
continue;
|
|
640
|
-
}
|
|
641
|
-
}
|
|
642
|
-
}
|
|
643
|
-
throw lastError;
|
|
644
|
-
}
|
|
645
|
-
|
|
646
|
-
// ── Helpers ──
|
|
647
|
-
|
|
648
|
-
_resolveUrl(endpoint) {
|
|
649
|
-
// Relative URLs (e.g., '/api/ai/chat') resolve against current origin
|
|
650
|
-
if (endpoint.startsWith('/')) {
|
|
651
|
-
return typeof window !== 'undefined'
|
|
652
|
-
? `${window.location.origin}${endpoint}`
|
|
653
|
-
: endpoint;
|
|
654
|
-
}
|
|
655
|
-
return endpoint;
|
|
656
|
-
}
|
|
657
|
-
|
|
658
|
-
/**
|
|
659
|
-
* Resolve which provider/adapter to use for a request.
|
|
660
|
-
* Supports per-call selection: options.provider = 'anthropic'
|
|
661
|
-
*
|
|
662
|
-
* @param {string} [providerName] - Optional provider name override
|
|
663
|
-
* @returns {{ adapter: BaseAdapter, config: object }}
|
|
664
|
-
*/
|
|
665
|
-
_resolveProvider(providerName) {
|
|
666
|
-
if (providerName) {
|
|
667
|
-
const entry = this._providers.get(providerName);
|
|
668
|
-
if (!entry) {
|
|
669
|
-
throw new Error(`[wu-ai] Provider '${providerName}' not registered. Available: ${[...this._providers.keys()].join(', ')}`);
|
|
670
|
-
}
|
|
671
|
-
return { adapter: entry.adapter, config: entry.config };
|
|
672
|
-
}
|
|
673
|
-
this._ensureActive();
|
|
674
|
-
return { adapter: this._active, config: this._activeConfig };
|
|
675
|
-
}
|
|
676
|
-
|
|
677
|
-
_ensureActive() {
|
|
678
|
-
if (!this._active) {
|
|
679
|
-
throw new Error(
|
|
680
|
-
'[wu-ai] No provider configured. Call wu.ai.provider("name", { endpoint, adapter }) first.'
|
|
681
|
-
);
|
|
682
|
-
}
|
|
683
|
-
}
|
|
684
|
-
|
|
685
|
-
configureRetry(config) {
|
|
686
|
-
if (config.maxRetries !== undefined) this._retryConfig.maxRetries = config.maxRetries;
|
|
687
|
-
if (config.baseDelayMs !== undefined) this._retryConfig.baseDelayMs = config.baseDelayMs;
|
|
688
|
-
}
|
|
689
|
-
|
|
690
|
-
getActiveProvider() {
|
|
691
|
-
return this._activeName;
|
|
692
|
-
}
|
|
693
|
-
|
|
694
|
-
getStats() {
|
|
695
|
-
return {
|
|
696
|
-
activeProvider: this._activeName,
|
|
697
|
-
registeredProviders: [...this._providers.keys()],
|
|
698
|
-
};
|
|
699
|
-
}
|
|
700
|
-
}
|
|
1
|
+
/**
|
|
2
|
+
* WU-AI-PROVIDER: BYOL (Bring Your Own LLM) provider system
|
|
3
|
+
*
|
|
4
|
+
* Pure fetch(), zero dependencies. Adapters normalize request/response
|
|
5
|
+
* across OpenAI, Anthropic, Ollama, and custom providers.
|
|
6
|
+
*
|
|
7
|
+
* Internal normalized format:
|
|
8
|
+
* Request: { role, content, tool_calls?, tool_call_id? }
|
|
9
|
+
* Response: { content, tool_calls?, usage? }
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import { logger } from '../core/wu-logger.js';
|
|
13
|
+
|
|
14
|
+
// ─── Normalized types (internal) ─────────────────────────────────
|
|
15
|
+
//
|
|
16
|
+
// Message: { role: 'system'|'user'|'assistant'|'tool', content: string,
|
|
17
|
+
// tool_calls?: ToolCall[], tool_call_id?: string }
|
|
18
|
+
//
|
|
19
|
+
// ToolCall: { id: string, name: string, arguments: object }
|
|
20
|
+
//
|
|
21
|
+
// Response: { content: string, tool_calls?: ToolCall[], usage?: { prompt_tokens, completion_tokens } }
|
|
22
|
+
//
|
|
23
|
+
// StreamChunk: { type: 'text'|'tool_call_start'|'tool_call_delta'|'usage'|'done'|'error',
//               content?: string, id?: string, name?: string, index?: number,
//               argumentsDelta?: string, usage?: object, error?: string }
|
|
25
|
+
|
|
26
|
+
// ─── Base Adapter ────────────────────────────────────────────────
|
|
27
|
+
|
|
28
|
+
class BaseAdapter {
  /**
   * Common base for provider adapters. Concrete adapters translate between
   * the framework's normalized message format and a provider's wire format.
   *
   * @param {object} [config] - Adapter configuration; `config.model` seeds
   *   the default model name (empty string when absent).
   */
  constructor(config = {}) {
    const { model } = config;
    this.model = model || '';
  }

  /**
   * Build the provider-specific request body from normalized messages.
   * Abstract — subclasses must override.
   * @throws {Error} Always, on the base class.
   */
  formatRequest(/* messages, options */) {
    throw new Error('Adapter must implement formatRequest()');
  }

  /**
   * Convert a raw provider response into the normalized Response shape.
   * Abstract — subclasses must override.
   * @throws {Error} Always, on the base class.
   */
  parseResponse(/* rawData */) {
    throw new Error('Adapter must implement parseResponse()');
  }

  /**
   * Translate one streamed line into a StreamChunk, or null to skip it.
   * Abstract — subclasses must override.
   * @throws {Error} Always, on the base class.
   */
  parseStreamChunk(/* line */) {
    throw new Error('Adapter must implement parseStreamChunk()');
  }

  /**
   * HTTP headers required by the provider.
   * Default implementation supplies only the JSON content type.
   * @returns {object} Header map.
   */
  getHeaders(/* config */) {
    return { 'Content-Type': 'application/json' };
  }
}
|
|
53
|
+
|
|
54
|
+
// ─── OpenAI Adapter ──────────────────────────────────────────────
|
|
55
|
+
|
|
56
|
+
class OpenAIAdapter extends BaseAdapter {
  /**
   * Adapter for the OpenAI Chat Completions API and compatible endpoints.
   *
   * @param {object} config - { model?: string, apiKey?: string }
   */
  constructor(config) {
    super(config);
    this.model = config.model || 'gpt-4o';
  }

  /** Bearer-token Authorization header when an apiKey is configured. */
  getHeaders(config) {
    const h = { 'Content-Type': 'application/json' };
    if (config.apiKey) h['Authorization'] = `Bearer ${config.apiKey}`;
    return h;
  }

  /**
   * Build a Chat Completions request body from normalized messages.
   *
   * @param {Array} messages - Normalized messages ({ role, content, tool_calls?, tool_call_id? })
   * @param {object} [options] - { model?, tools?, temperature?, maxTokens?, stream?, responseFormat? }
   * @returns {object} Provider-specific request body.
   */
  formatRequest(messages, options = {}) {
    const body = {
      model: options.model || this.model,
      messages: messages.map(m => {
        const msg = { role: m.role, content: m.content };
        if (m.tool_call_id) msg.tool_call_id = m.tool_call_id;
        // OpenAI expects tool-call arguments serialized as a JSON string.
        if (m.tool_calls) msg.tool_calls = m.tool_calls.map(tc => ({
          id: tc.id,
          type: 'function',
          function: { name: tc.name, arguments: JSON.stringify(tc.arguments) },
        }));
        return msg;
      }),
    };
    if (options.tools?.length) {
      body.tools = options.tools.map(t => ({
        type: 'function',
        function: { name: t.name, description: t.description, parameters: t.parameters },
      }));
    }
    if (options.temperature !== undefined) body.temperature = options.temperature;
    if (options.maxTokens) body.max_tokens = options.maxTokens;
    if (options.stream) body.stream = true;

    // Structured output / JSON mode
    if (options.responseFormat) {
      const rf = options.responseFormat;
      if (rf === 'json' || rf?.type === 'json_object') {
        body.response_format = { type: 'json_object' };
      } else if (rf?.type === 'json_schema') {
        body.response_format = {
          type: 'json_schema',
          json_schema: {
            name: rf.name || 'response',
            schema: rf.schema,
            strict: rf.strict !== false,
          },
        };
      }
    }

    return body;
  }

  /**
   * Normalize a non-streaming Chat Completions response.
   * @returns {{ content: string, tool_calls?: Array, usage?: object }}
   */
  parseResponse(data) {
    const choice = data.choices?.[0];
    if (!choice) return { content: '', tool_calls: [], usage: data.usage };

    const msg = choice.message || {};
    const toolCalls = (msg.tool_calls || []).map(tc => ({
      id: tc.id,
      name: tc.function?.name,
      arguments: this._safeParseArgs(tc.function?.arguments),
    }));

    return {
      content: msg.content || '',
      tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
      usage: data.usage ? {
        prompt_tokens: data.usage.prompt_tokens,
        completion_tokens: data.usage.completion_tokens,
      } : undefined,
    };
  }

  /**
   * Parse one SSE line from a streamed response into a StreamChunk,
   * or null for lines that carry no chunk (comments, keep-alives, parse errors).
   */
  parseStreamChunk(line) {
    if (!line.startsWith('data: ')) return null;
    const raw = line.slice(6).trim();
    if (raw === '[DONE]') return { type: 'done' };

    try {
      const data = JSON.parse(raw);
      const delta = data.choices?.[0]?.delta;

      // FIX: the final usage chunk (sent when stream_options.include_usage
      // is enabled) carries an empty `choices` array, so usage must be
      // checked before bailing out on a missing delta — previously the
      // usage branch below was unreachable for that chunk.
      if (!delta) {
        return data.usage ? { type: 'usage', usage: data.usage } : null;
      }

      if (delta.tool_calls?.length) {
        const tc = delta.tool_calls[0];
        return {
          type: 'tool_call_delta',
          index: tc.index,
          id: tc.id,
          name: tc.function?.name,
          argumentsDelta: tc.function?.arguments || '',
        };
      }

      if (delta.content) {
        return { type: 'text', content: delta.content };
      }

      if (data.usage) {
        return { type: 'usage', usage: data.usage };
      }

      return null;
    } catch {
      return null;
    }
  }

  /** Parse a JSON-string tool-argument payload, falling back to {} on bad JSON. */
  _safeParseArgs(str) {
    if (!str) return {};
    try { return JSON.parse(str); } catch { return {}; }
  }
}
|
|
173
|
+
|
|
174
|
+
// ─── Anthropic Adapter ───────────────────────────────────────────
|
|
175
|
+
|
|
176
|
+
class AnthropicAdapter extends BaseAdapter {
  /**
   * Adapter for the Anthropic Messages API.
   *
   * @param {object} config - { model?: string, apiKey? }
   */
  constructor(config) {
    super(config);
    this.model = config.model || 'claude-sonnet-4-5-20250929';
  }

  /**
   * Anthropic authenticates with `x-api-key` (not a Bearer token) and
   * requires an `anthropic-version` header alongside it.
   */
  getHeaders(config) {
    const h = { 'Content-Type': 'application/json' };
    if (config.apiKey) {
      h['x-api-key'] = config.apiKey;
      h['anthropic-version'] = '2023-06-01';
    }
    return h;
  }

  /**
   * Build a Messages API request body from normalized messages.
   *
   * Normalized 'tool' messages become user-turn `tool_result` blocks, and
   * assistant messages carrying tool_calls become `tool_use` content blocks.
   *
   * @param {Array} messages - Normalized messages
   * @param {object} [options] - { model?, tools?, temperature?, maxTokens?, stream?, responseFormat? }
   * @returns {object} Provider-specific request body.
   */
  formatRequest(messages, options = {}) {
    // Anthropic separates system from messages (top-level `system` field)
    const systemMsgs = messages.filter(m => m.role === 'system');
    const otherMsgs = messages.filter(m => m.role !== 'system');

    const body = {
      model: options.model || this.model,
      // max_tokens is mandatory for the Messages API, hence the default
      max_tokens: options.maxTokens || 4096,
      messages: otherMsgs.map(m => {
        if (m.role === 'tool') {
          // Tool results travel back inside a user turn
          return {
            role: 'user',
            content: [{
              type: 'tool_result',
              tool_use_id: m.tool_call_id,
              content: m.content,
            }],
          };
        }
        if (m.tool_calls) {
          // Assistant tool invocations become tool_use content blocks
          return {
            role: 'assistant',
            content: m.tool_calls.map(tc => ({
              type: 'tool_use',
              id: tc.id,
              name: tc.name,
              input: tc.arguments,
            })),
          };
        }
        return { role: m.role, content: m.content };
      }),
    };

    if (systemMsgs.length) {
      body.system = systemMsgs.map(m => m.content).join('\n\n');
    }

    // Structured output / JSON mode (Anthropic has no native support)
    // Strategy: augment system prompt + prefill assistant turn with '{'
    if (options.responseFormat) {
      const rf = options.responseFormat;
      const jsonInstruction = '\n\nYou MUST respond with valid JSON only. No markdown, no explanation.';

      if (rf === 'json' || rf?.type === 'json_object') {
        body.system = (body.system || '') + jsonInstruction;
      } else if (rf?.type === 'json_schema') {
        const schemaStr = JSON.stringify(rf.schema, null, 2);
        body.system = (body.system || '') +
          jsonInstruction +
          `\n\nYour response MUST conform to this JSON schema:\n${schemaStr}`;
      }

      // Prefill assistant message with '{' to force JSON output.
      // The caller (WuAIProvider.send/stream) re-prepends this '{' to the
      // response content to reconstruct the full JSON document.
      body.messages.push({ role: 'assistant', content: '{' });
    }

    if (options.tools?.length) {
      body.tools = options.tools.map(t => ({
        name: t.name,
        description: t.description,
        input_schema: t.parameters,
      }));
    }
    if (options.temperature !== undefined) body.temperature = options.temperature;
    if (options.stream) body.stream = true;
    return body;
  }

  /**
   * Normalize a non-streaming Messages API response: text blocks are
   * concatenated into `content`, tool_use blocks become tool_calls, and
   * input/output token counts map onto prompt/completion tokens.
   * @returns {{ content: string, tool_calls?: Array, usage?: object }}
   */
  parseResponse(data) {
    const textBlocks = (data.content || []).filter(b => b.type === 'text');
    const toolBlocks = (data.content || []).filter(b => b.type === 'tool_use');

    const content = textBlocks.map(b => b.text).join('');
    const toolCalls = toolBlocks.map(b => ({
      id: b.id,
      name: b.name,
      arguments: b.input || {},
    }));

    return {
      content,
      tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
      usage: data.usage ? {
        prompt_tokens: data.usage.input_tokens,
        completion_tokens: data.usage.output_tokens,
      } : undefined,
    };
  }

  /**
   * Parse one SSE line from a streamed Messages API response.
   * Maps Anthropic event types onto StreamChunks:
   *   content_block_delta/text_delta       -> { type: 'text' }
   *   content_block_delta/input_json_delta -> { type: 'tool_call_delta' }
   *   content_block_start (tool_use)       -> { type: 'tool_call_start' }
   *   message_delta (with usage)           -> { type: 'usage' }
   *   message_stop                         -> { type: 'done' }
   * Returns null for anything else (including unparseable lines).
   */
  parseStreamChunk(line) {
    if (!line.startsWith('data: ')) return null;
    const raw = line.slice(6).trim();

    try {
      const data = JSON.parse(raw);

      if (data.type === 'content_block_delta') {
        if (data.delta?.type === 'text_delta') {
          return { type: 'text', content: data.delta.text };
        }
        if (data.delta?.type === 'input_json_delta') {
          return { type: 'tool_call_delta', argumentsDelta: data.delta.partial_json || '' };
        }
      }

      if (data.type === 'content_block_start' && data.content_block?.type === 'tool_use') {
        return {
          type: 'tool_call_start',
          id: data.content_block.id,
          name: data.content_block.name,
        };
      }

      if (data.type === 'message_delta' && data.usage) {
        return {
          type: 'usage',
          usage: { prompt_tokens: data.usage.input_tokens, completion_tokens: data.usage.output_tokens },
        };
      }

      if (data.type === 'message_stop') {
        return { type: 'done' };
      }

      return null;
    } catch {
      return null;
    }
  }
}
|
|
322
|
+
|
|
323
|
+
// ─── Ollama Adapter ──────────────────────────────────────────────
|
|
324
|
+
|
|
325
|
+
class OllamaAdapter extends BaseAdapter {
  /**
   * Adapter for a local Ollama server's chat endpoint.
   * @param {object} config - { model?: string }
   */
  constructor(config) {
    super(config);
    this.model = config.model || 'llama3';
  }

  /** Ollama needs no auth — JSON content type only. */
  getHeaders() {
    return { 'Content-Type': 'application/json' };
  }

  /**
   * Build an Ollama chat request body.
   * Only role/content are forwarded per message; tool definitions are
   * mapped to the OpenAI-style function schema Ollama accepts.
   *
   * @param {Array} messages - Normalized messages
   * @param {object} [options] - { model?, tools?, temperature?, stream?, responseFormat? }
   * @returns {object} Provider-specific request body.
   */
  formatRequest(messages, options = {}) {
    const request = {
      model: options.model || this.model,
      messages: messages.map(({ role, content }) => ({ role, content })),
    };

    const toolDefs = options.tools;
    if (toolDefs?.length) {
      request.tools = toolDefs.map((def) => ({
        type: 'function',
        function: { name: def.name, description: def.description, parameters: def.parameters },
      }));
    }

    if (options.temperature !== undefined) {
      request.options = { temperature: options.temperature };
    }
    if (options.stream !== undefined) {
      request.stream = options.stream;
    }

    // JSON output: Ollama takes the literal string 'json' or a schema object
    // in the `format` field.
    const rf = options.responseFormat;
    if (rf) {
      if (rf === 'json' || rf?.type === 'json_object') {
        request.format = 'json';
      } else if (rf?.type === 'json_schema') {
        request.format = rf.schema;
      }
    }

    return request;
  }

  /**
   * Normalize a non-streaming Ollama response. Ollama does not assign
   * tool-call ids, so a unique one is synthesized per call.
   * @returns {{ content: string, tool_calls?: Array, usage?: object }}
   */
  parseResponse(data) {
    const message = data.message || {};
    const rawCalls = message.tool_calls || [];

    const calls = rawCalls.map((call, i) => ({
      id: `ollama_${i}_${Date.now()}`,
      name: call.function?.name,
      arguments: call.function?.arguments || {},
    }));

    let usage;
    if (data.eval_count) {
      usage = {
        prompt_tokens: data.prompt_eval_count || 0,
        completion_tokens: data.eval_count || 0,
      };
    }

    return {
      content: message.content || '',
      tool_calls: calls.length > 0 ? calls : undefined,
      usage,
    };
  }

  /**
   * Parse one streamed line. Ollama streams newline-delimited JSON
   * objects (not SSE): a `done: true` object terminates the stream.
   */
  parseStreamChunk(line) {
    try {
      const parsed = JSON.parse(line);
      if (parsed.done) return { type: 'done' };
      const text = parsed.message?.content;
      if (text) return { type: 'text', content: text };
      return null;
    } catch {
      return null;
    }
  }
}
|
|
391
|
+
|
|
392
|
+
// ─── Custom Adapter (user-provided send/stream) ──────────────────
|
|
393
|
+
|
|
394
|
+
class CustomAdapter extends BaseAdapter {
  /**
   * Wraps user-supplied `send` and/or `stream` functions so callers can
   * plug in an arbitrary LLM transport without writing a full adapter.
   *
   * @param {object} config - { send?: Function, stream?: Function, model? }
   */
  constructor(config) {
    super(config);
    const { send, stream } = config;
    this._sendFn = send || null;
    this._streamFn = stream || null;
  }

  /**
   * Marker checked by WuAIProvider: custom adapters bypass the
   * formatRequest/parseResponse pipeline entirely.
   */
  get isCustom() { return true; }
}
|
|
404
|
+
|
|
405
|
+
// ─── Provider Registry ───────────────────────────────────────────
|
|
406
|
+
|
|
407
|
+
// Maps adapter names — usable either as the provider name itself or via
// `config.adapter` — to their built-in adapter classes. Consulted by
// WuAIProvider.register(); unknown names there raise an error listing
// these keys.
const BUILTIN_ADAPTERS = {
  openai: OpenAIAdapter,
  anthropic: AnthropicAdapter,
  ollama: OllamaAdapter,
};
|
|
412
|
+
|
|
413
|
+
// ─── Main Provider Class ─────────────────────────────────────────
|
|
414
|
+
|
|
415
|
+
export class WuAIProvider {
  /**
   * Registry and dispatcher for BYOL (Bring Your Own LLM) providers.
   * Holds named adapter/config pairs, tracks the active one, and issues
   * fetch()-based send/stream requests with retry and JSON-mode handling.
   */
  constructor() {
    this._providers = new Map();   // name -> { adapter, config }
    this._active = null;           // adapter instance currently in use
    this._activeName = null;       // name of the active provider
    this._activeConfig = {};       // config of the active provider
    this._retryConfig = { maxRetries: 3, baseDelayMs: 1000 };
  }

  /**
   * Register and activate a provider.
   *
   * @param {string} name - Provider name or built-in adapter ('openai', 'anthropic', 'ollama', 'custom')
   * @param {object} config - Provider configuration
   * @param {string} [config.endpoint] - API endpoint URL
   * @param {string} [config.adapter] - Built-in adapter name (if name is custom)
   * @param {string} [config.apiKey] - API key (WARNING: exposed in browser)
   * @param {string} [config.model] - Model name
   * @param {boolean} [config.active] - Pass `false` to register without activating
   * @param {Function} [config.send] - Custom send function
   * @param {Function} [config.stream] - Custom stream generator function
   * @throws {Error} If the adapter name is unknown and no custom send/stream is given.
   */
  register(name, config = {}) {
    const adapterName = config.adapter || name;
    const AdapterClass = BUILTIN_ADAPTERS[adapterName];

    let adapter;
    // Custom send/stream functions take precedence over built-in adapters
    if (config.send || config.stream) {
      adapter = new CustomAdapter(config);
    } else if (AdapterClass) {
      adapter = new AdapterClass(config);
    } else {
      throw new Error(
        `[wu-ai] Unknown adapter '${adapterName}'. ` +
        `Available: ${Object.keys(BUILTIN_ADAPTERS).join(', ')}, or provide custom send/stream.`
      );
    }

    this._providers.set(name, { adapter, config });

    // Activate the new provider unless config.active === false.
    // NOTE(review): because `config.active !== false` is true whenever
    // `active` is omitted, every registration switches the active provider
    // (last registration wins) — not only the first one.
    if (!this._active || config.active !== false) {
      this._active = adapter;
      this._activeName = name;
      this._activeConfig = config;
    }

    logger.wuInfo(`[wu-ai] Provider registered: '${name}' (adapter: ${adapterName})`);
  }

  /**
   * Switch active provider.
   * @param {string} name - A previously registered provider name.
   * @throws {Error} If no provider with that name is registered.
   */
  use(name) {
    const entry = this._providers.get(name);
    if (!entry) throw new Error(`[wu-ai] Provider '${name}' not registered`);
    this._active = entry.adapter;
    this._activeName = name;
    this._activeConfig = entry.config;
  }

  /**
   * Send a non-streaming request.
   *
   * @param {Array} messages - Normalized messages
   * @param {object} [options] - { tools, temperature, maxTokens, signal, provider, responseFormat }
   * @returns {Promise<{ content: string, tool_calls?: Array, usage?: object, parsed?: object, parseError?: string }>}
   * @throws {Error} If no endpoint is configured, or the request fails after retries.
   */
  async send(messages, options = {}) {
    const { adapter, config } = this._resolveProvider(options.provider);

    // Custom adapter: call user function directly
    if (adapter.isCustom && adapter._sendFn) {
      return adapter._sendFn(messages, options);
    }

    const endpoint = config.endpoint || config.baseUrl;
    if (!endpoint) {
      throw new Error('[wu-ai] No endpoint configured. Set config.endpoint or config.baseUrl.');
    }

    const url = this._resolveUrl(endpoint);
    const body = adapter.formatRequest(messages, { ...options, stream: false });
    const headers = adapter.getHeaders(config);

    const response = await this._fetchWithRetry(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
      signal: options.signal,
    });

    const data = await response.json();
    const result = adapter.parseResponse(data);

    // Anthropic prefill compensation: we prepended '{' to force JSON,
    // so the response content is the continuation — restore the full JSON
    if (adapter instanceof AnthropicAdapter && options.responseFormat && result.content) {
      result.content = '{' + result.content;
    }

    // Validate JSON when responseFormat was requested; on success the
    // parsed object is attached as result.parsed, on failure a
    // result.parseError string is set instead of throwing.
    if (options.responseFormat && result.content) {
      try {
        result.parsed = JSON.parse(result.content);
      } catch {
        result.parseError = 'Response is not valid JSON';
        logger.wuDebug('[wu-ai] responseFormat requested but LLM returned invalid JSON');
      }
    }

    return result;
  }

  /**
   * Send a streaming request. Returns an async generator of chunks.
   *
   * Note: streaming uses a single fetch with no retry (unlike send()).
   * The reader's lock is released in `finally`; if the consumer abandons
   * the generator early the underlying request is not explicitly
   * cancelled — pass options.signal and abort it to stop the transfer.
   *
   * @param {Array} messages - Normalized messages
   * @param {object} [options] - { tools, temperature, maxTokens, signal, provider, responseFormat }
   * @yields {StreamChunk}
   * @throws {Error} If no endpoint is configured or the HTTP response is not ok.
   */
  async *stream(messages, options = {}) {
    const { adapter, config } = this._resolveProvider(options.provider);

    // Custom adapter: call user generator directly
    if (adapter.isCustom && adapter._streamFn) {
      yield* adapter._streamFn(messages, options);
      return;
    }

    const endpoint = config.endpoint || config.baseUrl;
    if (!endpoint) {
      throw new Error('[wu-ai] No endpoint configured. Set config.endpoint or config.baseUrl.');
    }

    const url = this._resolveUrl(endpoint);
    const body = adapter.formatRequest(messages, { ...options, stream: true });
    const headers = adapter.getHeaders(config);

    const response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
      signal: options.signal,
    });

    if (!response.ok) {
      throw new Error(`[wu-ai] Stream request failed: ${response.status} ${response.statusText}`);
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';

    // Anthropic prefill compensation for streaming:
    // emit the '{' we used as prefill before the first real chunk
    let needsPrefill = adapter instanceof AnthropicAdapter && !!options.responseFormat;

    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || ''; // keep incomplete last line

        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;

          const chunk = adapter.parseStreamChunk(trimmed);
          if (chunk) {
            if (needsPrefill && chunk.type === 'text') {
              chunk.content = '{' + chunk.content;
              needsPrefill = false;
            }
            yield chunk;
          }
          if (chunk?.type === 'done') return;
        }
      }

      // Process remaining buffer
      if (buffer.trim()) {
        const chunk = adapter.parseStreamChunk(buffer.trim());
        if (chunk) yield chunk;
      }
    } finally {
      reader.releaseLock();
    }
  }

  // ── Retry logic ──

  /**
   * fetch() with exponential backoff. Retries only on 429 and 5xx, up to
   * this._retryConfig.maxRetries times with baseDelayMs * 2^attempt delays.
   * Other 4xx statuses and AbortError fail immediately.
   *
   * NOTE(review): when a retryable status exhausts its retries, control
   * falls through to the clientError throw below, so the final error is
   * the '[wu-ai] Request failed: …' message rather than `lastError`.
   *
   * @param {string} url - Resolved request URL
   * @param {object} options - fetch() init options
   * @returns {Promise<Response>} The first ok response.
   * @throws {Error} The last failure after retries are exhausted.
   */
  async _fetchWithRetry(url, options) {
    let lastError;
    for (let attempt = 0; attempt <= this._retryConfig.maxRetries; attempt++) {
      try {
        const response = await fetch(url, options);

        // Only retry on 429 (rate limit) and 5xx
        if (response.ok) return response;

        if (response.status === 429 || response.status >= 500) {
          lastError = new Error(`HTTP ${response.status}: ${response.statusText}`);
          if (attempt < this._retryConfig.maxRetries) {
            const delay = this._retryConfig.baseDelayMs * Math.pow(2, attempt);
            logger.wuDebug(`[wu-ai] Retry ${attempt + 1}/${this._retryConfig.maxRetries} in ${delay}ms (${response.status})`);
            await new Promise(r => setTimeout(r, delay));
            continue;
          }
        }

        // 4xx (except 429) — don't retry, fail immediately
        const clientError = new Error(`[wu-ai] Request failed: ${response.status} ${response.statusText}`);
        clientError._noRetry = true;
        throw clientError;
      } catch (err) {
        if (err.name === 'AbortError') throw err;
        if (err._noRetry) throw err; // 4xx — don't retry
        lastError = err;
        // Network-level errors are retried with the same backoff
        if (attempt < this._retryConfig.maxRetries) {
          const delay = this._retryConfig.baseDelayMs * Math.pow(2, attempt);
          await new Promise(r => setTimeout(r, delay));
          continue;
        }
      }
    }
    throw lastError;
  }

  // ── Helpers ──

  /**
   * Resolve an endpoint to an absolute URL.
   * Relative URLs (e.g., '/api/ai/chat') resolve against the current
   * origin in the browser; outside the browser they are returned as-is.
   */
  _resolveUrl(endpoint) {
    if (endpoint.startsWith('/')) {
      return typeof window !== 'undefined'
        ? `${window.location.origin}${endpoint}`
        : endpoint;
    }
    return endpoint;
  }

  /**
   * Resolve which provider/adapter to use for a request.
   * Supports per-call selection: options.provider = 'anthropic'
   *
   * @param {string} [providerName] - Optional provider name override
   * @returns {{ adapter: BaseAdapter, config: object }}
   * @throws {Error} If the named provider is unknown, or no provider is active.
   */
  _resolveProvider(providerName) {
    if (providerName) {
      const entry = this._providers.get(providerName);
      if (!entry) {
        throw new Error(`[wu-ai] Provider '${providerName}' not registered. Available: ${[...this._providers.keys()].join(', ')}`);
      }
      return { adapter: entry.adapter, config: entry.config };
    }
    this._ensureActive();
    return { adapter: this._active, config: this._activeConfig };
  }

  /** Throw if no provider has been registered/activated yet. */
  _ensureActive() {
    if (!this._active) {
      throw new Error(
        '[wu-ai] No provider configured. Call wu.ai.provider("name", { endpoint, adapter }) first.'
      );
    }
  }

  /**
   * Adjust retry behavior. Only the keys present on `config` are changed.
   * @param {object} config - { maxRetries?: number, baseDelayMs?: number }
   */
  configureRetry(config) {
    if (config.maxRetries !== undefined) this._retryConfig.maxRetries = config.maxRetries;
    if (config.baseDelayMs !== undefined) this._retryConfig.baseDelayMs = config.baseDelayMs;
  }

  /** @returns {string|null} Name of the active provider, or null if none. */
  getActiveProvider() {
    return this._activeName;
  }

  /** @returns {{ activeProvider: string|null, registeredProviders: string[] }} */
  getStats() {
    return {
      activeProvider: this._activeName,
      registeredProviders: [...this._providers.keys()],
    };
  }
}
|