wu-framework 1.1.7 → 1.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/README.md +511 -977
  2. package/dist/wu-framework.cjs.js +3 -1
  3. package/dist/wu-framework.cjs.js.map +1 -0
  4. package/dist/wu-framework.dev.js +7533 -2761
  5. package/dist/wu-framework.dev.js.map +1 -1
  6. package/dist/wu-framework.esm.js +3 -0
  7. package/dist/wu-framework.esm.js.map +1 -0
  8. package/dist/wu-framework.umd.js +3 -1
  9. package/dist/wu-framework.umd.js.map +1 -0
  10. package/integrations/astro/README.md +127 -0
  11. package/integrations/astro/WuApp.astro +63 -0
  12. package/integrations/astro/WuShell.astro +39 -0
  13. package/integrations/astro/index.js +68 -0
  14. package/integrations/astro/package.json +38 -0
  15. package/integrations/astro/types.d.ts +53 -0
  16. package/package.json +89 -71
  17. package/src/adapters/angular/ai.js +30 -0
  18. package/src/adapters/angular/index.d.ts +154 -0
  19. package/src/adapters/angular/index.js +932 -0
  20. package/src/adapters/angular.d.ts +3 -154
  21. package/src/adapters/angular.js +3 -813
  22. package/src/adapters/index.js +35 -24
  23. package/src/adapters/lit/ai.js +20 -0
  24. package/src/adapters/lit/index.d.ts +120 -0
  25. package/src/adapters/lit/index.js +721 -0
  26. package/src/adapters/lit.d.ts +3 -120
  27. package/src/adapters/lit.js +3 -726
  28. package/src/adapters/preact/ai.js +33 -0
  29. package/src/adapters/preact/index.d.ts +108 -0
  30. package/src/adapters/preact/index.js +661 -0
  31. package/src/adapters/preact.d.ts +3 -108
  32. package/src/adapters/preact.js +3 -665
  33. package/src/adapters/react/ai.js +135 -0
  34. package/src/adapters/react/index.d.ts +246 -0
  35. package/src/adapters/react/index.js +689 -0
  36. package/src/adapters/react.d.ts +3 -212
  37. package/src/adapters/react.js +3 -513
  38. package/src/adapters/shared.js +64 -0
  39. package/src/adapters/solid/ai.js +32 -0
  40. package/src/adapters/solid/index.d.ts +101 -0
  41. package/src/adapters/solid/index.js +586 -0
  42. package/src/adapters/solid.d.ts +3 -101
  43. package/src/adapters/solid.js +3 -591
  44. package/src/adapters/svelte/ai.js +31 -0
  45. package/src/adapters/svelte/index.d.ts +166 -0
  46. package/src/adapters/svelte/index.js +798 -0
  47. package/src/adapters/svelte.d.ts +3 -166
  48. package/src/adapters/svelte.js +3 -803
  49. package/src/adapters/vanilla/ai.js +30 -0
  50. package/src/adapters/vanilla/index.d.ts +179 -0
  51. package/src/adapters/vanilla/index.js +785 -0
  52. package/src/adapters/vanilla.d.ts +3 -179
  53. package/src/adapters/vanilla.js +3 -791
  54. package/src/adapters/vue/ai.js +52 -0
  55. package/src/adapters/vue/index.d.ts +299 -0
  56. package/src/adapters/vue/index.js +608 -0
  57. package/src/adapters/vue.d.ts +3 -299
  58. package/src/adapters/vue.js +3 -611
  59. package/src/ai/wu-ai-actions.js +261 -0
  60. package/src/ai/wu-ai-browser.js +663 -0
  61. package/src/ai/wu-ai-context.js +332 -0
  62. package/src/ai/wu-ai-conversation.js +554 -0
  63. package/src/ai/wu-ai-permissions.js +381 -0
  64. package/src/ai/wu-ai-provider.js +605 -0
  65. package/src/ai/wu-ai-schema.js +225 -0
  66. package/src/ai/wu-ai-triggers.js +396 -0
  67. package/src/ai/wu-ai.js +474 -0
  68. package/src/core/wu-app.js +50 -8
  69. package/src/core/wu-cache.js +1 -1
  70. package/src/core/wu-core.js +645 -677
  71. package/src/core/wu-html-parser.js +121 -211
  72. package/src/core/wu-iframe-sandbox.js +328 -0
  73. package/src/core/wu-mcp-bridge.js +647 -0
  74. package/src/core/wu-overrides.js +510 -0
  75. package/src/core/wu-prefetch.js +414 -0
  76. package/src/core/wu-proxy-sandbox.js +398 -75
  77. package/src/core/wu-sandbox.js +86 -268
  78. package/src/core/wu-script-executor.js +79 -182
  79. package/src/core/wu-snapshot-sandbox.js +149 -106
  80. package/src/core/wu-strategies.js +13 -0
  81. package/src/core/wu-style-bridge.js +0 -2
  82. package/src/index.js +139 -665
  83. package/dist/wu-framework.hex.js +0 -23
  84. package/dist/wu-framework.min.js +0 -1
  85. package/dist/wu-framework.obf.js +0 -1
  86. package/scripts/build-protected.js +0 -366
  87. package/scripts/build.js +0 -212
  88. package/scripts/rollup-plugin-hex.js +0 -143
  89. package/src/core/wu-registry.js +0 -60
  90. package/src/core/wu-sandbox-pool.js +0 -390
@@ -0,0 +1,605 @@
1
+ /**
2
+ * WU-AI-PROVIDER: BYOL (Bring Your Own LLM) provider system
3
+ *
4
+ * Pure fetch(), zero dependencies. Adapters normalize request/response
5
+ * across OpenAI, Anthropic, Ollama, and custom providers.
6
+ *
7
+ * Internal normalized format:
8
+ * Request: { role, content, tool_calls?, tool_call_id? }
9
+ * Response: { content, tool_calls?, usage? }
10
+ */
11
+
12
+ import { logger } from '../core/wu-logger.js';
13
+
14
+ // ─── Normalized types (internal) ─────────────────────────────────
15
+ //
16
+ // Message: { role: 'system'|'user'|'assistant'|'tool', content: string,
17
+ // tool_calls?: ToolCall[], tool_call_id?: string }
18
+ //
19
+ // ToolCall: { id: string, name: string, arguments: object }
20
+ //
21
+ // Response: { content: string, tool_calls?: ToolCall[], usage?: { prompt_tokens, completion_tokens } }
22
+ //
23
+ // StreamChunk: { type: 'text'|'tool_call_start'|'tool_call_delta'|'usage'|'done'|'error',
+ //              content?: string, tool_call?: ToolCall, usage?: object, error?: string }
25
+
26
+ // ─── Base Adapter ────────────────────────────────────────────────
27
+
28
+ class BaseAdapter {
29
+ constructor(config = {}) {
30
+ this.model = config.model || '';
31
+ }
32
+
33
+ /** Format messages + options into provider-specific request body */
34
+ formatRequest(/* messages, options */) {
35
+ throw new Error('Adapter must implement formatRequest()');
36
+ }
37
+
38
+ /** Parse provider response into normalized Response */
39
+ parseResponse(/* rawData */) {
40
+ throw new Error('Adapter must implement parseResponse()');
41
+ }
42
+
43
+ /** Parse a streaming SSE line into a StreamChunk (or null to skip) */
44
+ parseStreamChunk(/* line */) {
45
+ throw new Error('Adapter must implement parseStreamChunk()');
46
+ }
47
+
48
+ /** Get required headers for the provider */
49
+ getHeaders(/* config */) {
50
+ return { 'Content-Type': 'application/json' };
51
+ }
52
+ }
53
+
54
+ // ─── OpenAI Adapter ──────────────────────────────────────────────
55
+
56
class OpenAIAdapter extends BaseAdapter {
  /** Adapter for OpenAI-compatible Chat Completions endpoints. */
  constructor(config) {
    super(config);
    this.model = config.model || 'gpt-4o';
  }

  /** Bearer-token auth when an API key is configured. */
  getHeaders(config) {
    const headers = { 'Content-Type': 'application/json' };
    if (config.apiKey) {
      headers['Authorization'] = `Bearer ${config.apiKey}`;
    }
    return headers;
  }

  /**
   * Build a Chat Completions request body. Tool calls are re-encoded into
   * OpenAI's `{ type: 'function', function: {...} }` wrapper, with arguments
   * serialized back to a JSON string.
   */
  formatRequest(messages, options = {}) {
    const payload = {
      model: options.model || this.model,
      messages: messages.map((message) => {
        const entry = { role: message.role, content: message.content };
        if (message.tool_call_id) {
          entry.tool_call_id = message.tool_call_id;
        }
        if (message.tool_calls) {
          entry.tool_calls = message.tool_calls.map((call) => ({
            id: call.id,
            type: 'function',
            function: { name: call.name, arguments: JSON.stringify(call.arguments) },
          }));
        }
        return entry;
      }),
    };

    if (options.tools?.length) {
      payload.tools = options.tools.map((tool) => ({
        type: 'function',
        function: { name: tool.name, description: tool.description, parameters: tool.parameters },
      }));
    }
    if (options.temperature !== undefined) payload.temperature = options.temperature;
    if (options.maxTokens) payload.max_tokens = options.maxTokens;
    if (options.stream) payload.stream = true;
    return payload;
  }

  /**
   * Normalize a non-streaming response: first choice only; tool-call
   * arguments are parsed from their JSON-string form.
   */
  parseResponse(data) {
    const firstChoice = data.choices?.[0];
    if (!firstChoice) return { content: '', tool_calls: [], usage: data.usage };

    const message = firstChoice.message || {};
    const calls = (message.tool_calls || []).map((call) => ({
      id: call.id,
      name: call.function?.name,
      arguments: this._safeParseArgs(call.function?.arguments),
    }));

    let usage;
    if (data.usage) {
      usage = {
        prompt_tokens: data.usage.prompt_tokens,
        completion_tokens: data.usage.completion_tokens,
      };
    }

    return {
      content: message.content || '',
      tool_calls: calls.length > 0 ? calls : undefined,
      usage,
    };
  }

  /**
   * Parse one SSE line ("data: {...}" or "data: [DONE]") into a chunk;
   * non-data lines and malformed JSON yield null.
   */
  parseStreamChunk(line) {
    if (!line.startsWith('data: ')) return null;
    const payload = line.slice(6).trim();
    if (payload === '[DONE]') return { type: 'done' };

    let data;
    try {
      data = JSON.parse(payload);
    } catch {
      return null;
    }

    const delta = data.choices?.[0]?.delta;
    if (!delta) return null;

    if (delta.tool_calls?.length) {
      const [first] = delta.tool_calls;
      return {
        type: 'tool_call_delta',
        index: first.index,
        id: first.id,
        name: first.function?.name,
        argumentsDelta: first.function?.arguments || '',
      };
    }

    if (delta.content) {
      return { type: 'text', content: delta.content };
    }

    if (data.usage) {
      return { type: 'usage', usage: data.usage };
    }

    return null;
  }

  /** Best-effort JSON parse of tool-call arguments; {} on any failure. */
  _safeParseArgs(raw) {
    if (!raw) return {};
    try {
      return JSON.parse(raw);
    } catch {
      return {};
    }
  }
}
155
+
156
+ // ─── Anthropic Adapter ───────────────────────────────────────────
157
+
158
class AnthropicAdapter extends BaseAdapter {
  /** Adapter for the Anthropic Messages API. */
  constructor(config) {
    super(config);
    this.model = config.model || 'claude-sonnet-4-5-20250929';
  }

  /** Anthropic authenticates via x-api-key plus a pinned API version. */
  getHeaders(config) {
    const headers = { 'Content-Type': 'application/json' };
    if (config.apiKey) {
      headers['x-api-key'] = config.apiKey;
      headers['anthropic-version'] = '2023-06-01';
    }
    return headers;
  }

  /**
   * Build a Messages API request body. System messages are hoisted into
   * the top-level `system` field (Anthropic does not accept them inline).
   */
  formatRequest(messages, options = {}) {
    const systemParts = [];
    const conversation = [];
    for (const message of messages) {
      if (message.role === 'system') {
        systemParts.push(message);
      } else {
        conversation.push(message);
      }
    }

    const body = {
      model: options.model || this.model,
      max_tokens: options.maxTokens || 4096,
      messages: conversation.map((message) => this._toAnthropicMessage(message)),
    };

    if (systemParts.length) {
      body.system = systemParts.map((m) => m.content).join('\n\n');
    }
    if (options.tools?.length) {
      body.tools = options.tools.map((tool) => ({
        name: tool.name,
        description: tool.description,
        input_schema: tool.parameters,
      }));
    }
    if (options.temperature !== undefined) body.temperature = options.temperature;
    if (options.stream) body.stream = true;
    return body;
  }

  /**
   * Convert one normalized message into Anthropic's content-block format:
   * tool results become user-role tool_result blocks, assistant tool calls
   * become tool_use blocks, everything else passes through.
   */
  _toAnthropicMessage(message) {
    if (message.role === 'tool') {
      return {
        role: 'user',
        content: [{
          type: 'tool_result',
          tool_use_id: message.tool_call_id,
          content: message.content,
        }],
      };
    }
    if (message.tool_calls) {
      return {
        role: 'assistant',
        content: message.tool_calls.map((call) => ({
          type: 'tool_use',
          id: call.id,
          name: call.name,
          input: call.arguments,
        })),
      };
    }
    return { role: message.role, content: message.content };
  }

  /**
   * Normalize a non-streaming response: concatenate text blocks, collect
   * tool_use blocks, and map input/output token counts onto the shared
   * prompt/completion naming.
   */
  parseResponse(data) {
    let text = '';
    const toolCalls = [];
    for (const block of data.content || []) {
      if (block.type === 'text') {
        text += block.text ?? '';
      } else if (block.type === 'tool_use') {
        toolCalls.push({ id: block.id, name: block.name, arguments: block.input || {} });
      }
    }

    let usage;
    if (data.usage) {
      usage = {
        prompt_tokens: data.usage.input_tokens,
        completion_tokens: data.usage.output_tokens,
      };
    }

    return {
      content: text,
      tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
      usage,
    };
  }

  /**
   * Parse one Anthropic SSE line. Dispatches on the event's `type`;
   * unknown events and malformed JSON yield null.
   */
  parseStreamChunk(line) {
    if (!line.startsWith('data: ')) return null;

    let event;
    try {
      event = JSON.parse(line.slice(6).trim());
    } catch {
      return null;
    }

    switch (event.type) {
      case 'content_block_delta': {
        const delta = event.delta;
        if (delta?.type === 'text_delta') {
          return { type: 'text', content: delta.text };
        }
        if (delta?.type === 'input_json_delta') {
          return { type: 'tool_call_delta', argumentsDelta: delta.partial_json || '' };
        }
        return null;
      }
      case 'content_block_start':
        if (event.content_block?.type === 'tool_use') {
          return {
            type: 'tool_call_start',
            id: event.content_block.id,
            name: event.content_block.name,
          };
        }
        return null;
      case 'message_delta':
        if (event.usage) {
          return {
            type: 'usage',
            usage: {
              prompt_tokens: event.usage.input_tokens,
              completion_tokens: event.usage.output_tokens,
            },
          };
        }
        return null;
      case 'message_stop':
        return { type: 'done' };
      default:
        return null;
    }
  }
}
284
+
285
+ // ─── Ollama Adapter ──────────────────────────────────────────────
286
+
287
class OllamaAdapter extends BaseAdapter {
  /** Adapter for a local Ollama server (/api/chat, no authentication). */
  constructor(config) {
    super(config);
    this.model = config.model || 'llama3';
  }

  /** Ollama needs no auth headers. */
  getHeaders() {
    return { 'Content-Type': 'application/json' };
  }

  /**
   * Build an /api/chat request body. Sampling parameters live under
   * Ollama's `options` field; `stream` is forwarded explicitly because
   * Ollama streams by default.
   */
  formatRequest(messages, options = {}) {
    const body = {
      model: options.model || this.model,
      // Only role/content are sent; tool round-trip messages are flattened.
      messages: messages.map(m => ({ role: m.role, content: m.content })),
    };
    if (options.tools?.length) {
      body.tools = options.tools.map(t => ({
        type: 'function',
        function: { name: t.name, description: t.description, parameters: t.parameters },
      }));
    }
    if (options.temperature !== undefined) body.options = { temperature: options.temperature };
    if (options.stream !== undefined) body.stream = options.stream;
    return body;
  }

  /**
   * Normalize a non-streaming /api/chat response. Ollama tool calls carry
   * no id, so a synthetic one is generated for correlation.
   */
  parseResponse(data) {
    const msg = data.message || {};
    const toolCalls = (msg.tool_calls || []).map((tc, i) => ({
      id: `ollama_${i}_${Date.now()}`, // synthetic: Ollama provides no call id
      name: tc.function?.name,
      arguments: tc.function?.arguments || {},
    }));

    return {
      content: msg.content || '',
      tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
      // FIX: null check instead of truthiness — the old `data.eval_count ?`
      // silently dropped usage for zero-token completions (eval_count === 0).
      usage: data.eval_count != null ? {
        prompt_tokens: data.prompt_eval_count || 0,
        completion_tokens: data.eval_count || 0,
      } : undefined,
    };
  }

  /**
   * Parse one streamed line. Ollama streams NDJSON (one JSON object per
   * line), not SSE; malformed lines yield null.
   */
  parseStreamChunk(line) {
    try {
      const data = JSON.parse(line);
      if (data.done) return { type: 'done' };
      if (data.message?.content) return { type: 'text', content: data.message.content };
      return null;
    } catch {
      return null;
    }
  }
}
342
+
343
+ // ─── Custom Adapter (user-provided send/stream) ──────────────────
344
+
345
class CustomAdapter extends BaseAdapter {
  /**
   * Wrapper for user-supplied transport functions. WuAIProvider reads
   * `_sendFn` / `_streamFn` directly and skips the fetch pipeline entirely,
   * so formatRequest/parseResponse are intentionally left unimplemented.
   */
  constructor(config) {
    super(config);
    this._sendFn = config.send || null;
    this._streamFn = config.stream || null;
  }

  /** Marker flag checked by WuAIProvider.send()/stream(). */
  get isCustom() {
    return true;
  }
}
355
+
356
+ // ─── Provider Registry ───────────────────────────────────────────
357
+
358
// Built-in adapter classes keyed by name; looked up in
// WuAIProvider.register() via config.adapter (falling back to the
// provider name itself). Custom send/stream configs bypass this map.
const BUILTIN_ADAPTERS = {
  openai: OpenAIAdapter,
  anthropic: AnthropicAdapter,
  ollama: OllamaAdapter,
};
363
+
364
+ // ─── Main Provider Class ─────────────────────────────────────────
365
+
366
export class WuAIProvider {
  /**
   * BYOL (Bring Your Own LLM) provider manager.
   *
   * Keeps a registry of named providers (adapter instance + config),
   * tracks which one is active, and performs the HTTP transport for
   * both one-shot (`send`) and streaming (`stream`) requests.
   */
  constructor() {
    this._providers = new Map();   // name -> { adapter, config }
    this._active = null;           // active adapter instance
    this._activeName = null;       // name the active adapter was registered under
    this._activeConfig = {};       // config of the active provider
    this._retryConfig = { maxRetries: 3, baseDelayMs: 1000 };
  }

  /**
   * Register a provider and activate it. A new registration becomes the
   * active provider unless config.active === false; the very first
   * registration is always activated regardless.
   *
   * @param {string} name - Provider name or built-in adapter ('openai', 'anthropic', 'ollama', 'custom')
   * @param {object} config - Provider configuration
   * @param {string} [config.endpoint] - API endpoint URL
   * @param {string} [config.adapter] - Built-in adapter name (if name is custom)
   * @param {string} [config.apiKey] - API key (WARNING: exposed in browser)
   * @param {string} [config.model] - Model name
   * @param {Function} [config.send] - Custom send function
   * @param {Function} [config.stream] - Custom stream generator function
   * @throws {Error} If no built-in adapter matches and no custom send/stream is given.
   */
  register(name, config = {}) {
    const adapterName = config.adapter || name;
    const AdapterClass = BUILTIN_ADAPTERS[adapterName];

    let adapter;
    if (config.send || config.stream) {
      // User-supplied transport takes precedence over built-in adapters.
      adapter = new CustomAdapter(config);
    } else if (AdapterClass) {
      adapter = new AdapterClass(config);
    } else {
      throw new Error(
        `[wu-ai] Unknown adapter '${adapterName}'. ` +
        `Available: ${Object.keys(BUILTIN_ADAPTERS).join(', ')}, or provide custom send/stream.`
      );
    }

    this._providers.set(name, { adapter, config });

    // First registration always activates; later ones activate unless
    // explicitly opted out with config.active === false.
    if (!this._active || config.active !== false) {
      this._active = adapter;
      this._activeName = name;
      this._activeConfig = config;
    }

    logger.wuInfo(`[wu-ai] Provider registered: '${name}' (adapter: ${adapterName})`);
  }

  /**
   * Switch the active provider by registered name.
   * @throws {Error} If the name was never registered.
   */
  use(name) {
    const entry = this._providers.get(name);
    if (!entry) throw new Error(`[wu-ai] Provider '${name}' not registered`);
    this._active = entry.adapter;
    this._activeName = name;
    this._activeConfig = entry.config;
  }

  /**
   * Send a non-streaming request through the active provider.
   *
   * @param {Array} messages - Normalized messages
   * @param {object} [options] - { tools, temperature, maxTokens, signal }
   * @returns {Promise<{ content: string, tool_calls?: Array, usage?: object }>}
   */
  async send(messages, options = {}) {
    this._ensureActive();
    const adapter = this._active;
    const config = this._activeConfig;

    // Custom adapter: delegate to the user-supplied function directly.
    if (adapter.isCustom && adapter._sendFn) {
      return adapter._sendFn(messages, options);
    }

    const url = this._resolveUrl(this._requireEndpoint(config));
    const body = adapter.formatRequest(messages, { ...options, stream: false });
    const headers = adapter.getHeaders(config);

    const response = await this._fetchWithRetry(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
      signal: options.signal,
    });

    const data = await response.json();
    return adapter.parseResponse(data);
  }

  /**
   * Send a streaming request; yields normalized StreamChunk objects.
   * Streaming requests are NOT retried (a partial stream cannot be resumed).
   *
   * @param {Array} messages - Normalized messages
   * @param {object} [options] - { tools, temperature, maxTokens, signal }
   * @yields {object} StreamChunk
   */
  async *stream(messages, options = {}) {
    this._ensureActive();
    const adapter = this._active;
    const config = this._activeConfig;

    // Custom adapter: delegate to the user-supplied generator.
    if (adapter.isCustom && adapter._streamFn) {
      yield* adapter._streamFn(messages, options);
      return;
    }

    const url = this._resolveUrl(this._requireEndpoint(config));
    const body = adapter.formatRequest(messages, { ...options, stream: true });
    const headers = adapter.getHeaders(config);

    const response = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify(body),
      signal: options.signal,
    });

    if (!response.ok) {
      throw new Error(`[wu-ai] Stream request failed: ${response.status} ${response.statusText}`);
    }

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';

    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || ''; // keep the incomplete trailing line

        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;

          const chunk = adapter.parseStreamChunk(trimmed);
          if (chunk) yield chunk;
          if (chunk?.type === 'done') return;
        }
      }

      // Flush whatever is left in the buffer (e.g. a final NDJSON line).
      if (buffer.trim()) {
        const chunk = adapter.parseStreamChunk(buffer.trim());
        if (chunk) yield chunk;
      }
    } finally {
      reader.releaseLock();
    }
  }

  // ── Retry logic ──

  /**
   * fetch() with exponential backoff on transient failures.
   *
   * Retries HTTP 429, HTTP 5xx and network errors up to maxRetries times.
   * Aborts (AbortError) and permanent client errors (4xx other than 429)
   * are thrown immediately without retrying.
   *
   * FIX: the previous version threw the "don't retry" 4xx error inside its
   * own try block, so its catch clause retried exactly the responses the
   * comment said were not retried. Permanent errors are now tagged and
   * re-thrown by the catch.
   */
  async _fetchWithRetry(url, options) {
    let lastError;
    for (let attempt = 0; attempt <= this._retryConfig.maxRetries; attempt++) {
      try {
        const response = await fetch(url, options);
        if (response.ok) return response;

        if (response.status === 429 || response.status >= 500) {
          // Transient — record and fall through to the backoff below.
          lastError = new Error(`HTTP ${response.status}: ${response.statusText}`);
        } else {
          // 4xx (except 429) — permanent; tag it so the catch re-throws.
          const err = new Error(`[wu-ai] Request failed: ${response.status} ${response.statusText}`);
          err.permanent = true;
          throw err;
        }
      } catch (err) {
        if (err.name === 'AbortError' || err.permanent) throw err;
        lastError = err; // network failure — treat as transient
      }

      if (attempt < this._retryConfig.maxRetries) {
        const delay = this._retryConfig.baseDelayMs * Math.pow(2, attempt);
        logger.wuDebug(`[wu-ai] Retry ${attempt + 1}/${this._retryConfig.maxRetries} in ${delay}ms`);
        await new Promise(resolve => setTimeout(resolve, delay));
      }
    }
    throw lastError;
  }

  // ── Helpers ──

  /** Resolve the configured endpoint, throwing a setup error when absent. */
  _requireEndpoint(config) {
    const endpoint = config.endpoint || config.baseUrl;
    if (!endpoint) {
      throw new Error('[wu-ai] No endpoint configured. Set config.endpoint or config.baseUrl.');
    }
    return endpoint;
  }

  /** Resolve relative endpoints ('/api/...') against the current origin (browser only). */
  _resolveUrl(endpoint) {
    if (endpoint.startsWith('/')) {
      return typeof window !== 'undefined'
        ? `${window.location.origin}${endpoint}`
        : endpoint;
    }
    return endpoint;
  }

  /** Throw a descriptive setup error when no provider has been registered. */
  _ensureActive() {
    if (!this._active) {
      throw new Error(
        '[wu-ai] No provider configured. Call wu.ai.provider("name", { endpoint, adapter }) first.'
      );
    }
  }

  /**
   * Adjust retry behavior.
   * @param {object} config - { maxRetries?: number, baseDelayMs?: number }
   */
  configureRetry(config) {
    if (config.maxRetries !== undefined) this._retryConfig.maxRetries = config.maxRetries;
    if (config.baseDelayMs !== undefined) this._retryConfig.baseDelayMs = config.baseDelayMs;
  }

  /** @returns {string|null} Name of the active provider. */
  getActiveProvider() {
    return this._activeName;
  }

  /** @returns {{ activeProvider: string|null, registeredProviders: string[] }} */
  getStats() {
    return {
      activeProvider: this._activeName,
      registeredProviders: [...this._providers.keys()],
    };
  }
}