wu-framework 1.1.8 → 1.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -88,6 +88,24 @@ class OpenAIAdapter extends BaseAdapter {
88
88
  if (options.temperature !== undefined) body.temperature = options.temperature;
89
89
  if (options.maxTokens) body.max_tokens = options.maxTokens;
90
90
  if (options.stream) body.stream = true;
91
+
92
+ // Structured output / JSON mode
93
+ if (options.responseFormat) {
94
+ const rf = options.responseFormat;
95
+ if (rf === 'json' || rf?.type === 'json_object') {
96
+ body.response_format = { type: 'json_object' };
97
+ } else if (rf?.type === 'json_schema') {
98
+ body.response_format = {
99
+ type: 'json_schema',
100
+ json_schema: {
101
+ name: rf.name || 'response',
102
+ schema: rf.schema,
103
+ strict: rf.strict !== false,
104
+ },
105
+ };
106
+ }
107
+ }
108
+
91
109
  return body;
92
110
  }
93
111
 
@@ -207,6 +225,26 @@ class AnthropicAdapter extends BaseAdapter {
207
225
  if (systemMsgs.length) {
208
226
  body.system = systemMsgs.map(m => m.content).join('\n\n');
209
227
  }
228
+
229
+ // Structured output / JSON mode (Anthropic has no native support)
230
+ // Strategy: augment system prompt + prefill assistant turn with '{'
231
+ if (options.responseFormat) {
232
+ const rf = options.responseFormat;
233
+ const jsonInstruction = '\n\nYou MUST respond with valid JSON only. No markdown, no explanation.';
234
+
235
+ if (rf === 'json' || rf?.type === 'json_object') {
236
+ body.system = (body.system || '') + jsonInstruction;
237
+ } else if (rf?.type === 'json_schema') {
238
+ const schemaStr = JSON.stringify(rf.schema, null, 2);
239
+ body.system = (body.system || '') +
240
+ jsonInstruction +
241
+ `\n\nYour response MUST conform to this JSON schema:\n${schemaStr}`;
242
+ }
243
+
244
+ // Prefill assistant message with '{' to force JSON output
245
+ body.messages.push({ role: 'assistant', content: '{' });
246
+ }
247
+
210
248
  if (options.tools?.length) {
211
249
  body.tools = options.tools.map(t => ({
212
250
  name: t.name,
@@ -307,6 +345,17 @@ class OllamaAdapter extends BaseAdapter {
307
345
  }
308
346
  if (options.temperature !== undefined) body.options = { temperature: options.temperature };
309
347
  if (options.stream !== undefined) body.stream = options.stream;
348
+
349
+ // Structured output / JSON mode
350
+ if (options.responseFormat) {
351
+ const rf = options.responseFormat;
352
+ if (rf === 'json' || rf?.type === 'json_object') {
353
+ body.format = 'json';
354
+ } else if (rf?.type === 'json_schema') {
355
+ body.format = rf.schema;
356
+ }
357
+ }
358
+
310
359
  return body;
311
360
  }
312
361
 
@@ -431,9 +480,7 @@ export class WuAIProvider {
431
480
  * @returns {Promise<{ content: string, tool_calls?: Array, usage?: object }>}
432
481
  */
433
482
  async send(messages, options = {}) {
434
- this._ensureActive();
435
- const adapter = this._active;
436
- const config = this._activeConfig;
483
+ const { adapter, config } = this._resolveProvider(options.provider);
437
484
 
438
485
  // Custom adapter: call user function directly
439
486
  if (adapter.isCustom && adapter._sendFn) {
@@ -457,7 +504,25 @@ export class WuAIProvider {
457
504
  });
458
505
 
459
506
  const data = await response.json();
460
- return adapter.parseResponse(data);
507
+ const result = adapter.parseResponse(data);
508
+
509
+ // Anthropic prefill compensation: we prepended '{' to force JSON,
510
+ // so the response content is the continuation — restore the full JSON
511
+ if (adapter instanceof AnthropicAdapter && options.responseFormat && result.content) {
512
+ result.content = '{' + result.content;
513
+ }
514
+
515
+ // Validate JSON when responseFormat was requested
516
+ if (options.responseFormat && result.content) {
517
+ try {
518
+ result.parsed = JSON.parse(result.content);
519
+ } catch {
520
+ result.parseError = 'Response is not valid JSON';
521
+ logger.wuDebug('[wu-ai] responseFormat requested but LLM returned invalid JSON');
522
+ }
523
+ }
524
+
525
+ return result;
461
526
  }
462
527
 
463
528
  /**
@@ -468,9 +533,7 @@ export class WuAIProvider {
468
533
  * @yields {StreamChunk}
469
534
  */
470
535
  async *stream(messages, options = {}) {
471
- this._ensureActive();
472
- const adapter = this._active;
473
- const config = this._activeConfig;
536
+ const { adapter, config } = this._resolveProvider(options.provider);
474
537
 
475
538
  // Custom adapter: call user generator directly
476
539
  if (adapter.isCustom && adapter._streamFn) {
@@ -502,6 +565,10 @@ export class WuAIProvider {
502
565
  const decoder = new TextDecoder();
503
566
  let buffer = '';
504
567
 
568
+ // Anthropic prefill compensation for streaming:
569
+ // emit the '{' we used as prefill before the first real chunk
570
+ let needsPrefill = adapter instanceof AnthropicAdapter && !!options.responseFormat;
571
+
505
572
  try {
506
573
  while (true) {
507
574
  const { done, value } = await reader.read();
@@ -516,7 +583,13 @@ export class WuAIProvider {
516
583
  if (!trimmed) continue;
517
584
 
518
585
  const chunk = adapter.parseStreamChunk(trimmed);
519
- if (chunk) yield chunk;
586
+ if (chunk) {
587
+ if (needsPrefill && chunk.type === 'text') {
588
+ chunk.content = '{' + chunk.content;
589
+ needsPrefill = false;
590
+ }
591
+ yield chunk;
592
+ }
520
593
  if (chunk?.type === 'done') return;
521
594
  }
522
595
  }
@@ -552,10 +625,13 @@ export class WuAIProvider {
552
625
  }
553
626
  }
554
627
 
555
- // 4xx (except 429) — don't retry
556
- throw new Error(`[wu-ai] Request failed: ${response.status} ${response.statusText}`);
628
+ // 4xx (except 429) — don't retry, fail immediately
629
+ const clientError = new Error(`[wu-ai] Request failed: ${response.status} ${response.statusText}`);
630
+ clientError._noRetry = true;
631
+ throw clientError;
557
632
  } catch (err) {
558
633
  if (err.name === 'AbortError') throw err;
634
+ if (err._noRetry) throw err; // 4xx — don't retry
559
635
  lastError = err;
560
636
  if (attempt < this._retryConfig.maxRetries) {
561
637
  const delay = this._retryConfig.baseDelayMs * Math.pow(2, attempt);
@@ -579,6 +655,25 @@ export class WuAIProvider {
579
655
  return endpoint;
580
656
  }
581
657
 
658
+ /**
659
+ * Resolve which provider/adapter to use for a request.
660
+ * Supports per-call selection: options.provider = 'anthropic'
661
+ *
662
+ * @param {string} [providerName] - Optional provider name override
663
+ * @returns {{ adapter: BaseAdapter, config: object }}
664
+ */
665
+ _resolveProvider(providerName) {
666
+ if (providerName) {
667
+ const entry = this._providers.get(providerName);
668
+ if (!entry) {
669
+ throw new Error(`[wu-ai] Provider '${providerName}' not registered. Available: ${[...this._providers.keys()].join(', ')}`);
670
+ }
671
+ return { adapter: entry.adapter, config: entry.config };
672
+ }
673
+ this._ensureActive();
674
+ return { adapter: this._active, config: this._activeConfig };
675
+ }
676
+
582
677
  _ensureActive() {
583
678
  if (!this._active) {
584
679
  throw new Error(
package/src/ai/wu-ai.js CHANGED
@@ -6,19 +6,35 @@
6
6
  *
7
7
  * Architecture:
8
8
  * WuAI (this file)
9
- * ├── WuAIProvider → BYOL provider management
10
- * ├── WuAIPermissions Security, rate limiting, circuit breaker
11
- * ├── WuAIContext → Auto context collection for LLM
12
- * ├── WuAIActions → Tool/action registry and execution
13
- * ├── WuAIConversation → Multi-turn conversation manager
14
- * └── WuAITriggers → Event-to-AI bridge (reactive AI)
9
+ * ├── WuAIProvider → BYOL provider management (OpenAI, Anthropic, Ollama, Custom)
10
+ * ├── WuAIPermissions → 4-layer security (perms, rate limit, circuit breaker, loop guard)
11
+ * ├── WuAIContext → Auto context collection with token budget
12
+ * ├── WuAIActions → Tool/action registry and sandboxed execution
13
+ * ├── WuAIConversation → Multi-turn conversation manager with namespaces
14
+ * ├── WuAITriggers → Event-to-AI reactive bridge
15
+ * ├── WuAIAgent → Autonomous agent loop (goal → steps → done)
16
+ * ├── WuAIOrchestrate → Cross-micro-app AI coordination (capabilities + intents)
17
+ * └── BrowserPrimitives → Shared screenshot, click, type, a11y tree, interceptors
18
+ *
19
+ * Four Paradigms:
20
+ * 1. App → LLM send/stream/json → conversation with tool loops
21
+ * 2. LLM → App tools/execute/expose → external agents call into the app
22
+ * 3. AI Director agent(goal) → autonomous multi-step loop
23
+ * 4. MF Glue capability/intent → cross-app coordination via AI
15
24
  *
16
25
  * Public API (accessible via wu.ai):
17
26
  * wu.ai.provider(name, config) → Register LLM provider
18
27
  * wu.ai.send(message, opts) → Send message (non-streaming)
19
28
  * wu.ai.stream(message, opts) → Send message (streaming)
29
+ * wu.ai.json(message, schema?) → Send and get parsed JSON back
30
+ * wu.ai.agent(goal, opts) → Run autonomous agent loop
20
31
  * wu.ai.action(name, config) → Register an action/tool
21
32
  * wu.ai.trigger(name, config) → Register an event trigger
33
+ * wu.ai.capability(app, name, c) → Register app-scoped capability
34
+ * wu.ai.intent(desc, opts) → Resolve cross-app intent
35
+ * wu.ai.removeApp(appName) → Remove app capabilities (unmount)
36
+ * wu.ai.workflow(name, config) → Register reusable AI workflow
37
+ * wu.ai.runWorkflow(name, params)→ Execute workflow (async generator)
22
38
  * wu.ai.context.configure(...) → Configure context collection
23
39
  * wu.ai.abort(namespace?) → Abort active request
24
40
  *
@@ -35,6 +51,8 @@ import { WuAIContext } from './wu-ai-context.js';
35
51
  import { WuAIActions } from './wu-ai-actions.js';
36
52
  import { WuAIConversation } from './wu-ai-conversation.js';
37
53
  import { WuAITriggers } from './wu-ai-triggers.js';
54
+ import { WuAIAgent } from './wu-ai-agent.js';
55
+ import { WuAIOrchestrate } from './wu-ai-orchestrate.js';
38
56
  import { registerBrowserActions } from './wu-ai-browser.js';
39
57
 
40
58
  export class WuAI {
@@ -125,10 +143,38 @@ export class WuAI {
125
143
  this._modules.triggers.configure(config.triggers);
126
144
  }
127
145
 
146
+ // 7. Agent (depends on conversation, actions, context, permissions, eventBus)
147
+ this._modules.agent = new WuAIAgent({
148
+ conversation: this._modules.conversation,
149
+ actions: this._modules.actions,
150
+ context: this._modules.context,
151
+ permissions: this._modules.permissions,
152
+ eventBus: this._eventBus,
153
+ });
154
+ if (config.agent) {
155
+ this._modules.agent.configure(config.agent);
156
+ }
157
+
158
+ // 8. Orchestrate — Paradigm 4: AI as microfrontend glue
159
+ // Agent ref is passed so workflows can delegate to the agent loop
160
+ // Store ref is passed for deterministic setState steps
161
+ this._modules.orchestrate = new WuAIOrchestrate({
162
+ actions: this._modules.actions,
163
+ conversation: this._modules.conversation,
164
+ context: this._modules.context,
165
+ permissions: this._modules.permissions,
166
+ eventBus: this._eventBus,
167
+ agent: this._modules.agent,
168
+ store: this._store,
169
+ });
170
+ if (config.orchestrate) {
171
+ this._modules.orchestrate.configure(config.orchestrate);
172
+ }
173
+
128
174
  this._initialized = true;
129
175
  logger.wuInfo('[wu-ai] Initialized');
130
176
 
131
- // 7. Browser automation actions (screenshot, click, type, network, etc.)
177
+ // 9. Browser automation actions (screenshot, click, type, network, etc.)
132
178
  // Must be AFTER _initialized = true to prevent recursive init loop
133
179
  if (typeof window !== 'undefined') {
134
180
  registerBrowserActions(this, this._core);
@@ -177,7 +223,12 @@ export class WuAI {
177
223
  * Send a message to the LLM and get a complete response.
178
224
  *
179
225
  * @param {string} message - User message
180
- * @param {object} [options] - { namespace, systemPrompt, templateVars, temperature, maxTokens, signal }
226
+ * @param {object} [options] - { namespace, systemPrompt, templateVars, temperature, maxTokens, provider, responseFormat, signal }
227
+ * @param {string} [options.provider] - Use a specific registered provider (e.g., 'anthropic', 'openai')
228
+ * @param {string|object} [options.responseFormat] - Request JSON output.
229
+ * - `'json'` — simple JSON mode (OpenAI: json_object, Ollama: format:"json", Anthropic: prompt injection)
230
+ * - `{ type: 'json_schema', schema: {...}, name?: string }` — structured output with JSON Schema
231
+ * (OpenAI: native json_schema mode, Ollama: schema in format, Anthropic: schema in system prompt)
181
232
  * @returns {Promise<{ content: string, tool_results?: Array, usage?: object, namespace: string }>}
182
233
  *
183
234
  * @example
@@ -186,6 +237,21 @@ export class WuAI {
186
237
  *
187
238
  * // With namespace for separate conversation
188
239
  * const response = await wu.ai.send('Analyze this chart', { namespace: 'analytics' });
240
+ *
241
+ * // Use a specific provider for this message
242
+ * const response = await wu.ai.send('Translate this', { provider: 'anthropic' });
243
+ *
244
+ * // Simple JSON mode
245
+ * const response = await wu.ai.send('List 5 colors', { responseFormat: 'json' });
246
+ *
247
+ * // Structured output with JSON Schema
248
+ * const response = await wu.ai.send('List 5 colors', {
249
+ * responseFormat: {
250
+ * type: 'json_schema',
251
+ * schema: { type: 'object', properties: { colors: { type: 'array', items: { type: 'string' } } } },
252
+ * name: 'color_list',
253
+ * },
254
+ * });
189
255
  */
190
256
  async send(message, options = {}) {
191
257
  this._ensureInit();
@@ -210,6 +276,71 @@ export class WuAI {
210
276
  yield* this._modules.conversation.stream(message, options);
211
277
  }
212
278
 
279
+ /**
280
+ * Send a message and get a parsed JSON response.
281
+ * Shortcut for send() with responseFormat + automatic JSON.parse().
282
+ *
283
+ * @param {string} message - User message
284
+ * @param {object} [options] - All send() options plus:
285
+ * @param {object} [options.schema] - JSON Schema for structured output
286
+ * @param {string} [options.schemaName='response'] - Schema name (required by OpenAI)
287
+ * @returns {Promise<{ data: object|null, raw: string, error?: string, usage?: object, namespace: string }>}
288
+ *
289
+ * @example
290
+ * // Simple JSON (no schema)
291
+ * const { data } = await wu.ai.json('List 5 colors as a JSON array');
292
+ * // data = ["red", "blue", ...]
293
+ *
294
+ * // With schema
295
+ * const { data } = await wu.ai.json('List 5 colors', {
296
+ * schema: { type: 'object', properties: { colors: { type: 'array', items: { type: 'string' } } } },
297
+ * });
298
+ * // data = { colors: ["red", "blue", ...] }
299
+ *
300
+ * // With schema + provider
301
+ * const { data } = await wu.ai.json('List 5 colors', {
302
+ * schema: mySchema,
303
+ * provider: 'openai',
304
+ * temperature: 0,
305
+ * });
306
+ */
307
+ async json(message, options = {}) {
308
+ this._ensureInit();
309
+
310
+ const { schema, schemaName, ...rest } = options;
311
+
312
+ let responseFormat;
313
+ if (schema) {
314
+ responseFormat = { type: 'json_schema', schema, name: schemaName || 'response' };
315
+ } else {
316
+ responseFormat = options.responseFormat || 'json';
317
+ }
318
+
319
+ const response = await this._modules.conversation.send(message, { ...rest, responseFormat });
320
+
321
+ // The provider already attempts parse and sets response.parsed / response.parseError
322
+ let data = null;
323
+ let error;
324
+
325
+ if (response.parsed !== undefined) {
326
+ data = response.parsed;
327
+ } else if (response.content) {
328
+ try {
329
+ data = JSON.parse(response.content);
330
+ } catch {
331
+ error = 'LLM response is not valid JSON';
332
+ }
333
+ }
334
+
335
+ return {
336
+ data,
337
+ raw: response.content || '',
338
+ error: error || response.parseError,
339
+ usage: response.usage,
340
+ namespace: response.namespace,
341
+ };
342
+ }
343
+
213
344
  /**
214
345
  * Abort active request(s).
215
346
  *
@@ -300,6 +431,199 @@ export class WuAI {
300
431
  return this._modules.triggers.fire(name, eventData);
301
432
  }
302
433
 
434
+ // ─── Agent (Paradigm 3: Autonomous AI) ─────────────────────────
435
+
436
+ /**
437
+ * Run an autonomous agent that pursues a goal using available tools.
438
+ * Returns an async generator that yields step-by-step results.
439
+ *
440
+ * @param {string} goal - What the agent should accomplish
441
+ * @param {object} [options]
442
+ * @param {number} [options.maxSteps=10] - Maximum autonomous steps
443
+ * @param {string} [options.provider] - Which LLM provider to use
444
+ * @param {string} [options.namespace] - Conversation namespace
445
+ * @param {string} [options.systemPrompt] - Override system prompt
446
+ * @param {Function} [options.onStep] - Callback per step: (stepResult) => void
447
+ * @param {Function} [options.shouldContinue] - Human-in-the-loop: (stepResult) => boolean|Promise<boolean>
448
+ * @param {AbortSignal} [options.signal] - Abort signal
449
+ * @returns {AsyncGenerator<AgentStepResult>}
450
+ *
451
+ * @example
452
+ * // Basic usage
453
+ * for await (const step of wu.ai.agent('Find all orders above $100 and summarize them')) {
454
+ * console.log(`Step ${step.step}: ${step.content?.slice(0, 100)}`);
455
+ * if (step.done) console.log('Agent finished!');
456
+ * }
457
+ *
458
+ * // With human-in-the-loop
459
+ * for await (const step of wu.ai.agent('Reorganize the product catalog', {
460
+ * shouldContinue: (step) => confirm(`Continue? Step ${step.step}: ${step.content?.slice(0, 50)}`),
461
+ * })) {
462
+ * updateUI(step);
463
+ * }
464
+ */
465
+ async *agent(goal, options = {}) {
466
+ this._ensureInit();
467
+ yield* this._modules.agent.run(goal, options);
468
+ }
469
+
470
+ // ─── Paradigm 4: AI as Microfrontend Glue ─────────────────────
471
+
472
+ /**
473
+ * Register a capability scoped to a specific micro-app.
474
+ *
475
+ * Each micro-app calls this to declare what it can do. The AI uses
476
+ * the capability map to resolve cross-app intents.
477
+ *
478
+ * @param {string} appName - The micro-app name (e.g., 'orders', 'dashboard')
479
+ * @param {string} actionName - The capability name (e.g., 'getRecent', 'updateKPIs')
480
+ * @param {object} config - Same as wu.ai.action() config:
481
+ * { description, parameters, handler, confirm?, permissions?, dangerous? }
482
+ *
483
+ * @example
484
+ * // In orders micro-app (React):
485
+ * wu.ai.capability('orders', 'getRecent', {
486
+ * description: 'Get the N most recent orders',
487
+ * parameters: { limit: { type: 'number' } },
488
+ * handler: async (params) => fetchOrders({ limit: params.limit || 10 }),
489
+ * });
490
+ *
491
+ * // In dashboard micro-app (Svelte):
492
+ * wu.ai.capability('dashboard', 'updateKPIs', {
493
+ * description: 'Refresh the KPI cards with latest data',
494
+ * handler: async () => { refreshKPIs(); return { updated: true }; },
495
+ * });
496
+ */
497
+ capability(appName, actionName, config) {
498
+ this._ensureInit();
499
+ this._modules.orchestrate.register(appName, actionName, config);
500
+ return this;
501
+ }
502
+
503
+ /**
504
+ * Resolve a cross-app intent in a single conversation turn.
505
+ *
506
+ * The AI receives the full capability map (what each app can do),
507
+ * current application state, and mounted apps. It resolves the
508
+ * intent by calling the right capabilities across app boundaries.
509
+ *
510
+ * @param {string} description - Natural language intent
511
+ * @param {object} [options]
512
+ * @param {string[]} [options.plan] - Optional action sequence hint
513
+ * @param {string} [options.provider] - LLM provider override
514
+ * @param {number} [options.temperature] - Temperature override
515
+ * @param {number} [options.maxTokens] - Max tokens override
516
+ * @param {AbortSignal} [options.signal] - Abort signal
517
+ * @param {string|object} [options.responseFormat] - Response format
518
+ * @returns {Promise<{ content: string, tool_results: Array, usage: object|null, resolved: boolean, appsInvolved: string[] }>}
519
+ *
520
+ * @example
521
+ * // Simple cross-app query
522
+ * const result = await wu.ai.intent('Show me the top customer by order count');
523
+ * // AI calls orders:getRecent → aggregates → returns answer
524
+ *
525
+ * // With plan hint
526
+ * const result = await wu.ai.intent('Update all views after a new order', {
527
+ * plan: ['orders:getRecent', 'dashboard:updateKPIs', 'analytics:refresh'],
528
+ * });
529
+ *
530
+ * // With JSON response
531
+ * const result = await wu.ai.intent('Get order stats by status', {
532
+ * responseFormat: 'json',
533
+ * });
534
+ */
535
+ async intent(description, options = {}) {
536
+ this._ensureInit();
537
+ return this._modules.orchestrate.resolve(description, options);
538
+ }
539
+
540
+ /**
541
+ * Remove all capabilities for a micro-app.
542
+ * Call this when a micro-app is unmounted to prevent stale
543
+ * capabilities from appearing in the AI's capability map.
544
+ *
545
+ * @param {string} appName - The micro-app name
546
+ *
547
+ * @example
548
+ * // In unmount lifecycle:
549
+ * wu.ai.removeApp('orders');
550
+ */
551
+ removeApp(appName) {
552
+ this._ensureInit();
553
+ this._modules.orchestrate.removeApp(appName);
554
+ return this;
555
+ }
556
+
557
+ /**
558
+ * Register a reusable AI workflow — a named, step-by-step recipe
559
+ * that the AI agent follows using browser automation.
560
+ *
561
+ * @param {string} name - Workflow name (e.g., 'register-user')
562
+ * @param {object} config
563
+ * @param {string} config.description - What this workflow does
564
+ * @param {string[]} config.steps - Step-by-step instructions
565
+ * Use {{paramName}} for parameter interpolation.
566
+ * @param {object} [config.parameters] - Parameter definitions
567
+ * @param {number} [config.maxSteps=15] - Max agent steps
568
+ * @param {string} [config.provider] - LLM provider
569
+ *
570
+ * @example
571
+ * wu.ai.workflow('register-user', {
572
+ * description: 'Register a new user in the system',
573
+ * steps: [
574
+ * 'Navigate to the Customers section',
575
+ * 'Click the "Add Customer" button',
576
+ * 'Type "{{name}}" into the name field',
577
+ * 'Type "{{email}}" into the email field',
578
+ * 'Click Submit',
579
+ * 'Verify the success message appears',
580
+ * ],
581
+ * parameters: {
582
+ * name: { type: 'string', required: true },
583
+ * email: { type: 'string', required: true },
584
+ * },
585
+ * });
586
+ */
587
+ workflow(name, config) {
588
+ this._ensureInit();
589
+ this._modules.orchestrate.registerWorkflow(name, config);
590
+ return this;
591
+ }
592
+
593
+ /**
594
+ * Execute a registered workflow. Returns an async generator
595
+ * so you can observe each step in real time.
596
+ *
597
+ * @param {string} name - Workflow name
598
+ * @param {object} [params={}] - Parameters to fill into the steps
599
+ * @param {object} [options={}]
600
+ * @param {Function} [options.onStep] - Callback per step
601
+ * @param {Function} [options.shouldContinue] - Human-in-the-loop gate
602
+ * @param {AbortSignal} [options.signal] - Abort signal
603
+ * @returns {AsyncGenerator<AgentStepResult>}
604
+ *
605
+ * @example
606
+ * // Run and watch every step
607
+ * for await (const step of wu.ai.runWorkflow('register-user', {
608
+ * name: 'Juan Pérez',
609
+ * email: 'juan@test.com',
610
+ * })) {
611
+ * console.log(`Step ${step.step}: ${step.content}`);
612
+ * if (step.type === 'done') console.log('Workflow complete!');
613
+ * }
614
+ *
615
+ * // With human approval per step
616
+ * for await (const step of wu.ai.runWorkflow('register-user', params, {
617
+ * shouldContinue: (s) => confirm(`Continue? ${s.content?.slice(0, 60)}`),
618
+ * })) {
619
+ * renderStep(step);
620
+ * }
621
+ */
622
+ async *runWorkflow(name, params = {}, options = {}) {
623
+ this._ensureInit();
624
+ yield* this._modules.orchestrate.executeWorkflow(name, params, options);
625
+ }
626
+
303
627
  // ─── Context ───────────────────────────────────────────────────
304
628
 
305
629
  /**
@@ -436,6 +760,8 @@ export class WuAI {
436
760
  actions: this._modules.actions.getStats(),
437
761
  conversation: this._modules.conversation.getStats(),
438
762
  triggers: this._modules.triggers.getStats(),
763
+ agent: this._modules.agent.getStats(),
764
+ orchestrate: this._modules.orchestrate.getStats(),
439
765
  };
440
766
  }
441
767
 
@@ -445,6 +771,8 @@ export class WuAI {
445
771
  destroy() {
446
772
  if (!this._initialized) return;
447
773
 
774
+ this._modules.orchestrate.destroy();
775
+ this._modules.agent.destroy();
448
776
  this._modules.conversation.abortAll();
449
777
  this._modules.triggers.destroy();
450
778
  this._modules = {};
@@ -470,5 +798,7 @@ export class WuAI {
470
798
  if (config.context) this._modules.context.configure(config.context);
471
799
  if (config.conversation) this._modules.conversation.configure(config.conversation);
472
800
  if (config.triggers) this._modules.triggers.configure(config.triggers);
801
+ if (config.agent) this._modules.agent.configure(config.agent);
802
+ if (config.orchestrate) this._modules.orchestrate.configure(config.orchestrate);
473
803
  }
474
804
  }
@@ -16,8 +16,7 @@ export class WuCache {
16
16
  maxItems: options.maxItems || 100,
17
17
  defaultTTL: options.defaultTTL || 3600000, // 1 hour
18
18
  persistent: options.persistent !== false,
19
- storage: options.storage || 'memory',
20
- compression: options.compression || false
19
+ storage: options.storage || 'memory'
21
20
  };
22
21
 
23
22
  // 🔐 Rate limiting configuration
@@ -123,7 +123,7 @@ export class WuCore {
123
123
 
124
124
  /**
125
125
  * Registrar una aplicacion
126
- * @param {Object} appConfig - { name, url }
126
+ * @param {Object} appConfig - { name, url, keepAlive, sandbox, container, ... }
127
127
  */
128
128
  async registerApp(appConfig) {
129
129
  const { name, url } = appConfig;
@@ -135,10 +135,9 @@ export class WuCore {
135
135
  const manifestData = await this.manifest.load(url);
136
136
  this.manifests.set(name, manifestData);
137
137
 
138
- // Registrar la app
138
+ // Registrar la app — preserve all config fields (keepAlive, sandbox, container, etc.)
139
139
  this.apps.set(name, {
140
- name,
141
- url,
140
+ ...appConfig,
142
141
  manifest: manifestData,
143
142
  status: 'registered'
144
143
  });