llmist 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,6 +1,6 @@
  import {
  createGadget
- } from "./chunk-JEBGLCDW.js";
+ } from "./chunk-MO5ONHPZ.js";
  import {
  MockBuilder,
  MockManager,
@@ -13,7 +13,7 @@ import {
  mockLLM,
  validateAndApplyDefaults,
  validateGadgetParams
- } from "./chunk-DCW33WV7.js";
+ } from "./chunk-PVHHXDCV.js";
  import {
  AgentBuilder,
  AnthropicMessagesProvider,
@@ -70,7 +70,7 @@ import {
  resolveRulesTemplate,
  runWithHandlers,
  stream
- } from "./chunk-TP7HE3MN.js";
+ } from "./chunk-J3NCIWMY.js";

  // src/index.ts
  init_builder();
@@ -80,18 +80,59 @@ import { z } from "zod";
  // src/agent/hook-presets.ts
  var HookPresets = class _HookPresets {
  /**
- * Preset: Basic logging of all events.
+ * Logs LLM calls and gadget execution to console with optional verbosity.
  *
- * Logs LLM calls and gadget executions to console.
+ * **Output (basic mode):**
+ * - LLM call start/complete events with iteration numbers
+ * - Gadget execution start/complete with gadget names
+ * - Token counts when available
+ *
+ * **Output (verbose mode):**
+ * - All basic mode output
+ * - Full gadget parameters (formatted JSON)
+ * - Full gadget results
+ * - Complete LLM response text
+ *
+ * **Use cases:**
+ * - Basic development debugging and execution flow visibility
+ * - Understanding agent decision-making and tool usage
+ * - Troubleshooting gadget invocations
+ *
+ * **Performance:** Minimal overhead. Console writes are synchronous but fast.
  *
  * @param options - Logging options
- * @returns Hook configuration
+ * @param options.verbose - Include full parameters and results. Default: false
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic logging
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.logging())
+ * .ask("Calculate 15 * 23");
+ * // Output: [LLM] Starting call (iteration 0)
+ * // [GADGET] Executing Calculator
+ * // [GADGET] Completed Calculator
+ * // [LLM] Completed (tokens: 245)
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.logging())
- * .withHooks(HookPresets.logging({ verbose: true }))
+ * // Verbose logging with full details
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.logging({ verbose: true }))
+ * .ask("Calculate 15 * 23");
+ * // Output includes: parameters, results, and full responses
  * ```
+ *
+ * @example
+ * ```typescript
+ * // Environment-based verbosity
+ * const isDev = process.env.NODE_ENV === 'development';
+ * .withHooks(HookPresets.logging({ verbose: isDev }))
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}
  */
  static logging(options = {}) {
  return {
@@ -123,16 +164,54 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Performance timing for all operations.
- *
  * Measures and logs execution time for LLM calls and gadgets.
  *
- * @returns Hook configuration
+ * **Output:**
+ * - Duration in milliseconds with ⏱️ emoji for each operation
+ * - Separate timing for each LLM iteration
+ * - Separate timing for each gadget execution
+ *
+ * **Use cases:**
+ * - Performance profiling and optimization
+ * - Identifying slow operations (LLM calls vs gadget execution)
+ * - Monitoring response times in production
+ * - Capacity planning and SLA tracking
+ *
+ * **Performance:** Negligible overhead. Uses Date.now() for timing measurements.
+ *
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic timing
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.timing())
+ * .withGadgets(Weather, Database)
+ * .ask("What's the weather in NYC?");
+ * // Output: ⏱️ LLM call took 1234ms
+ * // ⏱️ Gadget Weather took 567ms
+ * // ⏱️ LLM call took 890ms
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Combined with logging for full context
+ * .withHooks(HookPresets.merge(
+ * HookPresets.logging(),
+ * HookPresets.timing()
+ * ))
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.timing())
+ * // Correlate performance with cost
+ * .withHooks(HookPresets.merge(
+ * HookPresets.timing(),
+ * HookPresets.tokenTracking()
+ * ))
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}
  */
  static timing() {
  const timings = /* @__PURE__ */ new Map();
@@ -169,16 +248,57 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Token usage tracking.
+ * Tracks cumulative token usage across all LLM calls.
+ *
+ * **Output:**
+ * - Per-call token count with 📊 emoji
+ * - Cumulative total across all calls
+ * - Call count for average calculations
+ *
+ * **Use cases:**
+ * - Cost monitoring and budget tracking
+ * - Optimizing prompts to reduce token usage
+ * - Comparing token efficiency across different approaches
+ * - Real-time cost estimation
  *
- * Tracks and logs cumulative token usage across all LLM calls.
+ * **Performance:** Minimal overhead. Simple counter increments.
  *
- * @returns Hook configuration
+ * **Note:** Token counts depend on the provider's response. Some providers
+ * may not include usage data, in which case counts won't be logged.
+ *
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic token tracking
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.tokenTracking())
+ * .ask("Summarize this document...");
+ * // Output: 📊 Tokens this call: 1,234
+ * // 📊 Total tokens: 1,234 (across 1 calls)
+ * // 📊 Tokens this call: 567
+ * // 📊 Total tokens: 1,801 (across 2 calls)
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.tokenTracking())
+ * // Cost calculation with custom hook
+ * let totalTokens = 0;
+ * .withHooks(HookPresets.merge(
+ * HookPresets.tokenTracking(),
+ * {
+ * observers: {
+ * onLLMCallComplete: async (ctx) => {
+ * totalTokens += ctx.usage?.totalTokens ?? 0;
+ * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens
+ * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);
+ * },
+ * },
+ * }
+ * ))
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
  */
  static tokenTracking() {
  let totalTokens = 0;
@@ -197,16 +317,64 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Error logging.
+ * Logs detailed error information for debugging and troubleshooting.
+ *
+ * **Output:**
+ * - LLM errors with ❌ emoji, including model and recovery status
+ * - Gadget errors with full context (parameters, error message)
+ * - Separate logging for LLM and gadget failures
  *
- * Logs detailed error information for debugging.
+ * **Use cases:**
+ * - Troubleshooting production issues
+ * - Understanding error patterns and frequency
+ * - Debugging error recovery behavior
+ * - Collecting error metrics for monitoring
  *
- * @returns Hook configuration
+ * **Performance:** Minimal overhead. Only logs when errors occur.
+ *
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic error logging
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.errorLogging())
+ * .withGadgets(Database)
+ * .ask("Fetch user data");
+ * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded
+ * // Model: gpt-5-nano
+ * // Recovered: true
+ * // Output (on gadget error): ❌ Gadget Error: Database
+ * // Error: Connection timeout
+ * // Parameters: {...}
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.errorLogging())
+ * // Combine with monitoring for full context
+ * .withHooks(HookPresets.merge(
+ * HookPresets.monitoring(), // Includes errorLogging
+ * customErrorAnalytics
+ * ))
  * ```
+ *
+ * @example
+ * ```typescript
+ * // Error analytics collection
+ * const errors: any[] = [];
+ * .withHooks(HookPresets.merge(
+ * HookPresets.errorLogging(),
+ * {
+ * observers: {
+ * onLLMCallError: async (ctx) => {
+ * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });
+ * },
+ * },
+ * }
+ * ))
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}
  */
  static errorLogging() {
  return {
@@ -227,48 +395,131 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Silent (no output).
+ * Returns empty hook configuration for clean output without any logging.
  *
- * Useful for testing or when you want complete control.
+ * **Output:**
+ * - None. Returns {} (empty object).
+ *
+ * **Use cases:**
+ * - Clean test output without console noise
+ * - Production environments where logging is handled externally
+ * - Baseline for custom hook development
+ * - Temporary disable of all hook output
+ *
+ * **Performance:** Zero overhead. No-op hook configuration.
  *
  * @returns Empty hook configuration
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.silent())
+ * // Clean test output
+ * describe('Agent tests', () => {
+ * it('should calculate correctly', async () => {
+ * const result = await LLMist.createAgent()
+ * .withHooks(HookPresets.silent()) // No console output
+ * .withGadgets(Calculator)
+ * .askAndCollect("What is 15 times 23?");
+ *
+ * expect(result).toContain("345");
+ * });
+ * });
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Conditional silence based on environment
+ * const isTesting = process.env.NODE_ENV === 'test';
+ * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}
  */
  static silent() {
  return {};
  }
  /**
- * Merge multiple hook configurations.
+ * Combines multiple hook configurations into one.
+ *
+ * Merge allows you to compose preset and custom hooks for modular monitoring
+ * configurations. Understanding merge behavior is crucial for proper composition.
+ *
+ * **Merge behavior:**
+ * - **Observers:** Composed - all handlers run sequentially in order
+ * - **Interceptors:** Last one wins - only the last interceptor applies
+ * - **Controllers:** Last one wins - only the last controller applies
+ *
+ * **Why interceptors/controllers don't compose:**
+ * - Interceptors have different signatures per method, making composition impractical
+ * - Controllers return specific actions that can't be meaningfully combined
+ * - Only observers support composition because they're read-only and independent
  *
- * Combines hook presets or custom configurations into a single object.
- * When multiple hooks target the same lifecycle event, they are composed
- * to run sequentially (all handlers will execute).
+ * **Use cases:**
+ * - Combining multiple presets (logging + timing + tokens)
+ * - Adding custom hooks to presets
+ * - Building modular, reusable monitoring configurations
+ * - Environment-specific hook composition
  *
- * @param hookSets - Array of hook configurations to merge
- * @returns Merged hook configuration with composed handlers
+ * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.
+ *
+ * @param hookSets - Variable number of hook configurations to merge
+ * @returns Single merged hook configuration with composed/overridden handlers
  *
  * @example
  * ```typescript
+ * // Combine multiple presets
+ * .withHooks(HookPresets.merge(
+ * HookPresets.logging(),
+ * HookPresets.timing(),
+ * HookPresets.tokenTracking()
+ * ))
+ * // All observers from all three presets will run
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Add custom observer to preset (both run)
  * .withHooks(HookPresets.merge(
- * HookPresets.logging({ verbose: true }),
  * HookPresets.timing(),
- * HookPresets.tokenTracking(),
  * {
- * // Custom hook
  * observers: {
  * onLLMCallComplete: async (ctx) => {
- * saveToDatabase(ctx);
- * }
- * }
+ * await saveMetrics({ tokens: ctx.usage?.totalTokens });
+ * },
+ * },
  * }
  * ))
- * // All onLLMCallComplete handlers from logging, timing, tokenTracking,
- * // and the custom hook will execute in order
  * ```
+ *
+ * @example
+ * ```typescript
+ * // Multiple interceptors (last wins!)
+ * .withHooks(HookPresets.merge(
+ * {
+ * interceptors: {
+ * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored
+ * },
+ * },
+ * {
+ * interceptors: {
+ * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins
+ * },
+ * }
+ * ))
+ * // Result: text will be lowercase
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Modular environment-based configuration
+ * const baseHooks = HookPresets.errorLogging();
+ * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));
+ * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());
+ *
+ * const hooks = process.env.NODE_ENV === 'production' ? prodHooks : devHooks;
+ * .withHooks(hooks)
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}
  */
  static merge(...hookSets) {
  const merged = {
@@ -301,18 +552,62 @@ var HookPresets = class _HookPresets {
  return merged;
  }
  /**
- * Preset: Complete monitoring suite.
+ * Composite preset combining logging, timing, tokenTracking, and errorLogging.
  *
- * Combines logging, timing, and token tracking.
+ * This is the recommended preset for development and initial production deployments,
+ * providing comprehensive observability with a single method call.
  *
- * @param options - Options for monitoring
- * @returns Merged hook configuration
+ * **Includes:**
+ * - All output from `logging()` preset (with optional verbosity)
+ * - All output from `timing()` preset (execution times)
+ * - All output from `tokenTracking()` preset (token usage)
+ * - All output from `errorLogging()` preset (error details)
+ *
+ * **Output format:**
+ * - Event logging: [LLM]/[GADGET] messages
+ * - Timing: ⏱️ emoji with milliseconds
+ * - Tokens: 📊 emoji with per-call and cumulative counts
+ * - Errors: ❌ emoji with full error details
+ *
+ * **Use cases:**
+ * - Full observability during development
+ * - Comprehensive monitoring in production
+ * - One-liner for complete agent visibility
+ * - Troubleshooting and debugging with full context
+ *
+ * **Performance:** Combined overhead of all four presets, but still minimal in practice.
+ *
+ * @param options - Monitoring options
+ * @param options.verbose - Passed to logging() preset for detailed output. Default: false
+ * @returns Merged hook configuration combining all monitoring presets
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.monitoring())
- * .withHooks(HookPresets.monitoring({ verbose: true }))
+ * // Basic monitoring (recommended for development)
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.monitoring())
+ * .withGadgets(Calculator, Weather)
+ * .ask("What is 15 times 23, and what's the weather in NYC?");
+ * // Output: All events, timing, tokens, and errors in one place
  * ```
+ *
+ * @example
+ * ```typescript
+ * // Verbose monitoring with full details
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.monitoring({ verbose: true }))
+ * .ask("Your prompt");
+ * // Output includes: parameters, results, and complete responses
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Environment-based monitoring
+ * const isDev = process.env.NODE_ENV === 'development';
+ * .withHooks(HookPresets.monitoring({ verbose: isDev }))
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}
  */
  static monitoring(options = {}) {
  return _HookPresets.merge(
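
The JSDoc added above documents the merge semantics (observers compose and run in order; interceptors and controllers are last-one-wins). A minimal sketch of that composition, assuming the exports shown in these docs (`LLMist`, `HookPresets`, `.withHooks()`, `.ask()`); this is illustrative only and not part of the published dist files:

```typescript
// Sketch (not shipped in the package): composing presets with a custom
// observer, following the merge behavior documented in the 0.1.5 JSDoc.
import { HookPresets, LLMist } from "llmist";

const hooks = HookPresets.merge(
  HookPresets.timing(),        // logs ⏱️ durations per LLM call and gadget
  HookPresets.tokenTracking(), // logs 📊 per-call and cumulative token counts
  {
    observers: {
      // Observers compose: this handler runs after the preset handlers above.
      onLLMCallComplete: async (ctx) => {
        console.log(`custom: ${ctx.usage?.totalTokens ?? 0} tokens this call`);
      },
    },
  },
);

await LLMist.createAgent()
  .withHooks(hooks)
  .ask("Your prompt");
```
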
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/agent/hook-presets.ts","../src/agent/index.ts","../src/gadgets/typed-gadget.ts"],"sourcesContent":["// Re-export Zod's z for schema definitions\n// Using llmist's z ensures .describe() metadata is preserved in JSON schemas\nexport { z } from \"zod\";\n// Syntactic sugar: Agent builder and event handlers\nexport type { HistoryMessage } from \"./agent/builder.js\";\nexport { AgentBuilder } from \"./agent/builder.js\";\nexport type { EventHandlers } from \"./agent/event-handlers.js\";\nexport { collectEvents, collectText, runWithHandlers } from \"./agent/event-handlers.js\";\n// Syntactic sugar: Hook presets\nexport type { LoggingOptions } from \"./agent/hook-presets.js\";\nexport { HookPresets } from \"./agent/hook-presets.js\";\n// Agent infrastructure\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n AgentOptions,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n IConversationManager,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n StreamProcessingResult,\n StreamProcessorOptions,\n} from \"./agent/index.js\";\nexport { ConversationManager, StreamProcessor } from \"./agent/index.js\";\nexport type { LLMistOptions } from \"./core/client.js\";\nexport { LLMist } from \"./core/client.js\";\nexport type { LLMMessage, LLMRole } from \"./core/messages.js\";\nexport { LLMMessageBuilder } from \"./core/messages.js\";\n// Model catalog\nexport type {\n CostEstimate,\n ModelFeatures,\n ModelLimits,\n ModelPricing,\n ModelSpec,\n} from \"./core/model-catalog.js\";\nexport { ModelRegistry } from \"./core/model-registry.js\";\n\n// Syntactic sugar: Model shortcuts and quick methods\nexport {\n getModelId,\n getProvider,\n hasProviderPrefix,\n MODEL_ALIASES,\n resolveModel,\n} from \"./core/model-shortcuts.js\";\nexport type {\n LLMGenerationOptions,\n LLMStream,\n LLMStreamChunk,\n ModelDescriptor,\n ProviderIdentifier,\n TokenUsage,\n} from \"./core/options.js\";\nexport { ModelIdentifierParser } from \"./core/options.js\";\nexport type { PromptConfig, PromptContext, PromptTemplate } from \"./core/prompt-config.js\";\nexport {\n DEFAULT_PROMPTS,\n resolvePromptTemplate,\n resolveRulesTemplate,\n} from \"./core/prompt-config.js\";\nexport type { QuickOptions } from \"./core/quick-methods.js\";\nexport { complete, stream } from \"./core/quick-methods.js\";\nexport type { CreateGadgetConfig } from \"./gadgets/create-gadget.js\";\nexport { createGadget } from \"./gadgets/create-gadget.js\";\n// Gadget infrastructure\nexport { BreakLoopException, HumanInputException } from \"./gadgets/exceptions.js\";\nexport { GadgetExecutor } from \"./gadgets/executor.js\";\nexport { BaseGadget } from \"./gadgets/gadget.js\";\nexport { StreamParser } from \"./gadgets/parser.js\";\nexport type { GadgetClass, GadgetOrClass } from \"./gadgets/registry.js\";\nexport { GadgetRegistry } from \"./gadgets/registry.js\";\n\n// Syntactic sugar: Typed gadgets 
and helpers\nexport type { GadgetConfig } from \"./gadgets/typed-gadget.js\";\nexport { Gadget } from \"./gadgets/typed-gadget.js\";\nexport type {\n GadgetExecutionResult,\n ParsedGadgetCall,\n StreamEvent,\n TextOnlyAction,\n TextOnlyContext,\n TextOnlyCustomHandler,\n TextOnlyGadgetConfig,\n TextOnlyHandler,\n TextOnlyStrategy,\n} from \"./gadgets/types.js\";\nexport type { ValidationIssue, ValidationResult } from \"./gadgets/validation.js\";\nexport { validateAndApplyDefaults, validateGadgetParams } from \"./gadgets/validation.js\";\nexport type { LoggerOptions } from \"./logging/logger.js\";\nexport { createLogger, defaultLogger } from \"./logging/logger.js\";\nexport {\n AnthropicMessagesProvider,\n createAnthropicProviderFromEnv,\n} from \"./providers/anthropic.js\";\nexport { discoverProviderAdapters } from \"./providers/discovery.js\";\nexport { createGeminiProviderFromEnv, GeminiGenerativeProvider } from \"./providers/gemini.js\";\nexport { createOpenAIProviderFromEnv, OpenAIChatProvider } from \"./providers/openai.js\";\nexport type { ProviderAdapter } from \"./providers/provider.js\";\n\n// Testing/Mock infrastructure\nexport type {\n MockMatcher,\n MockMatcherContext,\n MockOptions,\n MockRegistration,\n MockResponse,\n MockStats,\n} from \"./testing/index.js\";\nexport {\n createMockAdapter,\n createMockClient,\n createMockStream,\n createTextMockStream,\n getMockManager,\n MockBuilder,\n MockManager,\n MockProviderAdapter,\n mockLLM,\n} from \"./testing/index.js\";\n","/**\n * Common hook presets for logging, timing, and monitoring.\n *\n * @example\n * ```typescript\n * import { HookPresets } from 'llmist/hooks';\n *\n * const agent = LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"...\");\n *\n * // Or combine multiple presets\n * const agent = LLMist.createAgent()\n * .withHooks(HookPresets.merge(\n * HookPresets.logging({ verbose: true }),\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * .ask(\"...\");\n * ```\n */\n\nimport type { AgentHooks } from \"./hooks.js\";\n\n/**\n * Options for logging preset.\n */\nexport interface LoggingOptions {\n /** Include verbose details like parameters and results */\n verbose?: boolean;\n}\n\n/**\n * Common hook presets.\n */\nexport class HookPresets {\n /**\n * Preset: Basic logging of all events.\n *\n * Logs LLM calls and gadget executions to console.\n *\n * @param options - Logging options\n * @returns Hook configuration\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.logging())\n * .withHooks(HookPresets.logging({ verbose: true }))\n * ```\n */\n static logging(options: LoggingOptions = {}): AgentHooks {\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n console.log(`[LLM] Starting call (iteration ${ctx.iteration})`);\n },\n onLLMCallComplete: async (ctx) => {\n const tokens = ctx.usage?.totalTokens ?? \"unknown\";\n console.log(`[LLM] Completed (tokens: ${tokens})`);\n if (options.verbose && ctx.finalMessage) {\n console.log(`[LLM] Response: ${ctx.finalMessage}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n console.log(`[GADGET] Executing ${ctx.gadgetName}`);\n if (options.verbose) {\n console.log(`[GADGET] Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n onGadgetExecutionComplete: async (ctx) => {\n console.log(`[GADGET] Completed ${ctx.gadgetName}`);\n if (options.verbose) {\n const display = ctx.error ?? ctx.finalResult ?? 
\"(no result)\";\n console.log(`[GADGET] Result: ${display}`);\n }\n },\n },\n };\n }\n\n /**\n * Preset: Performance timing for all operations.\n *\n * Measures and logs execution time for LLM calls and gadgets.\n *\n * @returns Hook configuration\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.timing())\n * ```\n */\n static timing(): AgentHooks {\n const timings = new Map<string, number>();\n\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n timings.set(`llm-${ctx.iteration}`, Date.now());\n },\n onLLMCallComplete: async (ctx) => {\n const start = timings.get(`llm-${ctx.iteration}`);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ LLM call took ${duration}ms`);\n timings.delete(`llm-${ctx.iteration}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n const key = `gadget-${ctx.gadgetName}-${Date.now()}`;\n timings.set(key, Date.now());\n // Store key for lookup in complete handler\n (ctx as any)._timingKey = key;\n },\n onGadgetExecutionComplete: async (ctx) => {\n const key = (ctx as any)._timingKey;\n if (key) {\n const start = timings.get(key);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ Gadget ${ctx.gadgetName} took ${duration}ms`);\n timings.delete(key);\n }\n }\n },\n },\n };\n }\n\n /**\n * Preset: Token usage tracking.\n *\n * Tracks and logs cumulative token usage across all LLM calls.\n *\n * @returns Hook configuration\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.tokenTracking())\n * ```\n */\n static tokenTracking(): AgentHooks {\n let totalTokens = 0;\n let totalCalls = 0;\n\n return {\n observers: {\n onLLMCallComplete: async (ctx) => {\n totalCalls++;\n if (ctx.usage?.totalTokens) {\n totalTokens += ctx.usage.totalTokens;\n console.log(`📊 Tokens this call: ${ctx.usage.totalTokens}`);\n console.log(`📊 Total tokens: ${totalTokens} (across ${totalCalls} calls)`);\n }\n },\n },\n };\n }\n\n /**\n * Preset: Error logging.\n *\n * Logs detailed error information for debugging.\n *\n * @returns Hook configuration\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.errorLogging())\n * ```\n */\n static errorLogging(): AgentHooks {\n return {\n observers: {\n onLLMCallError: async (ctx) => {\n console.error(`❌ LLM Error (iteration ${ctx.iteration}):`, ctx.error.message);\n console.error(` Model: ${ctx.options.model}`);\n console.error(` Recovered: ${ctx.recovered}`);\n },\n onGadgetExecutionComplete: async (ctx) => {\n if (ctx.error) {\n console.error(`❌ Gadget Error: ${ctx.gadgetName}`);\n console.error(` Error: ${ctx.error}`);\n console.error(` Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n },\n };\n }\n\n /**\n * Preset: Silent (no output).\n *\n * Useful for testing or when you want complete control.\n *\n * @returns Empty hook configuration\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.silent())\n * ```\n */\n static silent(): AgentHooks {\n return {};\n }\n\n /**\n * Merge multiple hook configurations.\n *\n * Combines hook presets or custom configurations into a single object.\n * When multiple hooks target the same lifecycle event, they are composed\n * to run sequentially (all handlers will execute).\n *\n * @param hookSets - Array of hook configurations to merge\n * @returns Merged hook configuration with composed handlers\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.merge(\n * HookPresets.logging({ verbose: true }),\n * HookPresets.timing(),\n * HookPresets.tokenTracking(),\n * {\n * // Custom hook\n * 
observers: {\n * onLLMCallComplete: async (ctx) => {\n * saveToDatabase(ctx);\n * }\n * }\n * }\n * ))\n * // All onLLMCallComplete handlers from logging, timing, tokenTracking,\n * // and the custom hook will execute in order\n * ```\n */\n static merge(...hookSets: AgentHooks[]): AgentHooks {\n const merged: AgentHooks = {\n observers: {},\n interceptors: {},\n controllers: {},\n };\n\n // Compose observers: run all handlers for the same event\n for (const hooks of hookSets) {\n if (hooks.observers) {\n for (const [key, handler] of Object.entries(hooks.observers)) {\n const typedKey = key as keyof typeof hooks.observers;\n if (merged.observers![typedKey]) {\n // Compose: run both existing and new handler\n const existing = merged.observers![typedKey];\n merged.observers![typedKey] = async (ctx: any) => {\n await existing(ctx);\n await handler(ctx);\n };\n } else {\n merged.observers![typedKey] = handler as any;\n }\n }\n }\n\n // Interceptors: last one wins (complex signatures make composition impractical)\n // Each interceptor has different parameters (chunk, message, parameters, etc.)\n // so we can't meaningfully compose them like we do with observers\n if (hooks.interceptors) {\n Object.assign(merged.interceptors!, hooks.interceptors);\n }\n\n // Controllers: last one wins (can't meaningfully compose boolean returns)\n if (hooks.controllers) {\n Object.assign(merged.controllers!, hooks.controllers);\n }\n }\n\n return merged;\n }\n\n /**\n * Preset: Complete monitoring suite.\n *\n * Combines logging, timing, and token tracking.\n *\n * @param options - Options for monitoring\n * @returns Merged hook configuration\n *\n * @example\n * ```typescript\n * .withHooks(HookPresets.monitoring())\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * ```\n */\n static monitoring(options: LoggingOptions = {}): AgentHooks {\n return HookPresets.merge(\n HookPresets.logging(options),\n HookPresets.timing(),\n HookPresets.tokenTracking(),\n HookPresets.errorLogging(),\n );\n }\n}\n","/**\n * Agent module - Composable, single-responsibility architecture for LLM agents.\n * This module provides a cleaner alternative to the monolithic AgentLoop.\n */\n\nexport type { AgentOptions } from \"./agent.js\";\nexport { ConversationManager } from \"./conversation-manager.js\";\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n} from \"./hooks.js\";\nexport type { IConversationManager } from \"./interfaces.js\";\n\n// StreamProcessor for advanced use cases\nexport {\n type StreamProcessingResult,\n StreamProcessor,\n type StreamProcessorOptions,\n} from \"./stream-processor.js\";\n","/**\n * Type-safe gadget factory with automatic parameter inference.\n *\n * Gadget eliminates the need for manual type assertions\n * by automatically inferring parameter types from the Zod schema.\n *\n * 
@example\n * ```typescript\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\"]),\n * a: z.number(),\n * b: z.number(),\n * }),\n * }) {\n * // ✨ params is automatically typed!\n * execute(params: this['params']): string {\n * const { operation, a, b } = params; // All typed!\n * return operation === \"add\" ? String(a + b) : String(a - b);\n * }\n * }\n * ```\n */\n\nimport type { ZodType } from \"zod\";\nimport { BaseGadget } from \"./gadget.js\";\n\n/**\n * Infer the TypeScript type from a Zod schema.\n */\ntype InferSchema<T> = T extends ZodType<infer U> ? U : never;\n\n/**\n * Configuration for creating a typed gadget.\n */\nexport interface GadgetConfig<TSchema extends ZodType> {\n /** Human-readable description of what the gadget does */\n description: string;\n\n /** Zod schema for parameter validation */\n schema: TSchema;\n\n /** Optional custom name (defaults to class name) */\n name?: string;\n\n /** Optional timeout in milliseconds */\n timeoutMs?: number;\n}\n\n/**\n * Factory function to create a typed gadget base class.\n *\n * The returned class automatically infers parameter types from the Zod schema,\n * eliminating the need for manual type assertions in the execute method.\n *\n * @param config - Configuration with description and schema\n * @returns Base class to extend with typed execute method\n *\n * @example\n * ```typescript\n * import { z } from 'zod';\n * import { Gadget } from 'llmist';\n *\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n * a: z.number().describe(\"First number\"),\n * b: z.number().describe(\"Second number\"),\n * }),\n * }) {\n * execute(params: this['params']): string {\n * // params is automatically typed as:\n * // { operation: \"add\" | \"subtract\" | \"multiply\" | \"divide\"; a: number; b: number }\n * const { operation, a, b } = params;\n *\n * switch (operation) {\n * case \"add\": return String(a + b);\n * case \"subtract\": return String(a - b);\n * case \"multiply\": return String(a * b);\n * case \"divide\": return String(a / b);\n * }\n * }\n * }\n * ```\n *\n * @example\n * ```typescript\n * // With async execution\n * class WeatherGadget extends Gadget({\n * description: \"Fetches weather for a city\",\n * schema: z.object({\n * city: z.string().min(1).describe(\"City name\"),\n * }),\n * timeoutMs: 10000,\n * }) {\n * async execute(params: this['params']): Promise<string> {\n * const { city } = params; // Automatically typed as { city: string }\n * const weather = await fetchWeather(city);\n * return `Weather in ${city}: ${weather}`;\n * }\n * }\n * ```\n */\nexport function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>) {\n abstract class GadgetBase extends BaseGadget {\n description = config.description;\n parameterSchema = config.schema;\n name = config.name;\n timeoutMs = config.timeoutMs;\n\n /**\n * Type helper property for accessing inferred parameter type.\n * This is used in the execute method signature: `execute(params: this['params'])`\n *\n * Note: This is just for type inference - the actual params in execute()\n * will be Record<string, unknown> which you can safely cast to this['params']\n */\n readonly params!: InferSchema<TSchema>;\n\n /**\n * Execute the gadget. 
Subclasses should cast params to this['params'].\n *\n * @param params - Validated parameters from the LLM\n * @returns Result as a string (or Promise<string> for async gadgets)\n *\n * @example\n * ```typescript\n * execute(params: Record<string, unknown>): string {\n * const typed = params as this['params'];\n * // Now 'typed' is fully typed!\n * return String(typed.a + typed.b);\n * }\n * ```\n */\n abstract execute(params: Record<string, unknown>): string | Promise<string>;\n }\n\n return GadgetBase as {\n new (): GadgetBase & { params: InferSchema<TSchema> };\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA;AAEA;AALA,SAAS,SAAS;;;ACiCX,IAAM,cAAN,MAAM,aAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAevB,OAAO,QAAQ,UAA0B,CAAC,GAAe;AACvD,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,kCAAkC,IAAI,SAAS,GAAG;AAAA,QAChE;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,SAAS,IAAI,OAAO,eAAe;AACzC,kBAAQ,IAAI,4BAA4B,MAAM,GAAG;AACjD,cAAI,QAAQ,WAAW,IAAI,cAAc;AACvC,oBAAQ,IAAI,mBAAmB,IAAI,YAAY,EAAE;AAAA,UACnD;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,oBAAQ,IAAI,wBAAwB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UAC7E;AAAA,QACF;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,kBAAM,UAAU,IAAI,SAAS,IAAI,eAAe;AAChD,oBAAQ,IAAI,oBAAoB,OAAO,EAAE;AAAA,UAC3C;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,SAAqB;AAC1B,UAAM,UAAU,oBAAI,IAAoB;AAExC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,OAAO,IAAI,SAAS,IAAI,KAAK,IAAI,CAAC;AAAA,QAChD;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,QAAQ,QAAQ,IAAI,OAAO,IAAI,SAAS,EAAE;AAChD,cAAI,OAAO;AACT,kBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,oBAAQ,IAAI,+BAAqB,QAAQ,IAAI;AAC7C,oBAAQ,OAAO,OAAO,IAAI,SAAS,EAAE;AAAA,UACvC;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,gBAAM,MAAM,UAAU,IAAI,UAAU,IAAI,KAAK,IAAI,CAAC;AAClD,kBAAQ,IAAI,KAAK,KAAK,IAAI,CAAC;AAE3B,UAAC,IAAY,aAAa;AAAA,QAC5B;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,gBAAM,MAAO,IAAY;AACzB,cAAI,KAAK;AACP,kBAAM,QAAQ,QAAQ,IAAI,GAAG;AAC7B,gBAAI,OAAO;AACT,oBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,sBAAQ,IAAI,wBAAc,IAAI,UAAU,SAAS,QAAQ,IAAI;AAC7D,sBAAQ,OAAO,GAAG;AAAA,YACpB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,gBAA4B;AACjC,QAAI,cAAc;AAClB,QAAI,aAAa;AAEjB,WAAO;AAAA,MACL,WAAW;AAAA,QACT,mBAAmB,OAAO,QAAQ;AAChC;AACA,cAAI,IAAI,OAAO,aAAa;AAC1B,2BAAe,IAAI,MAAM;AACzB,oBAAQ,IAAI,+BAAwB,IAAI,MAAM,WAAW,EAAE;AAC3D,oBAAQ,IAAI,2BAAoB,WAAW,YAAY,UAAU,SAAS;AAAA,UAC5E;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,eAA2B;AAChC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,MAAM,+BAA0B,IAAI,SAAS,MAAM,IAAI,MAAM,OAAO;AAC5E,kBAAQ,MAAM,aAAa,IAAI,QAAQ,KAAK,EAAE;AAC9C,kBAAQ,MAAM,iBAAiB,IAAI,SAAS,EAAE;AAAA,QAChD;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,cAAI,IAAI,OAAO;AACb,oBAAQ,MAAM,wBAAmB,IAAI,UAAU,EAAE;AACjD,oBAAQ,MAAM,aAAa,IAAI,KAAK,EAAE;AACtC,oBAAQ,MAAM,kBAAkB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UACzE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,SAAqB;AAC1B,WAAO,CAAC;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+BA,OAAO,SAAS,UAAoC;AAClD,UAAM,SAAqB;AAAA,MACzB,WAAW,CAAC;AAAA,MACZ,cA
Ac,CAAC;AAAA,MACf,aAAa,CAAC;AAAA,IAChB;AAGA,eAAW,SAAS,UAAU;AAC5B,UAAI,MAAM,WAAW;AACnB,mBAAW,CAAC,KAAK,OAAO,KAAK,OAAO,QAAQ,MAAM,SAAS,GAAG;AAC5D,gBAAM,WAAW;AACjB,cAAI,OAAO,UAAW,QAAQ,GAAG;AAE/B,kBAAM,WAAW,OAAO,UAAW,QAAQ;AAC3C,mBAAO,UAAW,QAAQ,IAAI,OAAO,QAAa;AAChD,oBAAM,SAAS,GAAG;AAClB,oBAAM,QAAQ,GAAG;AAAA,YACnB;AAAA,UACF,OAAO;AACL,mBAAO,UAAW,QAAQ,IAAI;AAAA,UAChC;AAAA,QACF;AAAA,MACF;AAKA,UAAI,MAAM,cAAc;AACtB,eAAO,OAAO,OAAO,cAAe,MAAM,YAAY;AAAA,MACxD;AAGA,UAAI,MAAM,aAAa;AACrB,eAAO,OAAO,OAAO,aAAc,MAAM,WAAW;AAAA,MACtD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBA,OAAO,WAAW,UAA0B,CAAC,GAAe;AAC1D,WAAO,aAAY;AAAA,MACjB,aAAY,QAAQ,OAAO;AAAA,MAC3B,aAAY,OAAO;AAAA,MACnB,aAAY,cAAc;AAAA,MAC1B,aAAY,aAAa;AAAA,IAC3B;AAAA,EACF;AACF;;;ACpSA;AAkCA;;;AFQA;AAEA;AASA;AAGA;AAeA;AAEA;AAMA;AAIA;AACA;AAEA;AAEA;;;AGWO,SAAS,OAAgC,QAA+B;AAAA,EAC7E,MAAe,mBAAmB,WAAW;AAAA,IAC3C,cAAc,OAAO;AAAA,IACrB,kBAAkB,OAAO;AAAA,IACzB,OAAO,OAAO;AAAA,IACd,YAAY,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASV;AAAA,EAkBX;AAEA,SAAO;AAGT;;;AH7BA;AACA;AAIA;AACA;AACA;","names":[]}
+ {"version":3,"sources":["../src/index.ts","../src/agent/hook-presets.ts","../src/agent/index.ts","../src/gadgets/typed-gadget.ts"],"sourcesContent":["// Re-export Zod's z for schema definitions\n// Using llmist's z ensures .describe() metadata is preserved in JSON schemas\nexport { z } from \"zod\";\n// Syntactic sugar: Agent builder and event handlers\nexport type { HistoryMessage } from \"./agent/builder.js\";\nexport { AgentBuilder } from \"./agent/builder.js\";\nexport type { EventHandlers } from \"./agent/event-handlers.js\";\nexport { collectEvents, collectText, runWithHandlers } from \"./agent/event-handlers.js\";\n// Syntactic sugar: Hook presets\nexport type { LoggingOptions } from \"./agent/hook-presets.js\";\nexport { HookPresets } from \"./agent/hook-presets.js\";\n// Agent infrastructure\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n AgentOptions,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n IConversationManager,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n StreamProcessingResult,\n StreamProcessorOptions,\n} from \"./agent/index.js\";\nexport { ConversationManager, StreamProcessor } from \"./agent/index.js\";\nexport type { LLMistOptions } from \"./core/client.js\";\nexport { LLMist } from \"./core/client.js\";\nexport type { LLMMessage, LLMRole } from \"./core/messages.js\";\nexport { LLMMessageBuilder } from \"./core/messages.js\";\n// Model catalog\nexport type {\n CostEstimate,\n ModelFeatures,\n ModelLimits,\n ModelPricing,\n ModelSpec,\n} from \"./core/model-catalog.js\";\nexport { ModelRegistry } from \"./core/model-registry.js\";\n\n// Syntactic sugar: Model shortcuts and quick methods\nexport {\n getModelId,\n getProvider,\n hasProviderPrefix,\n MODEL_ALIASES,\n resolveModel,\n} from \"./core/model-shortcuts.js\";\nexport type {\n LLMGenerationOptions,\n LLMStream,\n LLMStreamChunk,\n ModelDescriptor,\n ProviderIdentifier,\n TokenUsage,\n} from \"./core/options.js\";\nexport { ModelIdentifierParser } from \"./core/options.js\";\nexport type { PromptConfig, PromptContext, PromptTemplate } from \"./core/prompt-config.js\";\nexport {\n DEFAULT_PROMPTS,\n resolvePromptTemplate,\n resolveRulesTemplate,\n} from \"./core/prompt-config.js\";\nexport type { QuickOptions } from \"./core/quick-methods.js\";\nexport { complete, stream } from \"./core/quick-methods.js\";\nexport type { CreateGadgetConfig } from \"./gadgets/create-gadget.js\";\nexport { createGadget } from \"./gadgets/create-gadget.js\";\n// Gadget infrastructure\nexport { BreakLoopException, HumanInputException } from \"./gadgets/exceptions.js\";\nexport { GadgetExecutor } from \"./gadgets/executor.js\";\nexport { BaseGadget } from \"./gadgets/gadget.js\";\nexport { StreamParser } from \"./gadgets/parser.js\";\nexport type { GadgetClass, GadgetOrClass } from \"./gadgets/registry.js\";\nexport { GadgetRegistry } from \"./gadgets/registry.js\";\n\n// Syntactic sugar: Typed gadgets 
and helpers\nexport type { GadgetConfig } from \"./gadgets/typed-gadget.js\";\nexport { Gadget } from \"./gadgets/typed-gadget.js\";\nexport type {\n GadgetExecutionResult,\n ParsedGadgetCall,\n StreamEvent,\n TextOnlyAction,\n TextOnlyContext,\n TextOnlyCustomHandler,\n TextOnlyGadgetConfig,\n TextOnlyHandler,\n TextOnlyStrategy,\n} from \"./gadgets/types.js\";\nexport type { ValidationIssue, ValidationResult } from \"./gadgets/validation.js\";\nexport { validateAndApplyDefaults, validateGadgetParams } from \"./gadgets/validation.js\";\nexport type { LoggerOptions } from \"./logging/logger.js\";\nexport { createLogger, defaultLogger } from \"./logging/logger.js\";\nexport {\n AnthropicMessagesProvider,\n createAnthropicProviderFromEnv,\n} from \"./providers/anthropic.js\";\nexport { discoverProviderAdapters } from \"./providers/discovery.js\";\nexport { createGeminiProviderFromEnv, GeminiGenerativeProvider } from \"./providers/gemini.js\";\nexport { createOpenAIProviderFromEnv, OpenAIChatProvider } from \"./providers/openai.js\";\nexport type { ProviderAdapter } from \"./providers/provider.js\";\n\n// Testing/Mock infrastructure\nexport type {\n MockMatcher,\n MockMatcherContext,\n MockOptions,\n MockRegistration,\n MockResponse,\n MockStats,\n} from \"./testing/index.js\";\nexport {\n createMockAdapter,\n createMockClient,\n createMockStream,\n createTextMockStream,\n getMockManager,\n MockBuilder,\n MockManager,\n MockProviderAdapter,\n mockLLM,\n} from \"./testing/index.js\";\n","/**\n * Ready-to-use hook configurations for common monitoring, logging, and debugging tasks.\n *\n * HookPresets provide instant observability without writing custom hooks. They're the\n * fastest way to add monitoring to your agents during development and production.\n *\n * ## Available Presets\n *\n * - **logging(options?)** - Log LLM calls and gadget execution\n * - **timing()** - Measure execution time for operations\n * - **tokenTracking()** - Track cumulative token usage and costs\n * - **errorLogging()** - Log detailed error information\n * - **silent()** - No output (useful for testing)\n * - **monitoring(options?)** - All-in-one preset combining logging, timing, tokens, and errors\n * - **merge(...hookSets)** - Combine multiple hook configurations\n *\n * ## Quick Start\n *\n * @example\n * ```typescript\n * import { LLMist, HookPresets } from 'llmist';\n *\n * // Basic logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"Your prompt\");\n *\n * // Full monitoring suite (recommended for development)\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * .ask(\"Your prompt\");\n *\n * // Combine multiple presets\n * await LLMist.createAgent()\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * .ask(\"Your prompt\");\n *\n * // Environment-based configuration\n * const hooks = process.env.NODE_ENV === 'production'\n * ? 
HookPresets.merge(HookPresets.errorLogging(), HookPresets.tokenTracking())\n * : HookPresets.monitoring({ verbose: true });\n *\n * await LLMist.createAgent()\n * .withHooks(hooks)\n * .ask(\"Your prompt\");\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md | Full documentation}\n */\n\nimport type { AgentHooks } from \"./hooks.js\";\n\n/**\n * Options for logging preset.\n */\nexport interface LoggingOptions {\n /** Include verbose details like parameters and results */\n verbose?: boolean;\n}\n\n/**\n * Common hook presets.\n */\nexport class HookPresets {\n /**\n * Logs LLM calls and gadget execution to console with optional verbosity.\n *\n * **Output (basic mode):**\n * - LLM call start/complete events with iteration numbers\n * - Gadget execution start/complete with gadget names\n * - Token counts when available\n *\n * **Output (verbose mode):**\n * - All basic mode output\n * - Full gadget parameters (formatted JSON)\n * - Full gadget results\n * - Complete LLM response text\n *\n * **Use cases:**\n * - Basic development debugging and execution flow visibility\n * - Understanding agent decision-making and tool usage\n * - Troubleshooting gadget invocations\n *\n * **Performance:** Minimal overhead. Console writes are synchronous but fast.\n *\n * @param options - Logging options\n * @param options.verbose - Include full parameters and results. Default: false\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"Calculate 15 * 23\");\n * // Output: [LLM] Starting call (iteration 0)\n * // [GADGET] Executing Calculator\n * // [GADGET] Completed Calculator\n * // [LLM] Completed (tokens: 245)\n * ```\n *\n * @example\n * ```typescript\n * // Verbose logging with full details\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging({ verbose: true }))\n * .ask(\"Calculate 15 * 23\");\n * // Output includes: parameters, results, and full responses\n * ```\n *\n * @example\n * ```typescript\n * // Environment-based verbosity\n * const isDev = process.env.NODE_ENV === 'development';\n * .withHooks(HookPresets.logging({ verbose: isDev }))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}\n */\n static logging(options: LoggingOptions = {}): AgentHooks {\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n console.log(`[LLM] Starting call (iteration ${ctx.iteration})`);\n },\n onLLMCallComplete: async (ctx) => {\n const tokens = ctx.usage?.totalTokens ?? \"unknown\";\n console.log(`[LLM] Completed (tokens: ${tokens})`);\n if (options.verbose && ctx.finalMessage) {\n console.log(`[LLM] Response: ${ctx.finalMessage}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n console.log(`[GADGET] Executing ${ctx.gadgetName}`);\n if (options.verbose) {\n console.log(`[GADGET] Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n onGadgetExecutionComplete: async (ctx) => {\n console.log(`[GADGET] Completed ${ctx.gadgetName}`);\n if (options.verbose) {\n const display = ctx.error ?? ctx.finalResult ?? 
\"(no result)\";\n console.log(`[GADGET] Result: ${display}`);\n }\n },\n },\n };\n }\n\n /**\n * Measures and logs execution time for LLM calls and gadgets.\n *\n * **Output:**\n * - Duration in milliseconds with ⏱️ emoji for each operation\n * - Separate timing for each LLM iteration\n * - Separate timing for each gadget execution\n *\n * **Use cases:**\n * - Performance profiling and optimization\n * - Identifying slow operations (LLM calls vs gadget execution)\n * - Monitoring response times in production\n * - Capacity planning and SLA tracking\n *\n * **Performance:** Negligible overhead. Uses Date.now() for timing measurements.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic timing\n * await LLMist.createAgent()\n * .withHooks(HookPresets.timing())\n * .withGadgets(Weather, Database)\n * .ask(\"What's the weather in NYC?\");\n * // Output: ⏱️ LLM call took 1234ms\n * // ⏱️ Gadget Weather took 567ms\n * // ⏱️ LLM call took 890ms\n * ```\n *\n * @example\n * ```typescript\n * // Combined with logging for full context\n * .withHooks(HookPresets.merge(\n * HookPresets.logging(),\n * HookPresets.timing()\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Correlate performance with cost\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}\n */\n static timing(): AgentHooks {\n const timings = new Map<string, number>();\n\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n timings.set(`llm-${ctx.iteration}`, Date.now());\n },\n onLLMCallComplete: async (ctx) => {\n const start = timings.get(`llm-${ctx.iteration}`);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ LLM call took ${duration}ms`);\n timings.delete(`llm-${ctx.iteration}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n const key = `gadget-${ctx.gadgetName}-${Date.now()}`;\n timings.set(key, Date.now());\n // Store key for lookup in complete handler\n (ctx as any)._timingKey = key;\n },\n onGadgetExecutionComplete: async (ctx) => {\n const key = (ctx as any)._timingKey;\n if (key) {\n const start = timings.get(key);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ Gadget ${ctx.gadgetName} took ${duration}ms`);\n timings.delete(key);\n }\n }\n },\n },\n };\n }\n\n /**\n * Tracks cumulative token usage across all LLM calls.\n *\n * **Output:**\n * - Per-call token count with 📊 emoji\n * - Cumulative total across all calls\n * - Call count for average calculations\n *\n * **Use cases:**\n * - Cost monitoring and budget tracking\n * - Optimizing prompts to reduce token usage\n * - Comparing token efficiency across different approaches\n * - Real-time cost estimation\n *\n * **Performance:** Minimal overhead. Simple counter increments.\n *\n * **Note:** Token counts depend on the provider's response. 
Some providers\n * may not include usage data, in which case counts won't be logged.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic token tracking\n * await LLMist.createAgent()\n * .withHooks(HookPresets.tokenTracking())\n * .ask(\"Summarize this document...\");\n * // Output: 📊 Tokens this call: 1,234\n * // 📊 Total tokens: 1,234 (across 1 calls)\n * // 📊 Tokens this call: 567\n * // 📊 Total tokens: 1,801 (across 2 calls)\n * ```\n *\n * @example\n * ```typescript\n * // Cost calculation with custom hook\n * let totalTokens = 0;\n * .withHooks(HookPresets.merge(\n * HookPresets.tokenTracking(),\n * {\n * observers: {\n * onLLMCallComplete: async (ctx) => {\n * totalTokens += ctx.usage?.totalTokens ?? 0;\n * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens\n * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);\n * },\n * },\n * }\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}\n */\n static tokenTracking(): AgentHooks {\n let totalTokens = 0;\n let totalCalls = 0;\n\n return {\n observers: {\n onLLMCallComplete: async (ctx) => {\n totalCalls++;\n if (ctx.usage?.totalTokens) {\n totalTokens += ctx.usage.totalTokens;\n console.log(`📊 Tokens this call: ${ctx.usage.totalTokens}`);\n console.log(`📊 Total tokens: ${totalTokens} (across ${totalCalls} calls)`);\n }\n },\n },\n };\n }\n\n /**\n * Logs detailed error information for debugging and troubleshooting.\n *\n * **Output:**\n * - LLM errors with ❌ emoji, including model and recovery status\n * - Gadget errors with full context (parameters, error message)\n * - Separate logging for LLM and gadget failures\n *\n * **Use cases:**\n * - Troubleshooting production issues\n * - Understanding error patterns and frequency\n * - Debugging error recovery behavior\n * - Collecting error metrics for monitoring\n *\n * **Performance:** Minimal overhead. 
Only logs when errors occur.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic error logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.errorLogging())\n * .withGadgets(Database)\n * .ask(\"Fetch user data\");\n * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded\n * // Model: gpt-5-nano\n * // Recovered: true\n * // Output (on gadget error): ❌ Gadget Error: Database\n * // Error: Connection timeout\n * // Parameters: {...}\n * ```\n *\n * @example\n * ```typescript\n * // Combine with monitoring for full context\n * .withHooks(HookPresets.merge(\n * HookPresets.monitoring(), // Includes errorLogging\n * customErrorAnalytics\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Error analytics collection\n * const errors: any[] = [];\n * .withHooks(HookPresets.merge(\n * HookPresets.errorLogging(),\n * {\n * observers: {\n * onLLMCallError: async (ctx) => {\n * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });\n * },\n * },\n * }\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}\n */\n static errorLogging(): AgentHooks {\n return {\n observers: {\n onLLMCallError: async (ctx) => {\n console.error(`❌ LLM Error (iteration ${ctx.iteration}):`, ctx.error.message);\n console.error(` Model: ${ctx.options.model}`);\n console.error(` Recovered: ${ctx.recovered}`);\n },\n onGadgetExecutionComplete: async (ctx) => {\n if (ctx.error) {\n console.error(`❌ Gadget Error: ${ctx.gadgetName}`);\n console.error(` Error: ${ctx.error}`);\n console.error(` Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n },\n };\n }\n\n /**\n * Returns empty hook configuration for clean output without any logging.\n *\n * **Output:**\n * - None. Returns {} (empty object).\n *\n * **Use cases:**\n * - Clean test output without console noise\n * - Production environments where logging is handled externally\n * - Baseline for custom hook development\n * - Temporary disable of all hook output\n *\n * **Performance:** Zero overhead. No-op hook configuration.\n *\n * @returns Empty hook configuration\n *\n * @example\n * ```typescript\n * // Clean test output\n * describe('Agent tests', () => {\n * it('should calculate correctly', async () => {\n * const result = await LLMist.createAgent()\n * .withHooks(HookPresets.silent()) // No console output\n * .withGadgets(Calculator)\n * .askAndCollect(\"What is 15 times 23?\");\n *\n * expect(result).toContain(\"345\");\n * });\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Conditional silence based on environment\n * const isTesting = process.env.NODE_ENV === 'test';\n * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}\n */\n static silent(): AgentHooks {\n return {};\n }\n\n /**\n * Combines multiple hook configurations into one.\n *\n * Merge allows you to compose preset and custom hooks for modular monitoring\n * configurations. 
Understanding merge behavior is crucial for proper composition.\n *\n * **Merge behavior:**\n * - **Observers:** Composed - all handlers run sequentially in order\n * - **Interceptors:** Last one wins - only the last interceptor applies\n * - **Controllers:** Last one wins - only the last controller applies\n *\n * **Why interceptors/controllers don't compose:**\n * - Interceptors have different signatures per method, making composition impractical\n * - Controllers return specific actions that can't be meaningfully combined\n * - Only observers support composition because they're read-only and independent\n *\n * **Use cases:**\n * - Combining multiple presets (logging + timing + tokens)\n * - Adding custom hooks to presets\n * - Building modular, reusable monitoring configurations\n * - Environment-specific hook composition\n *\n * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.\n *\n * @param hookSets - Variable number of hook configurations to merge\n * @returns Single merged hook configuration with composed/overridden handlers\n *\n * @example\n * ```typescript\n * // Combine multiple presets\n * .withHooks(HookPresets.merge(\n * HookPresets.logging(),\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * // All observers from all three presets will run\n * ```\n *\n * @example\n * ```typescript\n * // Add custom observer to preset (both run)\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * {\n * observers: {\n * onLLMCallComplete: async (ctx) => {\n * await saveMetrics({ tokens: ctx.usage?.totalTokens });\n * },\n * },\n * }\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Multiple interceptors (last wins!)\n * .withHooks(HookPresets.merge(\n * {\n * interceptors: {\n * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored\n * },\n * },\n * {\n * interceptors: {\n * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins\n * },\n * }\n * ))\n * // Result: text will be lowercase\n * ```\n *\n * @example\n * ```typescript\n * // Modular environment-based configuration\n * const baseHooks = HookPresets.errorLogging();\n * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));\n * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());\n *\n * const hooks = process.env.NODE_ENV === 'production' ? 
prodHooks : devHooks;\n * .withHooks(hooks)\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}\n */\n static merge(...hookSets: AgentHooks[]): AgentHooks {\n const merged: AgentHooks = {\n observers: {},\n interceptors: {},\n controllers: {},\n };\n\n // Compose observers: run all handlers for the same event\n for (const hooks of hookSets) {\n if (hooks.observers) {\n for (const [key, handler] of Object.entries(hooks.observers)) {\n const typedKey = key as keyof typeof hooks.observers;\n if (merged.observers![typedKey]) {\n // Compose: run both existing and new handler\n const existing = merged.observers![typedKey];\n merged.observers![typedKey] = async (ctx: any) => {\n await existing(ctx);\n await handler(ctx);\n };\n } else {\n merged.observers![typedKey] = handler as any;\n }\n }\n }\n\n // Interceptors: last one wins (complex signatures make composition impractical)\n // Each interceptor has different parameters (chunk, message, parameters, etc.)\n // so we can't meaningfully compose them like we do with observers\n if (hooks.interceptors) {\n Object.assign(merged.interceptors!, hooks.interceptors);\n }\n\n // Controllers: last one wins (can't meaningfully compose boolean returns)\n if (hooks.controllers) {\n Object.assign(merged.controllers!, hooks.controllers);\n }\n }\n\n return merged;\n }\n\n /**\n * Composite preset combining logging, timing, tokenTracking, and errorLogging.\n *\n * This is the recommended preset for development and initial production deployments,\n * providing comprehensive observability with a single method call.\n *\n * **Includes:**\n * - All output from `logging()` preset (with optional verbosity)\n * - All output from `timing()` preset (execution times)\n * - All output from `tokenTracking()` preset (token usage)\n * - All output from `errorLogging()` preset (error details)\n *\n * **Output format:**\n * - Event logging: [LLM]/[GADGET] messages\n * - Timing: ⏱️ emoji with milliseconds\n * - Tokens: 📊 emoji with per-call and cumulative counts\n * - Errors: ❌ emoji with full error details\n *\n * **Use cases:**\n * - Full observability during development\n * - Comprehensive monitoring in production\n * - One-liner for complete agent visibility\n * - Troubleshooting and debugging with full context\n *\n * **Performance:** Combined overhead of all four presets, but still minimal in practice.\n *\n * @param options - Monitoring options\n * @param options.verbose - Passed to logging() preset for detailed output. 
Default: false\n * @returns Merged hook configuration combining all monitoring presets\n *\n * @example\n * ```typescript\n * // Basic monitoring (recommended for development)\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring())\n * .withGadgets(Calculator, Weather)\n * .ask(\"What is 15 times 23, and what's the weather in NYC?\");\n * // Output: All events, timing, tokens, and errors in one place\n * ```\n *\n * @example\n * ```typescript\n * // Verbose monitoring with full details\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * .ask(\"Your prompt\");\n * // Output includes: parameters, results, and complete responses\n * ```\n *\n * @example\n * ```typescript\n * // Environment-based monitoring\n * const isDev = process.env.NODE_ENV === 'development';\n * .withHooks(HookPresets.monitoring({ verbose: isDev }))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}\n */\n static monitoring(options: LoggingOptions = {}): AgentHooks {\n return HookPresets.merge(\n HookPresets.logging(options),\n HookPresets.timing(),\n HookPresets.tokenTracking(),\n HookPresets.errorLogging(),\n );\n }\n}\n","/**\n * Agent module - Composable, single-responsibility architecture for LLM agents.\n * This module provides a cleaner alternative to the monolithic AgentLoop.\n */\n\nexport type { AgentOptions } from \"./agent.js\";\nexport { ConversationManager } from \"./conversation-manager.js\";\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n} from \"./hooks.js\";\nexport type { IConversationManager } from \"./interfaces.js\";\n\n// StreamProcessor for advanced use cases\nexport {\n type StreamProcessingResult,\n StreamProcessor,\n type StreamProcessorOptions,\n} from \"./stream-processor.js\";\n","/**\n * Type-safe gadget factory with automatic parameter inference.\n *\n * Gadget eliminates the need for manual type assertions\n * by automatically inferring parameter types from the Zod schema.\n *\n * @example\n * ```typescript\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\"]),\n * a: z.number(),\n * b: z.number(),\n * }),\n * }) {\n * // ✨ params is automatically typed!\n * execute(params: this['params']): string {\n * const { operation, a, b } = params; // All typed!\n * return operation === \"add\" ? String(a + b) : String(a - b);\n * }\n * }\n * ```\n */\n\nimport type { ZodType } from \"zod\";\nimport { BaseGadget } from \"./gadget.js\";\n\n/**\n * Infer the TypeScript type from a Zod schema.\n */\ntype InferSchema<T> = T extends ZodType<infer U> ? 
U : never;\n\n/**\n * Configuration for creating a typed gadget.\n */\nexport interface GadgetConfig<TSchema extends ZodType> {\n /** Human-readable description of what the gadget does */\n description: string;\n\n /** Zod schema for parameter validation */\n schema: TSchema;\n\n /** Optional custom name (defaults to class name) */\n name?: string;\n\n /** Optional timeout in milliseconds */\n timeoutMs?: number;\n}\n\n/**\n * Factory function to create a typed gadget base class.\n *\n * The returned class automatically infers parameter types from the Zod schema,\n * eliminating the need for manual type assertions in the execute method.\n *\n * @param config - Configuration with description and schema\n * @returns Base class to extend with typed execute method\n *\n * @example\n * ```typescript\n * import { z } from 'zod';\n * import { Gadget } from 'llmist';\n *\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n * a: z.number().describe(\"First number\"),\n * b: z.number().describe(\"Second number\"),\n * }),\n * }) {\n * execute(params: this['params']): string {\n * // params is automatically typed as:\n * // { operation: \"add\" | \"subtract\" | \"multiply\" | \"divide\"; a: number; b: number }\n * const { operation, a, b } = params;\n *\n * switch (operation) {\n * case \"add\": return String(a + b);\n * case \"subtract\": return String(a - b);\n * case \"multiply\": return String(a * b);\n * case \"divide\": return String(a / b);\n * }\n * }\n * }\n * ```\n *\n * @example\n * ```typescript\n * // With async execution\n * class WeatherGadget extends Gadget({\n * description: \"Fetches weather for a city\",\n * schema: z.object({\n * city: z.string().min(1).describe(\"City name\"),\n * }),\n * timeoutMs: 10000,\n * }) {\n * async execute(params: this['params']): Promise<string> {\n * const { city } = params; // Automatically typed as { city: string }\n * const weather = await fetchWeather(city);\n * return `Weather in ${city}: ${weather}`;\n * }\n * }\n * ```\n */\nexport function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>) {\n abstract class GadgetBase extends BaseGadget {\n description = config.description;\n parameterSchema = config.schema;\n name = config.name;\n timeoutMs = config.timeoutMs;\n\n /**\n * Type helper property for accessing inferred parameter type.\n * This is used in the execute method signature: `execute(params: this['params'])`\n *\n * Note: This is just for type inference - the actual params in execute()\n * will be Record<string, unknown> which you can safely cast to this['params']\n */\n readonly params!: InferSchema<TSchema>;\n\n /**\n * Execute the gadget. 
Subclasses should cast params to this['params'].\n *\n * @param params - Validated parameters from the LLM\n * @returns Result as a string (or Promise<string> for async gadgets)\n *\n * @example\n * ```typescript\n * execute(params: Record<string, unknown>): string {\n * const typed = params as this['params'];\n * // Now 'typed' is fully typed!\n * return String(typed.a + typed.b);\n * }\n * ```\n */\n abstract execute(params: Record<string, unknown>): string | Promise<string>;\n }\n\n return GadgetBase as {\n new (): GadgetBase & { params: InferSchema<TSchema> };\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA;AAEA;AALA,SAAS,SAAS;;;ACgEX,IAAM,cAAN,MAAM,aAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwDvB,OAAO,QAAQ,UAA0B,CAAC,GAAe;AACvD,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,kCAAkC,IAAI,SAAS,GAAG;AAAA,QAChE;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,SAAS,IAAI,OAAO,eAAe;AACzC,kBAAQ,IAAI,4BAA4B,MAAM,GAAG;AACjD,cAAI,QAAQ,WAAW,IAAI,cAAc;AACvC,oBAAQ,IAAI,mBAAmB,IAAI,YAAY,EAAE;AAAA,UACnD;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,oBAAQ,IAAI,wBAAwB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UAC7E;AAAA,QACF;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,kBAAM,UAAU,IAAI,SAAS,IAAI,eAAe;AAChD,oBAAQ,IAAI,oBAAoB,OAAO,EAAE;AAAA,UAC3C;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoDA,OAAO,SAAqB;AAC1B,UAAM,UAAU,oBAAI,IAAoB;AAExC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,OAAO,IAAI,SAAS,IAAI,KAAK,IAAI,CAAC;AAAA,QAChD;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,QAAQ,QAAQ,IAAI,OAAO,IAAI,SAAS,EAAE;AAChD,cAAI,OAAO;AACT,kBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,oBAAQ,IAAI,+BAAqB,QAAQ,IAAI;AAC7C,oBAAQ,OAAO,OAAO,IAAI,SAAS,EAAE;AAAA,UACvC;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,gBAAM,MAAM,UAAU,IAAI,UAAU,IAAI,KAAK,IAAI,CAAC;AAClD,kBAAQ,IAAI,KAAK,KAAK,IAAI,CAAC;AAE3B,UAAC,IAAY,aAAa;AAAA,QAC5B;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,gBAAM,MAAO,IAAY;AACzB,cAAI,KAAK;AACP,kBAAM,QAAQ,QAAQ,IAAI,GAAG;AAC7B,gBAAI,OAAO;AACT,oBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,sBAAQ,IAAI,wBAAc,IAAI,UAAU,SAAS,QAAQ,IAAI;AAC7D,sBAAQ,OAAO,GAAG;AAAA,YACpB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuDA,OAAO,gBAA4B;AACjC,QAAI,cAAc;AAClB,QAAI,aAAa;AAEjB,WAAO;AAAA,MACL,WAAW;AAAA,QACT,mBAAmB,OAAO,QAAQ;AAChC;AACA,cAAI,IAAI,OAAO,aAAa;AAC1B,2BAAe,IAAI,MAAM;AACzB,oBAAQ,IAAI,+BAAwB,IAAI,MAAM,WAAW,EAAE;AAC3D,oBAAQ,IAAI,2BAAoB,WAAW,YAAY,UAAU,SAAS;AAAA,UAC5E;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA
;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,OAAO,eAA2B;AAChC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,MAAM,+BAA0B,IAAI,SAAS,MAAM,IAAI,MAAM,OAAO;AAC5E,kBAAQ,MAAM,aAAa,IAAI,QAAQ,KAAK,EAAE;AAC9C,kBAAQ,MAAM,iBAAiB,IAAI,SAAS,EAAE;AAAA,QAChD;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,cAAI,IAAI,OAAO;AACb,oBAAQ,MAAM,wBAAmB,IAAI,UAAU,EAAE;AACjD,oBAAQ,MAAM,aAAa,IAAI,KAAK,EAAE;AACtC,oBAAQ,MAAM,kBAAkB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UACzE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0CA,OAAO,SAAqB;AAC1B,WAAO,CAAC;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsFA,OAAO,SAAS,UAAoC;AAClD,UAAM,SAAqB;AAAA,MACzB,WAAW,CAAC;AAAA,MACZ,cAAc,CAAC;AAAA,MACf,aAAa,CAAC;AAAA,IAChB;AAGA,eAAW,SAAS,UAAU;AAC5B,UAAI,MAAM,WAAW;AACnB,mBAAW,CAAC,KAAK,OAAO,KAAK,OAAO,QAAQ,MAAM,SAAS,GAAG;AAC5D,gBAAM,WAAW;AACjB,cAAI,OAAO,UAAW,QAAQ,GAAG;AAE/B,kBAAM,WAAW,OAAO,UAAW,QAAQ;AAC3C,mBAAO,UAAW,QAAQ,IAAI,OAAO,QAAa;AAChD,oBAAM,SAAS,GAAG;AAClB,oBAAM,QAAQ,GAAG;AAAA,YACnB;AAAA,UACF,OAAO;AACL,mBAAO,UAAW,QAAQ,IAAI;AAAA,UAChC;AAAA,QACF;AAAA,MACF;AAKA,UAAI,MAAM,cAAc;AACtB,eAAO,OAAO,OAAO,cAAe,MAAM,YAAY;AAAA,MACxD;AAGA,UAAI,MAAM,aAAa;AACrB,eAAO,OAAO,OAAO,aAAc,MAAM,WAAW;AAAA,MACtD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4DA,OAAO,WAAW,UAA0B,CAAC,GAAe;AAC1D,WAAO,aAAY;AAAA,MACjB,aAAY,QAAQ,OAAO;AAAA,MAC3B,aAAY,OAAO;AAAA,MACnB,aAAY,cAAc;AAAA,MAC1B,aAAY,aAAa;AAAA,IAC3B;AAAA,EACF;AACF;;;AC1mBA;AAkCA;;;AFQA;AAEA;AASA;AAGA;AAeA;AAEA;AAMA;AAIA;AACA;AAEA;AAEA;;;AGWO,SAAS,OAAgC,QAA+B;AAAA,EAC7E,MAAe,mBAAmB,WAAW;AAAA,IAC3C,cAAc,OAAO;AAAA,IACrB,kBAAkB,OAAO;AAAA,IACzB,OAAO,OAAO;AAAA,IACd,YAAY,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASV;AAAA,EAkBX;AAEA,SAAO;AAGT;;;AH7BA;AACA;AAIA;AACA;AACA;","names":[]}
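The merge() source embedded in the map above composes same-named observers by wrapping the previously merged handler and the new one in a single async function (so handlers run in the order the hook sets are passed), while interceptors and controllers are copied with Object.assign (so the last set wins). A minimal sketch of what that ordering means at the call site, assuming HookPresets, LLMist, and the AgentHooks type are all re-exported from the package root as the JSDoc examples imply:

```typescript
import { HookPresets, LLMist, type AgentHooks } from "llmist";

// A custom observer for the same event that tokenTracking() already handles.
const costHooks: AgentHooks = {
  observers: {
    onLLMCallComplete: async (ctx) => {
      const tokens = ctx.usage?.totalTokens ?? 0;
      // Illustrative rate only ($3 per 1M tokens), mirroring the JSDoc example.
      console.log(`💰 ~$${((tokens / 1_000_000) * 3.0).toFixed(4)} for this call`);
    },
  },
};

// Observers compose: tokenTracking's onLLMCallComplete runs first (it was
// passed first), then the cost observer above. Had both sets defined the same
// interceptor or controller, they would NOT compose -- the last set would win.
await LLMist.createAgent()
  .withHooks(HookPresets.merge(HookPresets.tokenTracking(), costHooks))
  .ask("Summarize this document...");
```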
@@ -108,7 +108,6 @@ declare class StreamParser {
108
108
  private readonly startPrefix;
109
109
  private readonly endPrefix;
110
110
  private readonly parameterFormat;
111
- private invocationCounter;
112
111
  constructor(options?: StreamParserOptions);
113
112
  private takeTextUntil;
114
113
  /**
@@ -291,7 +290,7 @@ declare class LLMMessageBuilder {
291
290
  startPrefix?: string;
292
291
  endPrefix?: string;
293
292
  }): this;
294
- private buildGadgetsXmlSection;
293
+ private buildGadgetsSection;
295
294
  private buildUsageSection;
296
295
  private buildExamplesSection;
297
296
  private buildRulesSection;
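Both hunks above touch only private members: StreamParser drops its invocationCounter field, and LLMMessageBuilder's buildGadgetsXmlSection becomes buildGadgetsSection. Consumers reach that machinery indirectly, through the Gadget() factory and the agent builder documented in the source map earlier in this diff, so code written against that public surface should be unaffected. A minimal sketch of that surface, assuming Gadget, HookPresets, and LLMist are importable from the package root (only Gadget is shown imported that way in the JSDoc; the gadget name here is hypothetical):

```typescript
import { z } from "zod";
import { Gadget, HookPresets, LLMist } from "llmist";

// Hypothetical gadget: parameter types are inferred from the Zod schema,
// so `this["params"]` is { text: string } inside execute().
class Shouter extends Gadget({
  description: "Repeats the input text in upper case",
  schema: z.object({
    text: z.string().min(1).describe("Text to shout"),
  }),
}) {
  execute(params: this["params"]): string {
    return params.text.toUpperCase();
  }
}

// The gadgets section of the prompt is assembled internally by
// LLMMessageBuilder (now via the renamed private buildGadgetsSection);
// callers only register gadget classes and ask.
const result = await LLMist.createAgent()
  .withGadgets(Shouter)
  .withHooks(HookPresets.silent())
  .askAndCollect("Shout 'hello world'");
console.log(result);
```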