llmist 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -1,32 +1,63 @@
1
1
  import { ZodType, ZodTypeAny } from 'zod';
2
2
  export { z } from 'zod';
3
- import { A as AgentHooks, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, o as LLMStreamChunk, B as BaseGadget, p as ParsedGadgetCall, q as GadgetExecutionResult, r as ProviderAdapter, s as ModelDescriptor, t as ModelSpec, u as LLMGenerationOptions, v as LLMStream } from './mock-stream-D4erlo7B.cjs';
4
- export { C as AfterGadgetExecutionAction, D as AfterGadgetExecutionControllerContext, F as AfterLLMCallAction, I as AfterLLMCallControllerContext, J as AfterLLMErrorAction, w as AgentBuilder, K as AgentOptions, N as BeforeGadgetExecutionAction, O as BeforeLLMCallAction, Q as ChunkInterceptorContext, R as Controllers, a9 as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, T as GadgetExecutionControllerContext, as as GadgetOrClass, U as GadgetParameterInterceptorContext, V as GadgetResultInterceptorContext, H as HistoryMessage, W as Interceptors, X as LLMCallControllerContext, Y as LLMErrorControllerContext, a8 as LLMMessageBuilder, a7 as LLMRole, a6 as LLMist, a5 as LLMistOptions, Z as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, aa as ModelFeatures, ag as ModelIdentifierParser, ab as ModelLimits, ac as ModelPricing, ad as ModelRegistry, _ as ObserveChunkContext, $ as ObserveGadgetCompleteContext, a0 as ObserveGadgetStartContext, a1 as ObserveLLMCallContext, a2 as ObserveLLMCompleteContext, a3 as ObserveLLMErrorContext, a4 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, x as collectEvents, y as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, z as runWithHandlers, ap as stream } from './mock-stream-D4erlo7B.cjs';
3
+ import { A as AgentHooks, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, o as LLMStreamChunk, B as BaseGadget, p as ParsedGadgetCall, q as GadgetExecutionResult, r as ProviderAdapter, s as ModelDescriptor, t as ModelSpec, u as LLMGenerationOptions, v as LLMStream } from './mock-stream-C2sBQlvc.cjs';
4
+ export { C as AfterGadgetExecutionAction, D as AfterGadgetExecutionControllerContext, F as AfterLLMCallAction, I as AfterLLMCallControllerContext, J as AfterLLMErrorAction, w as AgentBuilder, K as AgentOptions, N as BeforeGadgetExecutionAction, O as BeforeLLMCallAction, Q as ChunkInterceptorContext, R as Controllers, a9 as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, T as GadgetExecutionControllerContext, as as GadgetOrClass, U as GadgetParameterInterceptorContext, V as GadgetResultInterceptorContext, H as HistoryMessage, W as Interceptors, X as LLMCallControllerContext, Y as LLMErrorControllerContext, a8 as LLMMessageBuilder, a7 as LLMRole, a6 as LLMist, a5 as LLMistOptions, Z as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, aa as ModelFeatures, ag as ModelIdentifierParser, ab as ModelLimits, ac as ModelPricing, ad as ModelRegistry, _ as ObserveChunkContext, $ as ObserveGadgetCompleteContext, a0 as ObserveGadgetStartContext, a1 as ObserveLLMCallContext, a2 as ObserveLLMCompleteContext, a3 as ObserveLLMErrorContext, a4 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, x as collectEvents, y as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, z as runWithHandlers, ap as stream } from './mock-stream-C2sBQlvc.cjs';
5
5
  import { Logger, ILogObj } from 'tslog';
6
6
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
7
7
  import OpenAI from 'openai';
8
8
  import { ChatCompletionChunk } from 'openai/resources/chat/completions';
9
9
 
10
10
  /**
11
- * Common hook presets for logging, timing, and monitoring.
11
+ * Ready-to-use hook configurations for common monitoring, logging, and debugging tasks.
12
+ *
13
+ * HookPresets provide instant observability without writing custom hooks. They're the
14
+ * fastest way to add monitoring to your agents during development and production.
15
+ *
16
+ * ## Available Presets
17
+ *
18
+ * - **logging(options?)** - Log LLM calls and gadget execution
19
+ * - **timing()** - Measure execution time for operations
20
+ * - **tokenTracking()** - Track cumulative token usage and costs
21
+ * - **errorLogging()** - Log detailed error information
22
+ * - **silent()** - No output (useful for testing)
23
+ * - **monitoring(options?)** - All-in-one preset combining logging, timing, tokens, and errors
24
+ * - **merge(...hookSets)** - Combine multiple hook configurations
25
+ *
26
+ * ## Quick Start
12
27
  *
13
28
  * @example
14
29
  * ```typescript
15
- * import { HookPresets } from 'llmist/hooks';
30
+ * import { LLMist, HookPresets } from 'llmist';
16
31
  *
17
- * const agent = LLMist.createAgent()
32
+ * // Basic logging
33
+ * await LLMist.createAgent()
18
34
  * .withHooks(HookPresets.logging())
19
- * .ask("...");
35
+ * .ask("Your prompt");
20
36
  *
21
- * // Or combine multiple presets
22
- * const agent = LLMist.createAgent()
37
+ * // Full monitoring suite (recommended for development)
38
+ * await LLMist.createAgent()
39
+ * .withHooks(HookPresets.monitoring({ verbose: true }))
40
+ * .ask("Your prompt");
41
+ *
42
+ * // Combine multiple presets
43
+ * await LLMist.createAgent()
23
44
  * .withHooks(HookPresets.merge(
24
- * HookPresets.logging({ verbose: true }),
25
45
  * HookPresets.timing(),
26
46
  * HookPresets.tokenTracking()
27
47
  * ))
28
- * .ask("...");
48
+ * .ask("Your prompt");
49
+ *
50
+ * // Environment-based configuration
51
+ * const hooks = process.env.NODE_ENV === 'production'
52
+ * ? HookPresets.merge(HookPresets.errorLogging(), HookPresets.tokenTracking())
53
+ * : HookPresets.monitoring({ verbose: true });
54
+ *
55
+ * await LLMist.createAgent()
56
+ * .withHooks(hooks)
57
+ * .ask("Your prompt");
29
58
  * ```
59
+ *
60
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md | Full documentation}
30
61
  */
31
62
 
32
63
  /**
@@ -41,115 +72,410 @@ interface LoggingOptions {
41
72
  */
42
73
  declare class HookPresets {
43
74
  /**
44
- * Preset: Basic logging of all events.
75
+ * Logs LLM calls and gadget execution to console with optional verbosity.
76
+ *
77
+ * **Output (basic mode):**
78
+ * - LLM call start/complete events with iteration numbers
79
+ * - Gadget execution start/complete with gadget names
80
+ * - Token counts when available
81
+ *
82
+ * **Output (verbose mode):**
83
+ * - All basic mode output
84
+ * - Full gadget parameters (formatted JSON)
85
+ * - Full gadget results
86
+ * - Complete LLM response text
45
87
  *
46
- * Logs LLM calls and gadget executions to console.
88
+ * **Use cases:**
89
+ * - Basic development debugging and execution flow visibility
90
+ * - Understanding agent decision-making and tool usage
91
+ * - Troubleshooting gadget invocations
92
+ *
93
+ * **Performance:** Minimal overhead. Console writes are synchronous but fast.
47
94
  *
48
95
  * @param options - Logging options
49
- * @returns Hook configuration
96
+ * @param options.verbose - Include full parameters and results. Default: false
97
+ * @returns Hook configuration that can be passed to .withHooks()
50
98
  *
51
99
  * @example
52
100
  * ```typescript
53
- * .withHooks(HookPresets.logging())
54
- * .withHooks(HookPresets.logging({ verbose: true }))
101
+ * // Basic logging
102
+ * await LLMist.createAgent()
103
+ * .withHooks(HookPresets.logging())
104
+ * .ask("Calculate 15 * 23");
105
+ * // Output: [LLM] Starting call (iteration 0)
106
+ * // [GADGET] Executing Calculator
107
+ * // [GADGET] Completed Calculator
108
+ * // [LLM] Completed (tokens: 245)
55
109
  * ```
110
+ *
111
+ * @example
112
+ * ```typescript
113
+ * // Verbose logging with full details
114
+ * await LLMist.createAgent()
115
+ * .withHooks(HookPresets.logging({ verbose: true }))
116
+ * .ask("Calculate 15 * 23");
117
+ * // Output includes: parameters, results, and full responses
118
+ * ```
119
+ *
120
+ * @example
121
+ * ```typescript
122
+ * // Environment-based verbosity
123
+ * const isDev = process.env.NODE_ENV === 'development';
124
+ * .withHooks(HookPresets.logging({ verbose: isDev }))
125
+ * ```
126
+ *
127
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}
56
128
  */
57
129
  static logging(options?: LoggingOptions): AgentHooks;
58
130
  /**
59
- * Preset: Performance timing for all operations.
60
- *
61
131
  * Measures and logs execution time for LLM calls and gadgets.
62
132
  *
63
- * @returns Hook configuration
133
+ * **Output:**
134
+ * - Duration in milliseconds with ⏱️ emoji for each operation
135
+ * - Separate timing for each LLM iteration
136
+ * - Separate timing for each gadget execution
137
+ *
138
+ * **Use cases:**
139
+ * - Performance profiling and optimization
140
+ * - Identifying slow operations (LLM calls vs gadget execution)
141
+ * - Monitoring response times in production
142
+ * - Capacity planning and SLA tracking
143
+ *
144
+ * **Performance:** Negligible overhead. Uses Date.now() for timing measurements.
145
+ *
146
+ * @returns Hook configuration that can be passed to .withHooks()
64
147
  *
65
148
  * @example
66
149
  * ```typescript
67
- * .withHooks(HookPresets.timing())
150
+ * // Basic timing
151
+ * await LLMist.createAgent()
152
+ * .withHooks(HookPresets.timing())
153
+ * .withGadgets(Weather, Database)
154
+ * .ask("What's the weather in NYC?");
155
+ * // Output: ⏱️ LLM call took 1234ms
156
+ * // ⏱️ Gadget Weather took 567ms
157
+ * // ⏱️ LLM call took 890ms
68
158
  * ```
159
+ *
160
+ * @example
161
+ * ```typescript
162
+ * // Combined with logging for full context
163
+ * .withHooks(HookPresets.merge(
164
+ * HookPresets.logging(),
165
+ * HookPresets.timing()
166
+ * ))
167
+ * ```
168
+ *
169
+ * @example
170
+ * ```typescript
171
+ * // Correlate performance with cost
172
+ * .withHooks(HookPresets.merge(
173
+ * HookPresets.timing(),
174
+ * HookPresets.tokenTracking()
175
+ * ))
176
+ * ```
177
+ *
178
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}
69
179
  */
70
180
  static timing(): AgentHooks;
71
181
  /**
72
- * Preset: Token usage tracking.
182
+ * Tracks cumulative token usage across all LLM calls.
183
+ *
184
+ * **Output:**
185
+ * - Per-call token count with 📊 emoji
186
+ * - Cumulative total across all calls
187
+ * - Call count for average calculations
188
+ *
189
+ * **Use cases:**
190
+ * - Cost monitoring and budget tracking
191
+ * - Optimizing prompts to reduce token usage
192
+ * - Comparing token efficiency across different approaches
193
+ * - Real-time cost estimation
194
+ *
195
+ * **Performance:** Minimal overhead. Simple counter increments.
73
196
  *
74
- * Tracks and logs cumulative token usage across all LLM calls.
197
+ * **Note:** Token counts depend on the provider's response. Some providers
198
+ * may not include usage data, in which case counts won't be logged.
75
199
  *
76
- * @returns Hook configuration
200
+ * @returns Hook configuration that can be passed to .withHooks()
201
+ *
202
+ * @example
203
+ * ```typescript
204
+ * // Basic token tracking
205
+ * await LLMist.createAgent()
206
+ * .withHooks(HookPresets.tokenTracking())
207
+ * .ask("Summarize this document...");
208
+ * // Output: 📊 Tokens this call: 1,234
209
+ * // 📊 Total tokens: 1,234 (across 1 calls)
210
+ * // 📊 Tokens this call: 567
211
+ * // 📊 Total tokens: 1,801 (across 2 calls)
212
+ * ```
77
213
  *
78
214
  * @example
79
215
  * ```typescript
80
- * .withHooks(HookPresets.tokenTracking())
216
+ * // Cost calculation with custom hook
217
+ * let totalTokens = 0;
218
+ * .withHooks(HookPresets.merge(
219
+ * HookPresets.tokenTracking(),
220
+ * {
221
+ * observers: {
222
+ * onLLMCallComplete: async (ctx) => {
223
+ * totalTokens += ctx.usage?.totalTokens ?? 0;
224
+ * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens
225
+ * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);
226
+ * },
227
+ * },
228
+ * }
229
+ * ))
81
230
  * ```
231
+ *
232
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
82
233
  */
83
234
  static tokenTracking(): AgentHooks;
84
235
  /**
85
- * Preset: Error logging.
236
+ * Logs detailed error information for debugging and troubleshooting.
237
+ *
238
+ * **Output:**
239
+ * - LLM errors with ❌ emoji, including model and recovery status
240
+ * - Gadget errors with full context (parameters, error message)
241
+ * - Separate logging for LLM and gadget failures
86
242
  *
87
- * Logs detailed error information for debugging.
243
+ * **Use cases:**
244
+ * - Troubleshooting production issues
245
+ * - Understanding error patterns and frequency
246
+ * - Debugging error recovery behavior
247
+ * - Collecting error metrics for monitoring
88
248
  *
89
- * @returns Hook configuration
249
+ * **Performance:** Minimal overhead. Only logs when errors occur.
250
+ *
251
+ * @returns Hook configuration that can be passed to .withHooks()
90
252
  *
91
253
  * @example
92
254
  * ```typescript
93
- * .withHooks(HookPresets.errorLogging())
255
+ * // Basic error logging
256
+ * await LLMist.createAgent()
257
+ * .withHooks(HookPresets.errorLogging())
258
+ * .withGadgets(Database)
259
+ * .ask("Fetch user data");
260
+ * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded
261
+ * // Model: gpt-5-nano
262
+ * // Recovered: true
263
+ * // Output (on gadget error): ❌ Gadget Error: Database
264
+ * // Error: Connection timeout
265
+ * // Parameters: {...}
94
266
  * ```
267
+ *
268
+ * @example
269
+ * ```typescript
270
+ * // Combine with monitoring for full context
271
+ * .withHooks(HookPresets.merge(
272
+ * HookPresets.monitoring(), // Includes errorLogging
273
+ * customErrorAnalytics
274
+ * ))
275
+ * ```
276
+ *
277
+ * @example
278
+ * ```typescript
279
+ * // Error analytics collection
280
+ * const errors: any[] = [];
281
+ * .withHooks(HookPresets.merge(
282
+ * HookPresets.errorLogging(),
283
+ * {
284
+ * observers: {
285
+ * onLLMCallError: async (ctx) => {
286
+ * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });
287
+ * },
288
+ * },
289
+ * }
290
+ * ))
291
+ * ```
292
+ *
293
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}
95
294
  */
96
295
  static errorLogging(): AgentHooks;
97
296
  /**
98
- * Preset: Silent (no output).
297
+ * Returns an empty hook configuration for clean output without any logging.
99
298
  *
100
- * Useful for testing or when you want complete control.
299
+ * **Output:**
300
+ * - None. Returns {} (empty object).
301
+ *
302
+ * **Use cases:**
303
+ * - Clean test output without console noise
304
+ * - Production environments where logging is handled externally
305
+ * - Baseline for custom hook development
306
+ * - Temporary disable of all hook output
307
+ *
308
+ * **Performance:** Zero overhead. No-op hook configuration.
101
309
  *
102
310
  * @returns Empty hook configuration
103
311
  *
104
312
  * @example
105
313
  * ```typescript
106
- * .withHooks(HookPresets.silent())
314
+ * // Clean test output
315
+ * describe('Agent tests', () => {
316
+ * it('should calculate correctly', async () => {
317
+ * const result = await LLMist.createAgent()
318
+ * .withHooks(HookPresets.silent()) // No console output
319
+ * .withGadgets(Calculator)
320
+ * .askAndCollect("What is 15 times 23?");
321
+ *
322
+ * expect(result).toContain("345");
323
+ * });
324
+ * });
325
+ * ```
326
+ *
327
+ * @example
328
+ * ```typescript
329
+ * // Conditional silence based on environment
330
+ * const isTesting = process.env.NODE_ENV === 'test';
331
+ * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())
107
332
  * ```
333
+ *
334
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}
108
335
  */
109
336
  static silent(): AgentHooks;
110
337
  /**
111
- * Merge multiple hook configurations.
338
+ * Combines multiple hook configurations into one.
339
+ *
340
+ * Merge allows you to compose preset and custom hooks for modular monitoring
341
+ * configurations. Understanding merge behavior is crucial for proper composition.
342
+ *
343
+ * **Merge behavior:**
344
+ * - **Observers:** Composed - all handlers run sequentially in order
345
+ * - **Interceptors:** Last one wins - only the last interceptor applies
346
+ * - **Controllers:** Last one wins - only the last controller applies
347
+ *
348
+ * **Why interceptors/controllers don't compose:**
349
+ * - Interceptors have different signatures per method, making composition impractical
350
+ * - Controllers return specific actions that can't be meaningfully combined
351
+ * - Only observers support composition because they're read-only and independent
112
352
  *
113
- * Combines hook presets or custom configurations into a single object.
114
- * When multiple hooks target the same lifecycle event, they are composed
115
- * to run sequentially (all handlers will execute).
353
+ * **Use cases:**
354
+ * - Combining multiple presets (logging + timing + tokens)
355
+ * - Adding custom hooks to presets
356
+ * - Building modular, reusable monitoring configurations
357
+ * - Environment-specific hook composition
116
358
  *
117
- * @param hookSets - Array of hook configurations to merge
118
- * @returns Merged hook configuration with composed handlers
359
+ * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.
360
+ *
361
+ * @param hookSets - Variable number of hook configurations to merge
362
+ * @returns Single merged hook configuration with composed/overridden handlers
119
363
  *
120
364
  * @example
121
365
  * ```typescript
366
+ * // Combine multiple presets
367
+ * .withHooks(HookPresets.merge(
368
+ * HookPresets.logging(),
369
+ * HookPresets.timing(),
370
+ * HookPresets.tokenTracking()
371
+ * ))
372
+ * // All observers from all three presets will run
373
+ * ```
374
+ *
375
+ * @example
376
+ * ```typescript
377
+ * // Add custom observer to preset (both run)
122
378
  * .withHooks(HookPresets.merge(
123
- * HookPresets.logging({ verbose: true }),
124
379
  * HookPresets.timing(),
125
- * HookPresets.tokenTracking(),
126
380
  * {
127
- * // Custom hook
128
381
  * observers: {
129
382
  * onLLMCallComplete: async (ctx) => {
130
- * saveToDatabase(ctx);
131
- * }
132
- * }
383
+ * await saveMetrics({ tokens: ctx.usage?.totalTokens });
384
+ * },
385
+ * },
133
386
  * }
134
387
  * ))
135
- * // All onLLMCallComplete handlers from logging, timing, tokenTracking,
136
- * // and the custom hook will execute in order
137
388
  * ```
389
+ *
390
+ * @example
391
+ * ```typescript
392
+ * // Multiple interceptors (last wins!)
393
+ * .withHooks(HookPresets.merge(
394
+ * {
395
+ * interceptors: {
396
+ * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored
397
+ * },
398
+ * },
399
+ * {
400
+ * interceptors: {
401
+ * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins
402
+ * },
403
+ * }
404
+ * ))
405
+ * // Result: text will be lowercase
406
+ * ```
407
+ *
408
+ * @example
409
+ * ```typescript
410
+ * // Modular environment-based configuration
411
+ * const baseHooks = HookPresets.errorLogging();
412
+ * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));
413
+ * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());
414
+ *
415
+ * const hooks = process.env.NODE_ENV === 'production' ? prodHooks : devHooks;
416
+ * .withHooks(hooks)
417
+ * ```
418
+ *
419
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}
138
420
  */
139
421
  static merge(...hookSets: AgentHooks[]): AgentHooks;
140
422
  /**
141
- * Preset: Complete monitoring suite.
423
+ * Composite preset combining logging, timing, tokenTracking, and errorLogging.
424
+ *
425
+ * This is the recommended preset for development and initial production deployments,
426
+ * providing comprehensive observability with a single method call.
427
+ *
428
+ * **Includes:**
429
+ * - All output from `logging()` preset (with optional verbosity)
430
+ * - All output from `timing()` preset (execution times)
431
+ * - All output from `tokenTracking()` preset (token usage)
432
+ * - All output from `errorLogging()` preset (error details)
142
433
  *
143
- * Combines logging, timing, and token tracking.
434
+ * **Output format:**
435
+ * - Event logging: [LLM]/[GADGET] messages
436
+ * - Timing: ⏱️ emoji with milliseconds
437
+ * - Tokens: 📊 emoji with per-call and cumulative counts
438
+ * - Errors: ❌ emoji with full error details
144
439
  *
145
- * @param options - Options for monitoring
146
- * @returns Merged hook configuration
440
+ * **Use cases:**
441
+ * - Full observability during development
442
+ * - Comprehensive monitoring in production
443
+ * - One-liner for complete agent visibility
444
+ * - Troubleshooting and debugging with full context
445
+ *
446
+ * **Performance:** Combined overhead of all four presets, but still minimal in practice.
447
+ *
448
+ * @param options - Monitoring options
449
+ * @param options.verbose - Passed to logging() preset for detailed output. Default: false
450
+ * @returns Merged hook configuration combining all monitoring presets
147
451
  *
148
452
  * @example
149
453
  * ```typescript
150
- * .withHooks(HookPresets.monitoring())
151
- * .withHooks(HookPresets.monitoring({ verbose: true }))
454
+ * // Basic monitoring (recommended for development)
455
+ * await LLMist.createAgent()
456
+ * .withHooks(HookPresets.monitoring())
457
+ * .withGadgets(Calculator, Weather)
458
+ * .ask("What is 15 times 23, and what's the weather in NYC?");
459
+ * // Output: All events, timing, tokens, and errors in one place
152
460
  * ```
461
+ *
462
+ * @example
463
+ * ```typescript
464
+ * // Verbose monitoring with full details
465
+ * await LLMist.createAgent()
466
+ * .withHooks(HookPresets.monitoring({ verbose: true }))
467
+ * .ask("Your prompt");
468
+ * // Output includes: parameters, results, and complete responses
469
+ * ```
470
+ *
471
+ * @example
472
+ * ```typescript
473
+ * // Environment-based monitoring
474
+ * const isDev = process.env.NODE_ENV === 'development';
475
+ * .withHooks(HookPresets.monitoring({ verbose: isDev }))
476
+ * ```
477
+ *
478
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}
153
479
  */
154
480
  static monitoring(options?: LoggingOptions): AgentHooks;
155
481
  }