llmist 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,974 @@
+ import {
+   BaseGadget,
+   DEFAULT_HINTS,
+   init_anthropic,
+   init_builder,
+   init_client,
+   init_config,
+   init_conversation_manager,
+   init_create_gadget,
+   init_discovery,
+   init_event_handlers,
+   init_exceptions,
+   init_executor,
+   init_gadget,
+   init_gadget_output_store,
+   init_gemini,
+   init_logger,
+   init_manager,
+   init_messages,
+   init_model_registry,
+   init_model_shortcuts,
+   init_openai,
+   init_options,
+   init_output_viewer,
+   init_parser,
+   init_prompt_config,
+   init_quick_methods,
+   init_registry,
+   init_strategies,
+   init_strategy,
+   init_stream_processor,
+   resolveHintTemplate
+ } from "./chunk-VGZCFUPX.js";
+
+ // src/index.ts
+ init_builder();
+ init_event_handlers();
+ import { z } from "zod";
+
+ // src/agent/hook-presets.ts
+ var HookPresets = class _HookPresets {
+   /**
+    * Logs LLM calls and gadget execution to console with optional verbosity.
+    *
+    * **Output (basic mode):**
+    * - LLM call start/complete events with iteration numbers
+    * - Gadget execution start/complete with gadget names
+    * - Token counts when available
+    *
+    * **Output (verbose mode):**
+    * - All basic mode output
+    * - Full gadget parameters (formatted JSON)
+    * - Full gadget results
+    * - Complete LLM response text
+    *
+    * **Use cases:**
+    * - Basic development debugging and execution flow visibility
+    * - Understanding agent decision-making and tool usage
+    * - Troubleshooting gadget invocations
+    *
+    * **Performance:** Minimal overhead. Console writes are synchronous but fast.
+    *
+    * @param options - Logging options
+    * @param options.verbose - Include full parameters and results. Default: false
+    * @returns Hook configuration that can be passed to .withHooks()
+    *
+    * @example
+    * ```typescript
+    * // Basic logging
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.logging())
+    *   .ask("Calculate 15 * 23");
+    * // Output: [LLM] Starting call (iteration 0)
+    * //         [GADGET] Executing Calculator
+    * //         [GADGET] Completed Calculator
+    * //         [LLM] Completed (tokens: 245)
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Verbose logging with full details
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.logging({ verbose: true }))
+    *   .ask("Calculate 15 * 23");
+    * // Output includes: parameters, results, and full responses
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Environment-based verbosity
+    * const isDev = process.env.NODE_ENV === 'development';
+    * .withHooks(HookPresets.logging({ verbose: isDev }))
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}
+    */
+   static logging(options = {}) {
+     return {
+       observers: {
+         onLLMCallStart: async (ctx) => {
+           console.log(`[LLM] Starting call (iteration ${ctx.iteration})`);
+         },
+         onLLMCallComplete: async (ctx) => {
+           const tokens = ctx.usage?.totalTokens ?? "unknown";
+           console.log(`[LLM] Completed (tokens: ${tokens})`);
+           if (options.verbose && ctx.finalMessage) {
+             console.log(`[LLM] Response: ${ctx.finalMessage}`);
+           }
+         },
+         onGadgetExecutionStart: async (ctx) => {
+           console.log(`[GADGET] Executing ${ctx.gadgetName}`);
+           if (options.verbose) {
+             console.log(`[GADGET] Parameters:`, JSON.stringify(ctx.parameters, null, 2));
+           }
+         },
+         onGadgetExecutionComplete: async (ctx) => {
+           console.log(`[GADGET] Completed ${ctx.gadgetName}`);
+           if (options.verbose) {
+             const display = ctx.error ?? ctx.finalResult ?? "(no result)";
+             console.log(`[GADGET] Result: ${display}`);
+           }
+         }
+       }
+     };
+   }
+   /**
+    * Measures and logs execution time for LLM calls and gadgets.
+    *
+    * **Output:**
+    * - Duration in milliseconds with ⏱️ emoji for each operation
+    * - Separate timing for each LLM iteration
+    * - Separate timing for each gadget execution
+    *
+    * **Use cases:**
+    * - Performance profiling and optimization
+    * - Identifying slow operations (LLM calls vs gadget execution)
+    * - Monitoring response times in production
+    * - Capacity planning and SLA tracking
+    *
+    * **Performance:** Negligible overhead. Uses Date.now() for timing measurements.
+    *
+    * @returns Hook configuration that can be passed to .withHooks()
+    *
+    * @example
+    * ```typescript
+    * // Basic timing
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.timing())
+    *   .withGadgets(Weather, Database)
+    *   .ask("What's the weather in NYC?");
+    * // Output: ⏱️ LLM call took 1234ms
+    * //         ⏱️ Gadget Weather took 567ms
+    * //         ⏱️ LLM call took 890ms
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Combined with logging for full context
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.logging(),
+    *   HookPresets.timing()
+    * ))
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Correlate performance with cost
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.timing(),
+    *   HookPresets.tokenTracking()
+    * ))
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}
+    */
+   static timing() {
+     const timings = /* @__PURE__ */ new Map();
+     return {
+       observers: {
+         onLLMCallStart: async (ctx) => {
+           timings.set(`llm-${ctx.iteration}`, Date.now());
+         },
+         onLLMCallComplete: async (ctx) => {
+           const start = timings.get(`llm-${ctx.iteration}`);
+           if (start) {
+             const duration = Date.now() - start;
+             console.log(`\u23F1\uFE0F LLM call took ${duration}ms`);
+             timings.delete(`llm-${ctx.iteration}`);
+           }
+         },
+         onGadgetExecutionStart: async (ctx) => {
+           const key = `gadget-${ctx.gadgetName}-${Date.now()}`;
+           timings.set(key, Date.now());
+           ctx._timingKey = key;
+         },
+         onGadgetExecutionComplete: async (ctx) => {
+           const key = ctx._timingKey;
+           if (key) {
+             const start = timings.get(key);
+             if (start) {
+               const duration = Date.now() - start;
+               console.log(`\u23F1\uFE0F Gadget ${ctx.gadgetName} took ${duration}ms`);
+               timings.delete(key);
+             }
+           }
+         }
+       }
+     };
+   }
+   /**
+    * Tracks cumulative token usage across all LLM calls.
+    *
+    * **Output:**
+    * - Per-call token count with 📊 emoji
+    * - Cumulative total across all calls
+    * - Call count for average calculations
+    *
+    * **Use cases:**
+    * - Cost monitoring and budget tracking
+    * - Optimizing prompts to reduce token usage
+    * - Comparing token efficiency across different approaches
+    * - Real-time cost estimation
+    *
+    * **Performance:** Minimal overhead. Simple counter increments.
+    *
+    * **Note:** Token counts depend on the provider's response. Some providers
+    * may not include usage data, in which case counts won't be logged.
+    *
+    * @returns Hook configuration that can be passed to .withHooks()
+    *
+    * @example
+    * ```typescript
+    * // Basic token tracking
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.tokenTracking())
+    *   .ask("Summarize this document...");
+    * // Output: 📊 Tokens this call: 1,234
+    * //         📊 Total tokens: 1,234 (across 1 calls)
+    * //         📊 Tokens this call: 567
+    * //         📊 Total tokens: 1,801 (across 2 calls)
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Cost calculation with custom hook
+    * let totalTokens = 0;
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.tokenTracking(),
+    *   {
+    *     observers: {
+    *       onLLMCallComplete: async (ctx) => {
+    *         totalTokens += ctx.usage?.totalTokens ?? 0;
+    *         const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens
+    *         console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);
+    *       },
+    *     },
+    *   }
+    * ))
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
+    */
+   static tokenTracking() {
+     let totalTokens = 0;
+     let totalCalls = 0;
+     return {
+       observers: {
+         onLLMCallComplete: async (ctx) => {
+           totalCalls++;
+           if (ctx.usage?.totalTokens) {
+             totalTokens += ctx.usage.totalTokens;
+             console.log(`\u{1F4CA} Tokens this call: ${ctx.usage.totalTokens}`);
+             console.log(`\u{1F4CA} Total tokens: ${totalTokens} (across ${totalCalls} calls)`);
+           }
+         }
+       }
+     };
+   }
+   /**
+    * Tracks comprehensive progress metrics including iterations, tokens, cost, and timing.
+    *
+    * **This preset showcases llmist's core capabilities by demonstrating:**
+    * - Observer pattern for non-intrusive monitoring
+    * - Integration with ModelRegistry for cost estimation
+    * - Callback-based architecture for flexible UI updates
+    * - Provider-agnostic token and cost tracking
+    *
+    * Unlike `tokenTracking()` which only logs to console, this preset provides
+    * structured data through callbacks, making it perfect for building custom UIs,
+    * dashboards, or progress indicators (like the llmist CLI).
+    *
+    * **Output (when logProgress: true):**
+    * - Iteration number and call count
+    * - Cumulative token usage (input + output)
+    * - Cumulative cost in USD (requires modelRegistry)
+    * - Elapsed time in seconds
+    *
+    * **Use cases:**
+    * - Building CLI progress indicators with live updates
+    * - Creating web dashboards with real-time metrics
+    * - Budget monitoring and cost alerts
+    * - Performance tracking and optimization
+    * - Custom logging to external systems (Datadog, CloudWatch, etc.)
+    *
+    * **Performance:** Minimal overhead. Uses Date.now() for timing and optional
+    * ModelRegistry.estimateCost() which is O(1) lookup. Callback invocation is
+    * synchronous and fast.
+    *
+    * @param options - Progress tracking options
+    * @param options.modelRegistry - ModelRegistry for cost estimation (optional)
+    * @param options.onProgress - Callback invoked after each LLM call (optional)
+    * @param options.logProgress - Log progress to console (default: false)
+    * @returns Hook configuration with progress tracking observers
+    *
+    * @example
+    * ```typescript
+    * // Basic usage with callback (RECOMMENDED - used by llmist CLI)
+    * import { LLMist, HookPresets } from 'llmist';
+    *
+    * const client = LLMist.create();
+    *
+    * await client.agent()
+    *   .withHooks(HookPresets.progressTracking({
+    *     modelRegistry: client.modelRegistry,
+    *     onProgress: (stats) => {
+    *       // Update your UI with stats
+    *       console.log(`#${stats.currentIteration} | ${stats.totalTokens} tokens | $${stats.totalCost.toFixed(4)}`);
+    *     }
+    *   }))
+    *   .withGadgets(Calculator)
+    *   .ask("Calculate 15 * 23");
+    * // Output: #1 | 245 tokens | $0.0012
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Console logging mode (quick debugging)
+    * await client.agent()
+    *   .withHooks(HookPresets.progressTracking({
+    *     modelRegistry: client.modelRegistry,
+    *     logProgress: true // Simple console output
+    *   }))
+    *   .ask("Your prompt");
+    * // Output: 📊 Progress: Iteration #1 | 245 tokens | $0.0012 | 1.2s
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Budget monitoring with alerts
+    * const BUDGET_USD = 0.10;
+    *
+    * await client.agent()
+    *   .withHooks(HookPresets.progressTracking({
+    *     modelRegistry: client.modelRegistry,
+    *     onProgress: (stats) => {
+    *       if (stats.totalCost > BUDGET_USD) {
+    *         throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);
+    *       }
+    *     }
+    *   }))
+    *   .ask("Long running task...");
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Web dashboard integration
+    * let progressBar: HTMLElement;
+    *
+    * await client.agent()
+    *   .withHooks(HookPresets.progressTracking({
+    *     modelRegistry: client.modelRegistry,
+    *     onProgress: (stats) => {
+    *       // Update web UI in real-time
+    *       progressBar.textContent = `Iteration ${stats.currentIteration}`;
+    *       progressBar.dataset.cost = stats.totalCost.toFixed(4);
+    *       progressBar.dataset.tokens = stats.totalTokens.toString();
+    *     }
+    *   }))
+    *   .ask("Your prompt");
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // External logging (Datadog, CloudWatch, etc.)
+    * await client.agent()
+    *   .withHooks(HookPresets.progressTracking({
+    *     modelRegistry: client.modelRegistry,
+    *     onProgress: async (stats) => {
+    *       await metrics.gauge('llm.iteration', stats.currentIteration);
+    *       await metrics.gauge('llm.cost', stats.totalCost);
+    *       await metrics.gauge('llm.tokens', stats.totalTokens);
+    *     }
+    *   }))
+    *   .ask("Your prompt");
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsprogresstrackingoptions | Full documentation}
+    * @see {@link ProgressTrackingOptions} for detailed options
+    * @see {@link ProgressStats} for the callback data structure
+    */
+   static progressTracking(options) {
+     const { modelRegistry, onProgress, logProgress = false } = options ?? {};
+     let totalCalls = 0;
+     let currentIteration = 0;
+     let totalInputTokens = 0;
+     let totalOutputTokens = 0;
+     let totalCost = 0;
+     const startTime = Date.now();
+     return {
+       observers: {
+         // Track iteration on each LLM call start
+         onLLMCallStart: async (ctx) => {
+           currentIteration++;
+         },
+         // Accumulate metrics and report progress on each LLM call completion
+         onLLMCallComplete: async (ctx) => {
+           totalCalls++;
+           if (ctx.usage) {
+             totalInputTokens += ctx.usage.inputTokens;
+             totalOutputTokens += ctx.usage.outputTokens;
+             if (modelRegistry) {
+               try {
+                 const modelName = ctx.options.model.includes(":") ? ctx.options.model.split(":")[1] : ctx.options.model;
+                 const costEstimate = modelRegistry.estimateCost(
+                   modelName,
+                   ctx.usage.inputTokens,
+                   ctx.usage.outputTokens
+                 );
+                 if (costEstimate) {
+                   totalCost += costEstimate.totalCost;
+                 }
+               } catch (error) {
+                 if (logProgress) {
+                   console.warn(`\u26A0\uFE0F Cost estimation failed:`, error);
+                 }
+               }
+             }
+           }
+           const stats = {
+             currentIteration,
+             totalCalls,
+             totalInputTokens,
+             totalOutputTokens,
+             totalTokens: totalInputTokens + totalOutputTokens,
+             totalCost,
+             elapsedSeconds: Number(((Date.now() - startTime) / 1e3).toFixed(1))
+           };
+           if (onProgress) {
+             onProgress(stats);
+           }
+           if (logProgress) {
+             const formattedTokens = stats.totalTokens >= 1e3 ? `${(stats.totalTokens / 1e3).toFixed(1)}k` : `${stats.totalTokens}`;
+             const formattedCost = stats.totalCost > 0 ? `$${stats.totalCost.toFixed(4)}` : "$0";
+             console.log(
+               `\u{1F4CA} Progress: Iteration #${stats.currentIteration} | ${formattedTokens} tokens | ${formattedCost} | ${stats.elapsedSeconds}s`
+             );
+           }
+         }
+       }
+     };
+   }
+   /**
+    * Logs detailed error information for debugging and troubleshooting.
+    *
+    * **Output:**
+    * - LLM errors with ❌ emoji, including model and recovery status
+    * - Gadget errors with full context (parameters, error message)
+    * - Separate logging for LLM and gadget failures
+    *
+    * **Use cases:**
+    * - Troubleshooting production issues
+    * - Understanding error patterns and frequency
+    * - Debugging error recovery behavior
+    * - Collecting error metrics for monitoring
+    *
+    * **Performance:** Minimal overhead. Only logs when errors occur.
+    *
+    * @returns Hook configuration that can be passed to .withHooks()
+    *
+    * @example
+    * ```typescript
+    * // Basic error logging
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.errorLogging())
+    *   .withGadgets(Database)
+    *   .ask("Fetch user data");
+    * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded
+    * //                          Model: gpt-5-nano
+    * //                          Recovered: true
+    * // Output (on gadget error): ❌ Gadget Error: Database
+    * //                             Error: Connection timeout
+    * //                             Parameters: {...}
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Combine with monitoring for full context
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.monitoring(), // Includes errorLogging
+    *   customErrorAnalytics
+    * ))
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Error analytics collection
+    * const errors: any[] = [];
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.errorLogging(),
+    *   {
+    *     observers: {
+    *       onLLMCallError: async (ctx) => {
+    *         errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });
+    *       },
+    *     },
+    *   }
+    * ))
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}
+    */
+   static errorLogging() {
+     return {
+       observers: {
+         onLLMCallError: async (ctx) => {
+           console.error(`\u274C LLM Error (iteration ${ctx.iteration}):`, ctx.error.message);
+           console.error(` Model: ${ctx.options.model}`);
+           console.error(` Recovered: ${ctx.recovered}`);
+         },
+         onGadgetExecutionComplete: async (ctx) => {
+           if (ctx.error) {
+             console.error(`\u274C Gadget Error: ${ctx.gadgetName}`);
+             console.error(` Error: ${ctx.error}`);
+             console.error(` Parameters:`, JSON.stringify(ctx.parameters, null, 2));
+           }
+         }
+       }
+     };
+   }
+   /**
+    * Tracks context compaction events.
+    *
+    * **Output:**
+    * - Compaction events with 🗜️ emoji
+    * - Strategy name, tokens before/after, and savings
+    * - Cumulative statistics
+    *
+    * **Use cases:**
+    * - Monitoring long-running conversations
+    * - Understanding when and how compaction occurs
+    * - Debugging context management issues
+    *
+    * **Performance:** Minimal overhead. Simple console output.
+    *
+    * @returns Hook configuration that can be passed to .withHooks()
+    *
+    * @example
+    * ```typescript
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.compactionTracking())
+    *   .ask("Your prompt");
+    * ```
+    */
+   static compactionTracking() {
+     return {
+       observers: {
+         onCompaction: async (ctx) => {
+           const saved = ctx.event.tokensBefore - ctx.event.tokensAfter;
+           const percent = (saved / ctx.event.tokensBefore * 100).toFixed(1);
+           console.log(
+             `\u{1F5DC}\uFE0F Compaction (${ctx.event.strategy}): ${ctx.event.tokensBefore} \u2192 ${ctx.event.tokensAfter} tokens (saved ${saved}, ${percent}%)`
+           );
+           console.log(
+             ` Messages: ${ctx.event.messagesBefore} \u2192 ${ctx.event.messagesAfter}`
+           );
+           if (ctx.stats.totalCompactions > 1) {
+             console.log(
+               ` Cumulative: ${ctx.stats.totalCompactions} compactions, ${ctx.stats.totalTokensSaved} tokens saved`
+             );
+           }
+         }
+       }
+     };
+   }
+   /**
+    * Returns empty hook configuration for clean output without any logging.
+    *
+    * **Output:**
+    * - None. Returns {} (empty object).
+    *
+    * **Use cases:**
+    * - Clean test output without console noise
+    * - Production environments where logging is handled externally
+    * - Baseline for custom hook development
+    * - Temporarily disabling all hook output
+    *
+    * **Performance:** Zero overhead. No-op hook configuration.
+    *
+    * @returns Empty hook configuration
+    *
+    * @example
+    * ```typescript
+    * // Clean test output
+    * describe('Agent tests', () => {
+    *   it('should calculate correctly', async () => {
+    *     const result = await LLMist.createAgent()
+    *       .withHooks(HookPresets.silent()) // No console output
+    *       .withGadgets(Calculator)
+    *       .askAndCollect("What is 15 times 23?");
+    *
+    *     expect(result).toContain("345");
+    *   });
+    * });
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Conditional silence based on environment
+    * const isTesting = process.env.NODE_ENV === 'test';
+    * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}
+    */
+   static silent() {
+     return {};
+   }
+   /**
+    * Combines multiple hook configurations into one.
+    *
+    * Merge allows you to compose preset and custom hooks for modular monitoring
+    * configurations. Understanding merge behavior is crucial for proper composition.
+    *
+    * **Merge behavior:**
+    * - **Observers:** Composed - all handlers run sequentially in order
+    * - **Interceptors:** Last one wins - only the last interceptor applies
+    * - **Controllers:** Last one wins - only the last controller applies
+    *
+    * **Why interceptors/controllers don't compose:**
+    * - Interceptors have different signatures per method, making composition impractical
+    * - Controllers return specific actions that can't be meaningfully combined
+    * - Only observers support composition because they're read-only and independent
+    *
+    * **Use cases:**
+    * - Combining multiple presets (logging + timing + tokens)
+    * - Adding custom hooks to presets
+    * - Building modular, reusable monitoring configurations
+    * - Environment-specific hook composition
+    *
+    * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.
+    *
+    * @param hookSets - Variable number of hook configurations to merge
+    * @returns Single merged hook configuration with composed/overridden handlers
+    *
+    * @example
+    * ```typescript
+    * // Combine multiple presets
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.logging(),
+    *   HookPresets.timing(),
+    *   HookPresets.tokenTracking()
+    * ))
+    * // All observers from all three presets will run
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Add custom observer to preset (both run)
+    * .withHooks(HookPresets.merge(
+    *   HookPresets.timing(),
+    *   {
+    *     observers: {
+    *       onLLMCallComplete: async (ctx) => {
+    *         await saveMetrics({ tokens: ctx.usage?.totalTokens });
+    *       },
+    *     },
+    *   }
+    * ))
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Multiple interceptors (last wins!)
+    * .withHooks(HookPresets.merge(
+    *   {
+    *     interceptors: {
+    *       interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored
+    *     },
+    *   },
+    *   {
+    *     interceptors: {
+    *       interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins
+    *     },
+    *   }
+    * ))
+    * // Result: text will be lowercase
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Modular environment-based configuration
+    * const baseHooks = HookPresets.errorLogging();
+    * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));
+    * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());
+    *
+    * const hooks = process.env.NODE_ENV === 'production' ? prodHooks : devHooks;
+    * .withHooks(hooks)
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}
+    */
+   static merge(...hookSets) {
+     const merged = {
+       observers: {},
+       interceptors: {},
+       controllers: {}
+     };
+     for (const hooks of hookSets) {
+       if (hooks.observers) {
+         for (const [key, handler] of Object.entries(hooks.observers)) {
+           const typedKey = key;
+           if (merged.observers[typedKey]) {
+             const existing = merged.observers[typedKey];
+             merged.observers[typedKey] = async (ctx) => {
+               await existing(ctx);
+               await handler(ctx);
+             };
+           } else {
+             merged.observers[typedKey] = handler;
+           }
+         }
+       }
+       if (hooks.interceptors) {
+         Object.assign(merged.interceptors, hooks.interceptors);
+       }
+       if (hooks.controllers) {
+         Object.assign(merged.controllers, hooks.controllers);
+       }
+     }
+     return merged;
+   }
+   /**
+    * Composite preset combining logging, timing, tokenTracking, and errorLogging.
+    *
+    * This is the recommended preset for development and initial production deployments,
+    * providing comprehensive observability with a single method call.
+    *
+    * **Includes:**
+    * - All output from `logging()` preset (with optional verbosity)
+    * - All output from `timing()` preset (execution times)
+    * - All output from `tokenTracking()` preset (token usage)
+    * - All output from `errorLogging()` preset (error details)
+    *
+    * **Output format:**
+    * - Event logging: [LLM]/[GADGET] messages
+    * - Timing: ⏱️ emoji with milliseconds
+    * - Tokens: 📊 emoji with per-call and cumulative counts
+    * - Errors: ❌ emoji with full error details
+    *
+    * **Use cases:**
+    * - Full observability during development
+    * - Comprehensive monitoring in production
+    * - One-liner for complete agent visibility
+    * - Troubleshooting and debugging with full context
+    *
+    * **Performance:** Combined overhead of all four presets, but still minimal in practice.
+    *
+    * @param options - Monitoring options
+    * @param options.verbose - Passed to logging() preset for detailed output. Default: false
+    * @returns Merged hook configuration combining all monitoring presets
+    *
+    * @example
+    * ```typescript
+    * // Basic monitoring (recommended for development)
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.monitoring())
+    *   .withGadgets(Calculator, Weather)
+    *   .ask("What is 15 times 23, and what's the weather in NYC?");
+    * // Output: All events, timing, tokens, and errors in one place
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Verbose monitoring with full details
+    * await LLMist.createAgent()
+    *   .withHooks(HookPresets.monitoring({ verbose: true }))
+    *   .ask("Your prompt");
+    * // Output includes: parameters, results, and complete responses
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Environment-based monitoring
+    * const isDev = process.env.NODE_ENV === 'development';
+    * .withHooks(HookPresets.monitoring({ verbose: isDev }))
+    * ```
+    *
+    * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}
+    */
+   static monitoring(options = {}) {
+     return _HookPresets.merge(
+       _HookPresets.logging(options),
+       _HookPresets.timing(),
+       _HookPresets.tokenTracking(),
+       _HookPresets.errorLogging()
+     );
+   }
+ };
+
+ // src/agent/index.ts
+ init_conversation_manager();
+ init_stream_processor();
+ init_gadget_output_store();
+
+ // src/agent/compaction/index.ts
+ init_config();
+ init_strategy();
+ init_strategies();
+ init_manager();
+
+ // src/agent/hints.ts
+ init_prompt_config();
+ function iterationProgressHint(options) {
+   const { timing = "always", showUrgency = true, template } = options ?? {};
+   return {
+     controllers: {
+       beforeLLMCall: async (ctx) => {
+         const iteration = ctx.iteration + 1;
+         const maxIterations = ctx.maxIterations;
+         const progress = iteration / maxIterations;
+         if (timing === "late" && progress < 0.5) {
+           return { action: "proceed" };
+         }
+         if (timing === "urgent" && progress < 0.8) {
+           return { action: "proceed" };
+         }
+         const remaining = maxIterations - iteration;
+         const hintContext = {
+           iteration,
+           maxIterations,
+           remaining
+         };
+         let hint = resolveHintTemplate(
+           template,
+           DEFAULT_HINTS.iterationProgressHint,
+           hintContext
+         );
+         if (showUrgency && progress >= 0.8) {
+           hint += " \u26A0\uFE0F Running low on iterations - focus on completing the task.";
+         }
+         const messages = [...ctx.options.messages];
+         let lastUserIndex = -1;
+         for (let i = messages.length - 1; i >= 0; i--) {
+           if (messages[i].role === "user") {
+             lastUserIndex = i;
+             break;
+           }
+         }
+         if (lastUserIndex >= 0) {
+           messages.splice(lastUserIndex + 1, 0, {
+             role: "user",
+             content: `[System Hint] ${hint}`
+           });
+         } else {
+           messages.push({
+             role: "user",
+             content: `[System Hint] ${hint}`
+           });
+         }
+         return {
+           action: "proceed",
+           modifiedOptions: { messages }
+         };
+       }
+     }
+   };
+ }
+ function parallelGadgetHint(options) {
+   const {
+     minGadgetsForEfficiency = 2,
+     message = DEFAULT_HINTS.parallelGadgetsHint,
+     enabled = true
+   } = options ?? {};
+   return {
+     controllers: {
+       afterLLMCall: async (ctx) => {
+         if (!enabled) {
+           return { action: "continue" };
+         }
+         if (ctx.gadgetCallCount > 0 && ctx.gadgetCallCount < minGadgetsForEfficiency) {
+           return {
+             action: "append_messages",
+             messages: [
+               {
+                 role: "user",
+                 content: `[System Hint] ${message}`
+               }
+             ]
+           };
+         }
+         return { action: "continue" };
+       }
+     }
+   };
+ }
+ function createHints(config) {
+   const hooksToMerge = [];
+   if (config.iterationProgress) {
+     const options = typeof config.iterationProgress === "boolean" ? {} : config.iterationProgress;
+     hooksToMerge.push(iterationProgressHint(options));
+   }
+   if (config.parallelGadgets) {
+     const options = typeof config.parallelGadgets === "boolean" ? {} : config.parallelGadgets;
+     hooksToMerge.push(parallelGadgetHint(options));
+   }
+   if (config.custom) {
+     hooksToMerge.push(...config.custom);
+   }
+   return HookPresets.merge(...hooksToMerge);
+ }
+
+ // src/index.ts
+ init_client();
+ init_messages();
+ init_model_registry();
+ init_model_shortcuts();
+ init_options();
+ init_prompt_config();
+ init_quick_methods();
+ init_create_gadget();
+ init_output_viewer();
+ init_exceptions();
+ init_executor();
+ init_gadget();
+ init_parser();
+ init_registry();
+
+ // src/gadgets/typed-gadget.ts
+ init_gadget();
+ function Gadget(config) {
+   class GadgetBase extends BaseGadget {
+     description = config.description;
+     parameterSchema = config.schema;
+     name = config.name;
+     timeoutMs = config.timeoutMs;
+     examples = config.examples;
+     /**
+      * Type helper property for accessing inferred parameter type.
+      * This is used in the execute method signature: `execute(params: this['params'])`
+      *
+      * Note: This is just for type inference - the actual params in execute()
+      * will be Record<string, unknown> which you can safely cast to this['params']
+      */
+     params;
+   }
+   return GadgetBase;
+ }
+
+ // src/index.ts
+ init_logger();
+ init_anthropic();
+ init_discovery();
+ init_gemini();
+ init_openai();
+
+ export {
+   HookPresets,
+   iterationProgressHint,
+   parallelGadgetHint,
+   createHints,
+   Gadget,
+   z
+ };
+ //# sourceMappingURL=chunk-UEEESLOA.js.map