llmist 0.1.4 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -4319,18 +4319,59 @@ init_event_handlers();
  // src/agent/hook-presets.ts
  var HookPresets = class _HookPresets {
  /**
- * Preset: Basic logging of all events.
+ * Logs LLM calls and gadget execution to console with optional verbosity.
  *
- * Logs LLM calls and gadget executions to console.
+ * **Output (basic mode):**
+ * - LLM call start/complete events with iteration numbers
+ * - Gadget execution start/complete with gadget names
+ * - Token counts when available
+ *
+ * **Output (verbose mode):**
+ * - All basic mode output
+ * - Full gadget parameters (formatted JSON)
+ * - Full gadget results
+ * - Complete LLM response text
+ *
+ * **Use cases:**
+ * - Basic development debugging and execution flow visibility
+ * - Understanding agent decision-making and tool usage
+ * - Troubleshooting gadget invocations
+ *
+ * **Performance:** Minimal overhead. Console writes are synchronous but fast.
  *
  * @param options - Logging options
- * @returns Hook configuration
+ * @param options.verbose - Include full parameters and results. Default: false
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic logging
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.logging())
+ * .ask("Calculate 15 * 23");
+ * // Output: [LLM] Starting call (iteration 0)
+ * // [GADGET] Executing Calculator
+ * // [GADGET] Completed Calculator
+ * // [LLM] Completed (tokens: 245)
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.logging())
- * .withHooks(HookPresets.logging({ verbose: true }))
+ * // Verbose logging with full details
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.logging({ verbose: true }))
+ * .ask("Calculate 15 * 23");
+ * // Output includes: parameters, results, and full responses
  * ```
+ *
+ * @example
+ * ```typescript
+ * // Environment-based verbosity
+ * const isDev = process.env.NODE_ENV === 'development';
+ * .withHooks(HookPresets.logging({ verbose: isDev }))
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}
  */
  static logging(options = {}) {
  return {
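The new `logging()` docs above only show the fluent builder calls; for orientation, here is a minimal sketch of pairing the preset with a custom observer. The `observers`/`onLLMCallComplete` shape is taken from the `merge()` examples later in this diff, while the import style and the exact `ctx` fields are assumptions.

```typescript
// Sketch only: the import names are assumed, and the ctx shape is a guess based on
// the usage?.totalTokens field used in the docs further down.
import { LLMist, HookPresets } from "llmist";

const auditLog = {
  observers: {
    onLLMCallComplete: async (ctx: { usage?: { totalTokens?: number } }) => {
      // Runs alongside the preset's own console output once merged.
      console.log(`[AUDIT] call finished, tokens: ${ctx.usage?.totalTokens ?? "n/a"}`);
    },
  },
};

await LLMist.createAgent()
  .withHooks(HookPresets.merge(HookPresets.logging(), auditLog))
  .ask("Calculate 15 * 23");
```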
@@ -4362,16 +4403,54 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Performance timing for all operations.
- *
  * Measures and logs execution time for LLM calls and gadgets.
  *
- * @returns Hook configuration
+ * **Output:**
+ * - Duration in milliseconds with ⏱️ emoji for each operation
+ * - Separate timing for each LLM iteration
+ * - Separate timing for each gadget execution
+ *
+ * **Use cases:**
+ * - Performance profiling and optimization
+ * - Identifying slow operations (LLM calls vs gadget execution)
+ * - Monitoring response times in production
+ * - Capacity planning and SLA tracking
+ *
+ * **Performance:** Negligible overhead. Uses Date.now() for timing measurements.
+ *
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic timing
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.timing())
+ * .withGadgets(Weather, Database)
+ * .ask("What's the weather in NYC?");
+ * // Output: ⏱️ LLM call took 1234ms
+ * // ⏱️ Gadget Weather took 567ms
+ * // ⏱️ LLM call took 890ms
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Combined with logging for full context
+ * .withHooks(HookPresets.merge(
+ * HookPresets.logging(),
+ * HookPresets.timing()
+ * ))
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.timing())
+ * // Correlate performance with cost
+ * .withHooks(HookPresets.merge(
+ * HookPresets.timing(),
+ * HookPresets.tokenTracking()
+ * ))
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}
  */
  static timing() {
  const timings = /* @__PURE__ */ new Map();
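The timing docs mention `Date.now()`, and the trailing context line shows a `Map` of in-flight timings; a rough standalone sketch of that pattern follows. It is an illustration of the described approach, not the preset's actual internals, and the key names are invented.

```typescript
// Illustrative Date.now() + Map timing pattern, not the library's implementation.
const timings = new Map<string, number>();

function startTimer(key: string): void {
  timings.set(key, Date.now());
}

function stopTimer(key: string): void {
  const startedAt = timings.get(key);
  if (startedAt !== undefined) {
    console.log(`⏱️ ${key} took ${Date.now() - startedAt}ms`);
    timings.delete(key);
  }
}

startTimer("LLM call");
await new Promise((resolve) => setTimeout(resolve, 25)); // stand-in for the real call
stopTimer("LLM call"); // e.g. "⏱️ LLM call took 25ms"
```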
@@ -4408,16 +4487,57 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Token usage tracking.
+ * Tracks cumulative token usage across all LLM calls.
+ *
+ * **Output:**
+ * - Per-call token count with 📊 emoji
+ * - Cumulative total across all calls
+ * - Call count for average calculations
+ *
+ * **Use cases:**
+ * - Cost monitoring and budget tracking
+ * - Optimizing prompts to reduce token usage
+ * - Comparing token efficiency across different approaches
+ * - Real-time cost estimation
  *
- * Tracks and logs cumulative token usage across all LLM calls.
+ * **Performance:** Minimal overhead. Simple counter increments.
  *
- * @returns Hook configuration
+ * **Note:** Token counts depend on the provider's response. Some providers
+ * may not include usage data, in which case counts won't be logged.
+ *
+ * @returns Hook configuration that can be passed to .withHooks()
+ *
+ * @example
+ * ```typescript
+ * // Basic token tracking
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.tokenTracking())
+ * .ask("Summarize this document...");
+ * // Output: 📊 Tokens this call: 1,234
+ * // 📊 Total tokens: 1,234 (across 1 calls)
+ * // 📊 Tokens this call: 567
+ * // 📊 Total tokens: 1,801 (across 2 calls)
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.tokenTracking())
+ * // Cost calculation with custom hook
+ * let totalTokens = 0;
+ * .withHooks(HookPresets.merge(
+ * HookPresets.tokenTracking(),
+ * {
+ * observers: {
+ * onLLMCallComplete: async (ctx) => {
+ * totalTokens += ctx.usage?.totalTokens ?? 0;
+ * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens
+ * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);
+ * },
+ * },
+ * }
+ * ))
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
  */
  static tokenTracking() {
  let totalTokens = 0;
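Building on the cost example in the docblock above (which hard-codes $3 per 1M tokens), a sketch of a per-model rate table. Only `ctx.usage?.totalTokens` appears in the docs; the `ctx.model` field, the model names, and every rate below are placeholders.

```typescript
// Placeholder pricing for illustration; ctx.model and all rates are assumptions.
const ratePerMillionTokens: Record<string, number> = {
  "gpt-5-nano": 0.5,
  default: 3.0,
};

let runningTokens = 0;
const costEstimator = {
  observers: {
    onLLMCallComplete: async (ctx: { model?: string; usage?: { totalTokens?: number } }) => {
      runningTokens += ctx.usage?.totalTokens ?? 0;
      const rate = ratePerMillionTokens[ctx.model ?? "default"] ?? ratePerMillionTokens.default;
      console.log(`💰 Estimated cost so far: $${((runningTokens / 1_000_000) * rate).toFixed(4)}`);
    },
  },
};
```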
@@ -4436,16 +4556,64 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Error logging.
+ * Logs detailed error information for debugging and troubleshooting.
+ *
+ * **Output:**
+ * - LLM errors with ❌ emoji, including model and recovery status
+ * - Gadget errors with full context (parameters, error message)
+ * - Separate logging for LLM and gadget failures
+ *
+ * **Use cases:**
+ * - Troubleshooting production issues
+ * - Understanding error patterns and frequency
+ * - Debugging error recovery behavior
+ * - Collecting error metrics for monitoring
+ *
+ * **Performance:** Minimal overhead. Only logs when errors occur.
+ *
+ * @returns Hook configuration that can be passed to .withHooks()
  *
- * Logs detailed error information for debugging.
+ * @example
+ * ```typescript
+ * // Basic error logging
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.errorLogging())
+ * .withGadgets(Database)
+ * .ask("Fetch user data");
+ * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded
+ * // Model: gpt-5-nano
+ * // Recovered: true
+ * // Output (on gadget error): ❌ Gadget Error: Database
+ * // Error: Connection timeout
+ * // Parameters: {...}
+ * ```
  *
- * @returns Hook configuration
+ * @example
+ * ```typescript
+ * // Combine with monitoring for full context
+ * .withHooks(HookPresets.merge(
+ * HookPresets.monitoring(), // Includes errorLogging
+ * customErrorAnalytics
+ * ))
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.errorLogging())
+ * // Error analytics collection
+ * const errors: any[] = [];
+ * .withHooks(HookPresets.merge(
+ * HookPresets.errorLogging(),
+ * {
+ * observers: {
+ * onLLMCallError: async (ctx) => {
+ * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });
+ * },
+ * },
+ * }
+ * ))
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}
  */
  static errorLogging() {
  return {
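The error-analytics example above pushes raw error objects into an array; a slightly different sketch aggregates counts instead, using only the `ctx.error` and `ctx.recovered` fields shown in that example. The observer name `onLLMCallError` also comes from that example; everything else is illustrative.

```typescript
// Aggregated error counters built on the onLLMCallError fields documented above.
const errorStats = { total: 0, recovered: 0 };

const errorMetrics = {
  observers: {
    onLLMCallError: async (ctx: { error: unknown; recovered?: boolean }) => {
      errorStats.total += 1;
      if (ctx.recovered) errorStats.recovered += 1;
      console.log(`❌ LLM errors: ${errorStats.total} (recovered: ${errorStats.recovered})`);
    },
  },
};
```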
@@ -4466,48 +4634,131 @@ var HookPresets = class _HookPresets {
  };
  }
  /**
- * Preset: Silent (no output).
+ * Returns empty hook configuration for clean output without any logging.
  *
- * Useful for testing or when you want complete control.
+ * **Output:**
+ * - None. Returns {} (empty object).
+ *
+ * **Use cases:**
+ * - Clean test output without console noise
+ * - Production environments where logging is handled externally
+ * - Baseline for custom hook development
+ * - Temporary disable of all hook output
+ *
+ * **Performance:** Zero overhead. No-op hook configuration.
  *
  * @returns Empty hook configuration
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.silent())
+ * // Clean test output
+ * describe('Agent tests', () => {
+ * it('should calculate correctly', async () => {
+ * const result = await LLMist.createAgent()
+ * .withHooks(HookPresets.silent()) // No console output
+ * .withGadgets(Calculator)
+ * .askAndCollect("What is 15 times 23?");
+ *
+ * expect(result).toContain("345");
+ * });
+ * });
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Conditional silence based on environment
+ * const isTesting = process.env.NODE_ENV === 'test';
+ * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())
  * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}
  */
  static silent() {
  return {};
  }
  /**
- * Merge multiple hook configurations.
+ * Combines multiple hook configurations into one.
+ *
+ * Merge allows you to compose preset and custom hooks for modular monitoring
+ * configurations. Understanding merge behavior is crucial for proper composition.
+ *
+ * **Merge behavior:**
+ * - **Observers:** Composed - all handlers run sequentially in order
+ * - **Interceptors:** Last one wins - only the last interceptor applies
+ * - **Controllers:** Last one wins - only the last controller applies
  *
- * Combines hook presets or custom configurations into a single object.
- * When multiple hooks target the same lifecycle event, they are composed
- * to run sequentially (all handlers will execute).
+ * **Why interceptors/controllers don't compose:**
+ * - Interceptors have different signatures per method, making composition impractical
+ * - Controllers return specific actions that can't be meaningfully combined
+ * - Only observers support composition because they're read-only and independent
  *
- * @param hookSets - Array of hook configurations to merge
- * @returns Merged hook configuration with composed handlers
+ * **Use cases:**
+ * - Combining multiple presets (logging + timing + tokens)
+ * - Adding custom hooks to presets
+ * - Building modular, reusable monitoring configurations
+ * - Environment-specific hook composition
+ *
+ * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.
+ *
+ * @param hookSets - Variable number of hook configurations to merge
+ * @returns Single merged hook configuration with composed/overridden handlers
  *
  * @example
  * ```typescript
+ * // Combine multiple presets
+ * .withHooks(HookPresets.merge(
+ * HookPresets.logging(),
+ * HookPresets.timing(),
+ * HookPresets.tokenTracking()
+ * ))
+ * // All observers from all three presets will run
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Add custom observer to preset (both run)
  * .withHooks(HookPresets.merge(
- * HookPresets.logging({ verbose: true }),
  * HookPresets.timing(),
- * HookPresets.tokenTracking(),
  * {
- * // Custom hook
  * observers: {
  * onLLMCallComplete: async (ctx) => {
- * saveToDatabase(ctx);
- * }
- * }
+ * await saveMetrics({ tokens: ctx.usage?.totalTokens });
+ * },
+ * },
  * }
  * ))
- * // All onLLMCallComplete handlers from logging, timing, tokenTracking,
- * // and the custom hook will execute in order
  * ```
+ *
+ * @example
+ * ```typescript
+ * // Multiple interceptors (last wins!)
+ * .withHooks(HookPresets.merge(
+ * {
+ * interceptors: {
+ * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored
+ * },
+ * },
+ * {
+ * interceptors: {
+ * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins
+ * },
+ * }
+ * ))
+ * // Result: text will be lowercase
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Modular environment-based configuration
+ * const baseHooks = HookPresets.errorLogging();
+ * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));
+ * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());
+ *
+ * const hooks = process.env.NODE_ENV === 'production' ? prodHooks : devHooks;
+ * .withHooks(hooks)
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}
  */
  static merge(...hookSets) {
  const merged = {
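The merge semantics spelled out above (observers composed in order, interceptors and controllers last-one-wins) can be pictured with a small standalone sketch. This illustrates the described behavior only; it is not the package's actual `merge` code, and the `HookSet` type is a simplification.

```typescript
// Minimal sketch of the documented merge semantics, assuming a simplified hook shape.
type AnyFn = (...args: any[]) => any;
type HookSet = {
  observers?: Record<string, AnyFn>;
  interceptors?: Record<string, AnyFn>;
  controllers?: Record<string, AnyFn>;
};

function mergeSketch(...hookSets: HookSet[]): HookSet {
  const merged = {
    observers: {} as Record<string, AnyFn>,
    interceptors: {} as Record<string, AnyFn>,
    controllers: {} as Record<string, AnyFn>,
  };
  for (const set of hookSets) {
    for (const [event, handler] of Object.entries(set.observers ?? {})) {
      const previous = merged.observers[event];
      // Observers compose: run the earlier handler, then this one.
      merged.observers[event] = previous
        ? async (...args: any[]) => { await previous(...args); await handler(...args); }
        : handler;
    }
    Object.assign(merged.interceptors, set.interceptors ?? {}); // last one wins
    Object.assign(merged.controllers, set.controllers ?? {});   // last one wins
  }
  return merged;
}
```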
@@ -4540,18 +4791,62 @@ var HookPresets = class _HookPresets {
  return merged;
  }
  /**
- * Preset: Complete monitoring suite.
+ * Composite preset combining logging, timing, tokenTracking, and errorLogging.
+ *
+ * This is the recommended preset for development and initial production deployments,
+ * providing comprehensive observability with a single method call.
+ *
+ * **Includes:**
+ * - All output from `logging()` preset (with optional verbosity)
+ * - All output from `timing()` preset (execution times)
+ * - All output from `tokenTracking()` preset (token usage)
+ * - All output from `errorLogging()` preset (error details)
+ *
+ * **Output format:**
+ * - Event logging: [LLM]/[GADGET] messages
+ * - Timing: ⏱️ emoji with milliseconds
+ * - Tokens: 📊 emoji with per-call and cumulative counts
+ * - Errors: ❌ emoji with full error details
+ *
+ * **Use cases:**
+ * - Full observability during development
+ * - Comprehensive monitoring in production
+ * - One-liner for complete agent visibility
+ * - Troubleshooting and debugging with full context
+ *
+ * **Performance:** Combined overhead of all four presets, but still minimal in practice.
+ *
+ * @param options - Monitoring options
+ * @param options.verbose - Passed to logging() preset for detailed output. Default: false
+ * @returns Merged hook configuration combining all monitoring presets
  *
- * Combines logging, timing, and token tracking.
+ * @example
+ * ```typescript
+ * // Basic monitoring (recommended for development)
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.monitoring())
+ * .withGadgets(Calculator, Weather)
+ * .ask("What is 15 times 23, and what's the weather in NYC?");
+ * // Output: All events, timing, tokens, and errors in one place
+ * ```
  *
- * @param options - Options for monitoring
- * @returns Merged hook configuration
+ * @example
+ * ```typescript
+ * // Verbose monitoring with full details
+ * await LLMist.createAgent()
+ * .withHooks(HookPresets.monitoring({ verbose: true }))
+ * .ask("Your prompt");
+ * // Output includes: parameters, results, and complete responses
+ * ```
  *
  * @example
  * ```typescript
- * .withHooks(HookPresets.monitoring())
- * .withHooks(HookPresets.monitoring({ verbose: true }))
+ * // Environment-based monitoring
+ * const isDev = process.env.NODE_ENV === 'development';
+ * .withHooks(HookPresets.monitoring({ verbose: isDev }))
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}
  */
  static monitoring(options = {}) {
  return _HookPresets.merge(
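Per the docblock, `monitoring()` is a composite of the four presets, and the trailing context line confirms it delegates to `merge`. A sketch of the equivalent composition follows; the exact preset list and option passing are assumed from the "Includes" section above, so the real method may differ in detail.

```typescript
// Assumed equivalence based on the "Includes" list above; import names are assumptions.
import { LLMist, HookPresets } from "llmist";

const monitoringEquivalent = (options: { verbose?: boolean } = {}) =>
  HookPresets.merge(
    HookPresets.logging({ verbose: options.verbose }),
    HookPresets.timing(),
    HookPresets.tokenTracking(),
    HookPresets.errorLogging(),
  );

await LLMist.createAgent()
  .withHooks(monitoringEquivalent({ verbose: process.env.NODE_ENV === "development" }))
  .ask("What is 15 times 23?");
```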