llmist 1.3.0 → 1.4.0

This diff shows the publicly released contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -1,16 +1,11 @@
  import {
- MockBuilder,
- MockManager,
- MockProviderAdapter,
- createMockAdapter,
- createMockClient,
- createMockStream,
- createTextMockStream,
- getMockManager,
- mockLLM,
- validateAndApplyDefaults,
- validateGadgetParams
- } from "./chunk-TFIKR2RK.js";
+ Gadget,
+ HookPresets,
+ createHints,
+ iterationProgressHint,
+ parallelGadgetHint,
+ z
+ } from "./chunk-UEEESLOA.js";
  import {
  AgentBuilder,
  AnthropicMessagesProvider,
@@ -31,6 +26,9 @@ import {
  LLMMessageBuilder,
  LLMist,
  MODEL_ALIASES,
+ MockBuilder,
+ MockManager,
+ MockProviderAdapter,
  ModelIdentifierParser,
  ModelRegistry,
  OpenAIChatProvider,
@@ -46,978 +44,27 @@ import {
  createGadgetOutputViewer,
  createGeminiProviderFromEnv,
  createLogger,
+ createMockAdapter,
+ createMockClient,
+ createMockStream,
  createOpenAIProviderFromEnv,
+ createTextMockStream,
  defaultLogger,
  discoverProviderAdapters,
+ getMockManager,
  getModelId,
  getProvider,
  hasProviderPrefix,
- init_anthropic,
- init_builder,
- init_client,
- init_config,
- init_conversation_manager,
- init_create_gadget,
- init_discovery,
- init_event_handlers,
- init_exceptions,
- init_executor,
- init_gadget,
- init_gadget_output_store,
- init_gemini,
- init_logger,
- init_manager,
- init_messages,
- init_model_registry,
- init_model_shortcuts,
- init_openai,
- init_options,
- init_output_viewer,
- init_parser,
- init_prompt_config,
- init_quick_methods,
- init_registry,
- init_strategies,
- init_strategy,
- init_stream_processor,
+ mockLLM,
  resolveHintTemplate,
  resolveModel,
  resolvePromptTemplate,
  resolveRulesTemplate,
  runWithHandlers,
- stream
- } from "./chunk-RZTAKIDE.js";
-
- // src/index.ts
- init_builder();
- init_event_handlers();
- import { z } from "zod";
-
- // src/agent/hook-presets.ts
- var HookPresets = class _HookPresets {
- /**
- * Logs LLM calls and gadget execution to console with optional verbosity.
- *
- * **Output (basic mode):**
- * - LLM call start/complete events with iteration numbers
- * - Gadget execution start/complete with gadget names
- * - Token counts when available
- *
- * **Output (verbose mode):**
- * - All basic mode output
- * - Full gadget parameters (formatted JSON)
- * - Full gadget results
- * - Complete LLM response text
- *
- * **Use cases:**
- * - Basic development debugging and execution flow visibility
- * - Understanding agent decision-making and tool usage
- * - Troubleshooting gadget invocations
- *
- * **Performance:** Minimal overhead. Console writes are synchronous but fast.
- *
- * @param options - Logging options
- * @param options.verbose - Include full parameters and results. Default: false
- * @returns Hook configuration that can be passed to .withHooks()
- *
- * @example
- * ```typescript
- * // Basic logging
- * await LLMist.createAgent()
- * .withHooks(HookPresets.logging())
- * .ask("Calculate 15 * 23");
- * // Output: [LLM] Starting call (iteration 0)
- * // [GADGET] Executing Calculator
- * // [GADGET] Completed Calculator
- * // [LLM] Completed (tokens: 245)
- * ```
- *
- * @example
- * ```typescript
- * // Verbose logging with full details
- * await LLMist.createAgent()
- * .withHooks(HookPresets.logging({ verbose: true }))
- * .ask("Calculate 15 * 23");
- * // Output includes: parameters, results, and full responses
- * ```
- *
- * @example
- * ```typescript
- * // Environment-based verbosity
- * const isDev = process.env.NODE_ENV === 'development';
- * .withHooks(HookPresets.logging({ verbose: isDev }))
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}
- */
- static logging(options = {}) {
- return {
- observers: {
- onLLMCallStart: async (ctx) => {
- console.log(`[LLM] Starting call (iteration ${ctx.iteration})`);
- },
- onLLMCallComplete: async (ctx) => {
- const tokens = ctx.usage?.totalTokens ?? "unknown";
- console.log(`[LLM] Completed (tokens: ${tokens})`);
- if (options.verbose && ctx.finalMessage) {
- console.log(`[LLM] Response: ${ctx.finalMessage}`);
- }
- },
- onGadgetExecutionStart: async (ctx) => {
- console.log(`[GADGET] Executing ${ctx.gadgetName}`);
- if (options.verbose) {
- console.log(`[GADGET] Parameters:`, JSON.stringify(ctx.parameters, null, 2));
- }
- },
- onGadgetExecutionComplete: async (ctx) => {
- console.log(`[GADGET] Completed ${ctx.gadgetName}`);
- if (options.verbose) {
- const display = ctx.error ?? ctx.finalResult ?? "(no result)";
- console.log(`[GADGET] Result: ${display}`);
- }
- }
- }
- };
- }
- /**
- * Measures and logs execution time for LLM calls and gadgets.
- *
- * **Output:**
- * - Duration in milliseconds with ⏱️ emoji for each operation
- * - Separate timing for each LLM iteration
- * - Separate timing for each gadget execution
- *
- * **Use cases:**
- * - Performance profiling and optimization
- * - Identifying slow operations (LLM calls vs gadget execution)
- * - Monitoring response times in production
- * - Capacity planning and SLA tracking
- *
- * **Performance:** Negligible overhead. Uses Date.now() for timing measurements.
- *
- * @returns Hook configuration that can be passed to .withHooks()
- *
- * @example
- * ```typescript
- * // Basic timing
- * await LLMist.createAgent()
- * .withHooks(HookPresets.timing())
- * .withGadgets(Weather, Database)
- * .ask("What's the weather in NYC?");
- * // Output: ⏱️ LLM call took 1234ms
- * // ⏱️ Gadget Weather took 567ms
- * // ⏱️ LLM call took 890ms
- * ```
- *
- * @example
- * ```typescript
- * // Combined with logging for full context
- * .withHooks(HookPresets.merge(
- * HookPresets.logging(),
- * HookPresets.timing()
- * ))
- * ```
- *
- * @example
- * ```typescript
- * // Correlate performance with cost
- * .withHooks(HookPresets.merge(
- * HookPresets.timing(),
- * HookPresets.tokenTracking()
- * ))
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}
- */
- static timing() {
- const timings = /* @__PURE__ */ new Map();
- return {
- observers: {
- onLLMCallStart: async (ctx) => {
- timings.set(`llm-${ctx.iteration}`, Date.now());
- },
- onLLMCallComplete: async (ctx) => {
- const start = timings.get(`llm-${ctx.iteration}`);
- if (start) {
- const duration = Date.now() - start;
- console.log(`\u23F1\uFE0F LLM call took ${duration}ms`);
- timings.delete(`llm-${ctx.iteration}`);
- }
- },
- onGadgetExecutionStart: async (ctx) => {
- const key = `gadget-${ctx.gadgetName}-${Date.now()}`;
- timings.set(key, Date.now());
- ctx._timingKey = key;
- },
- onGadgetExecutionComplete: async (ctx) => {
- const key = ctx._timingKey;
- if (key) {
- const start = timings.get(key);
- if (start) {
- const duration = Date.now() - start;
- console.log(`\u23F1\uFE0F Gadget ${ctx.gadgetName} took ${duration}ms`);
- timings.delete(key);
- }
- }
- }
- }
- };
- }
- /**
- * Tracks cumulative token usage across all LLM calls.
- *
- * **Output:**
- * - Per-call token count with 📊 emoji
- * - Cumulative total across all calls
- * - Call count for average calculations
- *
- * **Use cases:**
- * - Cost monitoring and budget tracking
- * - Optimizing prompts to reduce token usage
- * - Comparing token efficiency across different approaches
- * - Real-time cost estimation
- *
- * **Performance:** Minimal overhead. Simple counter increments.
- *
- * **Note:** Token counts depend on the provider's response. Some providers
- * may not include usage data, in which case counts won't be logged.
- *
- * @returns Hook configuration that can be passed to .withHooks()
- *
- * @example
- * ```typescript
- * // Basic token tracking
- * await LLMist.createAgent()
- * .withHooks(HookPresets.tokenTracking())
- * .ask("Summarize this document...");
- * // Output: 📊 Tokens this call: 1,234
- * // 📊 Total tokens: 1,234 (across 1 calls)
- * // 📊 Tokens this call: 567
- * // 📊 Total tokens: 1,801 (across 2 calls)
- * ```
- *
- * @example
- * ```typescript
- * // Cost calculation with custom hook
- * let totalTokens = 0;
- * .withHooks(HookPresets.merge(
- * HookPresets.tokenTracking(),
- * {
- * observers: {
- * onLLMCallComplete: async (ctx) => {
- * totalTokens += ctx.usage?.totalTokens ?? 0;
- * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens
- * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);
- * },
- * },
- * }
- * ))
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
- */
- static tokenTracking() {
- let totalTokens = 0;
- let totalCalls = 0;
- return {
- observers: {
- onLLMCallComplete: async (ctx) => {
- totalCalls++;
- if (ctx.usage?.totalTokens) {
- totalTokens += ctx.usage.totalTokens;
- console.log(`\u{1F4CA} Tokens this call: ${ctx.usage.totalTokens}`);
- console.log(`\u{1F4CA} Total tokens: ${totalTokens} (across ${totalCalls} calls)`);
- }
- }
- }
- };
- }
- /**
- * Tracks comprehensive progress metrics including iterations, tokens, cost, and timing.
- *
- * **This preset showcases llmist's core capabilities by demonstrating:**
- * - Observer pattern for non-intrusive monitoring
- * - Integration with ModelRegistry for cost estimation
- * - Callback-based architecture for flexible UI updates
- * - Provider-agnostic token and cost tracking
- *
- * Unlike `tokenTracking()` which only logs to console, this preset provides
- * structured data through callbacks, making it perfect for building custom UIs,
- * dashboards, or progress indicators (like the llmist CLI).
- *
- * **Output (when logProgress: true):**
- * - Iteration number and call count
- * - Cumulative token usage (input + output)
- * - Cumulative cost in USD (requires modelRegistry)
- * - Elapsed time in seconds
- *
- * **Use cases:**
- * - Building CLI progress indicators with live updates
- * - Creating web dashboards with real-time metrics
- * - Budget monitoring and cost alerts
- * - Performance tracking and optimization
- * - Custom logging to external systems (Datadog, CloudWatch, etc.)
- *
- * **Performance:** Minimal overhead. Uses Date.now() for timing and optional
- * ModelRegistry.estimateCost() which is O(1) lookup. Callback invocation is
- * synchronous and fast.
- *
- * @param options - Progress tracking options
- * @param options.modelRegistry - ModelRegistry for cost estimation (optional)
- * @param options.onProgress - Callback invoked after each LLM call (optional)
- * @param options.logProgress - Log progress to console (default: false)
- * @returns Hook configuration with progress tracking observers
- *
- * @example
- * ```typescript
- * // Basic usage with callback (RECOMMENDED - used by llmist CLI)
- * import { LLMist, HookPresets } from 'llmist';
- *
- * const client = LLMist.create();
- *
- * await client.agent()
- * .withHooks(HookPresets.progressTracking({
- * modelRegistry: client.modelRegistry,
- * onProgress: (stats) => {
- * // Update your UI with stats
- * console.log(`#${stats.currentIteration} | ${stats.totalTokens} tokens | $${stats.totalCost.toFixed(4)}`);
- * }
- * }))
- * .withGadgets(Calculator)
- * .ask("Calculate 15 * 23");
- * // Output: #1 | 245 tokens | $0.0012
- * ```
- *
- * @example
- * ```typescript
- * // Console logging mode (quick debugging)
- * await client.agent()
- * .withHooks(HookPresets.progressTracking({
- * modelRegistry: client.modelRegistry,
- * logProgress: true // Simple console output
- * }))
- * .ask("Your prompt");
- * // Output: 📊 Progress: Iteration #1 | 245 tokens | $0.0012 | 1.2s
- * ```
- *
- * @example
- * ```typescript
- * // Budget monitoring with alerts
- * const BUDGET_USD = 0.10;
- *
- * await client.agent()
- * .withHooks(HookPresets.progressTracking({
- * modelRegistry: client.modelRegistry,
- * onProgress: (stats) => {
- * if (stats.totalCost > BUDGET_USD) {
- * throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);
- * }
- * }
- * }))
- * .ask("Long running task...");
- * ```
- *
- * @example
- * ```typescript
- * // Web dashboard integration
- * let progressBar: HTMLElement;
- *
- * await client.agent()
- * .withHooks(HookPresets.progressTracking({
- * modelRegistry: client.modelRegistry,
- * onProgress: (stats) => {
- * // Update web UI in real-time
- * progressBar.textContent = `Iteration ${stats.currentIteration}`;
- * progressBar.dataset.cost = stats.totalCost.toFixed(4);
- * progressBar.dataset.tokens = stats.totalTokens.toString();
- * }
- * }))
- * .ask("Your prompt");
- * ```
- *
- * @example
- * ```typescript
- * // External logging (Datadog, CloudWatch, etc.)
- * await client.agent()
- * .withHooks(HookPresets.progressTracking({
- * modelRegistry: client.modelRegistry,
- * onProgress: async (stats) => {
- * await metrics.gauge('llm.iteration', stats.currentIteration);
- * await metrics.gauge('llm.cost', stats.totalCost);
- * await metrics.gauge('llm.tokens', stats.totalTokens);
- * }
- * }))
- * .ask("Your prompt");
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsprogresstrackingoptions | Full documentation}
- * @see {@link ProgressTrackingOptions} for detailed options
- * @see {@link ProgressStats} for the callback data structure
- */
- static progressTracking(options) {
- const { modelRegistry, onProgress, logProgress = false } = options ?? {};
- let totalCalls = 0;
- let currentIteration = 0;
- let totalInputTokens = 0;
- let totalOutputTokens = 0;
- let totalCost = 0;
- const startTime = Date.now();
- return {
- observers: {
- // Track iteration on each LLM call start
- onLLMCallStart: async (ctx) => {
- currentIteration++;
- },
- // Accumulate metrics and report progress on each LLM call completion
- onLLMCallComplete: async (ctx) => {
- totalCalls++;
- if (ctx.usage) {
- totalInputTokens += ctx.usage.inputTokens;
- totalOutputTokens += ctx.usage.outputTokens;
- if (modelRegistry) {
- try {
- const modelName = ctx.options.model.includes(":") ? ctx.options.model.split(":")[1] : ctx.options.model;
- const costEstimate = modelRegistry.estimateCost(
- modelName,
- ctx.usage.inputTokens,
- ctx.usage.outputTokens
- );
- if (costEstimate) {
- totalCost += costEstimate.totalCost;
- }
- } catch (error) {
- if (logProgress) {
- console.warn(`\u26A0\uFE0F Cost estimation failed:`, error);
- }
- }
- }
- }
- const stats = {
- currentIteration,
- totalCalls,
- totalInputTokens,
- totalOutputTokens,
- totalTokens: totalInputTokens + totalOutputTokens,
- totalCost,
- elapsedSeconds: Number(((Date.now() - startTime) / 1e3).toFixed(1))
- };
- if (onProgress) {
- onProgress(stats);
- }
- if (logProgress) {
- const formattedTokens = stats.totalTokens >= 1e3 ? `${(stats.totalTokens / 1e3).toFixed(1)}k` : `${stats.totalTokens}`;
- const formattedCost = stats.totalCost > 0 ? `$${stats.totalCost.toFixed(4)}` : "$0";
- console.log(
- `\u{1F4CA} Progress: Iteration #${stats.currentIteration} | ${formattedTokens} tokens | ${formattedCost} | ${stats.elapsedSeconds}s`
- );
- }
- }
- }
- };
- }
- /**
- * Logs detailed error information for debugging and troubleshooting.
- *
- * **Output:**
- * - LLM errors with ❌ emoji, including model and recovery status
- * - Gadget errors with full context (parameters, error message)
- * - Separate logging for LLM and gadget failures
- *
- * **Use cases:**
- * - Troubleshooting production issues
- * - Understanding error patterns and frequency
- * - Debugging error recovery behavior
- * - Collecting error metrics for monitoring
- *
- * **Performance:** Minimal overhead. Only logs when errors occur.
- *
- * @returns Hook configuration that can be passed to .withHooks()
- *
- * @example
- * ```typescript
- * // Basic error logging
- * await LLMist.createAgent()
- * .withHooks(HookPresets.errorLogging())
- * .withGadgets(Database)
- * .ask("Fetch user data");
- * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded
- * // Model: gpt-5-nano
- * // Recovered: true
- * // Output (on gadget error): ❌ Gadget Error: Database
- * // Error: Connection timeout
- * // Parameters: {...}
- * ```
- *
- * @example
- * ```typescript
- * // Combine with monitoring for full context
- * .withHooks(HookPresets.merge(
- * HookPresets.monitoring(), // Includes errorLogging
- * customErrorAnalytics
- * ))
- * ```
- *
- * @example
- * ```typescript
- * // Error analytics collection
- * const errors: any[] = [];
- * .withHooks(HookPresets.merge(
- * HookPresets.errorLogging(),
- * {
- * observers: {
- * onLLMCallError: async (ctx) => {
- * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });
- * },
- * },
- * }
- * ))
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}
- */
- static errorLogging() {
- return {
- observers: {
- onLLMCallError: async (ctx) => {
- console.error(`\u274C LLM Error (iteration ${ctx.iteration}):`, ctx.error.message);
- console.error(` Model: ${ctx.options.model}`);
- console.error(` Recovered: ${ctx.recovered}`);
- },
- onGadgetExecutionComplete: async (ctx) => {
- if (ctx.error) {
- console.error(`\u274C Gadget Error: ${ctx.gadgetName}`);
- console.error(` Error: ${ctx.error}`);
- console.error(` Parameters:`, JSON.stringify(ctx.parameters, null, 2));
- }
- }
- }
- };
- }
- /**
- * Tracks context compaction events.
- *
- * **Output:**
- * - Compaction events with 🗜️ emoji
- * - Strategy name, tokens before/after, and savings
- * - Cumulative statistics
- *
- * **Use cases:**
- * - Monitoring long-running conversations
- * - Understanding when and how compaction occurs
- * - Debugging context management issues
- *
- * **Performance:** Minimal overhead. Simple console output.
- *
- * @returns Hook configuration that can be passed to .withHooks()
- *
- * @example
- * ```typescript
- * await LLMist.createAgent()
- * .withHooks(HookPresets.compactionTracking())
- * .ask("Your prompt");
- * ```
- */
- static compactionTracking() {
- return {
- observers: {
- onCompaction: async (ctx) => {
- const saved = ctx.event.tokensBefore - ctx.event.tokensAfter;
- const percent = (saved / ctx.event.tokensBefore * 100).toFixed(1);
- console.log(
- `\u{1F5DC}\uFE0F Compaction (${ctx.event.strategy}): ${ctx.event.tokensBefore} \u2192 ${ctx.event.tokensAfter} tokens (saved ${saved}, ${percent}%)`
- );
- console.log(
- ` Messages: ${ctx.event.messagesBefore} \u2192 ${ctx.event.messagesAfter}`
- );
- if (ctx.stats.totalCompactions > 1) {
- console.log(
- ` Cumulative: ${ctx.stats.totalCompactions} compactions, ${ctx.stats.totalTokensSaved} tokens saved`
- );
- }
- }
- }
- };
- }
- /**
- * Returns empty hook configuration for clean output without any logging.
- *
- * **Output:**
- * - None. Returns {} (empty object).
- *
- * **Use cases:**
- * - Clean test output without console noise
- * - Production environments where logging is handled externally
- * - Baseline for custom hook development
- * - Temporary disable of all hook output
- *
- * **Performance:** Zero overhead. No-op hook configuration.
- *
- * @returns Empty hook configuration
- *
- * @example
- * ```typescript
- * // Clean test output
- * describe('Agent tests', () => {
- * it('should calculate correctly', async () => {
- * const result = await LLMist.createAgent()
- * .withHooks(HookPresets.silent()) // No console output
- * .withGadgets(Calculator)
- * .askAndCollect("What is 15 times 23?");
- *
- * expect(result).toContain("345");
- * });
- * });
- * ```
- *
- * @example
- * ```typescript
- * // Conditional silence based on environment
- * const isTesting = process.env.NODE_ENV === 'test';
- * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}
- */
- static silent() {
- return {};
- }
- /**
- * Combines multiple hook configurations into one.
- *
- * Merge allows you to compose preset and custom hooks for modular monitoring
- * configurations. Understanding merge behavior is crucial for proper composition.
- *
- * **Merge behavior:**
- * - **Observers:** Composed - all handlers run sequentially in order
- * - **Interceptors:** Last one wins - only the last interceptor applies
- * - **Controllers:** Last one wins - only the last controller applies
- *
- * **Why interceptors/controllers don't compose:**
- * - Interceptors have different signatures per method, making composition impractical
- * - Controllers return specific actions that can't be meaningfully combined
- * - Only observers support composition because they're read-only and independent
- *
- * **Use cases:**
- * - Combining multiple presets (logging + timing + tokens)
- * - Adding custom hooks to presets
- * - Building modular, reusable monitoring configurations
- * - Environment-specific hook composition
- *
- * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.
- *
- * @param hookSets - Variable number of hook configurations to merge
- * @returns Single merged hook configuration with composed/overridden handlers
- *
- * @example
- * ```typescript
- * // Combine multiple presets
- * .withHooks(HookPresets.merge(
- * HookPresets.logging(),
- * HookPresets.timing(),
- * HookPresets.tokenTracking()
- * ))
- * // All observers from all three presets will run
- * ```
- *
- * @example
- * ```typescript
- * // Add custom observer to preset (both run)
- * .withHooks(HookPresets.merge(
- * HookPresets.timing(),
- * {
- * observers: {
- * onLLMCallComplete: async (ctx) => {
- * await saveMetrics({ tokens: ctx.usage?.totalTokens });
- * },
- * },
- * }
- * ))
- * ```
- *
- * @example
- * ```typescript
- * // Multiple interceptors (last wins!)
- * .withHooks(HookPresets.merge(
- * {
- * interceptors: {
- * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored
- * },
- * },
- * {
- * interceptors: {
- * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins
- * },
- * }
- * ))
- * // Result: text will be lowercase
- * ```
- *
- * @example
- * ```typescript
- * // Modular environment-based configuration
- * const baseHooks = HookPresets.errorLogging();
- * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));
- * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());
- *
- * const hooks = process.env.NODE_ENV === 'production' ? prodHooks : devHooks;
- * .withHooks(hooks)
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}
- */
- static merge(...hookSets) {
- const merged = {
- observers: {},
- interceptors: {},
- controllers: {}
- };
- for (const hooks of hookSets) {
- if (hooks.observers) {
- for (const [key, handler] of Object.entries(hooks.observers)) {
- const typedKey = key;
- if (merged.observers[typedKey]) {
- const existing = merged.observers[typedKey];
- merged.observers[typedKey] = async (ctx) => {
- await existing(ctx);
- await handler(ctx);
- };
- } else {
- merged.observers[typedKey] = handler;
- }
- }
- }
- if (hooks.interceptors) {
- Object.assign(merged.interceptors, hooks.interceptors);
- }
- if (hooks.controllers) {
- Object.assign(merged.controllers, hooks.controllers);
- }
- }
- return merged;
- }
- /**
- * Composite preset combining logging, timing, tokenTracking, and errorLogging.
- *
- * This is the recommended preset for development and initial production deployments,
- * providing comprehensive observability with a single method call.
- *
- * **Includes:**
- * - All output from `logging()` preset (with optional verbosity)
- * - All output from `timing()` preset (execution times)
- * - All output from `tokenTracking()` preset (token usage)
- * - All output from `errorLogging()` preset (error details)
- *
- * **Output format:**
- * - Event logging: [LLM]/[GADGET] messages
- * - Timing: ⏱️ emoji with milliseconds
- * - Tokens: 📊 emoji with per-call and cumulative counts
- * - Errors: ❌ emoji with full error details
- *
- * **Use cases:**
- * - Full observability during development
- * - Comprehensive monitoring in production
- * - One-liner for complete agent visibility
- * - Troubleshooting and debugging with full context
- *
- * **Performance:** Combined overhead of all four presets, but still minimal in practice.
- *
- * @param options - Monitoring options
- * @param options.verbose - Passed to logging() preset for detailed output. Default: false
- * @returns Merged hook configuration combining all monitoring presets
- *
- * @example
- * ```typescript
- * // Basic monitoring (recommended for development)
- * await LLMist.createAgent()
- * .withHooks(HookPresets.monitoring())
- * .withGadgets(Calculator, Weather)
- * .ask("What is 15 times 23, and what's the weather in NYC?");
- * // Output: All events, timing, tokens, and errors in one place
- * ```
- *
- * @example
- * ```typescript
- * // Verbose monitoring with full details
- * await LLMist.createAgent()
- * .withHooks(HookPresets.monitoring({ verbose: true }))
- * .ask("Your prompt");
- * // Output includes: parameters, results, and complete responses
- * ```
- *
- * @example
- * ```typescript
- * // Environment-based monitoring
- * const isDev = process.env.NODE_ENV === 'development';
- * .withHooks(HookPresets.monitoring({ verbose: isDev }))
- * ```
- *
- * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}
- */
- static monitoring(options = {}) {
- return _HookPresets.merge(
- _HookPresets.logging(options),
- _HookPresets.timing(),
- _HookPresets.tokenTracking(),
- _HookPresets.errorLogging()
- );
- }
- };
-
- // src/agent/index.ts
- init_conversation_manager();
- init_stream_processor();
- init_gadget_output_store();
-
- // src/agent/compaction/index.ts
- init_config();
- init_strategy();
- init_strategies();
- init_manager();
-
- // src/agent/hints.ts
- init_prompt_config();
- function iterationProgressHint(options) {
- const { timing = "always", showUrgency = true, template } = options ?? {};
- return {
- controllers: {
- beforeLLMCall: async (ctx) => {
- const iteration = ctx.iteration + 1;
- const maxIterations = ctx.maxIterations;
- const progress = iteration / maxIterations;
- if (timing === "late" && progress < 0.5) {
- return { action: "proceed" };
- }
- if (timing === "urgent" && progress < 0.8) {
- return { action: "proceed" };
- }
- const remaining = maxIterations - iteration;
- const hintContext = {
- iteration,
- maxIterations,
- remaining
- };
- let hint = resolveHintTemplate(
- template,
- DEFAULT_HINTS.iterationProgressHint,
- hintContext
- );
- if (showUrgency && progress >= 0.8) {
- hint += " \u26A0\uFE0F Running low on iterations - focus on completing the task.";
- }
- const messages = [...ctx.options.messages];
- let lastUserIndex = -1;
- for (let i = messages.length - 1; i >= 0; i--) {
- if (messages[i].role === "user") {
- lastUserIndex = i;
- break;
- }
- }
- if (lastUserIndex >= 0) {
- messages.splice(lastUserIndex + 1, 0, {
- role: "user",
- content: `[System Hint] ${hint}`
- });
- } else {
- messages.push({
- role: "user",
- content: `[System Hint] ${hint}`
- });
- }
- return {
- action: "proceed",
- modifiedOptions: { messages }
- };
- }
- }
- };
- }
- function parallelGadgetHint(options) {
- const {
- minGadgetsForEfficiency = 2,
- message = DEFAULT_HINTS.parallelGadgetsHint,
- enabled = true
- } = options ?? {};
- return {
- controllers: {
- afterLLMCall: async (ctx) => {
- if (!enabled) {
- return { action: "continue" };
- }
- if (ctx.gadgetCallCount > 0 && ctx.gadgetCallCount < minGadgetsForEfficiency) {
- return {
- action: "append_messages",
- messages: [
- {
- role: "user",
- content: `[System Hint] ${message}`
- }
- ]
- };
- }
- return { action: "continue" };
- }
- }
- };
- }
- function createHints(config) {
- const hooksToMerge = [];
- if (config.iterationProgress) {
- const options = typeof config.iterationProgress === "boolean" ? {} : config.iterationProgress;
- hooksToMerge.push(iterationProgressHint(options));
- }
- if (config.parallelGadgets) {
- const options = typeof config.parallelGadgets === "boolean" ? {} : config.parallelGadgets;
- hooksToMerge.push(parallelGadgetHint(options));
- }
- if (config.custom) {
- hooksToMerge.push(...config.custom);
- }
- return HookPresets.merge(...hooksToMerge);
- }
-
- // src/index.ts
- init_client();
- init_messages();
- init_model_registry();
- init_model_shortcuts();
- init_options();
- init_prompt_config();
- init_quick_methods();
- init_create_gadget();
- init_output_viewer();
- init_exceptions();
- init_executor();
- init_gadget();
- init_parser();
- init_registry();
-
- // src/gadgets/typed-gadget.ts
- init_gadget();
- function Gadget(config) {
- class GadgetBase extends BaseGadget {
- description = config.description;
- parameterSchema = config.schema;
- name = config.name;
- timeoutMs = config.timeoutMs;
- examples = config.examples;
- /**
- * Type helper property for accessing inferred parameter type.
- * This is used in the execute method signature: `execute(params: this['params'])`
- *
- * Note: This is just for type inference - the actual params in execute()
- * will be Record<string, unknown> which you can safely cast to this['params']
- */
- params;
- }
- return GadgetBase;
- }
-
- // src/index.ts
- init_logger();
- init_anthropic();
- init_discovery();
- init_gemini();
- init_openai();
+ stream,
+ validateAndApplyDefaults,
+ validateGadgetParams
+ } from "./chunk-VGZCFUPX.js";
  export {
  AgentBuilder,
  AnthropicMessagesProvider,