llmist 0.3.1 → 0.4.0

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, o as LLMStreamChunk, B as BaseGadget, p as ParsedGadgetCall, q as GadgetExecutionResult, r as ProviderAdapter, s as ModelDescriptor, t as ModelSpec, u as LLMGenerationOptions, v as LLMStream } from './mock-stream-C2sBQlvc.cjs';
- export { C as AfterGadgetExecutionAction, D as AfterGadgetExecutionControllerContext, F as AfterLLMCallAction, I as AfterLLMCallControllerContext, J as AfterLLMErrorAction, w as AgentBuilder, K as AgentOptions, N as BeforeGadgetExecutionAction, O as BeforeLLMCallAction, Q as ChunkInterceptorContext, R as Controllers, a9 as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, T as GadgetExecutionControllerContext, as as GadgetOrClass, U as GadgetParameterInterceptorContext, V as GadgetResultInterceptorContext, H as HistoryMessage, W as Interceptors, X as LLMCallControllerContext, Y as LLMErrorControllerContext, a8 as LLMMessageBuilder, a7 as LLMRole, a6 as LLMist, a5 as LLMistOptions, Z as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, aa as ModelFeatures, ag as ModelIdentifierParser, ab as ModelLimits, ac as ModelPricing, ad as ModelRegistry, _ as ObserveChunkContext, $ as ObserveGadgetCompleteContext, a0 as ObserveGadgetStartContext, a1 as ObserveLLMCallContext, a2 as ObserveLLMCompleteContext, a3 as ObserveLLMErrorContext, a4 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, x as collectEvents, y as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, z as runWithHandlers, ap as stream } from './mock-stream-C2sBQlvc.cjs';
+ import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, B as BaseGadget, q as ParsedGadgetCall, r as GadgetExecutionResult, s as ProviderAdapter, t as ModelDescriptor, u as ModelSpec, v as LLMGenerationOptions, w as LLMStream } from './mock-stream-C0vOqI3L.cjs';
+ export { D as AfterGadgetExecutionAction, F as AfterGadgetExecutionControllerContext, I as AfterLLMCallAction, J as AfterLLMCallControllerContext, K as AfterLLMErrorAction, x as AgentBuilder, N as AgentOptions, O as BeforeGadgetExecutionAction, Q as BeforeLLMCallAction, R as ChunkInterceptorContext, T as Controllers, aa as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, U as GadgetExecutionControllerContext, as as GadgetOrClass, V as GadgetParameterInterceptorContext, W as GadgetResultInterceptorContext, H as HistoryMessage, X as Interceptors, Y as LLMCallControllerContext, Z as LLMErrorControllerContext, a9 as LLMMessageBuilder, a8 as LLMRole, a7 as LLMist, a6 as LLMistOptions, _ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ab as ModelFeatures, ag as ModelIdentifierParser, ac as ModelLimits, ad as ModelPricing, $ as ObserveChunkContext, a0 as ObserveGadgetCompleteContext, a1 as ObserveGadgetStartContext, a2 as ObserveLLMCallContext, a3 as ObserveLLMCompleteContext, a4 as ObserveLLMErrorContext, a5 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, y as collectEvents, z as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, C as runWithHandlers, ap as stream } from './mock-stream-C0vOqI3L.cjs';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -18,6 +18,7 @@ import { ChatCompletionChunk } from 'openai/resources/chat/completions';
  * - **logging(options?)** - Log LLM calls and gadget execution
  * - **timing()** - Measure execution time for operations
  * - **tokenTracking()** - Track cumulative token usage and costs
+ * - **progressTracking(options?)** - Track progress with iterations, tokens, cost, and timing (SHOWCASE)
  * - **errorLogging()** - Log detailed error information
  * - **silent()** - No output (useful for testing)
  * - **monitoring(options?)** - All-in-one preset combining logging, timing, tokens, and errors
@@ -67,6 +68,89 @@ interface LoggingOptions {
  /** Include verbose details like parameters and results */
  verbose?: boolean;
  }
+ /**
+ * Progress statistics reported by progressTracking preset.
+ *
+ * Contains cumulative metrics across all LLM calls in the agent session,
+ * useful for building progress UI, cost monitoring, and performance tracking.
+ */
+ interface ProgressStats {
+ /** Current iteration number (increments on each LLM call start) */
+ currentIteration: number;
+ /** Total number of completed LLM calls */
+ totalCalls: number;
+ /** Cumulative input tokens across all calls */
+ totalInputTokens: number;
+ /** Cumulative output tokens across all calls */
+ totalOutputTokens: number;
+ /** Total tokens (input + output) */
+ totalTokens: number;
+ /** Cumulative cost in USD (requires modelRegistry) */
+ totalCost: number;
+ /** Elapsed time in seconds since first call */
+ elapsedSeconds: number;
+ }
+ /**
+ * Options for progressTracking preset.
+ *
+ * Controls how progress data is tracked and reported during agent execution.
+ */
+ interface ProgressTrackingOptions {
+ /**
+ * Model registry for cost calculation.
+ *
+ * If provided, enables automatic cost estimation based on token usage
+ * and model pricing data. Without it, totalCost will always be 0.
+ *
+ * @example
+ * ```typescript
+ * import { LLMist, HookPresets } from 'llmist';
+ *
+ * const client = LLMist.create();
+ * const hooks = HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry // Enable cost tracking
+ * });
+ * ```
+ */
+ modelRegistry?: ModelRegistry;
+ /**
+ * Callback invoked after each LLM call completion with cumulative stats.
+ *
+ * Use this to update progress UI, log metrics, or track budgets in real-time.
+ *
+ * @example
+ * ```typescript
+ * HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * console.log(`Iteration #${stats.currentIteration}`);
+ * console.log(`Cost so far: $${stats.totalCost.toFixed(4)}`);
+ * console.log(`Elapsed: ${stats.elapsedSeconds}s`);
+ * }
+ * })
+ * ```
+ */
+ onProgress?: (stats: ProgressStats) => void;
+ /**
+ * Whether to log progress to console after each LLM call.
+ *
+ * When enabled, prints a summary line with tokens, cost, and elapsed time.
+ * Useful for quick debugging without implementing a custom callback.
+ *
+ * Default: false
+ *
+ * @example
+ * ```typescript
+ * // Quick console-based progress tracking
+ * HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * logProgress: true // Log to console
+ * })
+ * // Output: 📊 Progress: Iteration #2 | 1,234 tokens | $0.0056 | 12.3s
+ * ```
+ */
+ logProgress?: boolean;
+ }
  /**
  * Common hook presets.
  */
@@ -232,6 +316,129 @@ declare class HookPresets {
  * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
  */
  static tokenTracking(): AgentHooks;
+ /**
+ * Tracks comprehensive progress metrics including iterations, tokens, cost, and timing.
+ *
+ * **This preset showcases llmist's core capabilities by demonstrating:**
+ * - Observer pattern for non-intrusive monitoring
+ * - Integration with ModelRegistry for cost estimation
+ * - Callback-based architecture for flexible UI updates
+ * - Provider-agnostic token and cost tracking
+ *
+ * Unlike `tokenTracking()` which only logs to console, this preset provides
+ * structured data through callbacks, making it perfect for building custom UIs,
+ * dashboards, or progress indicators (like the llmist CLI).
+ *
+ * **Output (when logProgress: true):**
+ * - Iteration number and call count
+ * - Cumulative token usage (input + output)
+ * - Cumulative cost in USD (requires modelRegistry)
+ * - Elapsed time in seconds
+ *
+ * **Use cases:**
+ * - Building CLI progress indicators with live updates
+ * - Creating web dashboards with real-time metrics
+ * - Budget monitoring and cost alerts
+ * - Performance tracking and optimization
+ * - Custom logging to external systems (Datadog, CloudWatch, etc.)
+ *
+ * **Performance:** Minimal overhead. Uses Date.now() for timing and optional
+ * ModelRegistry.estimateCost() which is O(1) lookup. Callback invocation is
+ * synchronous and fast.
+ *
+ * @param options - Progress tracking options
+ * @param options.modelRegistry - ModelRegistry for cost estimation (optional)
+ * @param options.onProgress - Callback invoked after each LLM call (optional)
+ * @param options.logProgress - Log progress to console (default: false)
+ * @returns Hook configuration with progress tracking observers
+ *
+ * @example
+ * ```typescript
+ * // Basic usage with callback (RECOMMENDED - used by llmist CLI)
+ * import { LLMist, HookPresets } from 'llmist';
+ *
+ * const client = LLMist.create();
+ *
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * // Update your UI with stats
+ * console.log(`#${stats.currentIteration} | ${stats.totalTokens} tokens | $${stats.totalCost.toFixed(4)}`);
+ * }
+ * }))
+ * .withGadgets(Calculator)
+ * .ask("Calculate 15 * 23");
+ * // Output: #1 | 245 tokens | $0.0012
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Console logging mode (quick debugging)
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * logProgress: true // Simple console output
+ * }))
+ * .ask("Your prompt");
+ * // Output: 📊 Progress: Iteration #1 | 245 tokens | $0.0012 | 1.2s
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Budget monitoring with alerts
+ * const BUDGET_USD = 0.10;
+ *
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * if (stats.totalCost > BUDGET_USD) {
+ * throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);
+ * }
+ * }
+ * }))
+ * .ask("Long running task...");
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Web dashboard integration
+ * let progressBar: HTMLElement;
+ *
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * // Update web UI in real-time
+ * progressBar.textContent = `Iteration ${stats.currentIteration}`;
+ * progressBar.dataset.cost = stats.totalCost.toFixed(4);
+ * progressBar.dataset.tokens = stats.totalTokens.toString();
+ * }
+ * }))
+ * .ask("Your prompt");
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // External logging (Datadog, CloudWatch, etc.)
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: async (stats) => {
+ * await metrics.gauge('llm.iteration', stats.currentIteration);
+ * await metrics.gauge('llm.cost', stats.totalCost);
+ * await metrics.gauge('llm.tokens', stats.totalTokens);
+ * }
+ * }))
+ * .ask("Your prompt");
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsprogresstrackingoptions | Full documentation}
+ * @see {@link ProgressTrackingOptions} for detailed options
+ * @see {@link ProgressStats} for the callback data structure
+ */
+ static progressTracking(options?: ProgressTrackingOptions): AgentHooks;
  /**
  * Logs detailed error information for debugging and troubleshooting.
  *
@@ -1424,4 +1631,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;

- export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+ export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
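Putting the new declarations together: the sketch below wires the 0.4.0 `progressTracking` preset into an agent run, following the fluent API shown in the TSDoc above. It is a minimal consumer-side sketch, not library code — it assumes `LLMist.create()` can resolve provider credentials from the environment, and the carriage-return redraw is just one way to render a single status line.

```typescript
import { LLMist, HookPresets } from 'llmist';

const client = LLMist.create();

await client
  .agent()
  .withHooks(
    HookPresets.progressTracking({
      // Without modelRegistry, stats.totalCost stays 0 (per the docs above).
      modelRegistry: client.modelRegistry,
      onProgress: (stats) => {
        // Redraw one status line per completed LLM call.
        process.stdout.write(
          `\r#${stats.currentIteration} | ${stats.totalTokens} tokens | ` +
            `$${stats.totalCost.toFixed(4)} | ${stats.elapsedSeconds}s`,
        );
      },
    }),
  )
  .ask('Your prompt');
process.stdout.write('\n');
```

Note that `stats` is fully inferred from the `onProgress` signature, so this compiles even though `ProgressStats` itself does not appear in the package's top-level export list in this diff.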
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, o as LLMStreamChunk, B as BaseGadget, p as ParsedGadgetCall, q as GadgetExecutionResult, r as ProviderAdapter, s as ModelDescriptor, t as ModelSpec, u as LLMGenerationOptions, v as LLMStream } from './mock-stream-C2sBQlvc.js';
- export { C as AfterGadgetExecutionAction, D as AfterGadgetExecutionControllerContext, F as AfterLLMCallAction, I as AfterLLMCallControllerContext, J as AfterLLMErrorAction, w as AgentBuilder, K as AgentOptions, N as BeforeGadgetExecutionAction, O as BeforeLLMCallAction, Q as ChunkInterceptorContext, R as Controllers, a9 as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, T as GadgetExecutionControllerContext, as as GadgetOrClass, U as GadgetParameterInterceptorContext, V as GadgetResultInterceptorContext, H as HistoryMessage, W as Interceptors, X as LLMCallControllerContext, Y as LLMErrorControllerContext, a8 as LLMMessageBuilder, a7 as LLMRole, a6 as LLMist, a5 as LLMistOptions, Z as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, aa as ModelFeatures, ag as ModelIdentifierParser, ab as ModelLimits, ac as ModelPricing, ad as ModelRegistry, _ as ObserveChunkContext, $ as ObserveGadgetCompleteContext, a0 as ObserveGadgetStartContext, a1 as ObserveLLMCallContext, a2 as ObserveLLMCompleteContext, a3 as ObserveLLMErrorContext, a4 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, x as collectEvents, y as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, z as runWithHandlers, ap as stream } from './mock-stream-C2sBQlvc.js';
+ import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, B as BaseGadget, q as ParsedGadgetCall, r as GadgetExecutionResult, s as ProviderAdapter, t as ModelDescriptor, u as ModelSpec, v as LLMGenerationOptions, w as LLMStream } from './mock-stream-C0vOqI3L.js';
+ export { D as AfterGadgetExecutionAction, F as AfterGadgetExecutionControllerContext, I as AfterLLMCallAction, J as AfterLLMCallControllerContext, K as AfterLLMErrorAction, x as AgentBuilder, N as AgentOptions, O as BeforeGadgetExecutionAction, Q as BeforeLLMCallAction, R as ChunkInterceptorContext, T as Controllers, aa as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, U as GadgetExecutionControllerContext, as as GadgetOrClass, V as GadgetParameterInterceptorContext, W as GadgetResultInterceptorContext, H as HistoryMessage, X as Interceptors, Y as LLMCallControllerContext, Z as LLMErrorControllerContext, a9 as LLMMessageBuilder, a8 as LLMRole, a7 as LLMist, a6 as LLMistOptions, _ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ab as ModelFeatures, ag as ModelIdentifierParser, ac as ModelLimits, ad as ModelPricing, $ as ObserveChunkContext, a0 as ObserveGadgetCompleteContext, a1 as ObserveGadgetStartContext, a2 as ObserveLLMCallContext, a3 as ObserveLLMCompleteContext, a4 as ObserveLLMErrorContext, a5 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, y as collectEvents, z as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, C as runWithHandlers, ap as stream } from './mock-stream-C0vOqI3L.js';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -18,6 +18,7 @@ import { ChatCompletionChunk } from 'openai/resources/chat/completions';
  * - **logging(options?)** - Log LLM calls and gadget execution
  * - **timing()** - Measure execution time for operations
  * - **tokenTracking()** - Track cumulative token usage and costs
+ * - **progressTracking(options?)** - Track progress with iterations, tokens, cost, and timing (SHOWCASE)
  * - **errorLogging()** - Log detailed error information
  * - **silent()** - No output (useful for testing)
  * - **monitoring(options?)** - All-in-one preset combining logging, timing, tokens, and errors
@@ -67,6 +68,89 @@ interface LoggingOptions {
  /** Include verbose details like parameters and results */
  verbose?: boolean;
  }
+ /**
+ * Progress statistics reported by progressTracking preset.
+ *
+ * Contains cumulative metrics across all LLM calls in the agent session,
+ * useful for building progress UI, cost monitoring, and performance tracking.
+ */
+ interface ProgressStats {
+ /** Current iteration number (increments on each LLM call start) */
+ currentIteration: number;
+ /** Total number of completed LLM calls */
+ totalCalls: number;
+ /** Cumulative input tokens across all calls */
+ totalInputTokens: number;
+ /** Cumulative output tokens across all calls */
+ totalOutputTokens: number;
+ /** Total tokens (input + output) */
+ totalTokens: number;
+ /** Cumulative cost in USD (requires modelRegistry) */
+ totalCost: number;
+ /** Elapsed time in seconds since first call */
+ elapsedSeconds: number;
+ }
+ /**
+ * Options for progressTracking preset.
+ *
+ * Controls how progress data is tracked and reported during agent execution.
+ */
+ interface ProgressTrackingOptions {
+ /**
+ * Model registry for cost calculation.
+ *
+ * If provided, enables automatic cost estimation based on token usage
+ * and model pricing data. Without it, totalCost will always be 0.
+ *
+ * @example
+ * ```typescript
+ * import { LLMist, HookPresets } from 'llmist';
+ *
+ * const client = LLMist.create();
+ * const hooks = HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry // Enable cost tracking
+ * });
+ * ```
+ */
+ modelRegistry?: ModelRegistry;
+ /**
+ * Callback invoked after each LLM call completion with cumulative stats.
+ *
+ * Use this to update progress UI, log metrics, or track budgets in real-time.
+ *
+ * @example
+ * ```typescript
+ * HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * console.log(`Iteration #${stats.currentIteration}`);
+ * console.log(`Cost so far: $${stats.totalCost.toFixed(4)}`);
+ * console.log(`Elapsed: ${stats.elapsedSeconds}s`);
+ * }
+ * })
+ * ```
+ */
+ onProgress?: (stats: ProgressStats) => void;
+ /**
+ * Whether to log progress to console after each LLM call.
+ *
+ * When enabled, prints a summary line with tokens, cost, and elapsed time.
+ * Useful for quick debugging without implementing a custom callback.
+ *
+ * Default: false
+ *
+ * @example
+ * ```typescript
+ * // Quick console-based progress tracking
+ * HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * logProgress: true // Log to console
+ * })
+ * // Output: 📊 Progress: Iteration #2 | 1,234 tokens | $0.0056 | 12.3s
+ * ```
+ */
+ logProgress?: boolean;
+ }
  /**
  * Common hook presets.
  */
@@ -232,6 +316,129 @@ declare class HookPresets {
  * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}
  */
  static tokenTracking(): AgentHooks;
+ /**
+ * Tracks comprehensive progress metrics including iterations, tokens, cost, and timing.
+ *
+ * **This preset showcases llmist's core capabilities by demonstrating:**
+ * - Observer pattern for non-intrusive monitoring
+ * - Integration with ModelRegistry for cost estimation
+ * - Callback-based architecture for flexible UI updates
+ * - Provider-agnostic token and cost tracking
+ *
+ * Unlike `tokenTracking()` which only logs to console, this preset provides
+ * structured data through callbacks, making it perfect for building custom UIs,
+ * dashboards, or progress indicators (like the llmist CLI).
+ *
+ * **Output (when logProgress: true):**
+ * - Iteration number and call count
+ * - Cumulative token usage (input + output)
+ * - Cumulative cost in USD (requires modelRegistry)
+ * - Elapsed time in seconds
+ *
+ * **Use cases:**
+ * - Building CLI progress indicators with live updates
+ * - Creating web dashboards with real-time metrics
+ * - Budget monitoring and cost alerts
+ * - Performance tracking and optimization
+ * - Custom logging to external systems (Datadog, CloudWatch, etc.)
+ *
+ * **Performance:** Minimal overhead. Uses Date.now() for timing and optional
+ * ModelRegistry.estimateCost() which is O(1) lookup. Callback invocation is
+ * synchronous and fast.
+ *
+ * @param options - Progress tracking options
+ * @param options.modelRegistry - ModelRegistry for cost estimation (optional)
+ * @param options.onProgress - Callback invoked after each LLM call (optional)
+ * @param options.logProgress - Log progress to console (default: false)
+ * @returns Hook configuration with progress tracking observers
+ *
+ * @example
+ * ```typescript
+ * // Basic usage with callback (RECOMMENDED - used by llmist CLI)
+ * import { LLMist, HookPresets } from 'llmist';
+ *
+ * const client = LLMist.create();
+ *
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * // Update your UI with stats
+ * console.log(`#${stats.currentIteration} | ${stats.totalTokens} tokens | $${stats.totalCost.toFixed(4)}`);
+ * }
+ * }))
+ * .withGadgets(Calculator)
+ * .ask("Calculate 15 * 23");
+ * // Output: #1 | 245 tokens | $0.0012
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Console logging mode (quick debugging)
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * logProgress: true // Simple console output
+ * }))
+ * .ask("Your prompt");
+ * // Output: 📊 Progress: Iteration #1 | 245 tokens | $0.0012 | 1.2s
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Budget monitoring with alerts
+ * const BUDGET_USD = 0.10;
+ *
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * if (stats.totalCost > BUDGET_USD) {
+ * throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);
+ * }
+ * }
+ * }))
+ * .ask("Long running task...");
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // Web dashboard integration
+ * let progressBar: HTMLElement;
+ *
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: (stats) => {
+ * // Update web UI in real-time
+ * progressBar.textContent = `Iteration ${stats.currentIteration}`;
+ * progressBar.dataset.cost = stats.totalCost.toFixed(4);
+ * progressBar.dataset.tokens = stats.totalTokens.toString();
+ * }
+ * }))
+ * .ask("Your prompt");
+ * ```
+ *
+ * @example
+ * ```typescript
+ * // External logging (Datadog, CloudWatch, etc.)
+ * await client.agent()
+ * .withHooks(HookPresets.progressTracking({
+ * modelRegistry: client.modelRegistry,
+ * onProgress: async (stats) => {
+ * await metrics.gauge('llm.iteration', stats.currentIteration);
+ * await metrics.gauge('llm.cost', stats.totalCost);
+ * await metrics.gauge('llm.tokens', stats.totalTokens);
+ * }
+ * }))
+ * .ask("Your prompt");
+ * ```
+ *
+ * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsprogresstrackingoptions | Full documentation}
+ * @see {@link ProgressTrackingOptions} for detailed options
+ * @see {@link ProgressStats} for the callback data structure
+ */
+ static progressTracking(options?: ProgressTrackingOptions): AgentHooks;
  /**
  * Logs detailed error information for debugging and troubleshooting.
  *
@@ -1424,4 +1631,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;

- export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+ export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
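A side effect of the re-chunking above is that `ModelRegistry` joins the top-level export list (previously it was only reachable through the bundled chunk's `ad` alias). That makes registry-typed signatures expressible by name in downstream code. A hedged sketch, built from the budget example in the TSDoc — `budgetGuard` is a hypothetical helper name, not part of the package:

```typescript
import { AgentHooks, HookPresets, LLMist, ModelRegistry } from 'llmist';

// Hypothetical helper: the parameter can now be annotated as ModelRegistry
// directly, since 0.4.0 exports the name from the package root.
function budgetGuard(registry: ModelRegistry, budgetUsd: number): AgentHooks {
  return HookPresets.progressTracking({
    modelRegistry: registry,
    onProgress: (stats) => {
      // Abort the run once the cumulative estimated cost crosses the budget.
      if (stats.totalCost > budgetUsd) {
        throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);
      }
    },
  });
}

const client = LLMist.create();
await client
  .agent()
  .withHooks(budgetGuard(client.modelRegistry, 0.1))
  .ask('Long running task...');
```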