llmist 0.4.1 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, B as BaseGadget, q as ParsedGadgetCall, r as GadgetExecutionResult, s as ProviderAdapter, t as ModelDescriptor, u as ModelSpec, v as LLMGenerationOptions, w as LLMStream } from './mock-stream-C0vOqI3L.cjs';
- export { D as AfterGadgetExecutionAction, F as AfterGadgetExecutionControllerContext, I as AfterLLMCallAction, J as AfterLLMCallControllerContext, K as AfterLLMErrorAction, x as AgentBuilder, N as AgentOptions, O as BeforeGadgetExecutionAction, Q as BeforeLLMCallAction, R as ChunkInterceptorContext, T as Controllers, aa as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, U as GadgetExecutionControllerContext, as as GadgetOrClass, V as GadgetParameterInterceptorContext, W as GadgetResultInterceptorContext, H as HistoryMessage, X as Interceptors, Y as LLMCallControllerContext, Z as LLMErrorControllerContext, a9 as LLMMessageBuilder, a8 as LLMRole, a7 as LLMist, a6 as LLMistOptions, _ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ab as ModelFeatures, ag as ModelIdentifierParser, ac as ModelLimits, ad as ModelPricing, $ as ObserveChunkContext, a0 as ObserveGadgetCompleteContext, a1 as ObserveGadgetStartContext, a2 as ObserveLLMCallContext, a3 as ObserveLLMCompleteContext, a4 as ObserveLLMErrorContext, a5 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, y as collectEvents, z as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, C as runWithHandlers, ap as stream } from './mock-stream-C0vOqI3L.cjs';
+ import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-C8mBXRzJ.cjs';
+ export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, T as ChunkInterceptorContext, U as Controllers, ab as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, V as GadgetExecutionControllerContext, at as GadgetOrClass, W as GadgetParameterInterceptorContext, X as GadgetResultInterceptorContext, H as HistoryMessage, Y as Interceptors, Z as LLMCallControllerContext, _ as LLMErrorControllerContext, aa as LLMMessageBuilder, a9 as LLMRole, a8 as LLMist, a7 as LLMistOptions, $ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ac as ModelFeatures, ah as ModelIdentifierParser, ad as ModelLimits, ae as ModelPricing, a0 as ObserveChunkContext, a1 as ObserveGadgetCompleteContext, a2 as ObserveGadgetStartContext, a3 as ObserveLLMCallContext, a4 as ObserveLLMCompleteContext, a5 as ObserveLLMErrorContext, a6 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, af as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, ag as TokenUsage, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-C8mBXRzJ.cjs';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -720,6 +720,16 @@ interface IConversationManager {
  * Extracted from AgentLoop to follow Single Responsibility Principle.
  */

+ /**
+ * Options for ConversationManager constructor.
+ */
+ interface ConversationManagerOptions {
+ parameterFormat?: ParameterFormat;
+ /** Custom gadget start marker prefix */
+ startPrefix?: string;
+ /** Custom gadget end marker prefix */
+ endPrefix?: string;
+ }
  /**
  * Default implementation of IConversationManager.
  * Manages conversation history by building on top of base messages (system prompt, gadget instructions).
@@ -729,7 +739,7 @@ declare class ConversationManager implements IConversationManager {
  private readonly initialMessages;
  private readonly historyBuilder;
  private readonly parameterFormat;
- constructor(baseMessages: LLMMessage[], initialMessages: LLMMessage[], parameterFormat?: ParameterFormat);
+ constructor(baseMessages: LLMMessage[], initialMessages: LLMMessage[], options?: ConversationManagerOptions);
  addUserMessage(content: string): void;
  addAssistantMessage(content: string): void;
  addGadgetCall(gadgetName: string, parameters: Record<string, unknown>, result: string): void;
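The constructor hunk above replaces the positional `parameterFormat` argument with an options object (`ConversationManagerOptions`, added in the previous hunk). A minimal migration sketch, assuming empty message arrays and that `"json"` is a valid `ParameterFormat` member (the type's values are not visible in this diff); the marker strings are likewise hypothetical:

```typescript
import { ConversationManager, type LLMMessage } from "llmist";

const base: LLMMessage[] = [];    // system prompt + gadget instructions
const initial: LLMMessage[] = []; // seed history

// 0.4.1 signature: new ConversationManager(base, initial, "json")
// 0.5.0 signature: third argument is an options bag, every field optional
const manager = new ConversationManager(base, initial, {
  parameterFormat: "json", // assumed value; see caveat above
  startPrefix: "[[GADGET", // custom gadget start marker prefix (hypothetical marker)
  endPrefix: "[[/GADGET",  // custom gadget end marker prefix (hypothetical marker)
});

manager.addUserMessage("Hello");
```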
@@ -1032,6 +1042,8 @@ interface CreateGadgetConfig<TSchema extends ZodType> {
  execute: (params: InferSchema$1<TSchema>) => string | Promise<string>;
  /** Optional timeout in milliseconds */
  timeoutMs?: number;
+ /** Optional usage examples to help LLMs understand proper invocation */
+ examples?: GadgetExample<InferSchema$1<TSchema>>[];
  }
  /**
  * Creates a gadget from a function (simpler than class-based approach).
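`CreateGadgetConfig` (the function-based API) gains an optional `examples` field. A sketch under two assumptions: `GadgetExample`'s members are not shown in this diff, so the `{ params, result }` entry is a hypothetical shape, and the `name`/`description`/`schema` fields are assumed to mirror the class-based `GadgetConfig` shown further down:

```typescript
import { createGadget, z } from "llmist";

const add = createGadget({
  name: "add",
  description: "Adds two numbers",
  schema: z.object({ a: z.number(), b: z.number() }),
  execute: ({ a, b }) => String(a + b), // params inferred from the schema
  timeoutMs: 5_000,
  // New in 0.5.0. { params, result } is a hypothetical GadgetExample shape;
  // the diff only shows the field's type parameter.
  examples: [{ params: { a: 2, b: 3 }, result: "5" }],
});
```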
@@ -1188,6 +1200,8 @@ interface GadgetConfig<TSchema extends ZodType> {
  name?: string;
  /** Optional timeout in milliseconds */
  timeoutMs?: number;
+ /** Optional usage examples to help LLMs understand proper invocation */
+ examples?: GadgetExample<InferSchema<TSchema>>[];
  }
  /**
  * Factory function to create a typed gadget base class.
@@ -1250,6 +1264,7 @@ declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>):
  parameterSchema: TSchema;
  name: string | undefined;
  timeoutMs: number | undefined;
+ examples: GadgetExample<InferSchema<TSchema>>[] | undefined;
  /**
  * Type helper property for accessing inferred parameter type.
  * This is used in the execute method signature: `execute(params: this['params'])`
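The factory forwards `examples` from `GadgetConfig` onto the base class it returns. A sketch reusing the Calculator example from the package's own typed-gadget docs, again with a hypothetical `GadgetExample` shape:

```typescript
import { Gadget, z } from "llmist";

class Calculator extends Gadget({
  description: "Performs arithmetic operations",
  schema: z.object({
    operation: z.enum(["add", "subtract"]),
    a: z.number(),
    b: z.number(),
  }),
  // Hypothetical example shape, as above; only the type parameter is
  // visible in this diff.
  examples: [{ params: { operation: "add", a: 2, b: 3 }, result: "5" }],
}) {
  execute(params: this["params"]): string {
    const { operation, a, b } = params; // fully typed via this['params']
    return operation === "add" ? String(a + b) : String(a - b);
  }
}
```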
@@ -1645,4 +1660,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;

- export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+ export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
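The export-list change also makes `GadgetExample` importable from the package root. A one-line sketch treating it as an opaque type (its members are not shown in this diff):

```typescript
import type { GadgetExample } from "llmist";

type AddParams = { a: number; b: number };
type AddExample = GadgetExample<AddParams>; // shape defined upstream
```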
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, B as BaseGadget, q as ParsedGadgetCall, r as GadgetExecutionResult, s as ProviderAdapter, t as ModelDescriptor, u as ModelSpec, v as LLMGenerationOptions, w as LLMStream } from './mock-stream-C0vOqI3L.js';
- export { D as AfterGadgetExecutionAction, F as AfterGadgetExecutionControllerContext, I as AfterLLMCallAction, J as AfterLLMCallControllerContext, K as AfterLLMErrorAction, x as AgentBuilder, N as AgentOptions, O as BeforeGadgetExecutionAction, Q as BeforeLLMCallAction, R as ChunkInterceptorContext, T as Controllers, aa as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, U as GadgetExecutionControllerContext, as as GadgetOrClass, V as GadgetParameterInterceptorContext, W as GadgetResultInterceptorContext, H as HistoryMessage, X as Interceptors, Y as LLMCallControllerContext, Z as LLMErrorControllerContext, a9 as LLMMessageBuilder, a8 as LLMRole, a7 as LLMist, a6 as LLMistOptions, _ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ab as ModelFeatures, ag as ModelIdentifierParser, ac as ModelLimits, ad as ModelPricing, $ as ObserveChunkContext, a0 as ObserveGadgetCompleteContext, a1 as ObserveGadgetStartContext, a2 as ObserveLLMCallContext, a3 as ObserveLLMCompleteContext, a4 as ObserveLLMErrorContext, a5 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, y as collectEvents, z as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, C as runWithHandlers, ap as stream } from './mock-stream-C0vOqI3L.js';
+ import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-C8mBXRzJ.js';
+ export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, T as ChunkInterceptorContext, U as Controllers, ab as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, V as GadgetExecutionControllerContext, at as GadgetOrClass, W as GadgetParameterInterceptorContext, X as GadgetResultInterceptorContext, H as HistoryMessage, Y as Interceptors, Z as LLMCallControllerContext, _ as LLMErrorControllerContext, aa as LLMMessageBuilder, a9 as LLMRole, a8 as LLMist, a7 as LLMistOptions, $ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ac as ModelFeatures, ah as ModelIdentifierParser, ad as ModelLimits, ae as ModelPricing, a0 as ObserveChunkContext, a1 as ObserveGadgetCompleteContext, a2 as ObserveGadgetStartContext, a3 as ObserveLLMCallContext, a4 as ObserveLLMCompleteContext, a5 as ObserveLLMErrorContext, a6 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, af as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, ag as TokenUsage, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-C8mBXRzJ.js';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -720,6 +720,16 @@ interface IConversationManager {
  * Extracted from AgentLoop to follow Single Responsibility Principle.
  */

+ /**
+ * Options for ConversationManager constructor.
+ */
+ interface ConversationManagerOptions {
+ parameterFormat?: ParameterFormat;
+ /** Custom gadget start marker prefix */
+ startPrefix?: string;
+ /** Custom gadget end marker prefix */
+ endPrefix?: string;
+ }
  /**
  * Default implementation of IConversationManager.
  * Manages conversation history by building on top of base messages (system prompt, gadget instructions).
@@ -729,7 +739,7 @@ declare class ConversationManager implements IConversationManager {
  private readonly initialMessages;
  private readonly historyBuilder;
  private readonly parameterFormat;
- constructor(baseMessages: LLMMessage[], initialMessages: LLMMessage[], parameterFormat?: ParameterFormat);
+ constructor(baseMessages: LLMMessage[], initialMessages: LLMMessage[], options?: ConversationManagerOptions);
  addUserMessage(content: string): void;
  addAssistantMessage(content: string): void;
  addGadgetCall(gadgetName: string, parameters: Record<string, unknown>, result: string): void;
@@ -1032,6 +1042,8 @@ interface CreateGadgetConfig<TSchema extends ZodType> {
  execute: (params: InferSchema$1<TSchema>) => string | Promise<string>;
  /** Optional timeout in milliseconds */
  timeoutMs?: number;
+ /** Optional usage examples to help LLMs understand proper invocation */
+ examples?: GadgetExample<InferSchema$1<TSchema>>[];
  }
  /**
  * Creates a gadget from a function (simpler than class-based approach).
@@ -1188,6 +1200,8 @@ interface GadgetConfig<TSchema extends ZodType> {
  name?: string;
  /** Optional timeout in milliseconds */
  timeoutMs?: number;
+ /** Optional usage examples to help LLMs understand proper invocation */
+ examples?: GadgetExample<InferSchema<TSchema>>[];
  }
  /**
  * Factory function to create a typed gadget base class.
@@ -1250,6 +1264,7 @@ declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>):
  parameterSchema: TSchema;
  name: string | undefined;
  timeoutMs: number | undefined;
+ examples: GadgetExample<InferSchema<TSchema>>[] | undefined;
  /**
  * Type helper property for accessing inferred parameter type.
  * This is used in the execute method signature: `execute(params: this['params'])`
@@ -1645,4 +1660,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;

- export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+ export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
package/dist/index.js CHANGED
@@ -1,6 +1,6 @@
  import {
  createGadget
- } from "./chunk-QVDGTUQN.js";
+ } from "./chunk-LKIBXQ5I.js";
  import {
  MockBuilder,
  MockManager,
@@ -13,7 +13,7 @@ import {
  mockLLM,
  validateAndApplyDefaults,
  validateGadgetParams
- } from "./chunk-A4GRCCXF.js";
+ } from "./chunk-VF2WOCHM.js";
  import {
  AgentBuilder,
  AnthropicMessagesProvider,
@@ -70,7 +70,7 @@ import {
  resolveRulesTemplate,
  runWithHandlers,
  stream
- } from "./chunk-LQE7TKKW.js";
+ } from "./chunk-MH4TQ5AD.js";

  // src/index.ts
  init_builder();
@@ -826,6 +826,7 @@ function Gadget(config) {
  parameterSchema = config.schema;
  name = config.name;
  timeoutMs = config.timeoutMs;
+ examples = config.examples;
  /**
  * Type helper property for accessing inferred parameter type.
  * This is used in the execute method signature: `execute(params: this['params'])`
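At runtime the factory simply copies the field through (`examples = config.examples;`), so instances expose it alongside `name` and `timeoutMs`. Continuing the hypothetical Calculator sketch from the type-declaration diff above:

```typescript
const calc = new Calculator();
console.log(calc.examples?.length); // 1; undefined when no examples were configured
```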
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/agent/hook-presets.ts","../src/agent/index.ts","../src/gadgets/typed-gadget.ts"],"sourcesContent":[…],"mappings":"…"}
(single minified source-map line abridged; the regenerated "+" line is truncated in this diff)
;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4HA,OAAO,iBAAiB,SAA+C;AACrE,UAAM,EAAE,eAAe,YAAY,cAAc,MAAM,IAAI,WAAW,CAAC;AAGvE,QAAI,aAAa;AACjB,QAAI,mBAAmB;AACvB,QAAI,mBAAmB;AACvB,QAAI,oBAAoB;AACxB,QAAI,YAAY;AAChB,UAAM,YAAY,KAAK,IAAI;AAE3B,WAAO;AAAA,MACL,WAAW;AAAA;AAAA,QAET,gBAAgB,OAAO,QAAQ;AAC7B;AAAA,QACF;AAAA;AAAA,QAGA,mBAAmB,OAAO,QAAQ;AAChC;AAGA,cAAI,IAAI,OAAO;AACb,gCAAoB,IAAI,MAAM;AAC9B,iCAAqB,IAAI,MAAM;AAI/B,gBAAI,eAAe;AACjB,kBAAI;AAGF,sBAAM,YAAY,IAAI,QAAQ,MAAM,SAAS,GAAG,IAC5C,IAAI,QAAQ,MAAM,MAAM,GAAG,EAAE,CAAC,IAC9B,IAAI,QAAQ;AAGhB,sBAAM,eAAe,cAAc;AAAA,kBACjC;AAAA,kBACA,IAAI,MAAM;AAAA,kBACV,IAAI,MAAM;AAAA,gBACZ;AAEA,oBAAI,cAAc;AAChB,+BAAa,aAAa;AAAA,gBAC5B;AAAA,cACF,SAAS,OAAO;AAGd,oBAAI,aAAa;AACf,0BAAQ,KAAK,yCAA+B,KAAK;AAAA,gBACnD;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAGA,gBAAM,QAAuB;AAAA,YAC3B;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA,aAAa,mBAAmB;AAAA,YAChC;AAAA,YACA,gBAAgB,SAAS,KAAK,IAAI,IAAI,aAAa,KAAM,QAAQ,CAAC,CAAC;AAAA,UACrE;AAGA,cAAI,YAAY;AACd,uBAAW,KAAK;AAAA,UAClB;AAGA,cAAI,aAAa;AACf,kBAAM,kBAAkB,MAAM,eAAe,MACzC,IAAI,MAAM,cAAc,KAAM,QAAQ,CAAC,CAAC,MACxC,GAAG,MAAM,WAAW;AAExB,kBAAM,gBAAgB,MAAM,YAAY,IACpC,IAAI,MAAM,UAAU,QAAQ,CAAC,CAAC,KAC9B;AAEJ,oBAAQ;AAAA,cACN,kCAA2B,MAAM,gBAAgB,MAAM,eAAe,aAAa,aAAa,MAAM,MAAM,cAAc;AAAA,YAC5H;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,OAAO,eAA2B;AAChC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,MAAM,+BAA0B,IAAI,SAAS,MAAM,IAAI,MAAM,OAAO;AAC5E,kBAAQ,MAAM,aAAa,IAAI,QAAQ,KAAK,EAAE;AAC9C,kBAAQ,MAAM,iBAAiB,IAAI,SAAS,EAAE;AAAA,QAChD;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,cAAI,IAAI,OAAO;AACb,oBAAQ,MAAM,wBAAmB,IAAI,UAAU,EAAE;AACjD,oBAAQ,MAAM,aAAa,IAAI,KAAK,EAAE;AACtC,oBAAQ,MAAM,kBAAkB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UACzE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0CA,OAAO,SAAqB;AAC1B,WAAO,CAAC;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsFA,OAAO,SAAS,UAAoC;AAClD,UAAM,SAAqB;AAAA,MACzB,WAAW,CAAC;AAAA,MACZ,cAAc,CAAC;AAAA,MACf,aAAa,CAAC;AAAA,IAChB;AAGA,eAAW,SAAS,UAAU;AAC5B,UAAI,MAAM,WAAW;AACnB,mBAAW,CAAC,KAAK,OAAO,KAAK,OAAO,QAAQ,MAAM,SAAS,GAAG;AAC5D,gBAAM,WAAW;AACjB,cAAI,OAAO,UAAW,QAAQ,GAAG;AAE/B,kBAAM,WAAW,OAAO,UAAW,QAAQ;AAC3C,mBAAO,UAAW,QAAQ,IAAI,OAAO,QAAa;AAChD,oBAAM,SAAS,GAAG;AAClB,oBAAM,QAAQ,GAAG;AAAA,YACnB;AAAA,UACF,OAAO;AACL,mBAAO,UAAW,QAAQ,IAAI;AAAA,UAChC;AAAA,QACF;AAAA,MACF;AAKA,UAAI,MAAM,cAAc;AACtB,eAAO,OAAO,OAAO,cAAe,MAAM,Y
AAY;AAAA,MACxD;AAGA,UAAI,MAAM,aAAa;AACrB,eAAO,OAAO,OAAO,aAAc,MAAM,WAAW;AAAA,MACtD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4DA,OAAO,WAAW,UAA0B,CAAC,GAAe;AAC1D,WAAO,aAAY;AAAA,MACjB,aAAY,QAAQ,OAAO;AAAA,MAC3B,aAAY,OAAO;AAAA,MACnB,aAAY,cAAc;AAAA,MAC1B,aAAY,aAAa;AAAA,IAC3B;AAAA,EACF;AACF;;;AC/5BA;AAkCA;;;AFQA;AAEA;AASA;AAGA;AAeA;AAEA;AAMA;AAIA;AACA;AAEA;AAEA;;;AGWO,SAAS,OAAgC,QAA+B;AAAA,EAC7E,MAAe,mBAAmB,WAAW;AAAA,IAC3C,cAAc,OAAO;AAAA,IACrB,kBAAkB,OAAO;AAAA,IACzB,OAAO,OAAO;AAAA,IACd,YAAY,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASV;AAAA,EAkBX;AAEA,SAAO;AAGT;;;AH7BA;AACA;AAIA;AACA;AACA;","names":[]}
+ {"version":3,"sources":["../src/index.ts","../src/agent/hook-presets.ts","../src/agent/index.ts","../src/gadgets/typed-gadget.ts"],"sourcesContent":["// Re-export Zod's z for schema definitions\n// Using llmist's z ensures .describe() metadata is preserved in JSON schemas\nexport { z } from \"zod\";\n// Syntactic sugar: Agent builder and event handlers\nexport type { HistoryMessage } from \"./agent/builder.js\";\nexport { AgentBuilder } from \"./agent/builder.js\";\nexport type { EventHandlers } from \"./agent/event-handlers.js\";\nexport { collectEvents, collectText, runWithHandlers } from \"./agent/event-handlers.js\";\n// Syntactic sugar: Hook presets\nexport type { LoggingOptions } from \"./agent/hook-presets.js\";\nexport { HookPresets } from \"./agent/hook-presets.js\";\n// Agent infrastructure\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n AgentOptions,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n IConversationManager,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n StreamProcessingResult,\n StreamProcessorOptions,\n} from \"./agent/index.js\";\nexport { ConversationManager, StreamProcessor } from \"./agent/index.js\";\nexport type { LLMistOptions } from \"./core/client.js\";\nexport { LLMist } from \"./core/client.js\";\nexport type { LLMMessage, LLMRole } from \"./core/messages.js\";\nexport { LLMMessageBuilder } from \"./core/messages.js\";\n// Model catalog\nexport type {\n CostEstimate,\n ModelFeatures,\n ModelLimits,\n ModelPricing,\n ModelSpec,\n} from \"./core/model-catalog.js\";\nexport { ModelRegistry } from \"./core/model-registry.js\";\n\n// Syntactic sugar: Model shortcuts and quick methods\nexport {\n getModelId,\n getProvider,\n hasProviderPrefix,\n MODEL_ALIASES,\n resolveModel,\n} from \"./core/model-shortcuts.js\";\nexport type {\n LLMGenerationOptions,\n LLMStream,\n LLMStreamChunk,\n ModelDescriptor,\n ProviderIdentifier,\n TokenUsage,\n} from \"./core/options.js\";\nexport { ModelIdentifierParser } from \"./core/options.js\";\nexport type { PromptConfig, PromptContext, PromptTemplate } from \"./core/prompt-config.js\";\nexport {\n DEFAULT_PROMPTS,\n resolvePromptTemplate,\n resolveRulesTemplate,\n} from \"./core/prompt-config.js\";\nexport type { QuickOptions } from \"./core/quick-methods.js\";\nexport { complete, stream } from \"./core/quick-methods.js\";\nexport type { CreateGadgetConfig } from \"./gadgets/create-gadget.js\";\nexport { createGadget } from \"./gadgets/create-gadget.js\";\n// Gadget infrastructure\nexport { BreakLoopException, HumanInputException } from \"./gadgets/exceptions.js\";\nexport { GadgetExecutor } from \"./gadgets/executor.js\";\nexport { BaseGadget } from \"./gadgets/gadget.js\";\nexport { StreamParser } from \"./gadgets/parser.js\";\nexport type { GadgetClass, GadgetOrClass } from \"./gadgets/registry.js\";\nexport { GadgetRegistry } from \"./gadgets/registry.js\";\n\n// Syntactic sugar: Typed gadgets 
and helpers\nexport type { GadgetConfig } from \"./gadgets/typed-gadget.js\";\nexport { Gadget } from \"./gadgets/typed-gadget.js\";\nexport type {\n GadgetExample,\n GadgetExecutionResult,\n ParsedGadgetCall,\n StreamEvent,\n TextOnlyAction,\n TextOnlyContext,\n TextOnlyCustomHandler,\n TextOnlyGadgetConfig,\n TextOnlyHandler,\n TextOnlyStrategy,\n} from \"./gadgets/types.js\";\nexport type { ValidationIssue, ValidationResult } from \"./gadgets/validation.js\";\nexport { validateAndApplyDefaults, validateGadgetParams } from \"./gadgets/validation.js\";\nexport type { LoggerOptions } from \"./logging/logger.js\";\nexport { createLogger, defaultLogger } from \"./logging/logger.js\";\nexport {\n AnthropicMessagesProvider,\n createAnthropicProviderFromEnv,\n} from \"./providers/anthropic.js\";\nexport { discoverProviderAdapters } from \"./providers/discovery.js\";\nexport { createGeminiProviderFromEnv, GeminiGenerativeProvider } from \"./providers/gemini.js\";\nexport { createOpenAIProviderFromEnv, OpenAIChatProvider } from \"./providers/openai.js\";\nexport type { ProviderAdapter } from \"./providers/provider.js\";\n\n// Testing/Mock infrastructure\nexport type {\n MockMatcher,\n MockMatcherContext,\n MockOptions,\n MockRegistration,\n MockResponse,\n MockStats,\n} from \"./testing/index.js\";\nexport {\n createMockAdapter,\n createMockClient,\n createMockStream,\n createTextMockStream,\n getMockManager,\n MockBuilder,\n MockManager,\n MockProviderAdapter,\n mockLLM,\n} from \"./testing/index.js\";\n","/**\n * Ready-to-use hook configurations for common monitoring, logging, and debugging tasks.\n *\n * HookPresets provide instant observability without writing custom hooks. They're the\n * fastest way to add monitoring to your agents during development and production.\n *\n * ## Available Presets\n *\n * - **logging(options?)** - Log LLM calls and gadget execution\n * - **timing()** - Measure execution time for operations\n * - **tokenTracking()** - Track cumulative token usage and costs\n * - **progressTracking(options?)** - Track progress with iterations, tokens, cost, and timing (SHOWCASE)\n * - **errorLogging()** - Log detailed error information\n * - **silent()** - No output (useful for testing)\n * - **monitoring(options?)** - All-in-one preset combining logging, timing, tokens, and errors\n * - **merge(...hookSets)** - Combine multiple hook configurations\n *\n * ## Quick Start\n *\n * @example\n * ```typescript\n * import { LLMist, HookPresets } from 'llmist';\n *\n * // Basic logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"Your prompt\");\n *\n * // Full monitoring suite (recommended for development)\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * .ask(\"Your prompt\");\n *\n * // Combine multiple presets\n * await LLMist.createAgent()\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * .ask(\"Your prompt\");\n *\n * // Environment-based configuration\n * const hooks = process.env.NODE_ENV === 'production'\n * ? 
HookPresets.merge(HookPresets.errorLogging(), HookPresets.tokenTracking())\n * : HookPresets.monitoring({ verbose: true });\n *\n * await LLMist.createAgent()\n * .withHooks(hooks)\n * .ask(\"Your prompt\");\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md | Full documentation}\n */\n\nimport type { AgentHooks } from \"./hooks.js\";\nimport type { ModelRegistry } from \"../core/model-registry.js\";\n\n/**\n * Options for logging preset.\n */\nexport interface LoggingOptions {\n /** Include verbose details like parameters and results */\n verbose?: boolean;\n}\n\n/**\n * Progress statistics reported by progressTracking preset.\n *\n * Contains cumulative metrics across all LLM calls in the agent session,\n * useful for building progress UI, cost monitoring, and performance tracking.\n */\nexport interface ProgressStats {\n /** Current iteration number (increments on each LLM call start) */\n currentIteration: number;\n\n /** Total number of completed LLM calls */\n totalCalls: number;\n\n /** Cumulative input tokens across all calls */\n totalInputTokens: number;\n\n /** Cumulative output tokens across all calls */\n totalOutputTokens: number;\n\n /** Total tokens (input + output) */\n totalTokens: number;\n\n /** Cumulative cost in USD (requires modelRegistry) */\n totalCost: number;\n\n /** Elapsed time in seconds since first call */\n elapsedSeconds: number;\n}\n\n/**\n * Options for progressTracking preset.\n *\n * Controls how progress data is tracked and reported during agent execution.\n */\nexport interface ProgressTrackingOptions {\n /**\n * Model registry for cost calculation.\n *\n * If provided, enables automatic cost estimation based on token usage\n * and model pricing data. Without it, totalCost will always be 0.\n *\n * @example\n * ```typescript\n * import { LLMist, HookPresets } from 'llmist';\n *\n * const client = LLMist.create();\n * const hooks = HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry // Enable cost tracking\n * });\n * ```\n */\n modelRegistry?: ModelRegistry;\n\n /**\n * Callback invoked after each LLM call completion with cumulative stats.\n *\n * Use this to update progress UI, log metrics, or track budgets in real-time.\n *\n * @example\n * ```typescript\n * HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * console.log(`Iteration #${stats.currentIteration}`);\n * console.log(`Cost so far: $${stats.totalCost.toFixed(4)}`);\n * console.log(`Elapsed: ${stats.elapsedSeconds}s`);\n * }\n * })\n * ```\n */\n onProgress?: (stats: ProgressStats) => void;\n\n /**\n * Whether to log progress to console after each LLM call.\n *\n * When enabled, prints a summary line with tokens, cost, and elapsed time.\n * Useful for quick debugging without implementing a custom callback.\n *\n * Default: false\n *\n * @example\n * ```typescript\n * // Quick console-based progress tracking\n * HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * logProgress: true // Log to console\n * })\n * // Output: 📊 Progress: Iteration #2 | 1,234 tokens | $0.0056 | 12.3s\n * ```\n */\n logProgress?: boolean;\n}\n\n/**\n * Common hook presets.\n */\nexport class HookPresets {\n /**\n * Logs LLM calls and gadget execution to console with optional verbosity.\n *\n * **Output (basic mode):**\n * - LLM call start/complete events with iteration numbers\n * - Gadget execution start/complete with gadget names\n * - Token counts when available\n *\n * 
**Output (verbose mode):**\n * - All basic mode output\n * - Full gadget parameters (formatted JSON)\n * - Full gadget results\n * - Complete LLM response text\n *\n * **Use cases:**\n * - Basic development debugging and execution flow visibility\n * - Understanding agent decision-making and tool usage\n * - Troubleshooting gadget invocations\n *\n * **Performance:** Minimal overhead. Console writes are synchronous but fast.\n *\n * @param options - Logging options\n * @param options.verbose - Include full parameters and results. Default: false\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"Calculate 15 * 23\");\n * // Output: [LLM] Starting call (iteration 0)\n * // [GADGET] Executing Calculator\n * // [GADGET] Completed Calculator\n * // [LLM] Completed (tokens: 245)\n * ```\n *\n * @example\n * ```typescript\n * // Verbose logging with full details\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging({ verbose: true }))\n * .ask(\"Calculate 15 * 23\");\n * // Output includes: parameters, results, and full responses\n * ```\n *\n * @example\n * ```typescript\n * // Environment-based verbosity\n * const isDev = process.env.NODE_ENV === 'development';\n * .withHooks(HookPresets.logging({ verbose: isDev }))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}\n */\n static logging(options: LoggingOptions = {}): AgentHooks {\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n console.log(`[LLM] Starting call (iteration ${ctx.iteration})`);\n },\n onLLMCallComplete: async (ctx) => {\n const tokens = ctx.usage?.totalTokens ?? \"unknown\";\n console.log(`[LLM] Completed (tokens: ${tokens})`);\n if (options.verbose && ctx.finalMessage) {\n console.log(`[LLM] Response: ${ctx.finalMessage}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n console.log(`[GADGET] Executing ${ctx.gadgetName}`);\n if (options.verbose) {\n console.log(`[GADGET] Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n onGadgetExecutionComplete: async (ctx) => {\n console.log(`[GADGET] Completed ${ctx.gadgetName}`);\n if (options.verbose) {\n const display = ctx.error ?? ctx.finalResult ?? \"(no result)\";\n console.log(`[GADGET] Result: ${display}`);\n }\n },\n },\n };\n }\n\n /**\n * Measures and logs execution time for LLM calls and gadgets.\n *\n * **Output:**\n * - Duration in milliseconds with ⏱️ emoji for each operation\n * - Separate timing for each LLM iteration\n * - Separate timing for each gadget execution\n *\n * **Use cases:**\n * - Performance profiling and optimization\n * - Identifying slow operations (LLM calls vs gadget execution)\n * - Monitoring response times in production\n * - Capacity planning and SLA tracking\n *\n * **Performance:** Negligible overhead. 
Uses Date.now() for timing measurements.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic timing\n * await LLMist.createAgent()\n * .withHooks(HookPresets.timing())\n * .withGadgets(Weather, Database)\n * .ask(\"What's the weather in NYC?\");\n * // Output: ⏱️ LLM call took 1234ms\n * // ⏱️ Gadget Weather took 567ms\n * // ⏱️ LLM call took 890ms\n * ```\n *\n * @example\n * ```typescript\n * // Combined with logging for full context\n * .withHooks(HookPresets.merge(\n * HookPresets.logging(),\n * HookPresets.timing()\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Correlate performance with cost\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}\n */\n static timing(): AgentHooks {\n const timings = new Map<string, number>();\n\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n timings.set(`llm-${ctx.iteration}`, Date.now());\n },\n onLLMCallComplete: async (ctx) => {\n const start = timings.get(`llm-${ctx.iteration}`);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ LLM call took ${duration}ms`);\n timings.delete(`llm-${ctx.iteration}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n const key = `gadget-${ctx.gadgetName}-${Date.now()}`;\n timings.set(key, Date.now());\n // Store key for lookup in complete handler\n (ctx as any)._timingKey = key;\n },\n onGadgetExecutionComplete: async (ctx) => {\n const key = (ctx as any)._timingKey;\n if (key) {\n const start = timings.get(key);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ Gadget ${ctx.gadgetName} took ${duration}ms`);\n timings.delete(key);\n }\n }\n },\n },\n };\n }\n\n /**\n * Tracks cumulative token usage across all LLM calls.\n *\n * **Output:**\n * - Per-call token count with 📊 emoji\n * - Cumulative total across all calls\n * - Call count for average calculations\n *\n * **Use cases:**\n * - Cost monitoring and budget tracking\n * - Optimizing prompts to reduce token usage\n * - Comparing token efficiency across different approaches\n * - Real-time cost estimation\n *\n * **Performance:** Minimal overhead. Simple counter increments.\n *\n * **Note:** Token counts depend on the provider's response. Some providers\n * may not include usage data, in which case counts won't be logged.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic token tracking\n * await LLMist.createAgent()\n * .withHooks(HookPresets.tokenTracking())\n * .ask(\"Summarize this document...\");\n * // Output: 📊 Tokens this call: 1,234\n * // 📊 Total tokens: 1,234 (across 1 calls)\n * // 📊 Tokens this call: 567\n * // 📊 Total tokens: 1,801 (across 2 calls)\n * ```\n *\n * @example\n * ```typescript\n * // Cost calculation with custom hook\n * let totalTokens = 0;\n * .withHooks(HookPresets.merge(\n * HookPresets.tokenTracking(),\n * {\n * observers: {\n * onLLMCallComplete: async (ctx) => {\n * totalTokens += ctx.usage?.totalTokens ?? 
0;\n * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens\n * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);\n * },\n * },\n * }\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}\n */\n static tokenTracking(): AgentHooks {\n let totalTokens = 0;\n let totalCalls = 0;\n\n return {\n observers: {\n onLLMCallComplete: async (ctx) => {\n totalCalls++;\n if (ctx.usage?.totalTokens) {\n totalTokens += ctx.usage.totalTokens;\n console.log(`📊 Tokens this call: ${ctx.usage.totalTokens}`);\n console.log(`📊 Total tokens: ${totalTokens} (across ${totalCalls} calls)`);\n }\n },\n },\n };\n }\n\n /**\n * Tracks comprehensive progress metrics including iterations, tokens, cost, and timing.\n *\n * **This preset showcases llmist's core capabilities by demonstrating:**\n * - Observer pattern for non-intrusive monitoring\n * - Integration with ModelRegistry for cost estimation\n * - Callback-based architecture for flexible UI updates\n * - Provider-agnostic token and cost tracking\n *\n * Unlike `tokenTracking()` which only logs to console, this preset provides\n * structured data through callbacks, making it perfect for building custom UIs,\n * dashboards, or progress indicators (like the llmist CLI).\n *\n * **Output (when logProgress: true):**\n * - Iteration number and call count\n * - Cumulative token usage (input + output)\n * - Cumulative cost in USD (requires modelRegistry)\n * - Elapsed time in seconds\n *\n * **Use cases:**\n * - Building CLI progress indicators with live updates\n * - Creating web dashboards with real-time metrics\n * - Budget monitoring and cost alerts\n * - Performance tracking and optimization\n * - Custom logging to external systems (Datadog, CloudWatch, etc.)\n *\n * **Performance:** Minimal overhead. Uses Date.now() for timing and optional\n * ModelRegistry.estimateCost() which is O(1) lookup. 
Callback invocation is\n * synchronous and fast.\n *\n * @param options - Progress tracking options\n * @param options.modelRegistry - ModelRegistry for cost estimation (optional)\n * @param options.onProgress - Callback invoked after each LLM call (optional)\n * @param options.logProgress - Log progress to console (default: false)\n * @returns Hook configuration with progress tracking observers\n *\n * @example\n * ```typescript\n * // Basic usage with callback (RECOMMENDED - used by llmist CLI)\n * import { LLMist, HookPresets } from 'llmist';\n *\n * const client = LLMist.create();\n *\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * // Update your UI with stats\n * console.log(`#${stats.currentIteration} | ${stats.totalTokens} tokens | $${stats.totalCost.toFixed(4)}`);\n * }\n * }))\n * .withGadgets(Calculator)\n * .ask(\"Calculate 15 * 23\");\n * // Output: #1 | 245 tokens | $0.0012\n * ```\n *\n * @example\n * ```typescript\n * // Console logging mode (quick debugging)\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * logProgress: true // Simple console output\n * }))\n * .ask(\"Your prompt\");\n * // Output: 📊 Progress: Iteration #1 | 245 tokens | $0.0012 | 1.2s\n * ```\n *\n * @example\n * ```typescript\n * // Budget monitoring with alerts\n * const BUDGET_USD = 0.10;\n *\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * if (stats.totalCost > BUDGET_USD) {\n * throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);\n * }\n * }\n * }))\n * .ask(\"Long running task...\");\n * ```\n *\n * @example\n * ```typescript\n * // Web dashboard integration\n * let progressBar: HTMLElement;\n *\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * // Update web UI in real-time\n * progressBar.textContent = `Iteration ${stats.currentIteration}`;\n * progressBar.dataset.cost = stats.totalCost.toFixed(4);\n * progressBar.dataset.tokens = stats.totalTokens.toString();\n * }\n * }))\n * .ask(\"Your prompt\");\n * ```\n *\n * @example\n * ```typescript\n * // External logging (Datadog, CloudWatch, etc.)\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: async (stats) => {\n * await metrics.gauge('llm.iteration', stats.currentIteration);\n * await metrics.gauge('llm.cost', stats.totalCost);\n * await metrics.gauge('llm.tokens', stats.totalTokens);\n * }\n * }))\n * .ask(\"Your prompt\");\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsprogresstrackingoptions | Full documentation}\n * @see {@link ProgressTrackingOptions} for detailed options\n * @see {@link ProgressStats} for the callback data structure\n */\n static progressTracking(options?: ProgressTrackingOptions): AgentHooks {\n const { modelRegistry, onProgress, logProgress = false } = options ?? 
{};\n\n // State tracking - follows same pattern as tokenTracking()\n let totalCalls = 0;\n let currentIteration = 0;\n let totalInputTokens = 0;\n let totalOutputTokens = 0;\n let totalCost = 0;\n const startTime = Date.now();\n\n return {\n observers: {\n // Track iteration on each LLM call start\n onLLMCallStart: async (ctx) => {\n currentIteration++;\n },\n\n // Accumulate metrics and report progress on each LLM call completion\n onLLMCallComplete: async (ctx) => {\n totalCalls++;\n\n // Track token usage from provider response\n if (ctx.usage) {\n totalInputTokens += ctx.usage.inputTokens;\n totalOutputTokens += ctx.usage.outputTokens;\n\n // Calculate cost using ModelRegistry (core llmist feature)\n // This showcases integration with llmist's pricing catalog\n if (modelRegistry) {\n try {\n // Extract model name from provider:model format\n // Example: \"openai:gpt-4o\" -> \"gpt-4o\"\n const modelName = ctx.options.model.includes(\":\")\n ? ctx.options.model.split(\":\")[1]\n : ctx.options.model;\n\n // Use core's estimateCost() for accurate pricing\n const costEstimate = modelRegistry.estimateCost(\n modelName,\n ctx.usage.inputTokens,\n ctx.usage.outputTokens,\n );\n\n if (costEstimate) {\n totalCost += costEstimate.totalCost;\n }\n } catch (error) {\n // Graceful degradation - log error but don't crash\n // This follows llmist's principle of non-intrusive monitoring\n if (logProgress) {\n console.warn(`⚠️ Cost estimation failed:`, error);\n }\n }\n }\n }\n\n // Build comprehensive progress stats\n const stats: ProgressStats = {\n currentIteration,\n totalCalls,\n totalInputTokens,\n totalOutputTokens,\n totalTokens: totalInputTokens + totalOutputTokens,\n totalCost,\n elapsedSeconds: Number(((Date.now() - startTime) / 1000).toFixed(1)),\n };\n\n // Invoke callback if provided (used by CLI and custom UIs)\n if (onProgress) {\n onProgress(stats);\n }\n\n // Optional console logging for quick debugging\n if (logProgress) {\n const formattedTokens = stats.totalTokens >= 1000\n ? `${(stats.totalTokens / 1000).toFixed(1)}k`\n : `${stats.totalTokens}`;\n\n const formattedCost = stats.totalCost > 0\n ? `$${stats.totalCost.toFixed(4)}`\n : \"$0\";\n\n console.log(\n `📊 Progress: Iteration #${stats.currentIteration} | ${formattedTokens} tokens | ${formattedCost} | ${stats.elapsedSeconds}s`,\n );\n }\n },\n },\n };\n }\n\n /**\n * Logs detailed error information for debugging and troubleshooting.\n *\n * **Output:**\n * - LLM errors with ❌ emoji, including model and recovery status\n * - Gadget errors with full context (parameters, error message)\n * - Separate logging for LLM and gadget failures\n *\n * **Use cases:**\n * - Troubleshooting production issues\n * - Understanding error patterns and frequency\n * - Debugging error recovery behavior\n * - Collecting error metrics for monitoring\n *\n * **Performance:** Minimal overhead. 
Only logs when errors occur.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic error logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.errorLogging())\n * .withGadgets(Database)\n * .ask(\"Fetch user data\");\n * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded\n * // Model: gpt-5-nano\n * // Recovered: true\n * // Output (on gadget error): ❌ Gadget Error: Database\n * // Error: Connection timeout\n * // Parameters: {...}\n * ```\n *\n * @example\n * ```typescript\n * // Combine with monitoring for full context\n * .withHooks(HookPresets.merge(\n * HookPresets.monitoring(), // Includes errorLogging\n * customErrorAnalytics\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Error analytics collection\n * const errors: any[] = [];\n * .withHooks(HookPresets.merge(\n * HookPresets.errorLogging(),\n * {\n * observers: {\n * onLLMCallError: async (ctx) => {\n * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });\n * },\n * },\n * }\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}\n */\n static errorLogging(): AgentHooks {\n return {\n observers: {\n onLLMCallError: async (ctx) => {\n console.error(`❌ LLM Error (iteration ${ctx.iteration}):`, ctx.error.message);\n console.error(` Model: ${ctx.options.model}`);\n console.error(` Recovered: ${ctx.recovered}`);\n },\n onGadgetExecutionComplete: async (ctx) => {\n if (ctx.error) {\n console.error(`❌ Gadget Error: ${ctx.gadgetName}`);\n console.error(` Error: ${ctx.error}`);\n console.error(` Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n },\n };\n }\n\n /**\n * Returns empty hook configuration for clean output without any logging.\n *\n * **Output:**\n * - None. Returns {} (empty object).\n *\n * **Use cases:**\n * - Clean test output without console noise\n * - Production environments where logging is handled externally\n * - Baseline for custom hook development\n * - Temporary disable of all hook output\n *\n * **Performance:** Zero overhead. No-op hook configuration.\n *\n * @returns Empty hook configuration\n *\n * @example\n * ```typescript\n * // Clean test output\n * describe('Agent tests', () => {\n * it('should calculate correctly', async () => {\n * const result = await LLMist.createAgent()\n * .withHooks(HookPresets.silent()) // No console output\n * .withGadgets(Calculator)\n * .askAndCollect(\"What is 15 times 23?\");\n *\n * expect(result).toContain(\"345\");\n * });\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Conditional silence based on environment\n * const isTesting = process.env.NODE_ENV === 'test';\n * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}\n */\n static silent(): AgentHooks {\n return {};\n }\n\n /**\n * Combines multiple hook configurations into one.\n *\n * Merge allows you to compose preset and custom hooks for modular monitoring\n * configurations. 
Understanding merge behavior is crucial for proper composition.\n *\n * **Merge behavior:**\n * - **Observers:** Composed - all handlers run sequentially in order\n * - **Interceptors:** Last one wins - only the last interceptor applies\n * - **Controllers:** Last one wins - only the last controller applies\n *\n * **Why interceptors/controllers don't compose:**\n * - Interceptors have different signatures per method, making composition impractical\n * - Controllers return specific actions that can't be meaningfully combined\n * - Only observers support composition because they're read-only and independent\n *\n * **Use cases:**\n * - Combining multiple presets (logging + timing + tokens)\n * - Adding custom hooks to presets\n * - Building modular, reusable monitoring configurations\n * - Environment-specific hook composition\n *\n * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.\n *\n * @param hookSets - Variable number of hook configurations to merge\n * @returns Single merged hook configuration with composed/overridden handlers\n *\n * @example\n * ```typescript\n * // Combine multiple presets\n * .withHooks(HookPresets.merge(\n * HookPresets.logging(),\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * // All observers from all three presets will run\n * ```\n *\n * @example\n * ```typescript\n * // Add custom observer to preset (both run)\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * {\n * observers: {\n * onLLMCallComplete: async (ctx) => {\n * await saveMetrics({ tokens: ctx.usage?.totalTokens });\n * },\n * },\n * }\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Multiple interceptors (last wins!)\n * .withHooks(HookPresets.merge(\n * {\n * interceptors: {\n * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored\n * },\n * },\n * {\n * interceptors: {\n * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins\n * },\n * }\n * ))\n * // Result: text will be lowercase\n * ```\n *\n * @example\n * ```typescript\n * // Modular environment-based configuration\n * const baseHooks = HookPresets.errorLogging();\n * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));\n * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());\n *\n * const hooks = process.env.NODE_ENV === 'production' ? 
prodHooks : devHooks;\n * .withHooks(hooks)\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}\n */\n static merge(...hookSets: AgentHooks[]): AgentHooks {\n const merged: AgentHooks = {\n observers: {},\n interceptors: {},\n controllers: {},\n };\n\n // Compose observers: run all handlers for the same event\n for (const hooks of hookSets) {\n if (hooks.observers) {\n for (const [key, handler] of Object.entries(hooks.observers)) {\n const typedKey = key as keyof typeof hooks.observers;\n if (merged.observers![typedKey]) {\n // Compose: run both existing and new handler\n const existing = merged.observers![typedKey];\n merged.observers![typedKey] = async (ctx: any) => {\n await existing(ctx);\n await handler(ctx);\n };\n } else {\n merged.observers![typedKey] = handler as any;\n }\n }\n }\n\n // Interceptors: last one wins (complex signatures make composition impractical)\n // Each interceptor has different parameters (chunk, message, parameters, etc.)\n // so we can't meaningfully compose them like we do with observers\n if (hooks.interceptors) {\n Object.assign(merged.interceptors!, hooks.interceptors);\n }\n\n // Controllers: last one wins (can't meaningfully compose boolean returns)\n if (hooks.controllers) {\n Object.assign(merged.controllers!, hooks.controllers);\n }\n }\n\n return merged;\n }\n\n /**\n * Composite preset combining logging, timing, tokenTracking, and errorLogging.\n *\n * This is the recommended preset for development and initial production deployments,\n * providing comprehensive observability with a single method call.\n *\n * **Includes:**\n * - All output from `logging()` preset (with optional verbosity)\n * - All output from `timing()` preset (execution times)\n * - All output from `tokenTracking()` preset (token usage)\n * - All output from `errorLogging()` preset (error details)\n *\n * **Output format:**\n * - Event logging: [LLM]/[GADGET] messages\n * - Timing: ⏱️ emoji with milliseconds\n * - Tokens: 📊 emoji with per-call and cumulative counts\n * - Errors: ❌ emoji with full error details\n *\n * **Use cases:**\n * - Full observability during development\n * - Comprehensive monitoring in production\n * - One-liner for complete agent visibility\n * - Troubleshooting and debugging with full context\n *\n * **Performance:** Combined overhead of all four presets, but still minimal in practice.\n *\n * @param options - Monitoring options\n * @param options.verbose - Passed to logging() preset for detailed output. 
Default: false\n * @returns Merged hook configuration combining all monitoring presets\n *\n * @example\n * ```typescript\n * // Basic monitoring (recommended for development)\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring())\n * .withGadgets(Calculator, Weather)\n * .ask(\"What is 15 times 23, and what's the weather in NYC?\");\n * // Output: All events, timing, tokens, and errors in one place\n * ```\n *\n * @example\n * ```typescript\n * // Verbose monitoring with full details\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * .ask(\"Your prompt\");\n * // Output includes: parameters, results, and complete responses\n * ```\n *\n * @example\n * ```typescript\n * // Environment-based monitoring\n * const isDev = process.env.NODE_ENV === 'development';\n * .withHooks(HookPresets.monitoring({ verbose: isDev }))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}\n */\n static monitoring(options: LoggingOptions = {}): AgentHooks {\n return HookPresets.merge(\n HookPresets.logging(options),\n HookPresets.timing(),\n HookPresets.tokenTracking(),\n HookPresets.errorLogging(),\n );\n }\n}\n","/**\n * Agent module - Composable, single-responsibility architecture for LLM agents.\n * This module provides a cleaner alternative to the monolithic AgentLoop.\n */\n\nexport type { AgentOptions } from \"./agent.js\";\nexport { ConversationManager } from \"./conversation-manager.js\";\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n} from \"./hooks.js\";\nexport type { IConversationManager } from \"./interfaces.js\";\n\n// StreamProcessor for advanced use cases\nexport {\n type StreamProcessingResult,\n StreamProcessor,\n type StreamProcessorOptions,\n} from \"./stream-processor.js\";\n","/**\n * Type-safe gadget factory with automatic parameter inference.\n *\n * Gadget eliminates the need for manual type assertions\n * by automatically inferring parameter types from the Zod schema.\n *\n * @example\n * ```typescript\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\"]),\n * a: z.number(),\n * b: z.number(),\n * }),\n * }) {\n * // ✨ params is automatically typed!\n * execute(params: this['params']): string {\n * const { operation, a, b } = params; // All typed!\n * return operation === \"add\" ? String(a + b) : String(a - b);\n * }\n * }\n * ```\n */\n\nimport type { ZodType } from \"zod\";\nimport { BaseGadget } from \"./gadget.js\";\nimport type { GadgetExample } from \"./types.js\";\n\n/**\n * Infer the TypeScript type from a Zod schema.\n */\ntype InferSchema<T> = T extends ZodType<infer U> ? 
U : never;\n\n/**\n * Configuration for creating a typed gadget.\n */\nexport interface GadgetConfig<TSchema extends ZodType> {\n /** Human-readable description of what the gadget does */\n description: string;\n\n /** Zod schema for parameter validation */\n schema: TSchema;\n\n /** Optional custom name (defaults to class name) */\n name?: string;\n\n /** Optional timeout in milliseconds */\n timeoutMs?: number;\n\n /** Optional usage examples to help LLMs understand proper invocation */\n examples?: GadgetExample<InferSchema<TSchema>>[];\n}\n\n/**\n * Factory function to create a typed gadget base class.\n *\n * The returned class automatically infers parameter types from the Zod schema,\n * eliminating the need for manual type assertions in the execute method.\n *\n * @param config - Configuration with description and schema\n * @returns Base class to extend with typed execute method\n *\n * @example\n * ```typescript\n * import { z } from 'zod';\n * import { Gadget } from 'llmist';\n *\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n * a: z.number().describe(\"First number\"),\n * b: z.number().describe(\"Second number\"),\n * }),\n * }) {\n * execute(params: this['params']): string {\n * // params is automatically typed as:\n * // { operation: \"add\" | \"subtract\" | \"multiply\" | \"divide\"; a: number; b: number }\n * const { operation, a, b } = params;\n *\n * switch (operation) {\n * case \"add\": return String(a + b);\n * case \"subtract\": return String(a - b);\n * case \"multiply\": return String(a * b);\n * case \"divide\": return String(a / b);\n * }\n * }\n * }\n * ```\n *\n * @example\n * ```typescript\n * // With async execution\n * class WeatherGadget extends Gadget({\n * description: \"Fetches weather for a city\",\n * schema: z.object({\n * city: z.string().min(1).describe(\"City name\"),\n * }),\n * timeoutMs: 10000,\n * }) {\n * async execute(params: this['params']): Promise<string> {\n * const { city } = params; // Automatically typed as { city: string }\n * const weather = await fetchWeather(city);\n * return `Weather in ${city}: ${weather}`;\n * }\n * }\n * ```\n */\nexport function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>) {\n abstract class GadgetBase extends BaseGadget {\n description = config.description;\n parameterSchema = config.schema;\n name = config.name;\n timeoutMs = config.timeoutMs;\n examples = config.examples;\n\n /**\n * Type helper property for accessing inferred parameter type.\n * This is used in the execute method signature: `execute(params: this['params'])`\n *\n * Note: This is just for type inference - the actual params in execute()\n * will be Record<string, unknown> which you can safely cast to this['params']\n */\n readonly params!: InferSchema<TSchema>;\n\n /**\n * Execute the gadget. 
Subclasses should cast params to this['params'].\n *\n * @param params - Validated parameters from the LLM\n * @returns Result as a string (or Promise<string> for async gadgets)\n *\n * @example\n * ```typescript\n * execute(params: Record<string, unknown>): string {\n * const typed = params as this['params'];\n * // Now 'typed' is fully typed!\n * return String(typed.a + typed.b);\n * }\n * ```\n */\n abstract execute(params: Record<string, unknown>): string | Promise<string>;\n }\n\n return GadgetBase as {\n new (): GadgetBase & { params: InferSchema<TSchema> };\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA;AAEA;AALA,SAAS,SAAS;;;AC+JX,IAAM,cAAN,MAAM,aAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwDvB,OAAO,QAAQ,UAA0B,CAAC,GAAe;AACvD,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,kCAAkC,IAAI,SAAS,GAAG;AAAA,QAChE;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,SAAS,IAAI,OAAO,eAAe;AACzC,kBAAQ,IAAI,4BAA4B,MAAM,GAAG;AACjD,cAAI,QAAQ,WAAW,IAAI,cAAc;AACvC,oBAAQ,IAAI,mBAAmB,IAAI,YAAY,EAAE;AAAA,UACnD;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,oBAAQ,IAAI,wBAAwB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UAC7E;AAAA,QACF;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,kBAAM,UAAU,IAAI,SAAS,IAAI,eAAe;AAChD,oBAAQ,IAAI,oBAAoB,OAAO,EAAE;AAAA,UAC3C;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoDA,OAAO,SAAqB;AAC1B,UAAM,UAAU,oBAAI,IAAoB;AAExC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,OAAO,IAAI,SAAS,IAAI,KAAK,IAAI,CAAC;AAAA,QAChD;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,QAAQ,QAAQ,IAAI,OAAO,IAAI,SAAS,EAAE;AAChD,cAAI,OAAO;AACT,kBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,oBAAQ,IAAI,+BAAqB,QAAQ,IAAI;AAC7C,oBAAQ,OAAO,OAAO,IAAI,SAAS,EAAE;AAAA,UACvC;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,gBAAM,MAAM,UAAU,IAAI,UAAU,IAAI,KAAK,IAAI,CAAC;AAClD,kBAAQ,IAAI,KAAK,KAAK,IAAI,CAAC;AAE3B,UAAC,IAAY,aAAa;AAAA,QAC5B;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,gBAAM,MAAO,IAAY;AACzB,cAAI,KAAK;AACP,kBAAM,QAAQ,QAAQ,IAAI,GAAG;AAC7B,gBAAI,OAAO;AACT,oBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,sBAAQ,IAAI,wBAAc,IAAI,UAAU,SAAS,QAAQ,IAAI;AAC7D,sBAAQ,OAAO,GAAG;AAAA,YACpB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuDA,OAAO,gBAA4B;AACjC,QAAI,cAAc;AAClB,QAAI,aAAa;AAEjB,WAAO;AAAA,MACL,WAAW;AAAA,QACT,mBAAmB,OAAO,QAAQ;AAChC;AACA,cAAI,IAAI,OAAO,aAAa;AAC1B,2BAAe,IAAI,MAAM;AACzB,oBAAQ,IAAI,+BAAwB,IAAI,MAAM,WAAW,EAAE;AAC3D,oBAAQ,IAAI,2BAAoB,WAAW,YAAY,UAAU,SAAS;AAAA,UAC5E;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA
;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4HA,OAAO,iBAAiB,SAA+C;AACrE,UAAM,EAAE,eAAe,YAAY,cAAc,MAAM,IAAI,WAAW,CAAC;AAGvE,QAAI,aAAa;AACjB,QAAI,mBAAmB;AACvB,QAAI,mBAAmB;AACvB,QAAI,oBAAoB;AACxB,QAAI,YAAY;AAChB,UAAM,YAAY,KAAK,IAAI;AAE3B,WAAO;AAAA,MACL,WAAW;AAAA;AAAA,QAET,gBAAgB,OAAO,QAAQ;AAC7B;AAAA,QACF;AAAA;AAAA,QAGA,mBAAmB,OAAO,QAAQ;AAChC;AAGA,cAAI,IAAI,OAAO;AACb,gCAAoB,IAAI,MAAM;AAC9B,iCAAqB,IAAI,MAAM;AAI/B,gBAAI,eAAe;AACjB,kBAAI;AAGF,sBAAM,YAAY,IAAI,QAAQ,MAAM,SAAS,GAAG,IAC5C,IAAI,QAAQ,MAAM,MAAM,GAAG,EAAE,CAAC,IAC9B,IAAI,QAAQ;AAGhB,sBAAM,eAAe,cAAc;AAAA,kBACjC;AAAA,kBACA,IAAI,MAAM;AAAA,kBACV,IAAI,MAAM;AAAA,gBACZ;AAEA,oBAAI,cAAc;AAChB,+BAAa,aAAa;AAAA,gBAC5B;AAAA,cACF,SAAS,OAAO;AAGd,oBAAI,aAAa;AACf,0BAAQ,KAAK,yCAA+B,KAAK;AAAA,gBACnD;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAGA,gBAAM,QAAuB;AAAA,YAC3B;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA,aAAa,mBAAmB;AAAA,YAChC;AAAA,YACA,gBAAgB,SAAS,KAAK,IAAI,IAAI,aAAa,KAAM,QAAQ,CAAC,CAAC;AAAA,UACrE;AAGA,cAAI,YAAY;AACd,uBAAW,KAAK;AAAA,UAClB;AAGA,cAAI,aAAa;AACf,kBAAM,kBAAkB,MAAM,eAAe,MACzC,IAAI,MAAM,cAAc,KAAM,QAAQ,CAAC,CAAC,MACxC,GAAG,MAAM,WAAW;AAExB,kBAAM,gBAAgB,MAAM,YAAY,IACpC,IAAI,MAAM,UAAU,QAAQ,CAAC,CAAC,KAC9B;AAEJ,oBAAQ;AAAA,cACN,kCAA2B,MAAM,gBAAgB,MAAM,eAAe,aAAa,aAAa,MAAM,MAAM,cAAc;AAAA,YAC5H;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,OAAO,eAA2B;AAChC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,MAAM,+BAA0B,IAAI,SAAS,MAAM,IAAI,MAAM,OAAO;AAC5E,kBAAQ,MAAM,aAAa,IAAI,QAAQ,KAAK,EAAE;AAC9C,kBAAQ,MAAM,iBAAiB,IAAI,SAAS,EAAE;AAAA,QAChD;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,cAAI,IAAI,OAAO;AACb,oBAAQ,MAAM,wBAAmB,IAAI,UAAU,EAAE;AACjD,oBAAQ,MAAM,aAAa,IAAI,KAAK,EAAE;AACtC,oBAAQ,MAAM,kBAAkB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UACzE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0CA,OAAO,SAAqB;AAC1B,WAAO,CAAC;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsFA,OAAO,SAAS,UAAoC;AAClD,UAAM,SAAqB;AAAA,MACzB,WAAW,CAAC;AAAA,MACZ,cAAc,CAAC;AAAA,MACf,aAAa,CAAC;AAAA,IAChB;AAGA,eAAW,SAAS,UAAU;AAC5B,UAAI,MAAM,WAAW;AACnB,mBAAW,CAAC,KAAK,OAAO,KAAK,OAAO,QAAQ,MAAM,SAAS,GAAG;AAC5D,gBAAM,WAAW;AACjB,cAAI,OAAO,UAAW,QAAQ,GAAG;AAE/B,kBAAM,WAAW,OAAO,UAAW,QAAQ;AAC3C,mBAAO,UAAW,QAAQ,IAAI,OAAO,QAAa;AAChD,oBAAM,SAAS,GAAG;AAClB,oBAAM,QAAQ,GAAG;AAAA,YACnB;AAAA,UACF,OAAO;AACL,mBAAO,UAAW,QAAQ,IAAI;AAAA,UAChC;AAAA,QACF;AAAA,MACF;AAKA,UAAI,MAAM,cAAc;AACtB,eAAO,OAAO,OAAO,cAAe,MAAM,Y
AAY;AAAA,MACxD;AAGA,UAAI,MAAM,aAAa;AACrB,eAAO,OAAO,OAAO,aAAc,MAAM,WAAW;AAAA,MACtD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4DA,OAAO,WAAW,UAA0B,CAAC,GAAe;AAC1D,WAAO,aAAY;AAAA,MACjB,aAAY,QAAQ,OAAO;AAAA,MAC3B,aAAY,OAAO;AAAA,MACnB,aAAY,cAAc;AAAA,MAC1B,aAAY,aAAa;AAAA,IAC3B;AAAA,EACF;AACF;;;AC/5BA;AAkCA;;;AFQA;AAEA;AASA;AAGA;AAeA;AAEA;AAMA;AAIA;AACA;AAEA;AAEA;;;AGeO,SAAS,OAAgC,QAA+B;AAAA,EAC7E,MAAe,mBAAmB,WAAW;AAAA,IAC3C,cAAc,OAAO;AAAA,IACrB,kBAAkB,OAAO;AAAA,IACzB,OAAO,OAAO;AAAA,IACd,YAAY,OAAO;AAAA,IACnB,WAAW,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAST;AAAA,EAkBX;AAEA,SAAO;AAGT;;;AHjCA;AACA;AAIA;AACA;AACA;","names":[]}
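Note on the typed-gadget change embedded in the source map above: `GadgetConfig` now accepts an optional `examples` array typed against the schema via `GadgetExample<InferSchema<TSchema>>`. A minimal sketch of how a gadget author might use it, built only from the APIs visible in the embedded sources (`Gadget`, `z`, and the new `examples` field):

```typescript
import { z, Gadget } from "llmist";

// Sketch based on the embedded typed-gadget source: `examples` entries are
// typed against the inferred schema, so params are checked at compile time.
class Calculator extends Gadget({
  description: "Performs arithmetic operations",
  schema: z.object({
    operation: z.enum(["add", "subtract"]),
    a: z.number(),
    b: z.number(),
  }),
  examples: [
    // params must match the schema's inferred type
    { params: { operation: "add", a: 5, b: 3 }, output: "8", comment: "Addition example" },
  ],
}) {
  execute(params: this["params"]): string {
    const { operation, a, b } = params;
    return operation === "add" ? String(a + b) : String(a - b);
  }
}
```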
@@ -1,6 +1,33 @@
  import { Logger, ILogObj } from 'tslog';
  import { ZodTypeAny } from 'zod';
 
+ /**
+ * Example of gadget usage to help LLMs understand proper invocation.
+ *
+ * Examples are rendered alongside the schema in `getInstruction()` to provide
+ * concrete usage patterns for the LLM.
+ *
+ * @template TParams - Inferred parameter type from Zod schema (defaults to Record<string, unknown>)
+ *
+ * @example
+ * ```typescript
+ * const calculator = createGadget({
+ * schema: z.object({ a: z.number(), b: z.number() }),
+ * examples: [
+ * { params: { a: 5, b: 3 }, output: "8", comment: "Addition example" }
+ * ],
+ * // ...
+ * });
+ * ```
+ */
+ interface GadgetExample<TParams = Record<string, unknown>> {
+ /** Example parameter values (typed to match schema) */
+ params: TParams;
+ /** Optional expected output/result string */
+ output?: string;
+ /** Optional description explaining what this example demonstrates */
+ comment?: string;
+ }
  interface GadgetExecutionResult {
  gadgetName: string;
  invocationId: string;
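The `@example` block above shows `GadgetExample` in use with `createGadget`. A slightly fuller sketch; the `description` and `execute` fields are assumptions about `createGadget`'s config, which this hunk elides behind `// ...`:

```typescript
import { z, createGadget } from "llmist";

const calculator = createGadget({
  description: "Adds two numbers", // assumed field, not shown in this hunk
  schema: z.object({ a: z.number(), b: z.number() }),
  examples: [
    // GadgetExample: params typed to the schema; output and comment optional
    { params: { a: 5, b: 3 }, output: "8", comment: "Addition example" },
  ],
  execute: ({ a, b }) => String(a + b), // assumed field as well
});
```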
@@ -89,7 +116,7 @@ type TextOnlyAction = {
  parameters: Record<string, unknown>;
  };
 
- type ParameterFormat = "json" | "yaml" | "auto";
+ type ParameterFormat = "json" | "yaml" | "toml" | "auto";
  interface StreamParserOptions {
  startPrefix?: string;
  endPrefix?: string;
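With `"toml"` added to `ParameterFormat`, parsers and prompts can exchange gadget parameters as TOML. A hedged sketch; the `parameterFormat` option is an assumption, since `StreamParserOptions` is cut off after `endPrefix` in this hunk and only the private `parameterFormat` member of `StreamParser` is visible below:

```typescript
import { StreamParser } from "llmist";

// Assumption: StreamParserOptions exposes a parameterFormat field; the hunk
// above only shows startPrefix/endPrefix before it is truncated.
const parser = new StreamParser({ parameterFormat: "toml" });

// Per the TOML format description added later in this diff, parameter
// blocks would look like key = value pairs, e.g.:
//   a = 5
//   b = 3
//   note = """triple-quotes for multiline"""
```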
@@ -110,6 +137,11 @@ declare class StreamParser {
  private readonly parameterFormat;
  constructor(options?: StreamParserOptions);
  private takeTextUntil;
+ /**
+ * Parse gadget name, handling both old format (name:invocationId) and new format (just name).
+ * For new format, generates a unique invocation ID.
+ */
+ private parseGadgetName;
  /**
  * Parse parameter string according to configured format
  */
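The doc comment on `parseGadgetName` pins down its contract but not its body. A sketch of that documented behavior, illustrative only, since the real private implementation is not part of this diff:

```typescript
import { randomUUID } from "node:crypto";

// Old format: "name:invocationId". New format: bare "name", in which case
// a unique invocation ID is generated.
function parseGadgetName(raw: string): { name: string; invocationId: string } {
  const sep = raw.indexOf(":");
  if (sep !== -1) {
    return { name: raw.slice(0, sep), invocationId: raw.slice(sep + 1) };
  }
  return { name: raw, invocationId: randomUUID() };
}
```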
@@ -149,6 +181,14 @@ declare abstract class BaseGadget {
  * Set to 0 or undefined to disable timeout for this gadget.
  */
  timeoutMs?: number;
+ /**
+ * Optional usage examples to help LLMs understand proper invocation.
+ * Examples are rendered in getInstruction() alongside the schema.
+ *
+ * Note: Uses broader `unknown` type to allow typed examples from subclasses
+ * while maintaining runtime compatibility.
+ */
+ examples?: GadgetExample<unknown>[];
  /**
  * Execute the gadget with the given parameters.
  * Can be synchronous or asynchronous.
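As the note says, `examples` is typed `GadgetExample<unknown>[]` so that a subclass can assign a more precisely typed array (property types are covariant here). In the sketch below, the `schema` member and the import path are assumptions, and any other members `BaseGadget` may require are omitted:

```typescript
import { BaseGadget, type GadgetExample, z } from "llmist"; // entry point assumed

class AddGadget extends BaseGadget {
  schema = z.object({ a: z.number(), b: z.number() }); // assumed member
  // Typed examples: GadgetExample<{a; b}>[] is assignable to GadgetExample<unknown>[].
  examples: GadgetExample<{ a: number; b: number }>[] = [
    { params: { a: 5, b: 3 }, output: "8", comment: "Addition example" },
  ];
  execute(params: { a: number; b: number }): string {
    return String(params.a + params.b);
  }
}
```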
@@ -167,7 +207,7 @@ declare abstract class BaseGadget {
  * Generate instruction text for the LLM with format-specific schema.
  * Combines name, description, and parameter schema into a formatted instruction.
  *
- * @param format - Format for the schema representation ('json' | 'yaml' | 'auto')
+ * @param format - Format for the schema representation ('json' | 'yaml' | 'toml' | 'auto')
  * @returns Formatted instruction string
  */
  getInstruction(format?: ParameterFormat): string;
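Because `getInstruction` takes a `ParameterFormat`, the new member works directly. Continuing the hypothetical `AddGadget` from the previous sketch:

```typescript
// Name, description, TOML-flavored schema, and any examples, as one string.
const instruction = new AddGadget().getInstruction("toml");
```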
@@ -232,6 +272,11 @@ interface PromptConfig {
  * Default: "Parameters in JSON format (valid JSON object)"
  */
  formatDescriptionJson?: PromptTemplate;
+ /**
+ * Format description for TOML parameter format.
+ * Default: "Parameters in TOML format (key = value pairs, use triple-quotes for multiline)"
+ */
+ formatDescriptionToml?: PromptTemplate;
  /**
  * Rules that appear in the rules section.
  * Can be an array of strings or a function that returns an array.
@@ -248,6 +293,11 @@ interface PromptConfig {
  * Default: "\n\nInput Schema (YAML):"
  */
  schemaLabelYaml?: PromptTemplate;
+ /**
+ * Schema label for TOML format.
+ * Default: "\n\nInput Schema (TOML):"
+ */
+ schemaLabelToml?: PromptTemplate;
  /**
  * Custom examples to show in the examples section.
  * If provided, replaces the default examples entirely.
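The two TOML-related `PromptConfig` additions (this hunk and the previous one) can be overridden together. This sketch merely restates the documented defaults and assumes a plain string satisfies `PromptTemplate`:

```typescript
const promptConfig: PromptConfig = {
  formatDescriptionToml:
    "Parameters in TOML format (key = value pairs, use triple-quotes for multiline)",
  schemaLabelToml: "\n\nInput Schema (TOML):",
};
```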
@@ -285,6 +335,11 @@ declare class LLMMessageBuilder {
  private endPrefix;
  private promptConfig;
  constructor(promptConfig?: PromptConfig);
+ /**
+ * Set custom prefixes for gadget markers.
+ * Used to configure history builder to match system prompt markers.
+ */
+ withPrefixes(startPrefix: string, endPrefix: string): this;
  addSystem(content: string, metadata?: Record<string, unknown>): this;
  addGadgets(gadgets: BaseGadget[], parameterFormat?: ParameterFormat, options?: {
  startPrefix?: string;
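`withPrefixes` is documented as keeping the history builder's gadget markers in sync with the system prompt. A short sketch, with marker strings invented for illustration:

```typescript
// Use the same markers in history as in the system prompt.
const builder = new LLMMessageBuilder()
  .withPrefixes("<<<gadget:", "<<<end:")
  .addSystem("You are a helpful agent.");
```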
@@ -2598,4 +2653,4 @@ declare function createTextMockStream(text: string, options?: {
  usage?: MockResponse["usage"];
  }): LLMStream;
 
- export { type ObserveChunkContext as $, type AgentHooks as A, BaseGadget as B, runWithHandlers as C, type AfterGadgetExecutionAction as D, type EventHandlers as E, type AfterGadgetExecutionControllerContext as F, GadgetRegistry as G, type HistoryMessage as H, type AfterLLMCallAction as I, type AfterLLMCallControllerContext as J, type AfterLLMErrorAction as K, type LLMMessage as L, MockProviderAdapter as M, type AgentOptions as N, type BeforeGadgetExecutionAction as O, type ParameterFormat as P, type BeforeLLMCallAction as Q, type ChunkInterceptorContext as R, type StreamEvent as S, type Controllers as T, type GadgetExecutionControllerContext as U, type GadgetParameterInterceptorContext as V, type GadgetResultInterceptorContext as W, type Interceptors as X, type LLMCallControllerContext as Y, type LLMErrorControllerContext as Z, type MessageInterceptorContext as _, MockBuilder as a, type ObserveGadgetCompleteContext as a0, type ObserveGadgetStartContext as a1, type ObserveLLMCallContext as a2, type ObserveLLMCompleteContext as a3, type ObserveLLMErrorContext as a4, type Observers as a5, type LLMistOptions as a6, LLMist as a7, type LLMRole as a8, LLMMessageBuilder as a9, type CostEstimate as aa, type ModelFeatures as ab, type ModelLimits as ac, type ModelPricing as ad, type ProviderIdentifier as ae, type TokenUsage as af, ModelIdentifierParser as ag, type PromptConfig as ah, type PromptContext as ai, type PromptTemplate as aj, DEFAULT_PROMPTS as ak, resolvePromptTemplate as al, resolveRulesTemplate as am, type QuickOptions as an, complete as ao, stream as ap, StreamParser as aq, type GadgetClass as ar, type GadgetOrClass as as, type TextOnlyAction as at, type TextOnlyContext as au, type TextOnlyCustomHandler as av, type TextOnlyGadgetConfig as aw, type TextOnlyHandler as ax, type TextOnlyStrategy as ay, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, ModelRegistry as o, type LLMStreamChunk as p, type ParsedGadgetCall as q, type GadgetExecutionResult as r, type ProviderAdapter as s, type ModelDescriptor as t, type ModelSpec as u, type LLMGenerationOptions as v, type LLMStream as w, AgentBuilder as x, collectEvents as y, collectText as z };
+ export { type MessageInterceptorContext as $, type AgentHooks as A, BaseGadget as B, collectText as C, runWithHandlers as D, type EventHandlers as E, type AfterGadgetExecutionAction as F, GadgetRegistry as G, type HistoryMessage as H, type AfterGadgetExecutionControllerContext as I, type AfterLLMCallAction as J, type AfterLLMCallControllerContext as K, type LLMMessage as L, MockProviderAdapter as M, type AfterLLMErrorAction as N, type AgentOptions as O, type ParameterFormat as P, type BeforeGadgetExecutionAction as Q, type BeforeLLMCallAction as R, type StreamEvent as S, type ChunkInterceptorContext as T, type Controllers as U, type GadgetExecutionControllerContext as V, type GadgetParameterInterceptorContext as W, type GadgetResultInterceptorContext as X, type Interceptors as Y, type LLMCallControllerContext as Z, type LLMErrorControllerContext as _, MockBuilder as a, type ObserveChunkContext as a0, type ObserveGadgetCompleteContext as a1, type ObserveGadgetStartContext as a2, type ObserveLLMCallContext as a3, type ObserveLLMCompleteContext as a4, type ObserveLLMErrorContext as a5, type Observers as a6, type LLMistOptions as a7, LLMist as a8, type LLMRole as a9, LLMMessageBuilder as aa, type CostEstimate as ab, type ModelFeatures as ac, type ModelLimits as ad, type ModelPricing as ae, type ProviderIdentifier as af, type TokenUsage as ag, ModelIdentifierParser as ah, type PromptConfig as ai, type PromptContext as aj, type PromptTemplate as ak, DEFAULT_PROMPTS as al, resolvePromptTemplate as am, resolveRulesTemplate as an, type QuickOptions as ao, complete as ap, stream as aq, StreamParser as ar, type GadgetClass as as, type GadgetOrClass as at, type TextOnlyAction as au, type TextOnlyContext as av, type TextOnlyCustomHandler as aw, type TextOnlyGadgetConfig as ax, type TextOnlyHandler as ay, type TextOnlyStrategy as az, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, ModelRegistry as o, type LLMStreamChunk as p, type GadgetExample as q, type ParsedGadgetCall as r, type GadgetExecutionResult as s, type ProviderAdapter as t, type ModelDescriptor as u, type ModelSpec as v, type LLMGenerationOptions as w, type LLMStream as x, AgentBuilder as y, collectEvents as z };