llmist 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/agent/hook-presets.ts","../src/agent/index.ts","../src/agent/compaction/index.ts","../src/agent/hints.ts","../src/gadgets/typed-gadget.ts"],"sourcesContent":["// Re-export Zod's z for schema definitions\n// Using llmist's z ensures .describe() metadata is preserved in JSON schemas\nexport { z } from \"zod\";\n// Syntactic sugar: Agent builder and event handlers\nexport type { HistoryMessage } from \"./agent/builder.js\";\nexport { AgentBuilder } from \"./agent/builder.js\";\nexport type { EventHandlers } from \"./agent/event-handlers.js\";\nexport { collectEvents, collectText, runWithHandlers } from \"./agent/event-handlers.js\";\n// Syntactic sugar: Hook presets\nexport type { LoggingOptions } from \"./agent/hook-presets.js\";\nexport { HookPresets } from \"./agent/hook-presets.js\";\n// Agent infrastructure\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n AgentOptions,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n // LLM Assistance Hints\n HintsConfig,\n IConversationManager,\n Interceptors,\n IterationHintOptions,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n ParallelGadgetHintOptions,\n StreamProcessingResult,\n StreamProcessorOptions,\n // Gadget output limiting\n StoredOutput,\n // Context compaction\n CompactionConfig,\n CompactionContext,\n CompactionEvent,\n CompactionResult,\n CompactionStats,\n CompactionStrategy,\n MessageTurn,\n ObserveCompactionContext,\n ResolvedCompactionConfig,\n} from \"./agent/index.js\";\nexport {\n // Existing exports\n ConversationManager,\n GadgetOutputStore,\n StreamProcessor,\n // Compaction exports\n CompactionManager,\n DEFAULT_COMPACTION_CONFIG,\n DEFAULT_SUMMARIZATION_PROMPT,\n HybridStrategy,\n SlidingWindowStrategy,\n SummarizationStrategy,\n // LLM Assistance Hints\n createHints,\n iterationProgressHint,\n parallelGadgetHint,\n} from \"./agent/index.js\";\nexport type { LLMistOptions } from \"./core/client.js\";\nexport { LLMist } from \"./core/client.js\";\nexport type { LLMMessage, LLMRole } from \"./core/messages.js\";\nexport { LLMMessageBuilder } from \"./core/messages.js\";\n// Model catalog\nexport type {\n CostEstimate,\n ModelFeatures,\n ModelLimits,\n ModelPricing,\n ModelSpec,\n} from \"./core/model-catalog.js\";\nexport { ModelRegistry } from \"./core/model-registry.js\";\n\n// Syntactic sugar: Model shortcuts and quick methods\nexport {\n getModelId,\n getProvider,\n hasProviderPrefix,\n MODEL_ALIASES,\n resolveModel,\n} from \"./core/model-shortcuts.js\";\nexport type {\n LLMGenerationOptions,\n LLMStream,\n LLMStreamChunk,\n ModelDescriptor,\n ProviderIdentifier,\n TokenUsage,\n} from \"./core/options.js\";\nexport { ModelIdentifierParser } from \"./core/options.js\";\nexport type {\n HintContext,\n HintTemplate,\n PromptConfig,\n PromptContext,\n PromptTemplate,\n} from \"./core/prompt-config.js\";\nexport {\n DEFAULT_HINTS,\n DEFAULT_PROMPTS,\n 
resolveHintTemplate,\n resolvePromptTemplate,\n resolveRulesTemplate,\n} from \"./core/prompt-config.js\";\nexport type { QuickOptions } from \"./core/quick-methods.js\";\nexport { complete, stream } from \"./core/quick-methods.js\";\nexport type { CreateGadgetConfig } from \"./gadgets/create-gadget.js\";\nexport { createGadget } from \"./gadgets/create-gadget.js\";\n// Gadget output viewer (for custom output store integration)\nexport { createGadgetOutputViewer } from \"./gadgets/output-viewer.js\";\n// Gadget infrastructure\nexport { BreakLoopException, HumanInputException } from \"./gadgets/exceptions.js\";\nexport { GadgetExecutor } from \"./gadgets/executor.js\";\nexport { BaseGadget } from \"./gadgets/gadget.js\";\nexport { StreamParser } from \"./gadgets/parser.js\";\nexport type { GadgetClass, GadgetOrClass } from \"./gadgets/registry.js\";\nexport { GadgetRegistry } from \"./gadgets/registry.js\";\n\n// Syntactic sugar: Typed gadgets and helpers\nexport type { GadgetConfig } from \"./gadgets/typed-gadget.js\";\nexport { Gadget } from \"./gadgets/typed-gadget.js\";\nexport type {\n GadgetExample,\n GadgetExecutionResult,\n ParsedGadgetCall,\n StreamEvent,\n TextOnlyAction,\n TextOnlyContext,\n TextOnlyCustomHandler,\n TextOnlyGadgetConfig,\n TextOnlyHandler,\n TextOnlyStrategy,\n} from \"./gadgets/types.js\";\nexport type { ValidationIssue, ValidationResult } from \"./gadgets/validation.js\";\nexport { validateAndApplyDefaults, validateGadgetParams } from \"./gadgets/validation.js\";\nexport type { LoggerOptions } from \"./logging/logger.js\";\nexport { createLogger, defaultLogger } from \"./logging/logger.js\";\nexport {\n AnthropicMessagesProvider,\n createAnthropicProviderFromEnv,\n} from \"./providers/anthropic.js\";\nexport { discoverProviderAdapters } from \"./providers/discovery.js\";\nexport { createGeminiProviderFromEnv, GeminiGenerativeProvider } from \"./providers/gemini.js\";\nexport { createOpenAIProviderFromEnv, OpenAIChatProvider } from \"./providers/openai.js\";\nexport type { ProviderAdapter } from \"./providers/provider.js\";\n\n// Testing/Mock infrastructure\nexport type {\n MockMatcher,\n MockMatcherContext,\n MockOptions,\n MockRegistration,\n MockResponse,\n MockStats,\n} from \"./testing/index.js\";\nexport {\n createMockAdapter,\n createMockClient,\n createMockStream,\n createTextMockStream,\n getMockManager,\n MockBuilder,\n MockManager,\n MockProviderAdapter,\n mockLLM,\n} from \"./testing/index.js\";\n","/**\n * Ready-to-use hook configurations for common monitoring, logging, and debugging tasks.\n *\n * HookPresets provide instant observability without writing custom hooks. 
They're the\n * fastest way to add monitoring to your agents during development and production.\n *\n * ## Available Presets\n *\n * - **logging(options?)** - Log LLM calls and gadget execution\n * - **timing()** - Measure execution time for operations\n * - **tokenTracking()** - Track cumulative token usage and costs\n * - **progressTracking(options?)** - Track progress with iterations, tokens, cost, and timing (SHOWCASE)\n * - **errorLogging()** - Log detailed error information\n * - **silent()** - No output (useful for testing)\n * - **monitoring(options?)** - All-in-one preset combining logging, timing, tokens, and errors\n * - **merge(...hookSets)** - Combine multiple hook configurations\n *\n * ## Quick Start\n *\n * @example\n * ```typescript\n * import { LLMist, HookPresets } from 'llmist';\n *\n * // Basic logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"Your prompt\");\n *\n * // Full monitoring suite (recommended for development)\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * .ask(\"Your prompt\");\n *\n * // Combine multiple presets\n * await LLMist.createAgent()\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * .ask(\"Your prompt\");\n *\n * // Environment-based configuration\n * const hooks = process.env.NODE_ENV === 'production'\n * ? HookPresets.merge(HookPresets.errorLogging(), HookPresets.tokenTracking())\n * : HookPresets.monitoring({ verbose: true });\n *\n * await LLMist.createAgent()\n * .withHooks(hooks)\n * .ask(\"Your prompt\");\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md | Full documentation}\n */\n\nimport type { AgentHooks } from \"./hooks.js\";\nimport type { ModelRegistry } from \"../core/model-registry.js\";\n\n/**\n * Options for logging preset.\n */\nexport interface LoggingOptions {\n /** Include verbose details like parameters and results */\n verbose?: boolean;\n}\n\n/**\n * Progress statistics reported by progressTracking preset.\n *\n * Contains cumulative metrics across all LLM calls in the agent session,\n * useful for building progress UI, cost monitoring, and performance tracking.\n */\nexport interface ProgressStats {\n /** Current iteration number (increments on each LLM call start) */\n currentIteration: number;\n\n /** Total number of completed LLM calls */\n totalCalls: number;\n\n /** Cumulative input tokens across all calls */\n totalInputTokens: number;\n\n /** Cumulative output tokens across all calls */\n totalOutputTokens: number;\n\n /** Total tokens (input + output) */\n totalTokens: number;\n\n /** Cumulative cost in USD (requires modelRegistry) */\n totalCost: number;\n\n /** Elapsed time in seconds since first call */\n elapsedSeconds: number;\n}\n\n/**\n * Options for progressTracking preset.\n *\n * Controls how progress data is tracked and reported during agent execution.\n */\nexport interface ProgressTrackingOptions {\n /**\n * Model registry for cost calculation.\n *\n * If provided, enables automatic cost estimation based on token usage\n * and model pricing data. 
Without it, totalCost will always be 0.\n *\n * @example\n * ```typescript\n * import { LLMist, HookPresets } from 'llmist';\n *\n * const client = LLMist.create();\n * const hooks = HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry // Enable cost tracking\n * });\n * ```\n */\n modelRegistry?: ModelRegistry;\n\n /**\n * Callback invoked after each LLM call completion with cumulative stats.\n *\n * Use this to update progress UI, log metrics, or track budgets in real-time.\n *\n * @example\n * ```typescript\n * HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * console.log(`Iteration #${stats.currentIteration}`);\n * console.log(`Cost so far: $${stats.totalCost.toFixed(4)}`);\n * console.log(`Elapsed: ${stats.elapsedSeconds}s`);\n * }\n * })\n * ```\n */\n onProgress?: (stats: ProgressStats) => void;\n\n /**\n * Whether to log progress to console after each LLM call.\n *\n * When enabled, prints a summary line with tokens, cost, and elapsed time.\n * Useful for quick debugging without implementing a custom callback.\n *\n * Default: false\n *\n * @example\n * ```typescript\n * // Quick console-based progress tracking\n * HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * logProgress: true // Log to console\n * })\n * // Output: 📊 Progress: Iteration #2 | 1,234 tokens | $0.0056 | 12.3s\n * ```\n */\n logProgress?: boolean;\n}\n\n/**\n * Common hook presets.\n */\nexport class HookPresets {\n /**\n * Logs LLM calls and gadget execution to console with optional verbosity.\n *\n * **Output (basic mode):**\n * - LLM call start/complete events with iteration numbers\n * - Gadget execution start/complete with gadget names\n * - Token counts when available\n *\n * **Output (verbose mode):**\n * - All basic mode output\n * - Full gadget parameters (formatted JSON)\n * - Full gadget results\n * - Complete LLM response text\n *\n * **Use cases:**\n * - Basic development debugging and execution flow visibility\n * - Understanding agent decision-making and tool usage\n * - Troubleshooting gadget invocations\n *\n * **Performance:** Minimal overhead. Console writes are synchronous but fast.\n *\n * @param options - Logging options\n * @param options.verbose - Include full parameters and results. 
Default: false\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging())\n * .ask(\"Calculate 15 * 23\");\n * // Output: [LLM] Starting call (iteration 0)\n * // [GADGET] Executing Calculator\n * // [GADGET] Completed Calculator\n * // [LLM] Completed (tokens: 245)\n * ```\n *\n * @example\n * ```typescript\n * // Verbose logging with full details\n * await LLMist.createAgent()\n * .withHooks(HookPresets.logging({ verbose: true }))\n * .ask(\"Calculate 15 * 23\");\n * // Output includes: parameters, results, and full responses\n * ```\n *\n * @example\n * ```typescript\n * // Environment-based verbosity\n * const isDev = process.env.NODE_ENV === 'development';\n * .withHooks(HookPresets.logging({ verbose: isDev }))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsloggingoptions | Full documentation}\n */\n static logging(options: LoggingOptions = {}): AgentHooks {\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n console.log(`[LLM] Starting call (iteration ${ctx.iteration})`);\n },\n onLLMCallComplete: async (ctx) => {\n const tokens = ctx.usage?.totalTokens ?? \"unknown\";\n console.log(`[LLM] Completed (tokens: ${tokens})`);\n if (options.verbose && ctx.finalMessage) {\n console.log(`[LLM] Response: ${ctx.finalMessage}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n console.log(`[GADGET] Executing ${ctx.gadgetName}`);\n if (options.verbose) {\n console.log(`[GADGET] Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n onGadgetExecutionComplete: async (ctx) => {\n console.log(`[GADGET] Completed ${ctx.gadgetName}`);\n if (options.verbose) {\n const display = ctx.error ?? ctx.finalResult ?? \"(no result)\";\n console.log(`[GADGET] Result: ${display}`);\n }\n },\n },\n };\n }\n\n /**\n * Measures and logs execution time for LLM calls and gadgets.\n *\n * **Output:**\n * - Duration in milliseconds with ⏱️ emoji for each operation\n * - Separate timing for each LLM iteration\n * - Separate timing for each gadget execution\n *\n * **Use cases:**\n * - Performance profiling and optimization\n * - Identifying slow operations (LLM calls vs gadget execution)\n * - Monitoring response times in production\n * - Capacity planning and SLA tracking\n *\n * **Performance:** Negligible overhead. 
Uses Date.now() for timing measurements.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic timing\n * await LLMist.createAgent()\n * .withHooks(HookPresets.timing())\n * .withGadgets(Weather, Database)\n * .ask(\"What's the weather in NYC?\");\n * // Output: ⏱️ LLM call took 1234ms\n * // ⏱️ Gadget Weather took 567ms\n * // ⏱️ LLM call took 890ms\n * ```\n *\n * @example\n * ```typescript\n * // Combined with logging for full context\n * .withHooks(HookPresets.merge(\n * HookPresets.logging(),\n * HookPresets.timing()\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Correlate performance with cost\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstiming | Full documentation}\n */\n static timing(): AgentHooks {\n const timings = new Map<string, number>();\n\n return {\n observers: {\n onLLMCallStart: async (ctx) => {\n timings.set(`llm-${ctx.iteration}`, Date.now());\n },\n onLLMCallComplete: async (ctx) => {\n const start = timings.get(`llm-${ctx.iteration}`);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ LLM call took ${duration}ms`);\n timings.delete(`llm-${ctx.iteration}`);\n }\n },\n onGadgetExecutionStart: async (ctx) => {\n const key = `gadget-${ctx.gadgetName}-${Date.now()}`;\n timings.set(key, Date.now());\n // Store key for lookup in complete handler\n (ctx as any)._timingKey = key;\n },\n onGadgetExecutionComplete: async (ctx) => {\n const key = (ctx as any)._timingKey;\n if (key) {\n const start = timings.get(key);\n if (start) {\n const duration = Date.now() - start;\n console.log(`⏱️ Gadget ${ctx.gadgetName} took ${duration}ms`);\n timings.delete(key);\n }\n }\n },\n },\n };\n }\n\n /**\n * Tracks cumulative token usage across all LLM calls.\n *\n * **Output:**\n * - Per-call token count with 📊 emoji\n * - Cumulative total across all calls\n * - Call count for average calculations\n *\n * **Use cases:**\n * - Cost monitoring and budget tracking\n * - Optimizing prompts to reduce token usage\n * - Comparing token efficiency across different approaches\n * - Real-time cost estimation\n *\n * **Performance:** Minimal overhead. Simple counter increments.\n *\n * **Note:** Token counts depend on the provider's response. Some providers\n * may not include usage data, in which case counts won't be logged.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic token tracking\n * await LLMist.createAgent()\n * .withHooks(HookPresets.tokenTracking())\n * .ask(\"Summarize this document...\");\n * // Output: 📊 Tokens this call: 1,234\n * // 📊 Total tokens: 1,234 (across 1 calls)\n * // 📊 Tokens this call: 567\n * // 📊 Total tokens: 1,801 (across 2 calls)\n * ```\n *\n * @example\n * ```typescript\n * // Cost calculation with custom hook\n * let totalTokens = 0;\n * .withHooks(HookPresets.merge(\n * HookPresets.tokenTracking(),\n * {\n * observers: {\n * onLLMCallComplete: async (ctx) => {\n * totalTokens += ctx.usage?.totalTokens ?? 
0;\n * const cost = (totalTokens / 1_000_000) * 3.0; // $3 per 1M tokens\n * console.log(`💰 Estimated cost: $${cost.toFixed(4)}`);\n * },\n * },\n * }\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetstokentracking | Full documentation}\n */\n static tokenTracking(): AgentHooks {\n let totalTokens = 0;\n let totalCalls = 0;\n\n return {\n observers: {\n onLLMCallComplete: async (ctx) => {\n totalCalls++;\n if (ctx.usage?.totalTokens) {\n totalTokens += ctx.usage.totalTokens;\n console.log(`📊 Tokens this call: ${ctx.usage.totalTokens}`);\n console.log(`📊 Total tokens: ${totalTokens} (across ${totalCalls} calls)`);\n }\n },\n },\n };\n }\n\n /**\n * Tracks comprehensive progress metrics including iterations, tokens, cost, and timing.\n *\n * **This preset showcases llmist's core capabilities by demonstrating:**\n * - Observer pattern for non-intrusive monitoring\n * - Integration with ModelRegistry for cost estimation\n * - Callback-based architecture for flexible UI updates\n * - Provider-agnostic token and cost tracking\n *\n * Unlike `tokenTracking()` which only logs to console, this preset provides\n * structured data through callbacks, making it perfect for building custom UIs,\n * dashboards, or progress indicators (like the llmist CLI).\n *\n * **Output (when logProgress: true):**\n * - Iteration number and call count\n * - Cumulative token usage (input + output)\n * - Cumulative cost in USD (requires modelRegistry)\n * - Elapsed time in seconds\n *\n * **Use cases:**\n * - Building CLI progress indicators with live updates\n * - Creating web dashboards with real-time metrics\n * - Budget monitoring and cost alerts\n * - Performance tracking and optimization\n * - Custom logging to external systems (Datadog, CloudWatch, etc.)\n *\n * **Performance:** Minimal overhead. Uses Date.now() for timing and optional\n * ModelRegistry.estimateCost() which is O(1) lookup. 
Callback invocation is\n * synchronous and fast.\n *\n * @param options - Progress tracking options\n * @param options.modelRegistry - ModelRegistry for cost estimation (optional)\n * @param options.onProgress - Callback invoked after each LLM call (optional)\n * @param options.logProgress - Log progress to console (default: false)\n * @returns Hook configuration with progress tracking observers\n *\n * @example\n * ```typescript\n * // Basic usage with callback (RECOMMENDED - used by llmist CLI)\n * import { LLMist, HookPresets } from 'llmist';\n *\n * const client = LLMist.create();\n *\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * // Update your UI with stats\n * console.log(`#${stats.currentIteration} | ${stats.totalTokens} tokens | $${stats.totalCost.toFixed(4)}`);\n * }\n * }))\n * .withGadgets(Calculator)\n * .ask(\"Calculate 15 * 23\");\n * // Output: #1 | 245 tokens | $0.0012\n * ```\n *\n * @example\n * ```typescript\n * // Console logging mode (quick debugging)\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * logProgress: true // Simple console output\n * }))\n * .ask(\"Your prompt\");\n * // Output: 📊 Progress: Iteration #1 | 245 tokens | $0.0012 | 1.2s\n * ```\n *\n * @example\n * ```typescript\n * // Budget monitoring with alerts\n * const BUDGET_USD = 0.10;\n *\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * if (stats.totalCost > BUDGET_USD) {\n * throw new Error(`Budget exceeded: $${stats.totalCost.toFixed(4)}`);\n * }\n * }\n * }))\n * .ask(\"Long running task...\");\n * ```\n *\n * @example\n * ```typescript\n * // Web dashboard integration\n * let progressBar: HTMLElement;\n *\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: (stats) => {\n * // Update web UI in real-time\n * progressBar.textContent = `Iteration ${stats.currentIteration}`;\n * progressBar.dataset.cost = stats.totalCost.toFixed(4);\n * progressBar.dataset.tokens = stats.totalTokens.toString();\n * }\n * }))\n * .ask(\"Your prompt\");\n * ```\n *\n * @example\n * ```typescript\n * // External logging (Datadog, CloudWatch, etc.)\n * await client.agent()\n * .withHooks(HookPresets.progressTracking({\n * modelRegistry: client.modelRegistry,\n * onProgress: async (stats) => {\n * await metrics.gauge('llm.iteration', stats.currentIteration);\n * await metrics.gauge('llm.cost', stats.totalCost);\n * await metrics.gauge('llm.tokens', stats.totalTokens);\n * }\n * }))\n * .ask(\"Your prompt\");\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsprogresstrackingoptions | Full documentation}\n * @see {@link ProgressTrackingOptions} for detailed options\n * @see {@link ProgressStats} for the callback data structure\n */\n static progressTracking(options?: ProgressTrackingOptions): AgentHooks {\n const { modelRegistry, onProgress, logProgress = false } = options ?? 
{};\n\n // State tracking - follows same pattern as tokenTracking()\n let totalCalls = 0;\n let currentIteration = 0;\n let totalInputTokens = 0;\n let totalOutputTokens = 0;\n let totalCost = 0;\n const startTime = Date.now();\n\n return {\n observers: {\n // Track iteration on each LLM call start\n onLLMCallStart: async (ctx) => {\n currentIteration++;\n },\n\n // Accumulate metrics and report progress on each LLM call completion\n onLLMCallComplete: async (ctx) => {\n totalCalls++;\n\n // Track token usage from provider response\n if (ctx.usage) {\n totalInputTokens += ctx.usage.inputTokens;\n totalOutputTokens += ctx.usage.outputTokens;\n\n // Calculate cost using ModelRegistry (core llmist feature)\n // This showcases integration with llmist's pricing catalog\n if (modelRegistry) {\n try {\n // Extract model name from provider:model format\n // Example: \"openai:gpt-4o\" -> \"gpt-4o\"\n const modelName = ctx.options.model.includes(\":\")\n ? ctx.options.model.split(\":\")[1]\n : ctx.options.model;\n\n // Use core's estimateCost() for accurate pricing\n const costEstimate = modelRegistry.estimateCost(\n modelName,\n ctx.usage.inputTokens,\n ctx.usage.outputTokens,\n );\n\n if (costEstimate) {\n totalCost += costEstimate.totalCost;\n }\n } catch (error) {\n // Graceful degradation - log error but don't crash\n // This follows llmist's principle of non-intrusive monitoring\n if (logProgress) {\n console.warn(`⚠️ Cost estimation failed:`, error);\n }\n }\n }\n }\n\n // Build comprehensive progress stats\n const stats: ProgressStats = {\n currentIteration,\n totalCalls,\n totalInputTokens,\n totalOutputTokens,\n totalTokens: totalInputTokens + totalOutputTokens,\n totalCost,\n elapsedSeconds: Number(((Date.now() - startTime) / 1000).toFixed(1)),\n };\n\n // Invoke callback if provided (used by CLI and custom UIs)\n if (onProgress) {\n onProgress(stats);\n }\n\n // Optional console logging for quick debugging\n if (logProgress) {\n const formattedTokens = stats.totalTokens >= 1000\n ? `${(stats.totalTokens / 1000).toFixed(1)}k`\n : `${stats.totalTokens}`;\n\n const formattedCost = stats.totalCost > 0\n ? `$${stats.totalCost.toFixed(4)}`\n : \"$0\";\n\n console.log(\n `📊 Progress: Iteration #${stats.currentIteration} | ${formattedTokens} tokens | ${formattedCost} | ${stats.elapsedSeconds}s`,\n );\n }\n },\n },\n };\n }\n\n /**\n * Logs detailed error information for debugging and troubleshooting.\n *\n * **Output:**\n * - LLM errors with ❌ emoji, including model and recovery status\n * - Gadget errors with full context (parameters, error message)\n * - Separate logging for LLM and gadget failures\n *\n * **Use cases:**\n * - Troubleshooting production issues\n * - Understanding error patterns and frequency\n * - Debugging error recovery behavior\n * - Collecting error metrics for monitoring\n *\n * **Performance:** Minimal overhead. 
Only logs when errors occur.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * // Basic error logging\n * await LLMist.createAgent()\n * .withHooks(HookPresets.errorLogging())\n * .withGadgets(Database)\n * .ask(\"Fetch user data\");\n * // Output (on LLM error): ❌ LLM Error (iteration 1): Rate limit exceeded\n * // Model: gpt-5-nano\n * // Recovered: true\n * // Output (on gadget error): ❌ Gadget Error: Database\n * // Error: Connection timeout\n * // Parameters: {...}\n * ```\n *\n * @example\n * ```typescript\n * // Combine with monitoring for full context\n * .withHooks(HookPresets.merge(\n * HookPresets.monitoring(), // Includes errorLogging\n * customErrorAnalytics\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Error analytics collection\n * const errors: any[] = [];\n * .withHooks(HookPresets.merge(\n * HookPresets.errorLogging(),\n * {\n * observers: {\n * onLLMCallError: async (ctx) => {\n * errors.push({ type: 'llm', error: ctx.error, recovered: ctx.recovered });\n * },\n * },\n * }\n * ))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetserrorlogging | Full documentation}\n */\n static errorLogging(): AgentHooks {\n return {\n observers: {\n onLLMCallError: async (ctx) => {\n console.error(`❌ LLM Error (iteration ${ctx.iteration}):`, ctx.error.message);\n console.error(` Model: ${ctx.options.model}`);\n console.error(` Recovered: ${ctx.recovered}`);\n },\n onGadgetExecutionComplete: async (ctx) => {\n if (ctx.error) {\n console.error(`❌ Gadget Error: ${ctx.gadgetName}`);\n console.error(` Error: ${ctx.error}`);\n console.error(` Parameters:`, JSON.stringify(ctx.parameters, null, 2));\n }\n },\n },\n };\n }\n\n /**\n * Tracks context compaction events.\n *\n * **Output:**\n * - Compaction events with 🗜️ emoji\n * - Strategy name, tokens before/after, and savings\n * - Cumulative statistics\n *\n * **Use cases:**\n * - Monitoring long-running conversations\n * - Understanding when and how compaction occurs\n * - Debugging context management issues\n *\n * **Performance:** Minimal overhead. Simple console output.\n *\n * @returns Hook configuration that can be passed to .withHooks()\n *\n * @example\n * ```typescript\n * await LLMist.createAgent()\n * .withHooks(HookPresets.compactionTracking())\n * .ask(\"Your prompt\");\n * ```\n */\n static compactionTracking(): AgentHooks {\n return {\n observers: {\n onCompaction: async (ctx) => {\n const saved = ctx.event.tokensBefore - ctx.event.tokensAfter;\n const percent = ((saved / ctx.event.tokensBefore) * 100).toFixed(1);\n console.log(\n `🗜️ Compaction (${ctx.event.strategy}): ${ctx.event.tokensBefore} → ${ctx.event.tokensAfter} tokens (saved ${saved}, ${percent}%)`,\n );\n console.log(\n ` Messages: ${ctx.event.messagesBefore} → ${ctx.event.messagesAfter}`,\n );\n if (ctx.stats.totalCompactions > 1) {\n console.log(\n ` Cumulative: ${ctx.stats.totalCompactions} compactions, ${ctx.stats.totalTokensSaved} tokens saved`,\n );\n }\n },\n },\n };\n }\n\n /**\n * Returns empty hook configuration for clean output without any logging.\n *\n * **Output:**\n * - None. Returns {} (empty object).\n *\n * **Use cases:**\n * - Clean test output without console noise\n * - Production environments where logging is handled externally\n * - Baseline for custom hook development\n * - Temporary disable of all hook output\n *\n * **Performance:** Zero overhead. 
No-op hook configuration.\n *\n * @returns Empty hook configuration\n *\n * @example\n * ```typescript\n * // Clean test output\n * describe('Agent tests', () => {\n * it('should calculate correctly', async () => {\n * const result = await LLMist.createAgent()\n * .withHooks(HookPresets.silent()) // No console output\n * .withGadgets(Calculator)\n * .askAndCollect(\"What is 15 times 23?\");\n *\n * expect(result).toContain(\"345\");\n * });\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Conditional silence based on environment\n * const isTesting = process.env.NODE_ENV === 'test';\n * .withHooks(isTesting ? HookPresets.silent() : HookPresets.monitoring())\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetssilent | Full documentation}\n */\n static silent(): AgentHooks {\n return {};\n }\n\n /**\n * Combines multiple hook configurations into one.\n *\n * Merge allows you to compose preset and custom hooks for modular monitoring\n * configurations. Understanding merge behavior is crucial for proper composition.\n *\n * **Merge behavior:**\n * - **Observers:** Composed - all handlers run sequentially in order\n * - **Interceptors:** Last one wins - only the last interceptor applies\n * - **Controllers:** Last one wins - only the last controller applies\n *\n * **Why interceptors/controllers don't compose:**\n * - Interceptors have different signatures per method, making composition impractical\n * - Controllers return specific actions that can't be meaningfully combined\n * - Only observers support composition because they're read-only and independent\n *\n * **Use cases:**\n * - Combining multiple presets (logging + timing + tokens)\n * - Adding custom hooks to presets\n * - Building modular, reusable monitoring configurations\n * - Environment-specific hook composition\n *\n * **Performance:** Minimal overhead for merging. Runtime performance depends on merged hooks.\n *\n * @param hookSets - Variable number of hook configurations to merge\n * @returns Single merged hook configuration with composed/overridden handlers\n *\n * @example\n * ```typescript\n * // Combine multiple presets\n * .withHooks(HookPresets.merge(\n * HookPresets.logging(),\n * HookPresets.timing(),\n * HookPresets.tokenTracking()\n * ))\n * // All observers from all three presets will run\n * ```\n *\n * @example\n * ```typescript\n * // Add custom observer to preset (both run)\n * .withHooks(HookPresets.merge(\n * HookPresets.timing(),\n * {\n * observers: {\n * onLLMCallComplete: async (ctx) => {\n * await saveMetrics({ tokens: ctx.usage?.totalTokens });\n * },\n * },\n * }\n * ))\n * ```\n *\n * @example\n * ```typescript\n * // Multiple interceptors (last wins!)\n * .withHooks(HookPresets.merge(\n * {\n * interceptors: {\n * interceptTextChunk: (chunk) => chunk.toUpperCase(), // Ignored\n * },\n * },\n * {\n * interceptors: {\n * interceptTextChunk: (chunk) => chunk.toLowerCase(), // This wins\n * },\n * }\n * ))\n * // Result: text will be lowercase\n * ```\n *\n * @example\n * ```typescript\n * // Modular environment-based configuration\n * const baseHooks = HookPresets.errorLogging();\n * const devHooks = HookPresets.merge(baseHooks, HookPresets.monitoring({ verbose: true }));\n * const prodHooks = HookPresets.merge(baseHooks, HookPresets.tokenTracking());\n *\n * const hooks = process.env.NODE_ENV === 'production' ? 
prodHooks : devHooks;\n * .withHooks(hooks)\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmergehooksets | Full documentation}\n */\n static merge(...hookSets: AgentHooks[]): AgentHooks {\n const merged: AgentHooks = {\n observers: {},\n interceptors: {},\n controllers: {},\n };\n\n // Compose observers: run all handlers for the same event\n for (const hooks of hookSets) {\n if (hooks.observers) {\n for (const [key, handler] of Object.entries(hooks.observers)) {\n const typedKey = key as keyof typeof hooks.observers;\n if (merged.observers![typedKey]) {\n // Compose: run both existing and new handler\n const existing = merged.observers![typedKey];\n merged.observers![typedKey] = async (ctx: any) => {\n await existing(ctx);\n await handler(ctx);\n };\n } else {\n merged.observers![typedKey] = handler as any;\n }\n }\n }\n\n // Interceptors: last one wins (complex signatures make composition impractical)\n // Each interceptor has different parameters (chunk, message, parameters, etc.)\n // so we can't meaningfully compose them like we do with observers\n if (hooks.interceptors) {\n Object.assign(merged.interceptors!, hooks.interceptors);\n }\n\n // Controllers: last one wins (can't meaningfully compose boolean returns)\n if (hooks.controllers) {\n Object.assign(merged.controllers!, hooks.controllers);\n }\n }\n\n return merged;\n }\n\n /**\n * Composite preset combining logging, timing, tokenTracking, and errorLogging.\n *\n * This is the recommended preset for development and initial production deployments,\n * providing comprehensive observability with a single method call.\n *\n * **Includes:**\n * - All output from `logging()` preset (with optional verbosity)\n * - All output from `timing()` preset (execution times)\n * - All output from `tokenTracking()` preset (token usage)\n * - All output from `errorLogging()` preset (error details)\n *\n * **Output format:**\n * - Event logging: [LLM]/[GADGET] messages\n * - Timing: ⏱️ emoji with milliseconds\n * - Tokens: 📊 emoji with per-call and cumulative counts\n * - Errors: ❌ emoji with full error details\n *\n * **Use cases:**\n * - Full observability during development\n * - Comprehensive monitoring in production\n * - One-liner for complete agent visibility\n * - Troubleshooting and debugging with full context\n *\n * **Performance:** Combined overhead of all four presets, but still minimal in practice.\n *\n * @param options - Monitoring options\n * @param options.verbose - Passed to logging() preset for detailed output. 
Default: false\n * @returns Merged hook configuration combining all monitoring presets\n *\n * @example\n * ```typescript\n * // Basic monitoring (recommended for development)\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring())\n * .withGadgets(Calculator, Weather)\n * .ask(\"What is 15 times 23, and what's the weather in NYC?\");\n * // Output: All events, timing, tokens, and errors in one place\n * ```\n *\n * @example\n * ```typescript\n * // Verbose monitoring with full details\n * await LLMist.createAgent()\n * .withHooks(HookPresets.monitoring({ verbose: true }))\n * .ask(\"Your prompt\");\n * // Output includes: parameters, results, and complete responses\n * ```\n *\n * @example\n * ```typescript\n * // Environment-based monitoring\n * const isDev = process.env.NODE_ENV === 'development';\n * .withHooks(HookPresets.monitoring({ verbose: isDev }))\n * ```\n *\n * @see {@link https://github.com/zbigniewsobiecki/llmist/blob/main/docs/HOOKS.md#hookpresetsmonitoringoptions | Full documentation}\n */\n static monitoring(options: LoggingOptions = {}): AgentHooks {\n return HookPresets.merge(\n HookPresets.logging(options),\n HookPresets.timing(),\n HookPresets.tokenTracking(),\n HookPresets.errorLogging(),\n );\n }\n}\n","/**\n * Agent module - Composable, single-responsibility architecture for LLM agents.\n * This module provides a cleaner alternative to the monolithic AgentLoop.\n */\n\nexport type { AgentOptions } from \"./agent.js\";\nexport { ConversationManager } from \"./conversation-manager.js\";\n// New clean hooks system\nexport type {\n AfterGadgetExecutionAction,\n AfterGadgetExecutionControllerContext,\n AfterLLMCallAction,\n AfterLLMCallControllerContext,\n AfterLLMErrorAction,\n AgentHooks,\n BeforeGadgetExecutionAction,\n BeforeLLMCallAction,\n // Interceptor contexts\n ChunkInterceptorContext,\n Controllers,\n GadgetExecutionControllerContext,\n GadgetParameterInterceptorContext,\n GadgetResultInterceptorContext,\n Interceptors,\n // Controller contexts and actions\n LLMCallControllerContext,\n LLMErrorControllerContext,\n MessageInterceptorContext,\n ObserveChunkContext,\n ObserveGadgetCompleteContext,\n ObserveGadgetStartContext,\n // Observer contexts\n ObserveLLMCallContext,\n ObserveLLMCompleteContext,\n ObserveLLMErrorContext,\n Observers,\n} from \"./hooks.js\";\nexport type { IConversationManager } from \"./interfaces.js\";\n\n// StreamProcessor for advanced use cases\nexport {\n type StreamProcessingResult,\n StreamProcessor,\n type StreamProcessorOptions,\n} from \"./stream-processor.js\";\n\n// Gadget output limiting\nexport type { StoredOutput } from \"./gadget-output-store.js\";\nexport { GadgetOutputStore } from \"./gadget-output-store.js\";\n\n// Context compaction\nexport {\n type CompactionConfig,\n type CompactionEvent,\n type CompactionStats,\n type ResolvedCompactionConfig,\n DEFAULT_COMPACTION_CONFIG,\n DEFAULT_SUMMARIZATION_PROMPT,\n} from \"./compaction/index.js\";\n\nexport {\n type CompactionContext,\n type CompactionResult,\n type CompactionStrategy,\n type MessageTurn,\n CompactionManager,\n HybridStrategy,\n SlidingWindowStrategy,\n SummarizationStrategy,\n} from \"./compaction/index.js\";\n\nexport type { ObserveCompactionContext } from \"./hooks.js\";\n\n// LLM Assistance Hints\nexport {\n createHints,\n iterationProgressHint,\n parallelGadgetHint,\n type HintsConfig,\n type IterationHintOptions,\n type ParallelGadgetHintOptions,\n} from \"./hints.js\";\n","/**\n * Context Compaction System\n *\n * Automatically manages 
conversation context to prevent context window overflow\n * in long-running agent conversations.\n *\n * Features:\n * - Automatic threshold monitoring (default: 80% of context window)\n * - Multiple strategies: sliding-window, summarization, hybrid (default)\n * - Full visibility via StreamEvents and hooks\n * - Enabled by default with sensible defaults\n *\n * @example\n * ```typescript\n * // Auto-enabled with defaults\n * const agent = await LLMist.createAgent()\n * .withModel('sonnet')\n * .ask('Help me...');\n *\n * // Custom configuration\n * const agent = await LLMist.createAgent()\n * .withModel('gpt-4')\n * .withCompaction({\n * triggerThresholdPercent: 70,\n * preserveRecentTurns: 10,\n * })\n * .ask('...');\n *\n * // Disable compaction\n * const agent = await LLMist.createAgent()\n * .withModel('sonnet')\n * .withoutCompaction()\n * .ask('...');\n * ```\n */\n\n// Configuration\nexport {\n type CompactionConfig,\n type CompactionEvent,\n type CompactionStats,\n type ResolvedCompactionConfig,\n DEFAULT_COMPACTION_CONFIG,\n DEFAULT_SUMMARIZATION_PROMPT,\n resolveCompactionConfig,\n} from \"./config.js\";\n\n// Strategy interface and utilities\nexport {\n type CompactionContext,\n type CompactionResult,\n type CompactionStrategy,\n type MessageTurn,\n groupIntoTurns,\n flattenTurns,\n} from \"./strategy.js\";\n\n// Strategy implementations\nexport { HybridStrategy, SlidingWindowStrategy, SummarizationStrategy } from \"./strategies/index.js\";\n\n// Manager\nexport { CompactionManager } from \"./manager.js\";\n","/**\n * LLM Assistance Hints System\n *\n * Provides reusable hook factories that inject helpful context and coaching\n * messages to guide LLM behavior during agentic execution.\n *\n * ## Two Types of Hints\n *\n * 1. **Proactive (beforeLLMCall)**: Inject context before LLM generates response\n * - Example: Iteration progress (\"You're on iteration 3/10\")\n *\n * 2. 
**Reactive (afterLLMCall)**: Coach based on what LLM did\n * - Example: \"Tip: You can call multiple gadgets in parallel\"\n *\n * ## Usage\n *\n * ```typescript\n * import { createHints, iterationProgressHint, parallelGadgetHint } from \"llmist\";\n *\n * // Option 1: Use individual hints\n * const agent = new AgentBuilder()\n * .withHooks(HookPresets.merge(\n * iterationProgressHint({ timing: \"late\" }),\n * parallelGadgetHint(),\n * ))\n * .build();\n *\n * // Option 2: Use convenience factory\n * const agent = new AgentBuilder()\n * .withHooks(createHints({\n * iterationProgress: { timing: \"late\" },\n * parallelGadgets: true,\n * }))\n * .build();\n * ```\n *\n * @module agent/hints\n */\n\nimport {\n DEFAULT_HINTS,\n resolveHintTemplate,\n type HintContext,\n type HintTemplate,\n} from \"../core/prompt-config.js\";\nimport { HookPresets } from \"./hook-presets.js\";\nimport type { AgentHooks } from \"./hooks.js\";\n\n// ============================================================================\n// CONFIGURATION TYPES\n// ============================================================================\n\n/**\n * Options for iteration progress hint.\n */\nexport interface IterationHintOptions {\n /**\n * When to show the hint.\n * - \"always\": Show on every iteration\n * - \"late\": Show only when >= 50% through iterations\n * - \"urgent\": Show only when >= 80% through iterations\n * @default \"always\"\n */\n timing?: \"always\" | \"late\" | \"urgent\";\n\n /**\n * Whether to include urgency indicators for late iterations.\n * Adds extra text when running low on iterations.\n * @default true\n */\n showUrgency?: boolean;\n\n /**\n * Custom template. Supports placeholders: {iteration}, {maxIterations}, {remaining}\n * Or a function receiving HintContext.\n * @default DEFAULT_HINTS.iterationProgressHint\n */\n template?: HintTemplate;\n}\n\n/**\n * Options for parallel gadget usage hint.\n */\nexport interface ParallelGadgetHintOptions {\n /**\n * Minimum number of gadget calls to consider \"efficient\".\n * If response has fewer calls, hint will suggest parallelization.\n * @default 2\n */\n minGadgetsForEfficiency?: number;\n\n /**\n * Custom message when single gadget detected.\n * @default DEFAULT_HINTS.parallelGadgetsHint\n */\n message?: string;\n\n /**\n * Whether to enable this hint.\n * @default true\n */\n enabled?: boolean;\n}\n\n/**\n * Combined hints configuration for createHints().\n */\nexport interface HintsConfig {\n /**\n * Enable iteration progress hints.\n * Pass `true` for defaults, or options object for customization.\n */\n iterationProgress?: boolean | IterationHintOptions;\n\n /**\n * Enable parallel gadget hints.\n * Pass `true` for defaults, or options object for customization.\n */\n parallelGadgets?: boolean | ParallelGadgetHintOptions;\n\n /**\n * Additional custom hooks to merge.\n */\n custom?: AgentHooks[];\n}\n\n// ============================================================================\n// HINT FACTORIES\n// ============================================================================\n\n/**\n * Creates a proactive hint that informs the LLM about iteration progress.\n *\n * This hint is injected before each LLM call (via beforeLLMCall controller),\n * helping the LLM understand how much \"budget\" remains for completing the task.\n *\n * @param options - Configuration options\n * @returns AgentHooks that can be merged with other hooks\n *\n * @example\n * ```typescript\n * // Basic usage - show on every iteration\n * const hooks = 
iterationProgressHint();\n *\n * // Show only when running low on iterations\n * const hooks = iterationProgressHint({ timing: \"late\" });\n *\n * // Custom template\n * const hooks = iterationProgressHint({\n * template: \"Turn {iteration} of {maxIterations}. {remaining} turns left.\",\n * });\n * ```\n */\nexport function iterationProgressHint(options?: IterationHintOptions): AgentHooks {\n const { timing = \"always\", showUrgency = true, template } = options ?? {};\n\n return {\n controllers: {\n beforeLLMCall: async (ctx) => {\n const iteration = ctx.iteration + 1; // 1-based for user-friendliness\n const maxIterations = ctx.maxIterations;\n const progress = iteration / maxIterations;\n\n // Check timing condition\n if (timing === \"late\" && progress < 0.5) {\n return { action: \"proceed\" };\n }\n if (timing === \"urgent\" && progress < 0.8) {\n return { action: \"proceed\" };\n }\n\n // Build hint context with all fields populated\n const remaining = maxIterations - iteration;\n const hintContext: HintContext = {\n iteration,\n maxIterations,\n remaining,\n };\n\n // Resolve template\n let hint = resolveHintTemplate(\n template,\n DEFAULT_HINTS.iterationProgressHint,\n hintContext,\n );\n\n // Add urgency indicator if late in iterations\n if (showUrgency && progress >= 0.8) {\n hint += \" ⚠️ Running low on iterations - focus on completing the task.\";\n }\n\n // Inject as system-level context in messages\n const messages = [...ctx.options.messages];\n\n // Find last user message index (compatible with older ES targets)\n let lastUserIndex = -1;\n for (let i = messages.length - 1; i >= 0; i--) {\n if (messages[i].role === \"user\") {\n lastUserIndex = i;\n break;\n }\n }\n\n if (lastUserIndex >= 0) {\n // Insert hint after the last user message\n messages.splice(lastUserIndex + 1, 0, {\n role: \"user\",\n content: `[System Hint] ${hint}`,\n });\n } else {\n // No user messages found - append hint at the end\n messages.push({\n role: \"user\",\n content: `[System Hint] ${hint}`,\n });\n }\n\n return {\n action: \"proceed\",\n modifiedOptions: { messages },\n };\n },\n },\n };\n}\n\n/**\n * Creates a reactive hint that encourages parallel gadget usage.\n *\n * This hint analyzes the LLM's response and, if only a single gadget was called,\n * appends a reminder that multiple gadgets can be used in parallel for efficiency.\n *\n * @param options - Configuration options\n * @returns AgentHooks that can be merged with other hooks\n *\n * @example\n * ```typescript\n * // Basic usage\n * const hooks = parallelGadgetHint();\n *\n * // Custom threshold and message\n * const hooks = parallelGadgetHint({\n * minGadgetsForEfficiency: 3,\n * message: \"Consider calling multiple gadgets at once!\",\n * });\n * ```\n */\nexport function parallelGadgetHint(options?: ParallelGadgetHintOptions): AgentHooks {\n const {\n minGadgetsForEfficiency = 2,\n message = DEFAULT_HINTS.parallelGadgetsHint,\n enabled = true,\n } = options ?? 
{};\n\n return {\n controllers: {\n afterLLMCall: async (ctx) => {\n if (!enabled) {\n return { action: \"continue\" };\n }\n\n // Only hint if gadgets were called but below efficiency threshold\n if (ctx.gadgetCallCount > 0 && ctx.gadgetCallCount < minGadgetsForEfficiency) {\n return {\n action: \"append_messages\",\n messages: [\n {\n role: \"user\",\n content: `[System Hint] ${message}`,\n },\n ],\n };\n }\n\n return { action: \"continue\" };\n },\n },\n };\n}\n\n// ============================================================================\n// CONVENIENCE FACTORY\n// ============================================================================\n\n/**\n * Creates combined hints from a configuration object.\n *\n * This is a convenience function that creates and merges multiple hints\n * based on a simple configuration object.\n *\n * @param config - Configuration for which hints to enable\n * @returns Merged AgentHooks\n *\n * @example\n * ```typescript\n * const hooks = createHints({\n * iterationProgress: { timing: \"late\" },\n * parallelGadgets: true,\n * });\n *\n * const agent = new AgentBuilder()\n * .withHooks(HookPresets.merge(existingHooks, hooks))\n * .build();\n * ```\n */\nexport function createHints(config: HintsConfig): AgentHooks {\n const hooksToMerge: AgentHooks[] = [];\n\n // Iteration progress hint\n if (config.iterationProgress) {\n const options =\n typeof config.iterationProgress === \"boolean\" ? {} : config.iterationProgress;\n hooksToMerge.push(iterationProgressHint(options));\n }\n\n // Parallel gadgets hint\n if (config.parallelGadgets) {\n const options =\n typeof config.parallelGadgets === \"boolean\" ? {} : config.parallelGadgets;\n hooksToMerge.push(parallelGadgetHint(options));\n }\n\n // Custom hooks\n if (config.custom) {\n hooksToMerge.push(...config.custom);\n }\n\n return HookPresets.merge(...hooksToMerge);\n}\n","/**\n * Type-safe gadget factory with automatic parameter inference.\n *\n * Gadget eliminates the need for manual type assertions\n * by automatically inferring parameter types from the Zod schema.\n *\n * @example\n * ```typescript\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\"]),\n * a: z.number(),\n * b: z.number(),\n * }),\n * }) {\n * // ✨ params is automatically typed!\n * execute(params: this['params']): string {\n * const { operation, a, b } = params; // All typed!\n * return operation === \"add\" ? String(a + b) : String(a - b);\n * }\n * }\n * ```\n */\n\nimport type { ZodType } from \"zod\";\nimport { BaseGadget } from \"./gadget.js\";\nimport type { GadgetExample } from \"./types.js\";\n\n/**\n * Infer the TypeScript type from a Zod schema.\n */\ntype InferSchema<T> = T extends ZodType<infer U> ? 
U : never;\n\n/**\n * Configuration for creating a typed gadget.\n */\nexport interface GadgetConfig<TSchema extends ZodType> {\n /** Human-readable description of what the gadget does */\n description: string;\n\n /** Zod schema for parameter validation */\n schema: TSchema;\n\n /** Optional custom name (defaults to class name) */\n name?: string;\n\n /** Optional timeout in milliseconds */\n timeoutMs?: number;\n\n /** Optional usage examples to help LLMs understand proper invocation */\n examples?: GadgetExample<InferSchema<TSchema>>[];\n}\n\n/**\n * Factory function to create a typed gadget base class.\n *\n * The returned class automatically infers parameter types from the Zod schema,\n * eliminating the need for manual type assertions in the execute method.\n *\n * @param config - Configuration with description and schema\n * @returns Base class to extend with typed execute method\n *\n * @example\n * ```typescript\n * import { z } from 'zod';\n * import { Gadget } from 'llmist';\n *\n * class Calculator extends Gadget({\n * description: \"Performs arithmetic operations\",\n * schema: z.object({\n * operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n * a: z.number().describe(\"First number\"),\n * b: z.number().describe(\"Second number\"),\n * }),\n * }) {\n * execute(params: this['params']): string {\n * // params is automatically typed as:\n * // { operation: \"add\" | \"subtract\" | \"multiply\" | \"divide\"; a: number; b: number }\n * const { operation, a, b } = params;\n *\n * switch (operation) {\n * case \"add\": return String(a + b);\n * case \"subtract\": return String(a - b);\n * case \"multiply\": return String(a * b);\n * case \"divide\": return String(a / b);\n * }\n * }\n * }\n * ```\n *\n * @example\n * ```typescript\n * // With async execution\n * class WeatherGadget extends Gadget({\n * description: \"Fetches weather for a city\",\n * schema: z.object({\n * city: z.string().min(1).describe(\"City name\"),\n * }),\n * timeoutMs: 10000,\n * }) {\n * async execute(params: this['params']): Promise<string> {\n * const { city } = params; // Automatically typed as { city: string }\n * const weather = await fetchWeather(city);\n * return `Weather in ${city}: ${weather}`;\n * }\n * }\n * ```\n */\nexport function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>) {\n abstract class GadgetBase extends BaseGadget {\n description = config.description;\n parameterSchema = config.schema;\n name = config.name;\n timeoutMs = config.timeoutMs;\n examples = config.examples;\n\n /**\n * Type helper property for accessing inferred parameter type.\n * This is used in the execute method signature: `execute(params: this['params'])`\n *\n * Note: This is just for type inference - the actual params in execute()\n * will be Record<string, unknown> which you can safely cast to this['params']\n */\n readonly params!: InferSchema<TSchema>;\n\n /**\n * Execute the gadget. 
Subclasses should cast params to this['params'].\n *\n * @param params - Validated parameters from the LLM\n * @returns Result as a string (or Promise<string> for async gadgets)\n *\n * @example\n * ```typescript\n * execute(params: Record<string, unknown>): string {\n * const typed = params as this['params'];\n * // Now 'typed' is fully typed!\n * return String(typed.a + typed.b);\n * }\n * ```\n */\n abstract execute(params: Record<string, unknown>): string | Promise<string>;\n }\n\n return GadgetBase as {\n new (): GadgetBase & { params: InferSchema<TSchema> };\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA;AAEA;AALA,SAAS,SAAS;;;AC+JX,IAAM,cAAN,MAAM,aAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwDvB,OAAO,QAAQ,UAA0B,CAAC,GAAe;AACvD,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,kCAAkC,IAAI,SAAS,GAAG;AAAA,QAChE;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,SAAS,IAAI,OAAO,eAAe;AACzC,kBAAQ,IAAI,4BAA4B,MAAM,GAAG;AACjD,cAAI,QAAQ,WAAW,IAAI,cAAc;AACvC,oBAAQ,IAAI,mBAAmB,IAAI,YAAY,EAAE;AAAA,UACnD;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,oBAAQ,IAAI,wBAAwB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UAC7E;AAAA,QACF;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,kBAAQ,IAAI,sBAAsB,IAAI,UAAU,EAAE;AAClD,cAAI,QAAQ,SAAS;AACnB,kBAAM,UAAU,IAAI,SAAS,IAAI,eAAe;AAChD,oBAAQ,IAAI,oBAAoB,OAAO,EAAE;AAAA,UAC3C;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoDA,OAAO,SAAqB;AAC1B,UAAM,UAAU,oBAAI,IAAoB;AAExC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,IAAI,OAAO,IAAI,SAAS,IAAI,KAAK,IAAI,CAAC;AAAA,QAChD;AAAA,QACA,mBAAmB,OAAO,QAAQ;AAChC,gBAAM,QAAQ,QAAQ,IAAI,OAAO,IAAI,SAAS,EAAE;AAChD,cAAI,OAAO;AACT,kBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,oBAAQ,IAAI,+BAAqB,QAAQ,IAAI;AAC7C,oBAAQ,OAAO,OAAO,IAAI,SAAS,EAAE;AAAA,UACvC;AAAA,QACF;AAAA,QACA,wBAAwB,OAAO,QAAQ;AACrC,gBAAM,MAAM,UAAU,IAAI,UAAU,IAAI,KAAK,IAAI,CAAC;AAClD,kBAAQ,IAAI,KAAK,KAAK,IAAI,CAAC;AAE3B,UAAC,IAAY,aAAa;AAAA,QAC5B;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,gBAAM,MAAO,IAAY;AACzB,cAAI,KAAK;AACP,kBAAM,QAAQ,QAAQ,IAAI,GAAG;AAC7B,gBAAI,OAAO;AACT,oBAAM,WAAW,KAAK,IAAI,IAAI;AAC9B,sBAAQ,IAAI,wBAAc,IAAI,UAAU,SAAS,QAAQ,IAAI;AAC7D,sBAAQ,OAAO,GAAG;AAAA,YACpB;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuDA,OAAO,gBAA4B;AACjC,QAAI,cAAc;AAClB,QAAI,aAAa;AAEjB,WAAO;AAAA,MACL,WAAW;AAAA,QACT,mBAAmB,OAAO,QAAQ;AAChC;AACA,cAAI,IAAI,OAAO,aAAa;AAC1B,2BAAe,IAAI,MAAM;AACzB,oBAAQ,IAAI,+BAAwB,IAAI,MAAM,WAAW,EAAE;AAC3D,oBAAQ,IAAI,2BAAoB,WAAW,YAAY,UAAU,SAAS;AAAA,UAC5E;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAA
A;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4HA,OAAO,iBAAiB,SAA+C;AACrE,UAAM,EAAE,eAAe,YAAY,cAAc,MAAM,IAAI,WAAW,CAAC;AAGvE,QAAI,aAAa;AACjB,QAAI,mBAAmB;AACvB,QAAI,mBAAmB;AACvB,QAAI,oBAAoB;AACxB,QAAI,YAAY;AAChB,UAAM,YAAY,KAAK,IAAI;AAE3B,WAAO;AAAA,MACL,WAAW;AAAA;AAAA,QAET,gBAAgB,OAAO,QAAQ;AAC7B;AAAA,QACF;AAAA;AAAA,QAGA,mBAAmB,OAAO,QAAQ;AAChC;AAGA,cAAI,IAAI,OAAO;AACb,gCAAoB,IAAI,MAAM;AAC9B,iCAAqB,IAAI,MAAM;AAI/B,gBAAI,eAAe;AACjB,kBAAI;AAGF,sBAAM,YAAY,IAAI,QAAQ,MAAM,SAAS,GAAG,IAC5C,IAAI,QAAQ,MAAM,MAAM,GAAG,EAAE,CAAC,IAC9B,IAAI,QAAQ;AAGhB,sBAAM,eAAe,cAAc;AAAA,kBACjC;AAAA,kBACA,IAAI,MAAM;AAAA,kBACV,IAAI,MAAM;AAAA,gBACZ;AAEA,oBAAI,cAAc;AAChB,+BAAa,aAAa;AAAA,gBAC5B;AAAA,cACF,SAAS,OAAO;AAGd,oBAAI,aAAa;AACf,0BAAQ,KAAK,yCAA+B,KAAK;AAAA,gBACnD;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAGA,gBAAM,QAAuB;AAAA,YAC3B;AAAA,YACA;AAAA,YACA;AAAA,YACA;AAAA,YACA,aAAa,mBAAmB;AAAA,YAChC;AAAA,YACA,gBAAgB,SAAS,KAAK,IAAI,IAAI,aAAa,KAAM,QAAQ,CAAC,CAAC;AAAA,UACrE;AAGA,cAAI,YAAY;AACd,uBAAW,KAAK;AAAA,UAClB;AAGA,cAAI,aAAa;AACf,kBAAM,kBAAkB,MAAM,eAAe,MACzC,IAAI,MAAM,cAAc,KAAM,QAAQ,CAAC,CAAC,MACxC,GAAG,MAAM,WAAW;AAExB,kBAAM,gBAAgB,MAAM,YAAY,IACpC,IAAI,MAAM,UAAU,QAAQ,CAAC,CAAC,KAC9B;AAEJ,oBAAQ;AAAA,cACN,kCAA2B,MAAM,gBAAgB,MAAM,eAAe,aAAa,aAAa,MAAM,MAAM,cAAc;AAAA,YAC5H;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8DA,OAAO,eAA2B;AAChC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,gBAAgB,OAAO,QAAQ;AAC7B,kBAAQ,MAAM,+BAA0B,IAAI,SAAS,MAAM,IAAI,MAAM,OAAO;AAC5E,kBAAQ,MAAM,aAAa,IAAI,QAAQ,KAAK,EAAE;AAC9C,kBAAQ,MAAM,iBAAiB,IAAI,SAAS,EAAE;AAAA,QAChD;AAAA,QACA,2BAA2B,OAAO,QAAQ;AACxC,cAAI,IAAI,OAAO;AACb,oBAAQ,MAAM,wBAAmB,IAAI,UAAU,EAAE;AACjD,oBAAQ,MAAM,aAAa,IAAI,KAAK,EAAE;AACtC,oBAAQ,MAAM,kBAAkB,KAAK,UAAU,IAAI,YAAY,MAAM,CAAC,CAAC;AAAA,UACzE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0BA,OAAO,qBAAiC;AACtC,WAAO;AAAA,MACL,WAAW;AAAA,QACT,cAAc,OAAO,QAAQ;AAC3B,gBAAM,QAAQ,IAAI,MAAM,eAAe,IAAI,MAAM;AACjD,gBAAM,WAAY,QAAQ,IAAI,MAAM,eAAgB,KAAK,QAAQ,CAAC;AAClE,kBAAQ;AAAA,YACN,gCAAoB,IAAI,MAAM,QAAQ,MAAM,IAAI,MAAM,YAAY,WAAM,IAAI,MAAM,WAAW,kBAAkB,KAAK,KAAK,OAAO;AAAA,UAClI;AACA,kBAAQ;AAAA,YACN,gBAAgB,IAAI,MAAM,cAAc,WAAM,IAAI,MAAM,aAAa;AAAA,UACvE;AACA,cAAI,IAAI,MAAM,mBAAmB,GAAG;AAClC,oBAAQ;AAAA,cACN,kBAAkB,IAAI,MAAM,gBAAgB,iBAAiB,IAAI,MAAM,gBAAgB;AAAA,YACzF;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0CA,OAAO,SAAqB;AAC1B,WAAO,CAAC;AAAA,EACV;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsFA,OAAO,SAAS,UAAoC;AAClD,UAAM,SAAqB;AAAA,MACzB,WAAW,CAAC;AAAA,MACZ,cAAc,CAAC;AAAA,MACf,aAAa,CAAC;AAAA,IAChB;AAGA,eAAW,SAAS,UAAU;AAC5B,UAAI,MAAM,WAAW;AACnB,mBAAW,CAAC,KAAK,OAAO,KAAK,OAAO,QAAQ,MAAM,SAAS,GAAG;AAC5D,gBAAM,WAAW;AACjB,cAAI,OAAO,UAAW,QAAQ,GAAG;AAE/B,kBAAM,WAAW,OAAO,UAAW,QAAQ;AAC3C,mBAAO,UAAW,QAAQ,IAAI,OAAO,QAAa;AAChD,oBAAM,SAAS,GAAG;AAClB,oBAAM,QAAQ,GAAG;AAAA,YACnB;AAAA,UACF,OAAO;AACL,mBAAO,UAAW,QAAQ,IAAI;AAAA,UAChC;AAAA,QACF;AAAA,MACF;AAKA,UAAI,MAAM,cAAc;AACtB,eAAO,OAAO,OAAO,cAAe,MAAM,YAAY;AAAA,MACxD;AAGA,UAAI,MAAM,aAAa;AACrB,eAAO,OAAO,OAAO,aAAc,MAAM,WAAW;AAAA,MACtD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4DA,OAAO,WAAW,UAA0B,CAAC,GAAe;AAC1D,WAAO,aAAY;AAAA,MACjB,aAAY,QAAQ,OAAO;AAAA,MAC3B,aAAY,OAAO;AAAA,MACnB,aAAY,cAAc;AAAA,MAC1B,aAAY,aAAa;AAAA,IAC3B;AAAA,EACF;AACF;;;AC78BA;AAkCA;AAQA;;;ACXA;AAWA;AAUA;AAGA;;;ACtBA;AAkHO,SAAS,sBAAsB,SAA4C;AAChF,QAAM,EAAE,SAAS,UAAU,cAAc,MAAM,SAAS,IAAI,WAAW,CAAC;AAExE,SAAO;AAAA,IACL,aAAa;AAAA,MACX,eAAe,OAAO,QAAQ;AAC5B,cAAM,YAAY,IAAI,YAAY;AAClC,cAAM,gBAAgB,IAAI;AAC1B,cAAM,WAAW,YAAY;AAG7B,YAAI,WAAW,UAAU,WAAW,KAAK;AACvC,iBAAO,EAAE,QAAQ,UAAU;AAAA,QAC7B;AACA,YAAI,WAAW,YAAY,WAAW,KAAK;AACzC,iBAAO,EAAE,QAAQ,UAAU;AAAA,QAC7B;AAGA,cAAM,YAAY,gBAAgB;AAClC,cAAM,cAA2B;AAAA,UAC/B;AAAA,UACA;AAAA,UACA;AAAA,QACF;AAGA,YAAI,OAAO;AAAA,UACT;AAAA,UACA,cAAc;AAAA,UACd;AAAA,QACF;AAGA,YAAI,eAAe,YAAY,KAAK;AAClC,kBAAQ;AAAA,QACV;AAGA,cAAM,WAAW,CAAC,GAAG,IAAI,QAAQ,QAAQ;AAGzC,YAAI,gBAAgB;AACpB,iBAAS,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,KAAK;AAC7C,cAAI,SAAS,CAAC,EAAE,SAAS,QAAQ;AAC/B,4BAAgB;AAChB;AAAA,UACF;AAAA,QACF;AAEA,YAAI,iBAAiB,GAAG;AAEtB,mBAAS,OAAO,gBAAgB,GAAG,GAAG;AAAA,YACpC,MAAM;AAAA,YACN,SAAS,iBAAiB,IAAI;AAAA,UAChC,CAAC;AAAA,QACH,OAAO;AAEL,mBAAS,KAAK;AAAA,YACZ,MAAM;AAAA,YACN,SAAS,iBAAiB,IAAI;AAAA,UAChC,CAAC;AAAA,QACH;AAEA,eAAO;AAAA,UACL,QAAQ;AAAA,UACR,iBAAiB,EAAE,SAAS;AAAA,QAC9B;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAuBO,SAAS,mBAAmB,SAAiD;AAClF,QAAM;AAAA,IACJ,0BAA0B;AAAA,IAC1B,UAAU,cAAc;AAAA,IACxB,UAAU;AAAA,EACZ,IAAI,WAAW,CAAC;AAEhB,SAAO;AAAA,IACL,aAAa;AAAA,MACX,cAAc,OAAO,QAAQ;AAC3B,YAAI,CAAC,SAAS;AACZ,iBAAO,EAAE,QAAQ,WAAW;AAAA,QAC9B;AAGA,YAAI,IAAI,kBAAkB,KAAK,IAAI,kBAAkB,yBAAyB;AAC5E,iBAAO;AAAA,YACL,QAAQ;AAAA,YACR,UAAU;AAAA,cACR;AAAA,gBACE,MAAM;AAAA,gBACN,SAAS,iBAAiB,OAAO;AAAA,cACnC;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAEA,eAAO,EAAE,QAAQ,WAAW;AAAA,MAC9B;AAAA,IACF;AAAA,EACF;AACF;AA2BO,SAAS,YAAY,QAAiC;AAC3D,QAAM,eAA6B,CAAC;AAGpC,MAAI,OAAO,mBAAmB;AAC5B,UAAM,UACJ,OAAO,OAAO,sBAAsB,YAAY,CAAC,IAAI,OAAO;AAC9D,iBAAa,KAAK,sBAAsB,OAAO,CAAC;AAAA,EAClD;AAGA,MAAI,OAAO,iBAAiB;AAC1B,UAAM,UACJ,OAAO,OAAO,oBAAoB,YAAY,CAAC,IAAI,OAAO;AAC5D,iBAAa,KAAK,mBAAmB,OAAO,CAAC;AAAA,EAC/C;AAGA,MAAI,OAAO,QAAQ;AACjB,iBAAa,KAAK,GAAG,OAAO,MAAM;AAAA,EACpC;AAEA,SAAO,YAAY,MAAM,GAAG,YAAY;AAC1C;;;AJxPA;AAEA;AASA;AAGA;AAeA;AAQA;AAQA;AAEA;AAEA;AAEA;AACA;AACA;AACA;AAEA;;;AK9GA;AAmFO,SAAS,OAAgC,QAA+B;AAAA,EAC7E,MAAe,mBAAmB,WAAW;AAAA,IAC3C,cAAc,OAAO;AAAA,IACrB,kBAAkB,OAAO;AAAA,IACzB,OAAO,OAAO;AAAA,IACd,YAAY,OAAO;AAAA,IACnB,WAAW,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAST;AAAA,EAkBX;AAEA,SAAO;AAGT;;;ALSA;AACA;AAIA;AACA;AACA;","names":[]}
1
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
@@ -678,6 +678,41 @@ interface LLMGenerationOptions {
678
678
  responseFormat?: "text";
679
679
  metadata?: Record<string, unknown>;
680
680
  extra?: Record<string, unknown>;
681
+ /**
682
+ * Optional abort signal for cancelling the request mid-flight.
683
+ *
684
+ * When the signal is aborted, the provider will attempt to cancel
685
+ * the underlying HTTP request and the stream will terminate with
686
+ * an abort error. Use `isAbortError()` from `@/core/errors` to
687
+ * detect cancellation in error handling.
688
+ *
689
+ * @example
690
+ * ```typescript
691
+ * const controller = new AbortController();
692
+ *
693
+ * const stream = client.stream({
694
+ * model: "claude-3-5-sonnet-20241022",
695
+ * messages: [{ role: "user", content: "Tell me a long story" }],
696
+ * signal: controller.signal,
697
+ * });
698
+ *
699
+ * // Cancel after 5 seconds
700
+ * setTimeout(() => controller.abort(), 5000);
701
+ *
702
+ * try {
703
+ * for await (const chunk of stream) {
704
+ * process.stdout.write(chunk.text);
705
+ * }
706
+ * } catch (error) {
707
+ * if (isAbortError(error)) {
708
+ * console.log("\nRequest was cancelled");
709
+ * } else {
710
+ * throw error;
711
+ * }
712
+ * }
713
+ * ```
714
+ */
715
+ signal?: AbortSignal;
681
716
  }
682
717
  interface TokenUsage {
683
718
  inputTokens: number;
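The new `signal` option composes with the platform's built-in helpers, so a plain deadline needs no hand-rolled `AbortController` at all. A minimal sketch, assuming `LLMist` can be constructed with no arguments (provider keys read from the environment):

```typescript
import { LLMist } from "llmist";

const client = new LLMist(); // assumption: zero-arg construction

// AbortSignal.timeout() (Node >= 17.3) yields a signal that aborts itself
// after the given delay -- no AbortController/setTimeout bookkeeping.
const stream = client.stream({
  model: "claude-3-5-sonnet-20241022",
  messages: [{ role: "user", content: "Tell me a long story" }],
  signal: AbortSignal.timeout(5_000),
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text);
}
```

On expiry the loop rejects with the same abort error described above, so the `isAbortError()` check from the JSDoc applies unchanged.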
@@ -1850,6 +1885,8 @@ interface AgentOptions {
1850
1885
  gadgetOutputLimitPercent?: number;
1851
1886
  /** Context compaction configuration (enabled by default) */
1852
1887
  compactionConfig?: CompactionConfig;
1888
+ /** Optional abort signal for cancelling requests mid-flight */
1889
+ signal?: AbortSignal;
1853
1890
  }
1854
1891
  /**
1855
1892
  * Agent: Lean orchestrator that delegates to StreamProcessor.
@@ -1890,6 +1927,7 @@ declare class Agent {
1890
1927
  private readonly outputLimitEnabled;
1891
1928
  private readonly outputLimitCharLimit;
1892
1929
  private readonly compactionManager?;
1930
+ private readonly signal?;
1893
1931
  /**
1894
1932
  * Creates a new Agent instance.
1895
1933
  * @internal This constructor is private. Use LLMist.createAgent() or AgentBuilder instead.
@@ -2058,6 +2096,7 @@ declare class AgentBuilder {
2058
2096
  private gadgetOutputLimit?;
2059
2097
  private gadgetOutputLimitPercent?;
2060
2098
  private compactionConfig?;
2099
+ private signal?;
2061
2100
  constructor(client?: LLMist);
2062
2101
  /**
2063
2102
  * Set the model to use.
@@ -2429,6 +2468,32 @@ declare class AgentBuilder {
2429
2468
  * ```
2430
2469
  */
2431
2470
  withoutCompaction(): this;
2471
+ /**
2472
+ * Set an abort signal for cancelling requests mid-flight.
2473
+ *
2474
+ * When the signal is aborted, the current LLM request will be cancelled
2475
+ * and the agent loop will exit gracefully.
2476
+ *
2477
+ * @param signal - AbortSignal from an AbortController
2478
+ * @returns This builder for chaining
2479
+ *
2480
+ * @example
2481
+ * ```typescript
2482
+ * const controller = new AbortController();
2483
+ *
2484
+ * // Cancel after 30 seconds
2485
+ * setTimeout(() => controller.abort(), 30000);
2486
+ *
2487
+ * const agent = LLMist.createAgent()
2488
+ * .withModel("sonnet")
2489
+ * .withSignal(controller.signal)
2490
+ * .ask("Write a long story");
2491
+ *
2492
+ * // Or cancel on user action
2493
+ * document.getElementById("cancel").onclick = () => controller.abort();
2494
+ * ```
2495
+ */
2496
+ withSignal(signal: AbortSignal): this;
2432
2497
  /**
2433
2498
  * Add a synthetic gadget call to the conversation history.
2434
2499
  *
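Since `withSignal()` accepts any `AbortSignal`, multiple cancellation sources can be merged before they reach the builder. A sketch under the same assumptions as the JSDoc example above, plus `AbortSignal.any()` (Node >= 20 / modern browsers):

```typescript
import { LLMist } from "llmist";

// An explicit user-facing cancel action...
const userCancel = new AbortController();

// ...merged with a hard deadline: the combined signal aborts as soon as
// either source signal does.
const signal = AbortSignal.any([
  userCancel.signal,
  AbortSignal.timeout(30_000),
]);

const run = LLMist.createAgent()
  .withModel("sonnet")
  .withSignal(signal)
  .ask("Write a long story");

// Wired exactly as in the JSDoc example above.
document.getElementById("cancel")!.onclick = () => userCancel.abort();
```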
@@ -3249,6 +3249,8 @@ var init_agent = __esm({
3249
3249
  outputLimitCharLimit;
3250
3250
  // Context compaction
3251
3251
  compactionManager;
3252
+ // Cancellation
3253
+ signal;
3252
3254
  /**
3253
3255
  * Creates a new Agent instance.
3254
3256
  * @internal This constructor is private. Use LLMist.createAgent() or AgentBuilder instead.
@@ -3316,6 +3318,7 @@ var init_agent = __esm({
3316
3318
  options.compactionConfig
3317
3319
  );
3318
3320
  }
3321
+ this.signal = options.signal;
3319
3322
  }
3320
3323
  /**
3321
3324
  * Get the gadget registry for this agent.
@@ -3433,7 +3436,8 @@ var init_agent = __esm({
3433
3436
  model: this.model,
3434
3437
  messages: this.conversation.getMessages(),
3435
3438
  temperature: this.temperature,
3436
- maxTokens: this.defaultMaxTokens
3439
+ maxTokens: this.defaultMaxTokens,
3440
+ signal: this.signal
3437
3441
  };
3438
3442
  await this.safeObserve(async () => {
3439
3443
  if (this.hooks.observers?.onLLMCallStart) {
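Worth noting about this forwarding: the agent hands the same signal to every iteration's LLM call, and an `AbortSignal` is one-shot — once aborted it stays aborted, which is what makes the loop exit gracefully rather than limp on. Reusing an agent after a cancel therefore means building it with a fresh controller:

```typescript
const controller = new AbortController();
controller.abort();
console.log(controller.signal.aborted); // true -- and it stays true forever
```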
@@ -3786,6 +3790,7 @@ var init_builder = __esm({
3786
3790
  gadgetOutputLimit;
3787
3791
  gadgetOutputLimitPercent;
3788
3792
  compactionConfig;
3793
+ signal;
3789
3794
  constructor(client) {
3790
3795
  this.client = client;
3791
3796
  }
@@ -4232,6 +4237,35 @@ var init_builder = __esm({
4232
4237
  this.compactionConfig = { enabled: false };
4233
4238
  return this;
4234
4239
  }
4240
+ /**
4241
+ * Set an abort signal for cancelling requests mid-flight.
4242
+ *
4243
+ * When the signal is aborted, the current LLM request will be cancelled
4244
+ * and the agent loop will exit gracefully.
4245
+ *
4246
+ * @param signal - AbortSignal from an AbortController
4247
+ * @returns This builder for chaining
4248
+ *
4249
+ * @example
4250
+ * ```typescript
4251
+ * const controller = new AbortController();
4252
+ *
4253
+ * // Cancel after 30 seconds
4254
+ * setTimeout(() => controller.abort(), 30000);
4255
+ *
4256
+ * const agent = LLMist.createAgent()
4257
+ * .withModel("sonnet")
4258
+ * .withSignal(controller.signal)
4259
+ * .ask("Write a long story");
4260
+ *
4261
+ * // Or cancel on user action
4262
+ * document.getElementById("cancel").onclick = () => controller.abort();
4263
+ * ```
4264
+ */
4265
+ withSignal(signal) {
4266
+ this.signal = signal;
4267
+ return this;
4268
+ }
4235
4269
  /**
4236
4270
  * Add a synthetic gadget call to the conversation history.
4237
4271
  *
@@ -4348,7 +4382,8 @@ ${endPrefix}`
4348
4382
  defaultGadgetTimeoutMs: this.defaultGadgetTimeoutMs,
4349
4383
  gadgetOutputLimit: this.gadgetOutputLimit,
4350
4384
  gadgetOutputLimitPercent: this.gadgetOutputLimitPercent,
4351
- compactionConfig: this.compactionConfig
4385
+ compactionConfig: this.compactionConfig,
4386
+ signal: this.signal
4352
4387
  };
4353
4388
  return new Agent(AGENT_INTERNAL_KEY, options);
4354
4389
  }
@@ -4451,7 +4486,8 @@ ${endPrefix}`
4451
4486
  defaultGadgetTimeoutMs: this.defaultGadgetTimeoutMs,
4452
4487
  gadgetOutputLimit: this.gadgetOutputLimit,
4453
4488
  gadgetOutputLimitPercent: this.gadgetOutputLimitPercent,
4454
- compactionConfig: this.compactionConfig
4489
+ compactionConfig: this.compactionConfig,
4490
+ signal: this.signal
4455
4491
  };
4456
4492
  return new Agent(AGENT_INTERNAL_KEY, options);
4457
4493
  }
@@ -4758,7 +4794,7 @@ var init_base_provider = __esm({
4758
4794
  async *stream(options, descriptor, spec) {
4759
4795
  const preparedMessages = this.prepareMessages(options.messages);
4760
4796
  const payload = this.buildRequestPayload(options, descriptor, spec, preparedMessages);
4761
- const rawStream = await this.executeStreamRequest(payload);
4797
+ const rawStream = await this.executeStreamRequest(payload, options.signal);
4762
4798
  yield* this.wrapStream(rawStream);
4763
4799
  }
4764
4800
  /**
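This base-class change is the entire plumbing story: `stream()` already built the payload and delegated, so the signal only has to ride along into each provider's `executeStreamRequest`. As a sketch of the pattern for a hypothetical custom provider — the class name and endpoint are invented, and only the `executeStreamRequest(payload, signal)` shape comes from the diff — `fetch` supports `AbortSignal` natively:

```typescript
// Hypothetical provider; not part of llmist.
class HttpStreamingProvider {
  async executeStreamRequest(
    payload: unknown,
    signal?: AbortSignal,
  ): Promise<ReadableStream<Uint8Array>> {
    // Aborting the signal rejects the pending fetch (and tears down the
    // response body stream) with an AbortError.
    const response = await fetch("https://api.example.com/v1/stream", {
      method: "POST",
      headers: { "content-type": "application/json" },
      body: JSON.stringify(payload),
      signal,
    });
    if (!response.ok || !response.body) {
      throw new Error(`upstream error: ${response.status}`);
    }
    return response.body;
  }
}
```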
@@ -4876,9 +4912,9 @@ var init_anthropic = __esm({
4876
4912
  };
4877
4913
  return payload;
4878
4914
  }
4879
- async executeStreamRequest(payload) {
4915
+ async executeStreamRequest(payload, signal) {
4880
4916
  const client = this.client;
4881
- const stream2 = await client.messages.create(payload);
4917
+ const stream2 = await client.messages.create(payload, signal ? { signal } : void 0);
4882
4918
  return stream2;
4883
4919
  }
4884
4920
  async *wrapStream(iterable) {
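For the Anthropic SDK (and the OpenAI SDK in the hunk further below) the pass-through is direct: `create()` accepts a per-request options object as its second argument, and `signal ? { signal } : void 0` keeps the call identical to the previous behaviour whenever no signal is configured.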
@@ -5210,9 +5246,15 @@ var init_gemini = __esm({
5210
5246
  config
5211
5247
  };
5212
5248
  }
5213
- async executeStreamRequest(payload) {
5249
+ async executeStreamRequest(payload, signal) {
5214
5250
  const client = this.client;
5215
- const streamResponse = await client.models.generateContentStream(payload);
5251
+ const streamResponse = await client.models.generateContentStream({
5252
+ ...payload,
5253
+ config: {
5254
+ ...payload.config,
5255
+ ...signal ? { abortSignal: signal } : {}
5256
+ }
5257
+ });
5216
5258
  return streamResponse;
5217
5259
  }
5218
5260
  /**
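Gemini is the odd one out: the Google GenAI SDK takes the signal as `abortSignal` inside the request's `config` rather than as separate request options, hence the spread-merge of `payload.config` above.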
@@ -5801,9 +5843,9 @@ var init_openai = __esm({
5801
5843
  ...shouldIncludeTemperature ? { temperature } : {}
5802
5844
  };
5803
5845
  }
5804
- async executeStreamRequest(payload) {
5846
+ async executeStreamRequest(payload, signal) {
5805
5847
  const client = this.client;
5806
- const stream2 = await client.chat.completions.create(payload);
5848
+ const stream2 = await client.chat.completions.create(payload, signal ? { signal } : void 0);
5807
5849
  return stream2;
5808
5850
  }
5809
5851
  async *wrapStream(iterable) {