llmist 0.6.2 → 0.8.0

This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
 import { ZodType, ZodTypeAny } from 'zod';
 export { z } from 'zod';
-import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-B5R6XPif.cjs';
-export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, T as ChunkInterceptorContext, U as Controllers, ab as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, V as GadgetExecutionControllerContext, at as GadgetOrClass, W as GadgetParameterInterceptorContext, X as GadgetResultInterceptorContext, H as HistoryMessage, Y as Interceptors, Z as LLMCallControllerContext, _ as LLMErrorControllerContext, aa as LLMMessageBuilder, a9 as LLMRole, a8 as LLMist, a7 as LLMistOptions, $ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ac as ModelFeatures, ah as ModelIdentifierParser, ad as ModelLimits, ae as ModelPricing, a0 as ObserveChunkContext, a1 as ObserveGadgetCompleteContext, a2 as ObserveGadgetStartContext, a3 as ObserveLLMCallContext, a4 as ObserveLLMCompleteContext, a5 as ObserveLLMErrorContext, a6 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, af as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, ag as TokenUsage, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-B5R6XPif.cjs';
+import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-CjmvWDc3.cjs';
+export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, U as ChunkInterceptorContext, V as Controllers, ac as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, W as GadgetExecutionControllerContext, at as GadgetOrClass, X as GadgetParameterInterceptorContext, Y as GadgetResultInterceptorContext, H as HistoryMessage, Z as Interceptors, _ as LLMCallControllerContext, $ as LLMErrorControllerContext, ab as LLMMessageBuilder, aa as LLMRole, a9 as LLMist, a8 as LLMistOptions, a0 as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ad as ModelFeatures, ah as ModelIdentifierParser, ae as ModelLimits, af as ModelPricing, a1 as ObserveChunkContext, a2 as ObserveGadgetCompleteContext, a3 as ObserveGadgetStartContext, a4 as ObserveLLMCallContext, a5 as ObserveLLMCompleteContext, a6 as ObserveLLMErrorContext, a7 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, ag as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-CjmvWDc3.cjs';
 import { Logger, ILogObj } from 'tslog';
 import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
 import OpenAI from 'openai';
@@ -797,12 +797,8 @@ interface StreamProcessingResult {
     didExecuteGadgets: boolean;
     /** LLM finish reason */
     finishReason: string | null;
-    /** Token usage */
-    usage?: {
-        inputTokens: number;
-        outputTokens: number;
-        totalTokens: number;
-    };
+    /** Token usage (including cached token counts when available) */
+    usage?: TokenUsage;
     /** The raw accumulated response text */
     rawResponse: string;
     /** The final message (after interceptors) */
@@ -1660,6 +1656,7 @@ type GeminiChunk = {
         promptTokenCount?: number;
         candidatesTokenCount?: number;
         totalTokenCount?: number;
+        cachedContentTokenCount?: number;
     };
 };
 declare class GeminiGenerativeProvider extends BaseProviderAdapter {
@@ -1769,4 +1766,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
 }
 declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;
 
-export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, TokenUsage, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
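The change above replaces StreamProcessingResult's inline usage object with the shared TokenUsage type (now also exported from the package root), which carries optional cached-token counts. A minimal sketch of consuming the widened field, assuming the package is imported as `llmist` and the result comes from a StreamProcessor run:

```typescript
import type { StreamProcessingResult, TokenUsage } from "llmist";

// Sketch only: how `result` is produced (e.g. via StreamProcessor) is
// outside this diff, and the "llmist" import specifier is assumed.
function logCacheHitRate(result: StreamProcessingResult): void {
  const usage: TokenUsage | undefined = result.usage;
  if (!usage || usage.cachedInputTokens === undefined || usage.inputTokens === 0) return;
  // cachedInputTokens is documented as a subset of inputTokens
  const ratio = usage.cachedInputTokens / usage.inputTokens;
  console.log(`${(ratio * 100).toFixed(1)}% of input tokens were served from cache`);
}
```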
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
 import { ZodType, ZodTypeAny } from 'zod';
 export { z } from 'zod';
-import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-B5R6XPif.js';
-export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, T as ChunkInterceptorContext, U as Controllers, ab as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, V as GadgetExecutionControllerContext, at as GadgetOrClass, W as GadgetParameterInterceptorContext, X as GadgetResultInterceptorContext, H as HistoryMessage, Y as Interceptors, Z as LLMCallControllerContext, _ as LLMErrorControllerContext, aa as LLMMessageBuilder, a9 as LLMRole, a8 as LLMist, a7 as LLMistOptions, $ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ac as ModelFeatures, ah as ModelIdentifierParser, ad as ModelLimits, ae as ModelPricing, a0 as ObserveChunkContext, a1 as ObserveGadgetCompleteContext, a2 as ObserveGadgetStartContext, a3 as ObserveLLMCallContext, a4 as ObserveLLMCompleteContext, a5 as ObserveLLMErrorContext, a6 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, af as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, ag as TokenUsage, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-B5R6XPif.js';
+import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-CjmvWDc3.js';
+export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, U as ChunkInterceptorContext, V as Controllers, ac as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, W as GadgetExecutionControllerContext, at as GadgetOrClass, X as GadgetParameterInterceptorContext, Y as GadgetResultInterceptorContext, H as HistoryMessage, Z as Interceptors, _ as LLMCallControllerContext, $ as LLMErrorControllerContext, ab as LLMMessageBuilder, aa as LLMRole, a9 as LLMist, a8 as LLMistOptions, a0 as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ad as ModelFeatures, ah as ModelIdentifierParser, ae as ModelLimits, af as ModelPricing, a1 as ObserveChunkContext, a2 as ObserveGadgetCompleteContext, a3 as ObserveGadgetStartContext, a4 as ObserveLLMCallContext, a5 as ObserveLLMCompleteContext, a6 as ObserveLLMErrorContext, a7 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, ag as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-CjmvWDc3.js';
 import { Logger, ILogObj } from 'tslog';
 import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
 import OpenAI from 'openai';
@@ -797,12 +797,8 @@ interface StreamProcessingResult {
     didExecuteGadgets: boolean;
     /** LLM finish reason */
     finishReason: string | null;
-    /** Token usage */
-    usage?: {
-        inputTokens: number;
-        outputTokens: number;
-        totalTokens: number;
-    };
+    /** Token usage (including cached token counts when available) */
+    usage?: TokenUsage;
     /** The raw accumulated response text */
     rawResponse: string;
     /** The final message (after interceptors) */
@@ -1660,6 +1656,7 @@ type GeminiChunk = {
         promptTokenCount?: number;
         candidatesTokenCount?: number;
         totalTokenCount?: number;
+        cachedContentTokenCount?: number;
     };
 };
 declare class GeminiGenerativeProvider extends BaseProviderAdapter {
@@ -1769,4 +1766,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
 }
 declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;
 
-export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, TokenUsage, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
package/dist/index.js CHANGED
@@ -10,7 +10,7 @@ import {
   mockLLM,
   validateAndApplyDefaults,
   validateGadgetParams
-} from "./chunk-TSR25DAY.js";
+} from "./chunk-4IMGADVY.js";
 import {
   AgentBuilder,
   AnthropicMessagesProvider,
@@ -74,7 +74,7 @@ import {
   resolveRulesTemplate,
   runWithHandlers,
   stream
-} from "./chunk-DVK6ZQOV.js";
+} from "./chunk-62M4TDAK.js";
 
 // src/index.ts
 init_builder();
@@ -374,6 +374,8 @@ interface ModelPricing {
     output: number;
     /** Price per 1 million cached input tokens in USD (if supported) */
     cachedInput?: number;
+    /** Price per 1 million cache write tokens in USD (Anthropic: 1.25x input price) */
+    cacheWriteInput?: number;
 }
 interface ModelFeatures {
     /** Supports streaming responses */
@@ -426,6 +428,10 @@ interface ModelLimits {
 }
 interface CostEstimate {
     inputCost: number;
+    /** Cost for cached input tokens (already included in inputCost calculation) */
+    cachedInputCost: number;
+    /** Cost for cache creation tokens (already included in inputCost calculation, Anthropic only) */
+    cacheCreationCost: number;
     outputCost: number;
     totalCost: number;
     currency: "USD";
@@ -446,6 +452,10 @@ interface TokenUsage {
     inputTokens: number;
     outputTokens: number;
     totalTokens: number;
+    /** Number of input tokens served from cache (subset of inputTokens) */
+    cachedInputTokens?: number;
+    /** Number of input tokens written to cache (subset of inputTokens, Anthropic only) */
+    cacheCreationInputTokens?: number;
 }
 interface LLMStreamChunk {
     text: string;
@@ -587,12 +597,13 @@ declare class ModelRegistry {
     /**
      * Estimate API cost for a given model and token usage
      * @param modelId - Full model identifier
-     * @param inputTokens - Number of input tokens
+     * @param inputTokens - Number of input tokens (total, including cached and cache creation)
      * @param outputTokens - Number of output tokens
-     * @param useCachedInput - Whether to use cached input pricing (if supported by provider)
+     * @param cachedInputTokens - Number of cached input tokens (subset of inputTokens)
+     * @param cacheCreationInputTokens - Number of cache creation tokens (subset of inputTokens, Anthropic only)
      * @returns CostEstimate if model found, undefined otherwise
      */
-    estimateCost(modelId: string, inputTokens: number, outputTokens: number, useCachedInput?: boolean): CostEstimate | undefined;
+    estimateCost(modelId: string, inputTokens: number, outputTokens: number, cachedInputTokens?: number, cacheCreationInputTokens?: number): CostEstimate | undefined;
     /**
      * Validate that requested token count fits within model limits
      * @param modelId - Full model identifier
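With the new estimateCost signature, cached and cache-creation tokens are passed as subsets of the total input count instead of a single boolean flag. A hedged sketch of the call (the model id and registry construction are illustrative, not taken from this diff):

```typescript
import { ModelRegistry } from "llmist";

// Illustrative only: the model id and any ModelRegistry constructor
// arguments are assumptions, not shown in this diff.
const registry = new ModelRegistry();
const estimate = registry.estimateCost(
  "anthropic:claude-sonnet", // hypothetical model id
  10_000, // inputTokens: total, including both subsets below
  2_000,  // outputTokens
  8_000,  // cachedInputTokens (subset of inputTokens)
  1_000,  // cacheCreationInputTokens (subset of inputTokens, Anthropic only)
);
if (estimate) {
  // cachedInputCost and cacheCreationCost are broken out for reporting but,
  // per the CostEstimate docs above, are already folded into inputCost.
  console.log(estimate.inputCost, estimate.cachedInputCost, estimate.cacheCreationCost);
  console.log(estimate.totalCost, estimate.currency); // currency is always "USD"
}
```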
@@ -1179,11 +1190,8 @@ interface ObserveLLMCompleteContext {
     iteration: number;
     options: Readonly<LLMGenerationOptions>;
     finishReason: string | null;
-    usage?: {
-        inputTokens: number;
-        outputTokens: number;
-        totalTokens: number;
-    };
+    /** Token usage including cached token counts when available */
+    usage?: TokenUsage;
     /** The complete raw response text */
     rawResponse: string;
     /** The final message that will be added to history (after interceptors) */
@@ -1242,12 +1250,8 @@ interface ObserveChunkContext {
     rawChunk: string;
     /** Accumulated text so far */
     accumulatedText: string;
-    /** Token usage if available (Anthropic sends input tokens at stream start) */
-    usage?: {
-        inputTokens: number;
-        outputTokens: number;
-        totalTokens: number;
-    };
+    /** Token usage if available (providers send usage at stream start/end) */
+    usage?: TokenUsage;
     logger: Logger<ILogObj>;
 }
 /**
@@ -1387,11 +1391,8 @@ interface AfterLLMCallControllerContext {
     iteration: number;
     options: Readonly<LLMGenerationOptions>;
     finishReason: string | null;
-    usage?: {
-        inputTokens: number;
-        outputTokens: number;
-        totalTokens: number;
-    };
+    /** Token usage including cached token counts when available */
+    usage?: TokenUsage;
     /** The final message (after interceptors) that will be added to history */
     finalMessage: string;
     logger: Logger<ILogObj>;
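All three hook contexts above now share the TokenUsage shape, so one helper can report cache activity wherever usage appears. A minimal observer-style sketch (how the function is registered on an agent's Observers is assumed, not shown in this diff):

```typescript
import type { ObserveLLMCompleteContext } from "llmist";

// Sketch of an observer body for LLM-complete events; wiring it up via
// Observers/AgentHooks is outside this diff.
function reportUsage(ctx: ObserveLLMCompleteContext): void {
  if (!ctx.usage) return; // usage is optional; some providers omit it
  const cached = ctx.usage.cachedInputTokens ?? 0;
  const cacheWrites = ctx.usage.cacheCreationInputTokens ?? 0; // Anthropic only
  ctx.logger.info(
    `iteration ${ctx.iteration}: input=${ctx.usage.inputTokens} ` +
      `(cached=${cached}, cacheWrites=${cacheWrites}) output=${ctx.usage.outputTokens}`,
  );
}
```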
@@ -1565,6 +1566,18 @@ interface AgentOptions {
     }>;
     /** Text-only handler */
     textOnlyHandler?: TextOnlyHandler;
+    /**
+     * Handler for text content that appears alongside gadget calls.
+     * When set, text accompanying gadgets will be wrapped as a synthetic gadget call.
+     */
+    textWithGadgetsHandler?: {
+        /** Name of the gadget to use for wrapping text */
+        gadgetName: string;
+        /** Maps text content to gadget parameters */
+        parameterMapping: (text: string) => Record<string, unknown>;
+        /** Maps text content to the result string (optional, defaults to text) */
+        resultMapping?: (text: string) => string;
+    };
     /** Stop on gadget error */
     stopOnGadgetError?: boolean;
     /** Custom error continuation logic */
@@ -1612,6 +1625,7 @@ declare class Agent {
     private readonly gadgetEndPrefix?;
     private readonly onHumanInputRequired?;
     private readonly textOnlyHandler;
+    private readonly textWithGadgetsHandler?;
     private readonly stopOnGadgetError;
     private readonly shouldContinueAfterError?;
     private readonly defaultGadgetTimeoutMs?;
@@ -1741,6 +1755,7 @@ declare class AgentBuilder {
     private gadgetStartPrefix?;
     private gadgetEndPrefix?;
     private textOnlyHandler?;
+    private textWithGadgetsHandler?;
     private stopOnGadgetError?;
     private shouldContinueAfterError?;
     private defaultGadgetTimeoutMs?;
@@ -1949,6 +1964,31 @@ declare class AgentBuilder {
      * ```
      */
     withTextOnlyHandler(handler: TextOnlyHandler): this;
+    /**
+     * Set the handler for text content that appears alongside gadget calls.
+     *
+     * When set, text accompanying gadget responses will be wrapped as a
+     * synthetic gadget call before the actual gadget results in the
+     * conversation history.
+     *
+     * @param handler - Configuration for wrapping text
+     * @returns This builder for chaining
+     *
+     * @example
+     * ```typescript
+     * // Wrap text as TellUser gadget
+     * .withTextWithGadgetsHandler({
+     *   gadgetName: "TellUser",
+     *   parameterMapping: (text) => ({ message: text, done: false, type: "info" }),
+     *   resultMapping: (text) => `ℹ️ ${text}`,
+     * })
+     * ```
+     */
+    withTextWithGadgetsHandler(handler: {
+        gadgetName: string;
+        parameterMapping: (text: string) => Record<string, unknown>;
+        resultMapping?: (text: string) => string;
+    }): this;
     /**
      * Set whether to stop gadget execution on first error.
      *
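The builder method and the AgentOptions field added above describe the same handler. Configuring it as a standalone options value looks like this (a sketch; "TellUser" is assumed to be a registered gadget, and the other AgentOptions fields are omitted):

```typescript
import type { AgentOptions } from "llmist";

// Standalone handler value matching the AgentOptions field in this diff.
// resultMapping is optional and defaults to the text itself.
const textWithGadgetsHandler: NonNullable<AgentOptions["textWithGadgetsHandler"]> = {
  gadgetName: "TellUser", // hypothetical gadget name
  parameterMapping: (text) => ({ message: text, done: false, type: "info" }),
  resultMapping: (text) => `ℹ️ ${text}`,
};
```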
@@ -2047,6 +2087,37 @@ declare class AgentBuilder {
      * ```
      */
     withGadgetOutputLimitPercent(percent: number): this;
+    /**
+     * Add a synthetic gadget call to the conversation history.
+     *
+     * This is useful for in-context learning - showing the LLM what "past self"
+     * did correctly so it mimics the pattern. The call is formatted with proper
+     * markers and parameter format.
+     *
+     * @param gadgetName - Name of the gadget
+     * @param parameters - Parameters passed to the gadget
+     * @param result - Result returned by the gadget
+     * @returns This builder for chaining
+     *
+     * @example
+     * ```typescript
+     * .withSyntheticGadgetCall(
+     *   'TellUser',
+     *   {
+     *     message: '👋 Hello!\n\nHere\'s what I can do:\n- Analyze code\n- Run commands',
+     *     done: false,
+     *     type: 'info'
+     *   },
+     *   'ℹ️ 👋 Hello!\n\nHere\'s what I can do:\n- Analyze code\n- Run commands'
+     * )
+     * ```
+     */
+    withSyntheticGadgetCall(gadgetName: string, parameters: Record<string, unknown>, result: string): this;
+    /**
+     * Format parameters for synthetic gadget calls.
+     * Uses heredoc for multiline string values.
+     */
+    private formatSyntheticParameters;
     /**
      * Build and create the agent with the given user prompt.
      * Returns the Agent instance ready to run.
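The two new builder methods compose naturally: a synthetic call seeds the history with the pattern the handler will keep producing. A chaining sketch (builder construction and the final build step are assumed; this diff only shows the two methods used):

```typescript
import type { AgentBuilder } from "llmist";

// `builder` stands in for a configured AgentBuilder; how it is created and
// built into an Agent is not shown in this diff.
declare const builder: AgentBuilder;

builder
  .withTextWithGadgetsHandler({
    gadgetName: "TellUser", // hypothetical gadget name
    parameterMapping: (text) => ({ message: text, done: false, type: "info" }),
  })
  // Seed the history with one correctly formatted example call
  .withSyntheticGadgetCall(
    "TellUser",
    { message: "👋 Hello!", done: false, type: "info" },
    "ℹ️ 👋 Hello!",
  );
```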
@@ -2702,4 +2773,4 @@ declare function createTextMockStream(text: string, options?: {
     usage?: MockResponse["usage"];
 }): LLMStream;
 
-export { type MessageInterceptorContext as $, type AgentHooks as A, BaseGadget as B, collectText as C, runWithHandlers as D, type EventHandlers as E, type AfterGadgetExecutionAction as F, GadgetRegistry as G, type HistoryMessage as H, type AfterGadgetExecutionControllerContext as I, type AfterLLMCallAction as J, type AfterLLMCallControllerContext as K, type LLMMessage as L, MockProviderAdapter as M, type AfterLLMErrorAction as N, type AgentOptions as O, type ParameterFormat as P, type BeforeGadgetExecutionAction as Q, type BeforeLLMCallAction as R, type StreamEvent as S, type ChunkInterceptorContext as T, type Controllers as U, type GadgetExecutionControllerContext as V, type GadgetParameterInterceptorContext as W, type GadgetResultInterceptorContext as X, type Interceptors as Y, type LLMCallControllerContext as Z, type LLMErrorControllerContext as _, MockBuilder as a, type ObserveChunkContext as a0, type ObserveGadgetCompleteContext as a1, type ObserveGadgetStartContext as a2, type ObserveLLMCallContext as a3, type ObserveLLMCompleteContext as a4, type ObserveLLMErrorContext as a5, type Observers as a6, type LLMistOptions as a7, LLMist as a8, type LLMRole as a9, LLMMessageBuilder as aa, type CostEstimate as ab, type ModelFeatures as ac, type ModelLimits as ad, type ModelPricing as ae, type ProviderIdentifier as af, type TokenUsage as ag, ModelIdentifierParser as ah, type PromptConfig as ai, type PromptContext as aj, type PromptTemplate as ak, DEFAULT_PROMPTS as al, resolvePromptTemplate as am, resolveRulesTemplate as an, type QuickOptions as ao, complete as ap, stream as aq, StreamParser as ar, type GadgetClass as as, type GadgetOrClass as at, type TextOnlyAction as au, type TextOnlyContext as av, type TextOnlyCustomHandler as aw, type TextOnlyGadgetConfig as ax, type TextOnlyHandler as ay, type TextOnlyStrategy as az, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, ModelRegistry as o, type LLMStreamChunk as p, type GadgetExample as q, type ParsedGadgetCall as r, type GadgetExecutionResult as s, type ProviderAdapter as t, type ModelDescriptor as u, type ModelSpec as v, type LLMGenerationOptions as w, type LLMStream as x, AgentBuilder as y, collectEvents as z };
+export { type LLMErrorControllerContext as $, type AgentHooks as A, BaseGadget as B, collectText as C, runWithHandlers as D, type EventHandlers as E, type AfterGadgetExecutionAction as F, GadgetRegistry as G, type HistoryMessage as H, type AfterGadgetExecutionControllerContext as I, type AfterLLMCallAction as J, type AfterLLMCallControllerContext as K, type LLMMessage as L, MockProviderAdapter as M, type AfterLLMErrorAction as N, type AgentOptions as O, type ParameterFormat as P, type BeforeGadgetExecutionAction as Q, type BeforeLLMCallAction as R, type StreamEvent as S, type TokenUsage as T, type ChunkInterceptorContext as U, type Controllers as V, type GadgetExecutionControllerContext as W, type GadgetParameterInterceptorContext as X, type GadgetResultInterceptorContext as Y, type Interceptors as Z, type LLMCallControllerContext as _, MockBuilder as a, type MessageInterceptorContext as a0, type ObserveChunkContext as a1, type ObserveGadgetCompleteContext as a2, type ObserveGadgetStartContext as a3, type ObserveLLMCallContext as a4, type ObserveLLMCompleteContext as a5, type ObserveLLMErrorContext as a6, type Observers as a7, type LLMistOptions as a8, LLMist as a9, type LLMRole as aa, LLMMessageBuilder as ab, type CostEstimate as ac, type ModelFeatures as ad, type ModelLimits as ae, type ModelPricing as af, type ProviderIdentifier as ag, ModelIdentifierParser as ah, type PromptConfig as ai, type PromptContext as aj, type PromptTemplate as ak, DEFAULT_PROMPTS as al, resolvePromptTemplate as am, resolveRulesTemplate as an, type QuickOptions as ao, complete as ap, stream as aq, StreamParser as ar, type GadgetClass as as, type GadgetOrClass as at, type TextOnlyAction as au, type TextOnlyContext as av, type TextOnlyCustomHandler as aw, type TextOnlyGadgetConfig as ax, type TextOnlyHandler as ay, type TextOnlyStrategy as az, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, ModelRegistry as o, type LLMStreamChunk as p, type GadgetExample as q, type ParsedGadgetCall as r, type GadgetExecutionResult as s, type ProviderAdapter as t, type ModelDescriptor as u, type ModelSpec as v, type LLMGenerationOptions as w, type LLMStream as x, AgentBuilder as y, collectEvents as z };