llmist 0.7.0 → 0.8.0

This diff represents the content of publicly available package versions as released to their public registry, and is provided for informational purposes only.
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-B2qwECvd.cjs';
- export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, T as ChunkInterceptorContext, U as Controllers, ab as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, V as GadgetExecutionControllerContext, at as GadgetOrClass, W as GadgetParameterInterceptorContext, X as GadgetResultInterceptorContext, H as HistoryMessage, Y as Interceptors, Z as LLMCallControllerContext, _ as LLMErrorControllerContext, aa as LLMMessageBuilder, a9 as LLMRole, a8 as LLMist, a7 as LLMistOptions, $ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ac as ModelFeatures, ah as ModelIdentifierParser, ad as ModelLimits, ae as ModelPricing, a0 as ObserveChunkContext, a1 as ObserveGadgetCompleteContext, a2 as ObserveGadgetStartContext, a3 as ObserveLLMCallContext, a4 as ObserveLLMCompleteContext, a5 as ObserveLLMErrorContext, a6 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, af as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, ag as TokenUsage, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-B2qwECvd.cjs';
+ import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-CjmvWDc3.cjs';
+ export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, U as ChunkInterceptorContext, V as Controllers, ac as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, W as GadgetExecutionControllerContext, at as GadgetOrClass, X as GadgetParameterInterceptorContext, Y as GadgetResultInterceptorContext, H as HistoryMessage, Z as Interceptors, _ as LLMCallControllerContext, $ as LLMErrorControllerContext, ab as LLMMessageBuilder, aa as LLMRole, a9 as LLMist, a8 as LLMistOptions, a0 as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ad as ModelFeatures, ah as ModelIdentifierParser, ae as ModelLimits, af as ModelPricing, a1 as ObserveChunkContext, a2 as ObserveGadgetCompleteContext, a3 as ObserveGadgetStartContext, a4 as ObserveLLMCallContext, a5 as ObserveLLMCompleteContext, a6 as ObserveLLMErrorContext, a7 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, ag as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-CjmvWDc3.cjs';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -797,12 +797,8 @@ interface StreamProcessingResult {
  didExecuteGadgets: boolean;
  /** LLM finish reason */
  finishReason: string | null;
- /** Token usage */
- usage?: {
-   inputTokens: number;
-   outputTokens: number;
-   totalTokens: number;
- };
+ /** Token usage (including cached token counts when available) */
+ usage?: TokenUsage;
  /** The raw accumulated response text */
  rawResponse: string;
  /** The final message (after interceptors) */
@@ -1660,6 +1656,7 @@ type GeminiChunk = {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
+ cachedContentTokenCount?: number;
  };
  };
  declare class GeminiGenerativeProvider extends BaseProviderAdapter {
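
The new cachedContentTokenCount field mirrors Gemini's usageMetadata and is presumably what feeds TokenUsage.cachedInputTokens for Gemini streams. A minimal sketch of that mapping, under that assumption (the adapter's actual conversion code is not part of this diff):

import type { TokenUsage } from "llmist";

// Subset of GeminiChunk's usageMetadata as declared above.
type GeminiUsageMetadata = {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
  cachedContentTokenCount?: number;
};

// Presumed mapping into the provider-agnostic TokenUsage shape.
function toTokenUsage(meta: GeminiUsageMetadata): TokenUsage {
  return {
    inputTokens: meta.promptTokenCount ?? 0,
    outputTokens: meta.candidatesTokenCount ?? 0,
    totalTokens: meta.totalTokenCount ?? 0,
    cachedInputTokens: meta.cachedContentTokenCount, // subset of inputTokens
  };
}
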
@@ -1769,4 +1766,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;
 
- export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+ export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, TokenUsage, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-B2qwECvd.js';
- export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, T as ChunkInterceptorContext, U as Controllers, ab as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, V as GadgetExecutionControllerContext, at as GadgetOrClass, W as GadgetParameterInterceptorContext, X as GadgetResultInterceptorContext, H as HistoryMessage, Y as Interceptors, Z as LLMCallControllerContext, _ as LLMErrorControllerContext, aa as LLMMessageBuilder, a9 as LLMRole, a8 as LLMist, a7 as LLMistOptions, $ as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ac as ModelFeatures, ah as ModelIdentifierParser, ad as ModelLimits, ae as ModelPricing, a0 as ObserveChunkContext, a1 as ObserveGadgetCompleteContext, a2 as ObserveGadgetStartContext, a3 as ObserveLLMCallContext, a4 as ObserveLLMCompleteContext, a5 as ObserveLLMErrorContext, a6 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, af as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, ag as TokenUsage, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-B2qwECvd.js';
+ import { A as AgentHooks, o as ModelRegistry, L as LLMMessage, P as ParameterFormat, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, p as LLMStreamChunk, q as GadgetExample, B as BaseGadget, r as ParsedGadgetCall, s as GadgetExecutionResult, t as ProviderAdapter, u as ModelDescriptor, v as ModelSpec, w as LLMGenerationOptions, x as LLMStream } from './mock-stream-CjmvWDc3.js';
+ export { F as AfterGadgetExecutionAction, I as AfterGadgetExecutionControllerContext, J as AfterLLMCallAction, K as AfterLLMCallControllerContext, N as AfterLLMErrorAction, y as AgentBuilder, O as AgentOptions, Q as BeforeGadgetExecutionAction, R as BeforeLLMCallAction, U as ChunkInterceptorContext, V as Controllers, ac as CostEstimate, al as DEFAULT_PROMPTS, E as EventHandlers, as as GadgetClass, W as GadgetExecutionControllerContext, at as GadgetOrClass, X as GadgetParameterInterceptorContext, Y as GadgetResultInterceptorContext, H as HistoryMessage, Z as Interceptors, _ as LLMCallControllerContext, $ as LLMErrorControllerContext, ab as LLMMessageBuilder, aa as LLMRole, a9 as LLMist, a8 as LLMistOptions, a0 as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, ad as ModelFeatures, ah as ModelIdentifierParser, ae as ModelLimits, af as ModelPricing, a1 as ObserveChunkContext, a2 as ObserveGadgetCompleteContext, a3 as ObserveGadgetStartContext, a4 as ObserveLLMCallContext, a5 as ObserveLLMCompleteContext, a6 as ObserveLLMErrorContext, a7 as Observers, ai as PromptConfig, aj as PromptContext, ak as PromptTemplate, ag as ProviderIdentifier, ao as QuickOptions, ar as StreamParser, au as TextOnlyAction, av as TextOnlyContext, aw as TextOnlyCustomHandler, ax as TextOnlyGadgetConfig, ay as TextOnlyHandler, az as TextOnlyStrategy, z as collectEvents, C as collectText, ap as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, am as resolvePromptTemplate, an as resolveRulesTemplate, D as runWithHandlers, aq as stream } from './mock-stream-CjmvWDc3.js';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -797,12 +797,8 @@ interface StreamProcessingResult {
  didExecuteGadgets: boolean;
  /** LLM finish reason */
  finishReason: string | null;
- /** Token usage */
- usage?: {
-   inputTokens: number;
-   outputTokens: number;
-   totalTokens: number;
- };
+ /** Token usage (including cached token counts when available) */
+ usage?: TokenUsage;
  /** The raw accumulated response text */
  rawResponse: string;
  /** The final message (after interceptors) */
@@ -1660,6 +1656,7 @@ type GeminiChunk = {
  promptTokenCount?: number;
  candidatesTokenCount?: number;
  totalTokenCount?: number;
+ cachedContentTokenCount?: number;
  };
  };
  declare class GeminiGenerativeProvider extends BaseProviderAdapter {
@@ -1769,4 +1766,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;
 
- export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
+ export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExample, GadgetExecutionResult, GadgetExecutor, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, TokenUsage, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };
package/dist/index.js CHANGED
@@ -10,7 +10,7 @@ import {
  mockLLM,
  validateAndApplyDefaults,
  validateGadgetParams
- } from "./chunk-CTC2WJZA.js";
+ } from "./chunk-4IMGADVY.js";
  import {
  AgentBuilder,
  AnthropicMessagesProvider,
@@ -74,7 +74,7 @@ import {
  resolveRulesTemplate,
  runWithHandlers,
  stream
- } from "./chunk-ZFHFBEQ5.js";
+ } from "./chunk-62M4TDAK.js";

  // src/index.ts
  init_builder();
@@ -374,6 +374,8 @@ interface ModelPricing {
  output: number;
  /** Price per 1 million cached input tokens in USD (if supported) */
  cachedInput?: number;
+ /** Price per 1 million cache write tokens in USD (Anthropic: 1.25x input price) */
+ cacheWriteInput?: number;
  }
  interface ModelFeatures {
  /** Supports streaming responses */
@@ -426,6 +428,10 @@ interface ModelLimits {
  }
  interface CostEstimate {
  inputCost: number;
+ /** Cost for cached input tokens (already included in inputCost calculation) */
+ cachedInputCost: number;
+ /** Cost for cache creation tokens (already included in inputCost calculation, Anthropic only) */
+ cacheCreationCost: number;
  outputCost: number;
  totalCost: number;
  currency: "USD";
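
cachedInputCost and cacheCreationCost are documented as already folded into inputCost, so the pricing presumably composes as below. This is a sketch under that assumption, not the registry's actual arithmetic; pricing.input is the per-1M uncached input price declared earlier in ModelPricing:

import type { ModelPricing } from "llmist";

// Prices are per 1 million tokens; token counts come from TokenUsage.
function sketchInputCost(
  pricing: ModelPricing,
  inputTokens: number,
  cachedInputTokens = 0,
  cacheCreationInputTokens = 0,
) {
  const uncached = inputTokens - cachedInputTokens - cacheCreationInputTokens;
  const cachedInputCost =
    (cachedInputTokens * (pricing.cachedInput ?? pricing.input)) / 1_000_000;
  const cacheCreationCost =
    (cacheCreationInputTokens * (pricing.cacheWriteInput ?? pricing.input)) / 1_000_000;
  // inputCost already includes both cache components, per the doc comments above.
  const inputCost =
    (uncached * pricing.input) / 1_000_000 + cachedInputCost + cacheCreationCost;
  return { inputCost, cachedInputCost, cacheCreationCost };
}
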
@@ -446,6 +452,10 @@ interface TokenUsage {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
+ /** Number of input tokens served from cache (subset of inputTokens) */
+ cachedInputTokens?: number;
+ /** Number of input tokens written to cache (subset of inputTokens, Anthropic only) */
+ cacheCreationInputTokens?: number;
  }
  interface LLMStreamChunk {
  text: string;
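
Both new TokenUsage fields are optional and are subsets of inputTokens, so pre-0.8.0 consumers that only read the three original counters keep working unchanged. An illustrative value (numbers invented) for an Anthropic call that mostly hit the prompt cache:

import type { TokenUsage } from "llmist";

const usage: TokenUsage = {
  inputTokens: 12_000,             // total billed input tokens
  outputTokens: 800,
  totalTokens: 12_800,
  cachedInputTokens: 10_000,       // served from cache (subset of inputTokens)
  cacheCreationInputTokens: 1_500, // written to cache (Anthropic only)
};
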
@@ -587,12 +597,13 @@ declare class ModelRegistry {
  /**
  * Estimate API cost for a given model and token usage
  * @param modelId - Full model identifier
- * @param inputTokens - Number of input tokens
+ * @param inputTokens - Number of input tokens (total, including cached and cache creation)
  * @param outputTokens - Number of output tokens
- * @param useCachedInput - Whether to use cached input pricing (if supported by provider)
+ * @param cachedInputTokens - Number of cached input tokens (subset of inputTokens)
+ * @param cacheCreationInputTokens - Number of cache creation tokens (subset of inputTokens, Anthropic only)
  * @returns CostEstimate if model found, undefined otherwise
  */
- estimateCost(modelId: string, inputTokens: number, outputTokens: number, useCachedInput?: boolean): CostEstimate | undefined;
+ estimateCost(modelId: string, inputTokens: number, outputTokens: number, cachedInputTokens?: number, cacheCreationInputTokens?: number): CostEstimate | undefined;
  /**
  * Validate that requested token count fits within model limits
  * @param modelId - Full model identifier
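
The old boolean useCachedInput flag is replaced by explicit per-category token counts, which is a breaking change for positional callers of estimateCost. A sketch of the new call shape; the model id and token numbers are illustrative, and the no-argument ModelRegistry constructor is an assumption:

import { ModelRegistry } from "llmist";

const registry = new ModelRegistry(); // assumed constructor
const estimate = registry.estimateCost(
  "anthropic:claude-sonnet-4", // hypothetical model id
  12_000, // inputTokens: total, including both cache counts below
  800,    // outputTokens
  10_000, // cachedInputTokens (subset of inputTokens)
  1_500,  // cacheCreationInputTokens (subset of inputTokens, Anthropic only)
);
// estimate?.cachedInputCost and estimate?.cacheCreationCost are informational
// breakdowns; per the doc comments they are already included in inputCost.
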
@@ -1179,11 +1190,8 @@ interface ObserveLLMCompleteContext {
  iteration: number;
  options: Readonly<LLMGenerationOptions>;
  finishReason: string | null;
- usage?: {
-   inputTokens: number;
-   outputTokens: number;
-   totalTokens: number;
- };
+ /** Token usage including cached token counts when available */
+ usage?: TokenUsage;
  /** The complete raw response text */
  rawResponse: string;
  /** The final message that will be added to history (after interceptors) */
@@ -1242,12 +1250,8 @@ interface ObserveChunkContext {
  rawChunk: string;
  /** Accumulated text so far */
  accumulatedText: string;
- /** Token usage if available (Anthropic sends input tokens at stream start) */
- usage?: {
-   inputTokens: number;
-   outputTokens: number;
-   totalTokens: number;
- };
+ /** Token usage if available (providers send usage at stream start/end) */
+ usage?: TokenUsage;
  logger: Logger<ILogObj>;
  }
  /**
@@ -1387,11 +1391,8 @@ interface AfterLLMCallControllerContext {
  iteration: number;
  options: Readonly<LLMGenerationOptions>;
  finishReason: string | null;
- usage?: {
-   inputTokens: number;
-   outputTokens: number;
-   totalTokens: number;
- };
+ /** Token usage including cached token counts when available */
+ usage?: TokenUsage;
  /** The final message (after interceptors) that will be added to history */
  finalMessage: string;
  logger: Logger<ILogObj>;
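
Because hook contexts now carry the full TokenUsage, cache effectiveness is observable without provider-specific plumbing. A sketch using only fields visible in this declaration (usage and logger):

import type { AfterLLMCallControllerContext } from "llmist";

function logCacheHitRate(ctx: AfterLLMCallControllerContext): void {
  const { usage, logger } = ctx;
  if (!usage?.cachedInputTokens || usage.inputTokens === 0) return;
  const pct = (100 * usage.cachedInputTokens) / usage.inputTokens;
  logger.info(`prompt cache hit rate: ${pct.toFixed(1)}%`);
}
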
@@ -2772,4 +2773,4 @@ declare function createTextMockStream(text: string, options?: {
  usage?: MockResponse["usage"];
  }): LLMStream;
 
- export { type MessageInterceptorContext as $, type AgentHooks as A, BaseGadget as B, collectText as C, runWithHandlers as D, type EventHandlers as E, type AfterGadgetExecutionAction as F, GadgetRegistry as G, type HistoryMessage as H, type AfterGadgetExecutionControllerContext as I, type AfterLLMCallAction as J, type AfterLLMCallControllerContext as K, type LLMMessage as L, MockProviderAdapter as M, type AfterLLMErrorAction as N, type AgentOptions as O, type ParameterFormat as P, type BeforeGadgetExecutionAction as Q, type BeforeLLMCallAction as R, type StreamEvent as S, type ChunkInterceptorContext as T, type Controllers as U, type GadgetExecutionControllerContext as V, type GadgetParameterInterceptorContext as W, type GadgetResultInterceptorContext as X, type Interceptors as Y, type LLMCallControllerContext as Z, type LLMErrorControllerContext as _, MockBuilder as a, type ObserveChunkContext as a0, type ObserveGadgetCompleteContext as a1, type ObserveGadgetStartContext as a2, type ObserveLLMCallContext as a3, type ObserveLLMCompleteContext as a4, type ObserveLLMErrorContext as a5, type Observers as a6, type LLMistOptions as a7, LLMist as a8, type LLMRole as a9, LLMMessageBuilder as aa, type CostEstimate as ab, type ModelFeatures as ac, type ModelLimits as ad, type ModelPricing as ae, type ProviderIdentifier as af, type TokenUsage as ag, ModelIdentifierParser as ah, type PromptConfig as ai, type PromptContext as aj, type PromptTemplate as ak, DEFAULT_PROMPTS as al, resolvePromptTemplate as am, resolveRulesTemplate as an, type QuickOptions as ao, complete as ap, stream as aq, StreamParser as ar, type GadgetClass as as, type GadgetOrClass as at, type TextOnlyAction as au, type TextOnlyContext as av, type TextOnlyCustomHandler as aw, type TextOnlyGadgetConfig as ax, type TextOnlyHandler as ay, type TextOnlyStrategy as az, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, ModelRegistry as o, type LLMStreamChunk as p, type GadgetExample as q, type ParsedGadgetCall as r, type GadgetExecutionResult as s, type ProviderAdapter as t, type ModelDescriptor as u, type ModelSpec as v, type LLMGenerationOptions as w, type LLMStream as x, AgentBuilder as y, collectEvents as z };
+ export { type LLMErrorControllerContext as $, type AgentHooks as A, BaseGadget as B, collectText as C, runWithHandlers as D, type EventHandlers as E, type AfterGadgetExecutionAction as F, GadgetRegistry as G, type HistoryMessage as H, type AfterGadgetExecutionControllerContext as I, type AfterLLMCallAction as J, type AfterLLMCallControllerContext as K, type LLMMessage as L, MockProviderAdapter as M, type AfterLLMErrorAction as N, type AgentOptions as O, type ParameterFormat as P, type BeforeGadgetExecutionAction as Q, type BeforeLLMCallAction as R, type StreamEvent as S, type TokenUsage as T, type ChunkInterceptorContext as U, type Controllers as V, type GadgetExecutionControllerContext as W, type GadgetParameterInterceptorContext as X, type GadgetResultInterceptorContext as Y, type Interceptors as Z, type LLMCallControllerContext as _, MockBuilder as a, type MessageInterceptorContext as a0, type ObserveChunkContext as a1, type ObserveGadgetCompleteContext as a2, type ObserveGadgetStartContext as a3, type ObserveLLMCallContext as a4, type ObserveLLMCompleteContext as a5, type ObserveLLMErrorContext as a6, type Observers as a7, type LLMistOptions as a8, LLMist as a9, type LLMRole as aa, LLMMessageBuilder as ab, type CostEstimate as ac, type ModelFeatures as ad, type ModelLimits as ae, type ModelPricing as af, type ProviderIdentifier as ag, ModelIdentifierParser as ah, type PromptConfig as ai, type PromptContext as aj, type PromptTemplate as ak, DEFAULT_PROMPTS as al, resolvePromptTemplate as am, resolveRulesTemplate as an, type QuickOptions as ao, complete as ap, stream as aq, StreamParser as ar, type GadgetClass as as, type GadgetOrClass as at, type TextOnlyAction as au, type TextOnlyContext as av, type TextOnlyCustomHandler as aw, type TextOnlyGadgetConfig as ax, type TextOnlyHandler as ay, type TextOnlyStrategy as az, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, ModelRegistry as o, type LLMStreamChunk as p, type GadgetExample as q, type ParsedGadgetCall as r, type GadgetExecutionResult as s, type ProviderAdapter as t, type ModelDescriptor as u, type ModelSpec as v, type LLMGenerationOptions as w, type LLMStream as x, AgentBuilder as y, collectEvents as z };