llmist 2.6.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, s as ModelRegistry, t as LLMist, C as CompactionConfig, I as IConversationManager, u as CompactionEvent, L as LLMMessage, v as CompactionStats, w as CompactionStrategy, R as ResolvedCompactionConfig, x as CompactionContext, y as CompactionResult, z as MessageContent, G as GadgetMediaOutput, H as HintTemplate, S as StreamEvent, T as TokenUsage, D as GadgetRegistry, E as MediaStore, b as LLMStreamChunk, F as ExecutionContext, J as GadgetExecuteReturn, K as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, N as GadgetExecutionResult, O as MediaKind, Q as MediaMetadata, U as GadgetExecuteResultWithMedia, V as ProviderAdapter, W as ModelDescriptor, X as ModelSpec, Y as LLMGenerationOptions, a as LLMStream, Z as ImageModelSpec, _ as ImageGenerationOptions, $ as ImageGenerationResult, a0 as SpeechModelSpec, a1 as SpeechGenerationOptions, a2 as SpeechGenerationResult } from './mock-stream-Jgg5u6Uf.cjs';
- export { ab as AfterGadgetExecutionAction, ac as AfterGadgetExecutionControllerContext, ad as AfterLLMCallAction, ae as AfterLLMCallControllerContext, af as AfterLLMErrorAction, a6 as AgentBuilder, ag as AgentOptions, aE as AudioContentPart, aF as AudioMimeType, aG as AudioSource, ah as BeforeGadgetExecutionAction, ai as BeforeLLMCallAction, aj as ChunkInterceptorContext, aH as ContentPart, ak as Controllers, b4 as CostEstimate, bq as CostReportingLLMist, aB as DEFAULT_COMPACTION_CONFIG, bg as DEFAULT_HINTS, bh as DEFAULT_PROMPTS, aC as DEFAULT_SUMMARIZATION_PROMPT, a7 as EventHandlers, bo as GadgetClass, br as GadgetExecuteResult, al as GadgetExecutionControllerContext, bp as GadgetOrClass, am as GadgetParameterInterceptorContext, an as GadgetResultInterceptorContext, bs as GadgetSkippedEvent, bc as HintContext, a3 as HistoryMessage, aI as ImageBase64Source, aJ as ImageContentPart, aK as ImageMimeType, aL as ImageSource, aM as ImageUrlSource, ao as Interceptors, ap as LLMCallControllerContext, aq as LLMErrorControllerContext, b2 as LLMMessageBuilder, b0 as LLMRole, aD as LLMistOptions, ar as MessageInterceptorContext, as as MessageTurn, d as MockBuilder, f as MockManager, l as MockMatcher, n as MockMatcherContext, o as MockOptions, M as MockProviderAdapter, p as MockRegistration, q as MockResponse, r as MockStats, b5 as ModelFeatures, bb as ModelIdentifierParser, b6 as ModelLimits, b7 as ModelPricing, at as ObserveChunkContext, au as ObserveCompactionContext, av as ObserveGadgetCompleteContext, aw as ObserveGadgetStartContext, ax as ObserveLLMCallContext, ay as ObserveLLMCompleteContext, az as ObserveLLMErrorContext, aA as Observers, bd as PromptConfig, be as PromptContext, bf as PromptTemplate, ba as ProviderIdentifier, bl as QuickOptions, bt as StoredMedia, aN as TextContentPart, bu as TextOnlyAction, bv as TextOnlyContext, bw as TextOnlyCustomHandler, bx as TextOnlyGadgetConfig, by as TextOnlyHandler, bz as TextOnlyStrategy, a4 as TrailingMessage, a5 as TrailingMessageContext, b8 as VisionAnalyzeOptions, b9 as VisionAnalyzeResult, aO as audioFromBase64, aP as audioFromBuffer, a8 as collectEvents, a9 as collectText, bm as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, aQ as detectAudioMimeType, aR as detectImageMimeType, b1 as extractText, g as getMockManager, aS as imageFromBase64, aT as imageFromBuffer, aU as imageFromUrl, aV as isAudioPart, aW as isDataUrl, aX as isImagePart, aY as isTextPart, m as mockLLM, b3 as normalizeContent, aZ as parseDataUrl, bi as resolveHintTemplate, bj as resolvePromptTemplate, bk as resolveRulesTemplate, aa as runWithHandlers, bn as stream, a_ as text, a$ as toBase64 } from './mock-stream-Jgg5u6Uf.cjs';
+ import { s as AgentHooks, t as ModelRegistry, u as LLMist, C as CompactionConfig, I as IConversationManager, v as CompactionEvent, L as LLMMessage, w as CompactionStats, x as CompactionStrategy, R as ResolvedCompactionConfig, y as CompactionContext, z as CompactionResult, B as MessageContent, G as GadgetMediaOutput, H as HintTemplate, S as StreamEvent, T as TokenUsage, D as GadgetRegistry, E as MediaStore, F as AgentContextConfig, J as SubagentConfigMap, b as LLMStreamChunk, K as ExecutionContext, N as GadgetExecuteReturn, O as GadgetExample, A as AbstractGadget, P as ParsedGadgetCall, Q as GadgetExecutionResult, U as MediaKind, V as MediaMetadata, W as GadgetExecuteResultWithMedia, X as ProviderAdapter, Y as ModelDescriptor, Z as ModelSpec, _ as LLMGenerationOptions, a as LLMStream, $ as ImageModelSpec, a0 as ImageGenerationOptions, a1 as ImageGenerationResult, a2 as SpeechModelSpec, a3 as SpeechGenerationOptions, a4 as SpeechGenerationResult } from './mock-stream-COHw8h9b.cjs';
+ export { ad as AfterGadgetExecutionAction, ae as AfterGadgetExecutionControllerContext, af as AfterLLMCallAction, ag as AfterLLMCallControllerContext, ah as AfterLLMErrorAction, a8 as AgentBuilder, ai as AgentOptions, aG as AudioContentPart, aH as AudioMimeType, aI as AudioSource, aj as BeforeGadgetExecutionAction, ak as BeforeLLMCallAction, al as ChunkInterceptorContext, aJ as ContentPart, am as Controllers, b6 as CostEstimate, bs as CostReportingLLMist, aD as DEFAULT_COMPACTION_CONFIG, bi as DEFAULT_HINTS, bj as DEFAULT_PROMPTS, aE as DEFAULT_SUMMARIZATION_PROMPT, a9 as EventHandlers, bq as GadgetClass, bt as GadgetExecuteResult, an as GadgetExecutionControllerContext, br as GadgetOrClass, ao as GadgetParameterInterceptorContext, ap as GadgetResultInterceptorContext, bu as GadgetSkippedEvent, be as HintContext, a5 as HistoryMessage, aK as ImageBase64Source, aL as ImageContentPart, aM as ImageMimeType, aN as ImageSource, aO as ImageUrlSource, aq as Interceptors, ar as LLMCallControllerContext, as as LLMErrorControllerContext, b4 as LLMMessageBuilder, aF as LLMistOptions, at as MessageInterceptorContext, b2 as MessageRole, au as MessageTurn, d as MockBuilder, f as MockManager, l as MockMatcher, n as MockMatcherContext, o as MockOptions, M as MockProviderAdapter, p as MockRegistration, q as MockResponse, r as MockStats, b7 as ModelFeatures, bd as ModelIdentifierParser, b8 as ModelLimits, b9 as ModelPricing, av as ObserveChunkContext, aw as ObserveCompactionContext, ax as ObserveGadgetCompleteContext, ay as ObserveGadgetStartContext, az as ObserveLLMCallContext, aA as ObserveLLMCompleteContext, aB as ObserveLLMErrorContext, aC as Observers, bf as PromptContext, bg as PromptTemplate, bh as PromptTemplateConfig, bc as ProviderIdentifier, bv as StoredMedia, aP as TextContentPart, bn as TextGenerationOptions, bw as TextOnlyAction, bx as TextOnlyContext, by as TextOnlyCustomHandler, bz as TextOnlyGadgetConfig, bA as TextOnlyHandler, bB as TextOnlyStrategy, a6 as TrailingMessage, a7 as TrailingMessageContext, ba as VisionAnalyzeOptions, bb as VisionAnalyzeResult, aQ as audioFromBase64, aR as audioFromBuffer, aa as collectEvents, ab as collectText, bo as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, aS as detectAudioMimeType, aT as detectImageMimeType, b3 as extractMessageText, g as getMockManager, aU as imageFromBase64, aV as imageFromBuffer, aW as imageFromUrl, aX as isAudioPart, aY as isDataUrl, aZ as isImagePart, a_ as isTextPart, m as mockLLM, b5 as normalizeMessageContent, a$ as parseDataUrl, bk as resolveHintTemplate, bl as resolvePromptTemplate, bm as resolveRulesTemplate, ac as runWithHandlers, bp as stream, b0 as text, b1 as toBase64 } from './mock-stream-COHw8h9b.cjs';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -888,7 +888,7 @@ declare class ConversationManager implements IConversationManager {
  constructor(baseMessages: LLMMessage[], initialMessages: LLMMessage[], options?: ConversationManagerOptions);
  addUserMessage(content: MessageContent): void;
  addAssistantMessage(content: string): void;
- addGadgetCall(gadgetName: string, parameters: Record<string, unknown>, result: string, media?: GadgetMediaOutput[], mediaIds?: string[]): void;
+ addGadgetCallResult(gadgetName: string, parameters: Record<string, unknown>, result: string, media?: GadgetMediaOutput[], mediaIds?: string[]): void;
  getMessages(): LLMMessage[];
  getHistoryMessages(): LLMMessage[];
  getBaseMessages(): LLMMessage[];
@@ -1178,12 +1178,12 @@ interface StreamProcessorOptions {
  hooks?: AgentHooks;
  /** Logger instance */
  logger?: Logger<ILogObj>;
- /** Callback for human input */
- onHumanInputRequired?: (question: string) => Promise<string>;
+ /** Callback for requesting human input during execution */
+ requestHumanInput?: (question: string) => Promise<string>;
  /** Whether to stop on gadget errors */
  stopOnGadgetError?: boolean;
- /** Custom error continuation logic */
- shouldContinueAfterError?: (context: {
+ /** Custom error recovery logic */
+ canRecoverFromGadgetError?: (context: {
  error: string;
  gadgetName: string;
  errorType: "parse" | "validation" | "execution";
@@ -1195,6 +1195,10 @@ interface StreamProcessorOptions {
  client?: LLMist;
  /** MediaStore for storing gadget media outputs */
  mediaStore?: MediaStore;
+ /** Parent agent configuration for subagents to inherit */
+ agentConfig?: AgentContextConfig;
+ /** Subagent-specific configuration overrides */
+ subagentConfig?: SubagentConfigMap;
  }
  /**
  * Result of stream processing.
@@ -1245,12 +1249,12 @@ declare class StreamProcessor {
  private readonly parser;
  private readonly executor;
  private readonly stopOnGadgetError;
- private readonly shouldContinueAfterError?;
- private accumulatedText;
- private shouldStopExecution;
+ private readonly canRecoverFromGadgetError?;
+ private responseText;
+ private executionHalted;
  private observerFailureCount;
  /** Gadgets waiting for their dependencies to complete */
- private pendingGadgets;
+ private gadgetsAwaitingDependencies;
  /** Completed gadget results, keyed by invocation ID */
  private completedResults;
  /** Invocation IDs of gadgets that have failed (error or skipped due to dependency) */
@@ -1302,17 +1306,17 @@ declare class StreamProcessor {
  */
  private runObserversInParallel;
  /**
- * Check if execution should continue after an error.
+ * Check if execution can recover from an error.
  *
  * Returns true if we should continue processing subsequent gadgets, false if we should stop.
  *
  * Logic:
- * - If custom shouldContinueAfterError is provided, use it
+ * - If custom canRecoverFromGadgetError is provided, use it
  * - Otherwise, use stopOnGadgetError config:
  * - stopOnGadgetError=true → return false (stop execution)
  * - stopOnGadgetError=false → return true (continue execution)
  */
- private checkContinueAfterError;
+ private checkCanRecoverFromError;
  /**
  * Determine the type of error from a gadget execution.
  */
@@ -1541,14 +1545,14 @@ interface CreateGadgetConfig<TSchema extends ZodType> {
  * .ask("What's the weather in Paris and what's 10 + 5?");
  * ```
  */
- declare function createGadget<TSchema extends ZodType>(config: CreateGadgetConfig<TSchema>): BaseGadget;
+ declare function createGadget<TSchema extends ZodType>(config: CreateGadgetConfig<TSchema>): AbstractGadget;

  /**
- * Exception that gadgets can throw to signal the agent loop should terminate.
+ * Signal that a gadget throws to indicate task completion and agent termination.
  *
- * When a gadget throws this exception, the agent loop will:
+ * When a gadget throws this signal, the agent loop will:
  * 1. Complete the current iteration
- * 2. Return the exception message as the gadget's result
+ * 2. Return the signal message as the gadget's result
  * 3. Exit the loop instead of continuing to the next iteration
  *
  * @example
@@ -1564,12 +1568,12 @@ declare function createGadget<TSchema extends ZodType>(config: CreateGadgetConfi
  * }) {
  * execute(params: this['params']): string {
  * const message = params.message || 'Task completed';
- * throw new BreakLoopException(message);
+ * throw new TaskCompletionSignal(message);
  * }
  * }
  * ```
  */
- declare class BreakLoopException extends Error {
+ declare class TaskCompletionSignal extends Error {
  constructor(message?: string);
  }
  /**
@@ -1577,7 +1581,7 @@ declare class BreakLoopException extends Error {
  *
  * When a gadget throws this exception, the agent loop will:
  * 1. Pause execution and wait for human input
- * 2. If `onHumanInputRequired` callback is provided, call it and await the answer
+ * 2. If `requestHumanInput` callback is provided, call it and await the answer
  * 3. Return the user's answer as the gadget's result
  * 4. Continue the loop with the answer added to conversation history
  *
@@ -1596,15 +1600,49 @@ declare class BreakLoopException extends Error {
  * }),
  * }) {
  * execute(params: this['params']): string {
- * throw new HumanInputException(params.question);
+ * throw new HumanInputRequiredException(params.question);
  * }
  * }
  * ```
  */
- declare class HumanInputException extends Error {
+ declare class HumanInputRequiredException extends Error {
  readonly question: string;
  constructor(question: string);
  }
+ /**
+ * Exception thrown when a gadget execution exceeds its timeout limit.
+ *
+ * When a gadget's execution time exceeds either:
+ * - The gadget's own `timeoutMs` property, or
+ * - The global `defaultGadgetTimeoutMs` configured in runtime/agent loop options
+ *
+ * The executor will automatically throw this exception and return it as an error.
+ *
+ * @example
+ * ```typescript
+ * import { z } from 'zod';
+ *
+ * class SlowApiGadget extends Gadget({
+ * name: 'SlowApi',
+ * description: 'Calls a slow external API',
+ * timeoutMs: 5000, // 5 second timeout
+ * schema: z.object({
+ * endpoint: z.string(),
+ * }),
+ * }) {
+ * async execute(params: this['params']): Promise<string> {
+ * // If this takes longer than 5 seconds, execution will be aborted
+ * const response = await fetch(params.endpoint);
+ * return await response.text();
+ * }
+ * }
+ * ```
+ */
+ declare class TimeoutException extends Error {
+ readonly timeoutMs: number;
+ readonly gadgetName: string;
+ constructor(gadgetName: string, timeoutMs: number);
+ }
  /**
  * Exception thrown when gadget execution is aborted.
  *
@@ -1620,7 +1658,7 @@ declare class HumanInputException extends Error {
  * schema: z.object({ data: z.string() }),
  * }) {
  * async execute(params: this['params'], ctx: ExecutionContext): Promise<string> {
- * // Check at key points - throws AbortError if aborted
+ * // Check at key points - throws AbortException if aborted
  * this.throwIfAborted(ctx);
  *
  * await this.doPartOne(params.data);
@@ -1634,7 +1672,7 @@ declare class HumanInputException extends Error {
  * }
  * ```
  */
- declare class AbortError extends Error {
+ declare class AbortException extends Error {
  constructor(message?: string);
  }
 
@@ -1649,25 +1687,27 @@ interface ErrorFormatterOptions {
 
  declare class GadgetExecutor {
  private readonly registry;
- private readonly onHumanInputRequired?;
+ private readonly requestHumanInput?;
  private readonly defaultGadgetTimeoutMs?;
  private readonly client?;
  private readonly mediaStore?;
+ private readonly agentConfig?;
+ private readonly subagentConfig?;
  private readonly logger;
  private readonly errorFormatter;
  private readonly argPrefix;
- constructor(registry: GadgetRegistry, onHumanInputRequired?: ((question: string) => Promise<string>) | undefined, logger?: Logger<ILogObj>, defaultGadgetTimeoutMs?: number | undefined, errorFormatterOptions?: ErrorFormatterOptions, client?: LLMist | undefined, mediaStore?: MediaStore | undefined);
+ constructor(registry: GadgetRegistry, requestHumanInput?: ((question: string) => Promise<string>) | undefined, logger?: Logger<ILogObj>, defaultGadgetTimeoutMs?: number | undefined, errorFormatterOptions?: ErrorFormatterOptions, client?: LLMist | undefined, mediaStore?: MediaStore | undefined, agentConfig?: AgentContextConfig | undefined, subagentConfig?: SubagentConfigMap | undefined);
  /**
  * Creates a promise that rejects with a TimeoutException after the specified timeout.
  * Aborts the provided AbortController before rejecting, allowing gadgets to clean up.
  */
  private createTimeoutPromise;
  /**
- * Normalizes gadget execute result to consistent format.
+ * Unify gadget execute result to consistent internal format.
  * Handles string returns (backwards compat), object returns with cost,
  * and object returns with media.
  */
- private normalizeExecuteResult;
+ private unifyExecuteResult;
  execute(call: ParsedGadgetCall): Promise<GadgetExecutionResult>;
  executeAll(calls: ParsedGadgetCall[]): Promise<GadgetExecutionResult[]>;
  /**
@@ -1694,7 +1734,7 @@ declare class GadgetExecutor {
  * registry.register("GadgetOutputViewer", viewer);
  * ```
  */
- declare function createGadgetOutputViewer(store: GadgetOutputStore, maxOutputChars?: number): BaseGadget;
+ declare function createGadgetOutputViewer(store: GadgetOutputStore, maxOutputChars?: number): AbstractGadget;

  interface StreamParserOptions {
  startPrefix?: string;
@@ -1702,16 +1742,24 @@ interface StreamParserOptions {
  /** Prefix for block format arguments. Default: "!!!ARG:" */
  argPrefix?: string;
  }
- declare class StreamParser {
+ /**
+ * Parser for extracting gadget invocations from LLM text output.
+ * Processes text chunks incrementally and emits events for text and gadget calls.
+ */
+ declare class GadgetCallParser {
  private buffer;
- private lastReportedTextLength;
+ private lastEmittedTextOffset;
  private readonly startPrefix;
  private readonly endPrefix;
  private readonly argPrefix;
  constructor(options?: StreamParserOptions);
- private takeTextUntil;
  /**
- * Parse gadget name with optional invocation ID and dependencies.
+ * Extract and consume text up to the given index.
+ * Returns undefined if no meaningful text to emit.
+ */
+ private extractTextSegment;
+ /**
+ * Parse gadget invocation metadata from the header line.
  *
  * Supported formats:
  * - `GadgetName` - Auto-generate ID, no dependencies
@@ -1720,7 +1768,7 @@ declare class StreamParser {
  *
  * Dependencies must be comma-separated invocation IDs.
  */
- private parseGadgetName;
+ private parseInvocationMetadata;
  /**
  * Extract the error message from a parse error.
  * Preserves full message since the error formatter adds contextual help
@@ -1925,7 +1973,7 @@ declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>):
  * @param options - Optional description, metadata, and fileName
  * @returns A GadgetMediaOutput ready to include in results
  */
- declare function createMedia(kind: MediaKind, data: Buffer | Uint8Array, mimeType: string, options?: {
+ declare function createMediaOutput(kind: MediaKind, data: Buffer | Uint8Array, mimeType: string, options?: {
  description?: string;
  metadata?: MediaMetadata;
  fileName?: string;
@@ -1944,8 +1992,8 @@ declare function createMedia(kind: MediaKind, data: Buffer | Uint8Array, mimeTyp
  * return resultWithMedia(
  * "Generated 2 charts",
  * [
- * createMedia("image", barChartPng, "image/png", { description: "Bar chart" }),
- * createMedia("image", pieChartPng, "image/png", { description: "Pie chart" }),
+ * createMediaOutput("image", barChartPng, "image/png", { description: "Bar chart" }),
+ * createMediaOutput("image", pieChartPng, "image/png", { description: "Pie chart" }),
  * ],
  * 0.002
  * );
@@ -2183,7 +2231,7 @@ declare function validateAndApplyDefaults<T = Record<string, unknown>>(schema: Z
  * }
  * ```
  */
- declare function validateGadgetParams(gadget: BaseGadget, params: Record<string, unknown>): ValidationResult;
+ declare function validateGadgetParams(gadget: AbstractGadget, params: Record<string, unknown>): ValidationResult;

  /**
  * Logger configuration options for the library.
@@ -2275,16 +2323,16 @@ declare abstract class BaseProviderAdapter implements ProviderAdapter {
  */
  protected prepareMessages(messages: LLMMessage[]): LLMMessage[];
  /**
- * Build the provider-specific request payload.
+ * Build the provider-specific API request.
  * This method must be implemented by each concrete provider.
  *
  * @param options - The generation options
  * @param descriptor - The model descriptor
  * @param spec - Optional model specification with metadata
  * @param messages - The prepared messages
- * @returns Provider-specific payload ready for the API call
+ * @returns Provider-specific request object ready for the API call
  */
- protected abstract buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): unknown;
+ protected abstract buildApiRequest(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): unknown;
  /**
  * Execute the stream request using the provider's SDK.
  * This method must be implemented by each concrete provider.
@@ -2295,13 +2343,13 @@ declare abstract class BaseProviderAdapter implements ProviderAdapter {
  */
  protected abstract executeStreamRequest(payload: unknown, signal?: AbortSignal): Promise<AsyncIterable<unknown>>;
  /**
- * Wrap the provider-specific stream into the universal LLMStream format.
+ * Normalize the provider-specific stream into the universal LLMStream format.
  * This method must be implemented by each concrete provider.
  *
  * @param rawStream - The provider-specific stream
  * @returns Universal LLMStream
  */
- protected abstract wrapStream(rawStream: AsyncIterable<unknown>): LLMStream;
+ protected abstract normalizeProviderStream(rawStream: AsyncIterable<unknown>): LLMStream;
  }

  declare class AnthropicMessagesProvider extends BaseProviderAdapter {
@@ -2312,7 +2360,7 @@ declare class AnthropicMessagesProvider extends BaseProviderAdapter {
  generateImage(): Promise<never>;
  supportsSpeechGeneration(_modelId: string): boolean;
  generateSpeech(): Promise<never>;
- protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
+ protected buildApiRequest(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
  /**
  * Convert llmist content to Anthropic's content block format.
  * Handles text, images (base64 only), and applies cache_control.
@@ -2323,7 +2371,7 @@ declare class AnthropicMessagesProvider extends BaseProviderAdapter {
  */
  private convertImagePart;
  protected executeStreamRequest(payload: MessageCreateParamsStreaming, signal?: AbortSignal): Promise<AsyncIterable<MessageStreamEvent>>;
- protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
+ protected normalizeProviderStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using Anthropic's native token counting API.
  *
@@ -2398,7 +2446,7 @@ declare class GeminiGenerativeProvider extends BaseProviderAdapter {
  getSpeechModelSpecs(): SpeechModelSpec[];
  supportsSpeechGeneration(modelId: string): boolean;
  generateSpeech(options: SpeechGenerationOptions): Promise<SpeechGenerationResult>;
- protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, _spec: ModelSpec | undefined, messages: LLMMessage[]): {
+ protected buildApiRequest(options: LLMGenerationOptions, descriptor: ModelDescriptor, _spec: ModelSpec | undefined, messages: LLMMessage[]): {
  model: string;
  contents: GeminiContent[];
  config: Record<string, unknown>;
@@ -2434,8 +2482,8 @@ declare class GeminiGenerativeProvider extends BaseProviderAdapter {
  */
  private convertToGeminiParts;
  private buildGenerationConfig;
- protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
- private extractText;
+ protected normalizeProviderStream(iterable: AsyncIterable<unknown>): LLMStream;
+ private extractMessageText;
  private extractFinishReason;
  private extractUsage;
  /**
@@ -2475,7 +2523,7 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  getSpeechModelSpecs(): SpeechModelSpec[];
  supportsSpeechGeneration(modelId: string): boolean;
  generateSpeech(options: SpeechGenerationOptions): Promise<SpeechGenerationResult>;
- protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
+ protected buildApiRequest(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
  /**
  * Convert an LLMMessage to OpenAI's ChatCompletionMessageParam.
  * Handles role-specific content type requirements:
@@ -2494,7 +2542,7 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  */
  private convertImagePart;
  protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0], signal?: AbortSignal): Promise<AsyncIterable<ChatCompletionChunk>>;
- protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
+ protected normalizeProviderStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using OpenAI's tiktoken library.
  *
@@ -2522,4 +2570,4 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  }
  declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;
 
- export { AbortError, AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, CompactionConfig, CompactionContext, CompactionEvent, CompactionManager, CompactionResult, CompactionStats, CompactionStrategy, ConversationManager, type CreateGadgetConfig, ExecutionContext, Gadget, type GadgetConfig, GadgetExample, GadgetExecuteResultWithMedia, GadgetExecuteReturn, GadgetExecutionResult, GadgetExecutor, GadgetMediaOutput, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HintTemplate, type HintsConfig, HookPresets, HumanInputException, HybridStrategy, IConversationManager, type IterationHintOptions, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, LLMist, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, MediaKind, MediaMetadata, MediaStore, MessageContent, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, type ParallelGadgetHintOptions, ParsedGadgetCall, ProviderAdapter, ResolvedCompactionConfig, SlidingWindowStrategy, type StoredOutput, StreamEvent, StreamParser, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, SummarizationStrategy, TokenUsage, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createHints, createLogger, createMedia, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, iterationProgressHint, parallelGadgetHint, resolveModel, resultWithAudio, resultWithFile, resultWithImage, resultWithImages, resultWithMedia, validateAndApplyDefaults, validateGadgetParams };
+ export { AbortException, AbstractGadget, AgentHooks, AnthropicMessagesProvider, CompactionConfig, CompactionContext, CompactionEvent, CompactionManager, CompactionResult, CompactionStats, CompactionStrategy, ConversationManager, type CreateGadgetConfig, ExecutionContext, Gadget, GadgetCallParser, type GadgetConfig, GadgetExample, GadgetExecuteResultWithMedia, GadgetExecuteReturn, GadgetExecutionResult, GadgetExecutor, GadgetMediaOutput, GadgetOutputStore, GadgetRegistry, GeminiGenerativeProvider, HintTemplate, type HintsConfig, HookPresets, HumanInputRequiredException, HybridStrategy, IConversationManager, type IterationHintOptions, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, LLMist, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, MediaKind, MediaMetadata, MediaStore, MessageContent, ModelDescriptor, ModelRegistry, ModelSpec, OpenAIChatProvider, type ParallelGadgetHintOptions, ParsedGadgetCall, ProviderAdapter, ResolvedCompactionConfig, SlidingWindowStrategy, type StoredOutput, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, SummarizationStrategy, TaskCompletionSignal, TimeoutException, TokenUsage, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGadgetOutputViewer, createGeminiProviderFromEnv, createHints, createLogger, createMediaOutput, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, iterationProgressHint, parallelGadgetHint, resolveModel, resultWithAudio, resultWithFile, resultWithImage, resultWithImages, resultWithMedia, validateAndApplyDefaults, validateGadgetParams };
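
For readers mapping 2.x code onto the renamed 3.0 API, here is a minimal sketch based on the JSDoc examples embedded in the declarations above: `BreakLoopException` is now `TaskCompletionSignal`, `HumanInputException` is now `HumanInputRequiredException` (answered via the `requestHumanInput` callback, formerly `onHumanInputRequired`), and `createMedia` is now `createMediaOutput`. The gadget class names below and the top-level `llmist` import path are illustrative assumptions, not declarations taken from this diff.

```typescript
import { z } from 'zod';
// Assumed entry point: this diff covers dist/index.d.cts of the llmist package.
import { Gadget, TaskCompletionSignal, HumanInputRequiredException } from 'llmist';

// Hypothetical gadget that ends the agent loop (2.x: BreakLoopException).
class FinishTask extends Gadget({
  name: 'FinishTask',
  description: 'Signals that the task is complete',
  schema: z.object({ message: z.string().optional() }),
}) {
  execute(params: this['params']): string {
    throw new TaskCompletionSignal(params.message ?? 'Task completed');
  }
}

// Hypothetical gadget that pauses for human input; the answer is delivered
// through the requestHumanInput callback in StreamProcessorOptions
// (2.x: onHumanInputRequired).
class AskHuman extends Gadget({
  name: 'AskHuman',
  description: 'Asks the human operator a question',
  schema: z.object({ question: z.string() }),
}) {
  execute(params: this['params']): string {
    throw new HumanInputRequiredException(params.question);
  }
}
```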