llmist 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
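In summary, 1.4.0 makes two visible changes to the type declarations: the shared declaration chunk is renamed from mock-stream-DNt-HBTn to mock-stream-DD5yJM44 (a content-hash change affecting both the CJS and ESM entries), and executeStreamRequest gains an optional signal?: AbortSignal parameter on the abstract BaseProviderAdapter as well as on the AnthropicMessagesProvider, GeminiGenerativeProvider, and OpenAIChatProvider overrides, enabling cancellation of in-flight streaming requests.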
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-DNt-HBTn.cjs';
- export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-DNt-HBTn.cjs';
+ import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-DD5yJM44.cjs';
+ export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-DD5yJM44.cjs';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -1990,9 +1990,10 @@ declare abstract class BaseProviderAdapter implements ProviderAdapter {
  * This method must be implemented by each concrete provider.
  *
  * @param payload - The provider-specific payload
+ * @param signal - Optional abort signal for cancelling the request
  * @returns An async iterable of provider-specific chunks
  */
- protected abstract executeStreamRequest(payload: unknown): Promise<AsyncIterable<unknown>>;
+ protected abstract executeStreamRequest(payload: unknown, signal?: AbortSignal): Promise<AsyncIterable<unknown>>;
  /**
  * Wrap the provider-specific stream into the universal LLMStream format.
  * This method must be implemented by each concrete provider.
@@ -2008,7 +2009,7 @@ declare class AnthropicMessagesProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
- protected executeStreamRequest(payload: MessageCreateParamsStreaming): Promise<AsyncIterable<MessageStreamEvent>>;
+ protected executeStreamRequest(payload: MessageCreateParamsStreaming, signal?: AbortSignal): Promise<AsyncIterable<MessageStreamEvent>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using Anthropic's native token counting API.
@@ -2079,7 +2080,7 @@ declare class GeminiGenerativeProvider extends BaseProviderAdapter {
  }>;
  }>;
  config: Record<string, unknown>;
- }): Promise<AsyncIterable<GeminiChunk>>;
+ }, signal?: AbortSignal): Promise<AsyncIterable<GeminiChunk>>;
  /**
  * Convert LLM messages to Gemini contents format.
  *
@@ -2134,7 +2135,7 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
- protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0]): Promise<AsyncIterable<ChatCompletionChunk>>;
+ protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0], signal?: AbortSignal): Promise<AsyncIterable<ChatCompletionChunk>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using OpenAI's tiktoken library.
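The practical upshot of the new signal parameter is ordinary AbortSignal cancellation from the caller's side. A minimal sketch of that pattern, written against the OpenAI SDK directly (illustrative only — this is not llmist's internal code, and the model name and timeout are arbitrary choices):

```typescript
import OpenAI from 'openai';

// Sketch of the cancellation pattern the new `signal?: AbortSignal`
// parameter threads through provider requests. Aborting the controller
// tears down the underlying HTTP stream mid-flight.
async function streamWithTimeout(prompt: string): Promise<string> {
  const client = new OpenAI(); // reads OPENAI_API_KEY from the environment
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 10_000); // cancel after 10s

  let text = '';
  try {
    // The OpenAI SDK accepts an AbortSignal in its per-request options.
    const stream = await client.chat.completions.create(
      {
        model: 'gpt-4o-mini',
        messages: [{ role: 'user', content: prompt }],
        stream: true,
      },
      { signal: controller.signal },
    );
    for await (const chunk of stream) {
      text += chunk.choices[0]?.delta?.content ?? '';
    }
  } finally {
    clearTimeout(timer);
  }
  return text;
}
```

The ESM entry point below carries the identical change, with .js instead of .cjs specifiers.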
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-DNt-HBTn.js';
- export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-DNt-HBTn.js';
+ import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-DD5yJM44.js';
+ export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-DD5yJM44.js';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -1990,9 +1990,10 @@ declare abstract class BaseProviderAdapter implements ProviderAdapter {
  * This method must be implemented by each concrete provider.
  *
  * @param payload - The provider-specific payload
+ * @param signal - Optional abort signal for cancelling the request
  * @returns An async iterable of provider-specific chunks
  */
- protected abstract executeStreamRequest(payload: unknown): Promise<AsyncIterable<unknown>>;
+ protected abstract executeStreamRequest(payload: unknown, signal?: AbortSignal): Promise<AsyncIterable<unknown>>;
  /**
  * Wrap the provider-specific stream into the universal LLMStream format.
  * This method must be implemented by each concrete provider.
@@ -2008,7 +2009,7 @@ declare class AnthropicMessagesProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
- protected executeStreamRequest(payload: MessageCreateParamsStreaming): Promise<AsyncIterable<MessageStreamEvent>>;
+ protected executeStreamRequest(payload: MessageCreateParamsStreaming, signal?: AbortSignal): Promise<AsyncIterable<MessageStreamEvent>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using Anthropic's native token counting API.
@@ -2079,7 +2080,7 @@ declare class GeminiGenerativeProvider extends BaseProviderAdapter {
  }>;
  }>;
  config: Record<string, unknown>;
- }): Promise<AsyncIterable<GeminiChunk>>;
+ }, signal?: AbortSignal): Promise<AsyncIterable<GeminiChunk>>;
  /**
  * Convert LLM messages to Gemini contents format.
  *
@@ -2134,7 +2135,7 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
- protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0]): Promise<AsyncIterable<ChatCompletionChunk>>;
+ protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0], signal?: AbortSignal): Promise<AsyncIterable<ChatCompletionChunk>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using OpenAI's tiktoken library.
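On the implementing side, a concrete adapter satisfies the updated abstract signature by forwarding the signal to whatever transport it uses. A minimal sketch under stated assumptions — the class and endpoint below are hypothetical and not part of llmist:

```typescript
// Hypothetical adapter mirroring the abstract shape declared above;
// this is not llmist's BaseProviderAdapter.
abstract class StreamingAdapter {
  protected abstract executeStreamRequest(
    payload: unknown,
    signal?: AbortSignal,
  ): Promise<AsyncIterable<unknown>>;
}

class FetchSseAdapter extends StreamingAdapter {
  protected async executeStreamRequest(
    payload: unknown,
    signal?: AbortSignal,
  ): Promise<AsyncIterable<Uint8Array>> {
    // fetch honors AbortSignal natively: aborting rejects this promise,
    // or errors the body stream if the response has already started.
    const res = await fetch('https://api.example.invalid/v1/stream', {
      method: 'POST',
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify(payload),
      signal,
    });
    if (!res.ok || !res.body) {
      throw new Error(`stream request failed: HTTP ${res.status}`);
    }
    // In Node 18+, the ReadableStream returned by fetch is async-iterable,
    // so it can serve as the AsyncIterable the contract expects.
    return res.body as unknown as AsyncIterable<Uint8Array>;
  }
}
```

Because signal is optional in every signature, existing subclasses that ignore it keep compiling; cancellation support is opt-in.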