llmist 1.3.1 → 1.5.0

This diff compares publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
package/dist/index.d.cts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-DNt-HBTn.cjs';
- export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-DNt-HBTn.cjs';
+ import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-Cc47j12U.cjs';
+ export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-Cc47j12U.cjs';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -1786,7 +1786,11 @@ declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>):
   */
  execute(params: Record<string, unknown>): string | Promise<string>;
  get instruction(): string;
- getInstruction(argPrefix?: string): string;
+ getInstruction(optionsOrArgPrefix?: string | {
+     argPrefix?: string;
+     startPrefix?: string;
+     endPrefix?: string;
+ }): string;
  } & {
  params: InferSchema<TSchema>;
  };
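
The hunk above is the only signature change to the gadget API in this release, and it is backward compatible: the old single-string parameter is widened to a union, so existing call sites keep compiling. A minimal sketch of both call styles, assuming the bare-string form is still interpreted as the argument prefix (the instance below is declared only so the example type-checks):

```ts
import type { BaseGadget } from "llmist";

// Hypothetical gadget instance, declared purely for illustration; the
// getInstruction() signature matches the 1.5.0 declarations above.
declare const gadget: BaseGadget & {
  getInstruction(optionsOrArgPrefix?: string | {
    argPrefix?: string;
    startPrefix?: string;
    endPrefix?: string;
  }): string;
};

// 1.3.x style: a bare string (assumed to still mean the argument prefix).
const legacy = gadget.getInstruction("!");

// 1.5.0 style: an options object that can also override the start/end
// prefixes (semantics inferred from the parameter names, not documented here).
const modern = gadget.getInstruction({
  argPrefix: "!",
  startPrefix: "<<<",
  endPrefix: ">>>",
});
```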
@@ -1990,9 +1994,10 @@ declare abstract class BaseProviderAdapter implements ProviderAdapter {
  * This method must be implemented by each concrete provider.
  *
  * @param payload - The provider-specific payload
+ * @param signal - Optional abort signal for cancelling the request
  * @returns An async iterable of provider-specific chunks
  */
- protected abstract executeStreamRequest(payload: unknown): Promise<AsyncIterable<unknown>>;
+ protected abstract executeStreamRequest(payload: unknown, signal?: AbortSignal): Promise<AsyncIterable<unknown>>;
  /**
  * Wrap the provider-specific stream into the universal LLMStream format.
  * This method must be implemented by each concrete provider.
@@ -2008,7 +2013,7 @@ declare class AnthropicMessagesProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
- protected executeStreamRequest(payload: MessageCreateParamsStreaming): Promise<AsyncIterable<MessageStreamEvent>>;
+ protected executeStreamRequest(payload: MessageCreateParamsStreaming, signal?: AbortSignal): Promise<AsyncIterable<MessageStreamEvent>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using Anthropic's native token counting API.
@@ -2079,7 +2084,7 @@ declare class GeminiGenerativeProvider extends BaseProviderAdapter {
  }>;
  }>;
  config: Record<string, unknown>;
- }): Promise<AsyncIterable<GeminiChunk>>;
+ }, signal?: AbortSignal): Promise<AsyncIterable<GeminiChunk>>;
  /**
  * Convert LLM messages to Gemini contents format.
  *
@@ -2134,7 +2139,7 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
- protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0]): Promise<AsyncIterable<ChatCompletionChunk>>;
+ protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0], signal?: AbortSignal): Promise<AsyncIterable<ChatCompletionChunk>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using OpenAI's tiktoken library.
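
Every hunk after the import rename threads a new optional signal?: AbortSignal through executeStreamRequest, from the abstract base class down to the Anthropic, Gemini, and OpenAI providers, so an in-flight streaming request can be cancelled. A minimal sketch of how a provider override could forward that signal to its SDK, shown here against the OpenAI client (this mirrors the declared signature but is not llmist's actual implementation):

```ts
import OpenAI from "openai";
import type { ChatCompletionChunk } from "openai/resources/chat/completions";

const client = new OpenAI();

// Sketch only: forwards the new optional signal to the SDK's per-request
// options, so aborting the signal tears down the underlying HTTP stream.
async function executeStreamRequest(
  payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0],
  signal?: AbortSignal,
): Promise<AsyncIterable<ChatCompletionChunk>> {
  const stream = await client.chat.completions.create(
    { ...payload, stream: true },
    { signal },
  );
  return stream as AsyncIterable<ChatCompletionChunk>;
}

// Caller side: cancel the stream if it runs longer than 30 seconds.
const controller = new AbortController();
setTimeout(() => controller.abort(), 30_000);
```

Because the parameter is optional on the abstract method, existing provider subclasses that ignore it remain valid; they simply never observe cancellation. The index.d.ts diff below repeats the same changes for the ESM declarations.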
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { ZodType, ZodTypeAny } from 'zod';
  export { z } from 'zod';
- import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-DNt-HBTn.js';
- export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-DNt-HBTn.js';
+ import { A as AgentHooks, q as ModelRegistry, I as IConversationManager, b as LLMMessage, S as StreamEvent, T as TokenUsage, G as GadgetRegistry, a as LLMStreamChunk, C as CompactionStrategy, R as ResolvedCompactionConfig, r as CompactionContext, s as CompactionResult, t as LLMist, u as CompactionConfig, v as CompactionEvent, w as CompactionStats, H as HintTemplate, x as GadgetExample, B as BaseGadget, P as ParsedGadgetCall, y as GadgetExecutionResult, z as ProviderAdapter, D as ModelDescriptor, E as ModelSpec, F as LLMGenerationOptions, L as LLMStream } from './mock-stream-Cc47j12U.js';
+ export { V as AfterGadgetExecutionAction, W as AfterGadgetExecutionControllerContext, X as AfterLLMCallAction, Y as AfterLLMCallControllerContext, Z as AfterLLMErrorAction, K as AgentBuilder, _ as AgentOptions, $ as BeforeGadgetExecutionAction, a0 as BeforeLLMCallAction, a1 as ChunkInterceptorContext, a2 as Controllers, ao as CostEstimate, aj as DEFAULT_COMPACTION_CONFIG, ay as DEFAULT_HINTS, az as DEFAULT_PROMPTS, ak as DEFAULT_SUMMARIZATION_PROMPT, N as EventHandlers, aG as GadgetClass, a3 as GadgetExecutionControllerContext, aH as GadgetOrClass, a4 as GadgetParameterInterceptorContext, a5 as GadgetResultInterceptorContext, au as HintContext, J as HistoryMessage, a6 as Interceptors, a7 as LLMCallControllerContext, a8 as LLMErrorControllerContext, an as LLMMessageBuilder, am as LLMRole, al as LLMistOptions, a9 as MessageInterceptorContext, ah as MessageTurn, d as MockBuilder, f as MockManager, j as MockMatcher, k as MockMatcherContext, l as MockOptions, M as MockProviderAdapter, n as MockRegistration, o as MockResponse, p as MockStats, ap as ModelFeatures, at as ModelIdentifierParser, aq as ModelLimits, ar as ModelPricing, aa as ObserveChunkContext, ai as ObserveCompactionContext, ab as ObserveGadgetCompleteContext, ac as ObserveGadgetStartContext, ad as ObserveLLMCallContext, ae as ObserveLLMCompleteContext, af as ObserveLLMErrorContext, ag as Observers, av as PromptConfig, aw as PromptContext, ax as PromptTemplate, as as ProviderIdentifier, aD as QuickOptions, aI as TextOnlyAction, aJ as TextOnlyContext, aK as TextOnlyCustomHandler, aL as TextOnlyGadgetConfig, aM as TextOnlyHandler, aN as TextOnlyStrategy, O as collectEvents, Q as collectText, aE as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, g as getMockManager, m as mockLLM, aA as resolveHintTemplate, aB as resolvePromptTemplate, aC as resolveRulesTemplate, U as runWithHandlers, aF as stream } from './mock-stream-Cc47j12U.js';
  import { Logger, ILogObj } from 'tslog';
  import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
  import OpenAI from 'openai';
@@ -1786,7 +1786,11 @@ declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>):
   */
  execute(params: Record<string, unknown>): string | Promise<string>;
  get instruction(): string;
- getInstruction(argPrefix?: string): string;
+ getInstruction(optionsOrArgPrefix?: string | {
+     argPrefix?: string;
+     startPrefix?: string;
+     endPrefix?: string;
+ }): string;
  } & {
  params: InferSchema<TSchema>;
  };
@@ -1990,9 +1994,10 @@ declare abstract class BaseProviderAdapter implements ProviderAdapter {
  * This method must be implemented by each concrete provider.
  *
  * @param payload - The provider-specific payload
+ * @param signal - Optional abort signal for cancelling the request
  * @returns An async iterable of provider-specific chunks
  */
- protected abstract executeStreamRequest(payload: unknown): Promise<AsyncIterable<unknown>>;
+ protected abstract executeStreamRequest(payload: unknown, signal?: AbortSignal): Promise<AsyncIterable<unknown>>;
  /**
  * Wrap the provider-specific stream into the universal LLMStream format.
  * This method must be implemented by each concrete provider.
@@ -2008,7 +2013,7 @@ declare class AnthropicMessagesProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
- protected executeStreamRequest(payload: MessageCreateParamsStreaming): Promise<AsyncIterable<MessageStreamEvent>>;
+ protected executeStreamRequest(payload: MessageCreateParamsStreaming, signal?: AbortSignal): Promise<AsyncIterable<MessageStreamEvent>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using Anthropic's native token counting API.
@@ -2079,7 +2084,7 @@ declare class GeminiGenerativeProvider extends BaseProviderAdapter {
  }>;
  }>;
  config: Record<string, unknown>;
- }): Promise<AsyncIterable<GeminiChunk>>;
+ }, signal?: AbortSignal): Promise<AsyncIterable<GeminiChunk>>;
  /**
  * Convert LLM messages to Gemini contents format.
  *
@@ -2134,7 +2139,7 @@ declare class OpenAIChatProvider extends BaseProviderAdapter {
  supports(descriptor: ModelDescriptor): boolean;
  getModelSpecs(): ModelSpec[];
  protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
- protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0]): Promise<AsyncIterable<ChatCompletionChunk>>;
+ protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0], signal?: AbortSignal): Promise<AsyncIterable<ChatCompletionChunk>>;
  protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
  /**
  * Count tokens in messages using OpenAI's tiktoken library.