@reactive-agents/llm-provider 0.7.7 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -590,6 +590,30 @@ declare const ToolCallSchema: Schema.Struct<{
590
590
  * When the model decides to call a tool, this describes which tool and with what inputs.
591
591
  */
592
592
  type ToolCall = Schema.Schema.Type<typeof ToolCallSchema>;
593
+ /**
594
+ * Log probability information for a single token.
595
+ * Returned by providers that support logprobs (OpenAI, Ollama).
596
+ *
597
+ * @example
598
+ * ```typescript
599
+ * const logprob: TokenLogprob = {
600
+ * token: "Paris",
601
+ * logprob: -0.0234,
602
+ * topLogprobs: [
603
+ * { token: "Paris", logprob: -0.0234 },
604
+ * { token: "London", logprob: -3.89 },
605
+ * ]
606
+ * };
607
+ * ```
608
+ */
609
+ type TokenLogprob = {
610
+ readonly token: string;
611
+ readonly logprob: number;
612
+ readonly topLogprobs?: readonly {
613
+ token: string;
614
+ logprob: number;
615
+ }[];
616
+ };
593
617
  /**
594
618
  * Request to the LLM for a completion.
595
619
  * Includes messages, model configuration, tool definitions, and sampling parameters.
@@ -630,6 +654,10 @@ type CompletionRequest = {
630
654
  readonly tools?: readonly ToolDefinition[];
631
655
  /** System prompt (optional, prepended to user messages) */
632
656
  readonly systemPrompt?: string;
657
+ /** Request log probabilities for each output token (optional) */
658
+ readonly logprobs?: boolean;
659
+ /** Number of most likely tokens to return log probabilities for (optional, 1-20) */
660
+ readonly topLogprobs?: number;
633
661
  };
634
662
  /**
635
663
  * Schema for LLM response.
@@ -675,6 +703,15 @@ declare const CompletionResponseSchema: Schema.Struct<{
675
703
  }>>>;
676
704
  /** Internal reasoning from thinking models (e.g. <think> blocks from qwen3, DeepSeek-R1) */
677
705
  thinking: Schema.optional<typeof Schema.String>;
706
+ /** Token-level log probabilities (when requested via logprobs in CompletionRequest) */
707
+ logprobs: Schema.optional<Schema.Array$<Schema.Struct<{
708
+ token: typeof Schema.String;
709
+ logprob: typeof Schema.Number;
710
+ topLogprobs: Schema.optional<Schema.Array$<Schema.Struct<{
711
+ token: typeof Schema.String;
712
+ logprob: typeof Schema.Number;
713
+ }>>>;
714
+ }>>>;
678
715
  }>;
679
716
  /**
680
717
  * LLM response to a completion request.
@@ -729,6 +766,11 @@ type StreamEvent = {
729
766
  readonly type: "usage";
730
767
  /** Final token usage for the request */
731
768
  readonly usage: TokenUsage;
769
+ } | {
770
+ /** Token-level log probabilities (accumulated over the full response) */
771
+ readonly type: "logprobs";
772
+ /** Per-token logprob data */
773
+ readonly logprobs: readonly TokenLogprob[];
732
774
  } | {
733
775
  /** Error occurred during streaming */
734
776
  readonly type: "error";
@@ -1286,23 +1328,49 @@ declare const GeminiProviderLive: Layer.Layer<LLMService, never, LLMConfig>;
1286
1328
 
1287
1329
  declare const LiteLLMProviderLive: Layer.Layer<LLMService, never, LLMConfig>;
1288
1330
 
1331
+ interface ToolCallSpec {
1332
+ name: string;
1333
+ args: Record<string, unknown>;
1334
+ id?: string;
1335
+ }
1336
+ type TestTurn = {
1337
+ text: string;
1338
+ match?: string;
1339
+ } | {
1340
+ json: unknown;
1341
+ match?: string;
1342
+ } | {
1343
+ toolCall: ToolCallSpec;
1344
+ match?: string;
1345
+ } | {
1346
+ toolCalls: ToolCallSpec[];
1347
+ match?: string;
1348
+ } | {
1349
+ error: string;
1350
+ match?: string;
1351
+ };
1289
1352
  /**
1290
- * Create a deterministic test LLM service.
1291
- * Returns responses based on pattern matching against prompt content.
1353
+ * Create a deterministic test LLM service using a scenario of sequential turns.
1354
+ *
1355
+ * Turns are consumed in order. Each LLM call scans forward from the current
1356
+ * position for the first matching turn (or unconditional turn). The last turn
1357
+ * repeats when the scenario is exhausted, so single-turn tests need no special
1358
+ * handling.
1292
1359
  *
1293
1360
  * Usage:
1294
1361
  * ```ts
1295
- * const layer = TestLLMServiceLayer({
1296
- * "capital of France": "Paris",
1297
- * "plan": '{"goal":"test","steps":[]}',
1298
- * });
1362
+ * const layer = TestLLMServiceLayer([
1363
+ * { toolCall: { name: "web-search", args: { query: "AI news" } } },
1364
+ * { text: "Here is the summary..." },
1365
+ * ]);
1299
1366
  * ```
1300
1367
  */
1301
- declare const TestLLMService: (responses: Record<string, string>) => typeof LLMService.Service;
1368
+ declare const TestLLMService: (scenario: TestTurn[]) => typeof LLMService.Service;
1302
1369
  /**
1303
- * Create a test Layer for LLMService with optional pattern-matched responses.
1370
+ * Create a test Layer for LLMService with a deterministic turn scenario.
1371
+ * Turns are consumed sequentially; the last turn repeats when exhausted.
1304
1372
  */
1305
- declare const TestLLMServiceLayer: (responses?: Record<string, string>) => Layer.Layer<LLMService, never, never>;
1373
+ declare const TestLLMServiceLayer: (scenario?: TestTurn[]) => Layer.Layer<LLMService, never, never>;
1306
1374
 
1307
1375
  /**
1308
1376
  * Estimate token count for messages.
@@ -1422,7 +1490,7 @@ declare function getProviderDefaultModel(provider: string): string | undefined;
1422
1490
  * Create the LLM provider layer for a specific provider.
1423
1491
  * Uses env vars for configuration by default.
1424
1492
  */
1425
- declare const createLLMProviderLayer: (provider?: "anthropic" | "openai" | "ollama" | "gemini" | "litellm" | "test", testResponses?: Record<string, string>, model?: string, modelParams?: {
1493
+ declare const createLLMProviderLayer: (provider?: "anthropic" | "openai" | "ollama" | "gemini" | "litellm" | "test", testScenario?: TestTurn[], model?: string, modelParams?: {
1426
1494
  thinking?: boolean;
1427
1495
  temperature?: number;
1428
1496
  maxTokens?: number;
@@ -1476,4 +1544,119 @@ interface CircuitBreaker {
1476
1544
  */
1477
1545
  declare const makeCircuitBreaker: (config?: Partial<CircuitBreakerConfig>) => CircuitBreaker;
1478
1546
 
1479
- export { AnthropicProviderLive, type CacheControl, CacheControlSchema, type CacheableContentBlock, type CircuitBreaker, type CircuitBreakerConfig, type CompletionRequest, type CompletionResponse, CompletionResponseSchema, type ComplexityAnalysis, ComplexityAnalysisSchema, type ContentBlock, DefaultEmbeddingConfig, type EmbeddingCache, type EmbeddingConfig, EmbeddingConfigSchema, GeminiProviderLive, ImageContentBlockSchema, type ImageSource, ImageSourceSchema, LLMConfig, LLMConfigFromEnv, LLMContextOverflowError, LLMError, type LLMErrors, type LLMMessage, LLMParseError, type LLMProvider, LLMProviderType, LLMRateLimitError, LLMService, LLMTimeoutError, LiteLLMProviderLive, LocalProviderLive, type ModelConfig, ModelConfigSchema, type ModelPresetName, ModelPresets, OpenAIProviderLive, PROVIDER_DEFAULT_MODELS, type Plan, PlanSchema, PromptManager, PromptManagerLive, type ReActAction, ReActActionSchema, type Reflection, ReflectionSchema, type StopReason, StopReasonSchema, type StrategySelection, StrategySelectionSchema, type StreamEvent, type StructuredCompletionRequest, type StructuredOutputCapabilities, TestLLMService, TestLLMServiceLayer, TextContentBlockSchema, type ThoughtEvaluation, ThoughtEvaluationSchema, type TokenUsage, TokenUsageSchema, type ToolCall, ToolCallSchema, type ToolDefinition, ToolDefinitionSchema, ToolResultContentBlockSchema, ToolUseContentBlockSchema, type TruncationStrategy, calculateCost, createLLMProviderLayer, createLLMProviderLayerWithConfig, defaultCircuitBreakerConfig, estimateTokenCount, getProviderDefaultModel, llmConfigFromEnv, makeCacheable, makeCircuitBreaker, makeEmbeddingCache, retryPolicy };
1547
+ /**
1548
+ * Configuration for the FallbackChain graceful degradation strategy.
1549
+ *
1550
+ * Specifies ordered lists of fallback providers and models, along with
1551
+ * the error threshold that triggers switching to the next provider.
1552
+ *
1553
+ * @example
1554
+ * ```typescript
1555
+ * const config: FallbackConfig = {
1556
+ * providers: ["anthropic", "openai", "gemini"],
1557
+ * models: ["claude-sonnet-4-20250514", "claude-haiku-3-20250520"],
1558
+ * errorThreshold: 3,
1559
+ * };
1560
+ * ```
1561
+ */
1562
+ interface FallbackConfig {
1563
+ /** Ordered list of provider names to try in sequence. */
1564
+ readonly providers: string[];
1565
+ /** Ordered list of model names to try within the same provider. */
1566
+ readonly models?: string[];
1567
+ /** Consecutive errors on a provider before switching to the next. Default: 3 */
1568
+ readonly errorThreshold?: number;
1569
+ }
1570
+ /**
1571
+ * FallbackChain manages graceful degradation when LLM providers or models fail.
1572
+ *
1573
+ * Tracks consecutive errors per provider and automatically switches to the next
1574
+ * provider when the error threshold is exceeded. On rate limits (429), falls back
1575
+ * to a cheaper model within the same provider.
1576
+ *
1577
+ * Use case: Deploy with Anthropic as primary, OpenAI as secondary, Gemini as
1578
+ * fallback. If Claude API goes down, automatically route to GPT. If quota exceeded,
1579
+ * switch from claude-sonnet to claude-haiku to reduce cost/load.
1580
+ *
1581
+ * @example
1582
+ * ```typescript
1583
+ * const chain = new FallbackChain({
1584
+ * providers: ["anthropic", "openai"],
1585
+ * models: ["claude-sonnet-4-20250514", "claude-haiku-3-20250520"],
1586
+ * errorThreshold: 3,
1587
+ * });
1588
+ *
1589
+ * // Record errors
1590
+ * chain.recordError("anthropic");
1591
+ * chain.recordError("anthropic");
1592
+ * chain.recordError("anthropic"); // threshold met, switch to openai
1593
+ *
1594
+ * console.log(chain.currentProvider()); // "openai"
1595
+ *
1596
+ * // Record rate limit, fall back to cheaper model
1597
+ * chain.recordRateLimit("openai");
1598
+ * console.log(chain.currentModel()); // "claude-haiku-3-20250520"
1599
+ *
1600
+ * // Successful call resets error count
1601
+ * chain.recordSuccess("openai");
1602
+ *
1603
+ * // Check if more fallbacks available
1604
+ * if (!chain.hasFallback()) {
1605
+ * console.log("All providers exhausted!");
1606
+ * }
1607
+ * ```
1608
+ */
1609
+ declare class FallbackChain {
1610
+ private readonly config;
1611
+ /** Error count per provider. */
1612
+ private readonly errorCounts;
1613
+ /** Current index in the providers list. */
1614
+ private currentProviderIndex;
1615
+ /** Current index in the models list. */
1616
+ private currentModelIndex;
1617
+ /** Threshold for switching to next provider. */
1618
+ private readonly threshold;
1619
+ constructor(config: FallbackConfig);
1620
+ /**
1621
+ * Record an error for the given provider.
1622
+ * Increments the error count and switches to the next provider if threshold is met.
1623
+ *
1624
+ * @param provider - Provider name that errored
1625
+ */
1626
+ recordError(provider: string): void;
1627
+ /**
1628
+ * Record a rate limit error (429) for the given provider.
1629
+ * Falls back to the next model in the chain.
1630
+ *
1631
+ * @param _provider - Provider name that was rate limited (prefixed with an underscore to indicate it is unused)
1632
+ */
1633
+ recordRateLimit(_provider: string): void;
1634
+ /**
1635
+ * Record a successful call for the given provider.
1636
+ * Resets the error count for that provider.
1637
+ *
1638
+ * @param provider - Provider name that succeeded
1639
+ */
1640
+ recordSuccess(provider: string): void;
1641
+ /**
1642
+ * Get the currently active provider.
1643
+ *
1644
+ * @returns Name of the provider to use
1645
+ */
1646
+ currentProvider(): string;
1647
+ /**
1648
+ * Get the currently active model.
1649
+ * Returns undefined if no models are configured.
1650
+ *
1651
+ * @returns Name of the model to use, or undefined if no models configured
1652
+ */
1653
+ currentModel(): string | undefined;
1654
+ /**
1655
+ * Check if there are more fallbacks available (provider or model).
1656
+ *
1657
+ * @returns true if there are unused fallback providers or models, false if all exhausted
1658
+ */
1659
+ hasFallback(): boolean;
1660
+ }
1661
+
1662
+ export { AnthropicProviderLive, type CacheControl, CacheControlSchema, type CacheableContentBlock, type CircuitBreaker, type CircuitBreakerConfig, type CompletionRequest, type CompletionResponse, CompletionResponseSchema, type ComplexityAnalysis, ComplexityAnalysisSchema, type ContentBlock, DefaultEmbeddingConfig, type EmbeddingCache, type EmbeddingConfig, EmbeddingConfigSchema, FallbackChain, type FallbackConfig, GeminiProviderLive, ImageContentBlockSchema, type ImageSource, ImageSourceSchema, LLMConfig, LLMConfigFromEnv, LLMContextOverflowError, LLMError, type LLMErrors, type LLMMessage, LLMParseError, type LLMProvider, LLMProviderType, LLMRateLimitError, LLMService, LLMTimeoutError, LiteLLMProviderLive, LocalProviderLive, type ModelConfig, ModelConfigSchema, type ModelPresetName, ModelPresets, OpenAIProviderLive, PROVIDER_DEFAULT_MODELS, type Plan, PlanSchema, PromptManager, PromptManagerLive, type ReActAction, ReActActionSchema, type Reflection, ReflectionSchema, type StopReason, StopReasonSchema, type StrategySelection, StrategySelectionSchema, type StreamEvent, type StructuredCompletionRequest, type StructuredOutputCapabilities, TestLLMService, TestLLMServiceLayer, type TestTurn, TextContentBlockSchema, type ThoughtEvaluation, ThoughtEvaluationSchema, type TokenLogprob, type TokenUsage, TokenUsageSchema, type ToolCall, ToolCallSchema, type ToolCallSpec, type ToolDefinition, ToolDefinitionSchema, ToolResultContentBlockSchema, ToolUseContentBlockSchema, type TruncationStrategy, calculateCost, createLLMProviderLayer, createLLMProviderLayerWithConfig, defaultCircuitBreakerConfig, estimateTokenCount, getProviderDefaultModel, llmConfigFromEnv, makeCacheable, makeCircuitBreaker, makeEmbeddingCache, retryPolicy };