@reactive-agents/llm-provider 0.2.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +30 -2
- package/dist/index.js +1295 -173
- package/dist/index.js.map +1 -1
- package/package.json +6 -4
package/dist/index.d.ts
CHANGED
|
@@ -372,6 +372,34 @@ declare const LLMConfig_base: Context.TagClass<LLMConfig, "LLMConfig", {
|
|
|
372
372
|
*/
|
|
373
373
|
declare class LLMConfig extends LLMConfig_base {
|
|
374
374
|
}
|
|
375
|
+
/**
|
|
376
|
+
* Raw LLMConfig value from environment variables.
|
|
377
|
+
* Exported so callers can spread overrides (e.g. model) on top.
|
|
378
|
+
*/
|
|
379
|
+
declare const llmConfigFromEnv: {
|
|
380
|
+
readonly defaultProvider: LLMProvider;
|
|
381
|
+
readonly defaultModel: string;
|
|
382
|
+
readonly anthropicApiKey?: string;
|
|
383
|
+
readonly openaiApiKey?: string;
|
|
384
|
+
readonly googleApiKey?: string;
|
|
385
|
+
readonly ollamaEndpoint?: string;
|
|
386
|
+
/**
|
|
387
|
+
* Embedding configuration. Anthropic has no embeddings API;
|
|
388
|
+
* embeddings route to OpenAI (default) or Ollama.
|
|
389
|
+
* This is the SOLE embedding config for the entire framework.
|
|
390
|
+
*/
|
|
391
|
+
readonly embeddingConfig: EmbeddingConfig;
|
|
392
|
+
/**
|
|
393
|
+
* Enable Anthropic prompt caching.
|
|
394
|
+
* When true, memory context injections are wrapped in
|
|
395
|
+
* `cache_control: { type: "ephemeral" }` blocks.
|
|
396
|
+
*/
|
|
397
|
+
readonly supportsPromptCaching: boolean;
|
|
398
|
+
readonly maxRetries: number;
|
|
399
|
+
readonly timeoutMs: number;
|
|
400
|
+
readonly defaultMaxTokens: number;
|
|
401
|
+
readonly defaultTemperature: number;
|
|
402
|
+
};
|
|
375
403
|
/**
|
|
376
404
|
* Build LLMConfig from environment variables.
|
|
377
405
|
*/
|
|
@@ -533,10 +561,10 @@ type ComplexityAnalysis = Schema.Schema.Type<typeof ComplexityAnalysisSchema>;
|
|
|
533
561
|
* Create the LLM provider layer for a specific provider.
|
|
534
562
|
* Uses env vars for configuration by default.
|
|
535
563
|
*/
|
|
536
|
-
declare const createLLMProviderLayer: (provider?: "anthropic" | "openai" | "ollama" | "gemini" | "test", testResponses?: Record<string, string>) => Layer.Layer<LLMService | PromptManager, never, never>;
|
|
564
|
+
declare const createLLMProviderLayer: (provider?: "anthropic" | "openai" | "ollama" | "gemini" | "test", testResponses?: Record<string, string>, model?: string) => Layer.Layer<LLMService | PromptManager, never, never>;
|
|
537
565
|
/**
|
|
538
566
|
* LLM layer with custom config (for programmatic use).
|
|
539
567
|
*/
|
|
540
568
|
declare const createLLMProviderLayerWithConfig: (config: typeof LLMConfig.Service, provider?: "anthropic" | "openai" | "ollama" | "gemini") => Layer.Layer<LLMService | PromptManager, never, never>;
|
|
541
569
|
|
|
542
|
-
export { AnthropicProviderLive, type CacheControl, CacheControlSchema, type CacheableContentBlock, type CompletionRequest, type CompletionResponse, CompletionResponseSchema, type ComplexityAnalysis, ComplexityAnalysisSchema, type ContentBlock, DefaultEmbeddingConfig, type EmbeddingConfig, EmbeddingConfigSchema, GeminiProviderLive, ImageContentBlockSchema, type ImageSource, ImageSourceSchema, LLMConfig, LLMConfigFromEnv, LLMContextOverflowError, LLMError, type LLMErrors, type LLMMessage, LLMParseError, type LLMProvider, LLMProviderType, LLMRateLimitError, LLMService, LLMTimeoutError, LocalProviderLive, type ModelConfig, ModelConfigSchema, type ModelPresetName, ModelPresets, OpenAIProviderLive, type Plan, PlanSchema, PromptManager, PromptManagerLive, type ReActAction, ReActActionSchema, type Reflection, ReflectionSchema, type StopReason, StopReasonSchema, type StrategySelection, StrategySelectionSchema, type StreamEvent, type StructuredCompletionRequest, TestLLMService, TestLLMServiceLayer, TextContentBlockSchema, type ThoughtEvaluation, ThoughtEvaluationSchema, type TokenUsage, TokenUsageSchema, type ToolCall, ToolCallSchema, type ToolDefinition, ToolDefinitionSchema, ToolResultContentBlockSchema, ToolUseContentBlockSchema, type TruncationStrategy, calculateCost, createLLMProviderLayer, createLLMProviderLayerWithConfig, estimateTokenCount, makeCacheable, retryPolicy };
|
|
570
|
+
export { AnthropicProviderLive, type CacheControl, CacheControlSchema, type CacheableContentBlock, type CompletionRequest, type CompletionResponse, CompletionResponseSchema, type ComplexityAnalysis, ComplexityAnalysisSchema, type ContentBlock, DefaultEmbeddingConfig, type EmbeddingConfig, EmbeddingConfigSchema, GeminiProviderLive, ImageContentBlockSchema, type ImageSource, ImageSourceSchema, LLMConfig, LLMConfigFromEnv, LLMContextOverflowError, LLMError, type LLMErrors, type LLMMessage, LLMParseError, type LLMProvider, LLMProviderType, LLMRateLimitError, LLMService, LLMTimeoutError, LocalProviderLive, type ModelConfig, ModelConfigSchema, type ModelPresetName, ModelPresets, OpenAIProviderLive, type Plan, PlanSchema, PromptManager, PromptManagerLive, type ReActAction, ReActActionSchema, type Reflection, ReflectionSchema, type StopReason, StopReasonSchema, type StrategySelection, StrategySelectionSchema, type StreamEvent, type StructuredCompletionRequest, TestLLMService, TestLLMServiceLayer, TextContentBlockSchema, type ThoughtEvaluation, ThoughtEvaluationSchema, type TokenUsage, TokenUsageSchema, type ToolCall, ToolCallSchema, type ToolDefinition, ToolDefinitionSchema, ToolResultContentBlockSchema, ToolUseContentBlockSchema, type TruncationStrategy, calculateCost, createLLMProviderLayer, createLLMProviderLayerWithConfig, estimateTokenCount, llmConfigFromEnv, makeCacheable, retryPolicy };
|