@reactive-agents/llm-provider 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Tyler Buell
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,108 @@
+ # @reactive-agents/llm-provider
+
+ LLM provider adapters for the [Reactive Agents](https://tylerjrbuell.github.io/reactive-agents-ts/) framework.
+
+ Provides a unified `LLMService` interface with adapters for Anthropic, OpenAI, Google Gemini, Ollama, and a deterministic test provider.
+
+ ## Installation
+
+ ```bash
+ bun add @reactive-agents/llm-provider effect
+ ```
+
+ Install the SDK for your chosen provider:
+
+ ```bash
+ bun add @anthropic-ai/sdk # Anthropic Claude
+ bun add openai # OpenAI GPT-4o
+ bun add @google/genai # Google Gemini
+ ```
+
+ ## Supported Providers
+
+ | Provider | Models | Streaming | Embeddings | Structured Output |
+ |----------|--------|-----------|------------|------------------|
+ | `anthropic` | claude-haiku, claude-sonnet, claude-opus | ✓ | — | ✓ |
+ | `openai` | gpt-4o, gpt-4o-mini, o1-* | ✓ | ✓ | ✓ |
+ | `gemini` | gemini-2.0-flash, gemini-2.5-pro | ✓ | ✓ | ✓ |
+ | `ollama` | any local model | ✓ | ✓ | ✓ |
+ | `test` | deterministic mock | ✓ | ✓ | — |
+
+ ## Usage
+
+ ### Anthropic
+
+ ```typescript
+ import { createLLMProviderLayer, LLMService } from "@reactive-agents/llm-provider";
+ import { Effect } from "effect";
+
+ // Set ANTHROPIC_API_KEY in your environment
+ const layer = createLLMProviderLayer("anthropic");
+
+ const result = await Effect.runPromise(
+   Effect.gen(function* () {
+     const llm = yield* LLMService;
+     return yield* llm.complete({
+       messages: [{ role: "user", content: "Hello!" }],
+     });
+   }).pipe(Effect.provide(layer)),
+ );
+ ```
+
+ ### Google Gemini
+
+ ```typescript
+ import { createLLMProviderLayer, LLMService } from "@reactive-agents/llm-provider";
+ import { Effect } from "effect";
+
+ // Set GOOGLE_API_KEY in your environment
+ const layer = createLLMProviderLayer("gemini");
+
+ const result = await Effect.runPromise(
+   Effect.gen(function* () {
+     const llm = yield* LLMService;
+     return yield* llm.complete({
+       messages: [{ role: "user", content: "Explain quantum entanglement." }],
+       model: { provider: "gemini", model: "gemini-2.0-flash" },
+     });
+   }).pipe(Effect.provide(layer)),
+ );
+ ```
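Streaming is supported by every provider in the table above. A minimal sketch of consuming the `stream` API, based on the `stream` method and `StreamEvent` union in the package's type declarations (the event handling below is illustrative):

```typescript
import { createLLMProviderLayer, LLMService } from "@reactive-agents/llm-provider";
import { Effect, Stream } from "effect";

const layer = createLLMProviderLayer("anthropic");

const program = Effect.gen(function* () {
  const llm = yield* LLMService;
  // stream() returns an Effect that yields a Stream of StreamEvent values.
  const events = yield* llm.stream({
    messages: [{ role: "user", content: "Write a haiku about the ocean." }],
  });
  // Print text deltas as they arrive; other event types are ignored here.
  yield* Stream.runForEach(events, (event) =>
    Effect.sync(() => {
      if (event.type === "text_delta") process.stdout.write(event.text);
    }),
  );
}).pipe(Effect.provide(layer));

await Effect.runPromise(program);
```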
+
+ ## Environment Variables
+
+ ```bash
+ ANTHROPIC_API_KEY=sk-ant-... # Anthropic Claude
+ OPENAI_API_KEY=sk-... # OpenAI GPT-4o
+ GOOGLE_API_KEY=... # Google Gemini
+ OLLAMA_ENDPOINT=http://localhost:11434 # Ollama (default)
+ ```
+
+ ## Model Presets
+
+ Built-in presets with cost estimates:
+
+ ```typescript
+ import { ModelPresets } from "@reactive-agents/llm-provider";
+
+ // Available: claude-haiku, claude-sonnet, claude-sonnet-4-5, claude-opus,
+ //            gpt-4o-mini, gpt-4o,
+ //            gemini-2.0-flash, gemini-2.5-pro
+ const preset = ModelPresets["gemini-2.0-flash"];
+ // { provider: "gemini", model: "gemini-2.0-flash", costPer1MInput: 0.10, ... }
+ ```
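The preset metadata can be combined with the exported `estimateTokenCount` helper to budget a call up front. A rough sketch (the prompt and the expected output size are illustrative, and the heuristic token count is approximate):

```typescript
import { ModelPresets, estimateTokenCount } from "@reactive-agents/llm-provider";
import { Effect } from "effect";

const preset = ModelPresets["gpt-4o-mini"];

// Heuristic token estimate (~4 characters per token) for the input messages.
const inputTokens = await Effect.runPromise(
  estimateTokenCount([{ role: "user", content: "Summarize the release notes." }]),
);

// Rough pre-flight cost estimate in USD from the preset's per-million rates.
const expectedOutputTokens = 500;
const estimatedCost =
  (inputTokens / 1_000_000) * preset.costPer1MInput +
  (expectedOutputTokens / 1_000_000) * preset.costPer1MOutput;
```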
+
+ ## Test Provider
+
+ For deterministic testing without API calls:
+
+ ```typescript
+ import { TestLLMServiceLayer } from "@reactive-agents/llm-provider";
+
+ const layer = TestLLMServiceLayer({
+   "capital of France": "Paris is the capital of France.",
+ });
+ ```
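The test layer pattern-matches prompt content against the keys you supply, so a test can run the same `complete` call as production code. A sketch of using it (assertion style is illustrative):

```typescript
import { TestLLMServiceLayer, LLMService } from "@reactive-agents/llm-provider";
import { Effect } from "effect";

const layer = TestLLMServiceLayer({
  "capital of France": "Paris is the capital of France.",
});

const response = await Effect.runPromise(
  Effect.gen(function* () {
    const llm = yield* LLMService;
    return yield* llm.complete({
      messages: [{ role: "user", content: "What is the capital of France?" }],
    });
  }).pipe(Effect.provide(layer)),
);

// Expect the pattern-matched reply, e.g. "Paris is the capital of France."
console.log(response.content);
```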
+
+ ## Documentation
+
+ Full documentation at [tylerjrbuell.github.io/reactive-agents-ts](https://tylerjrbuell.github.io/reactive-agents-ts/)
@@ -0,0 +1,542 @@
+ import { Schema, Context, Effect, Stream, Layer, Schedule } from 'effect';
+ import * as effect_Cause from 'effect/Cause';
+ import * as effect_Types from 'effect/Types';
+ import * as effect_Duration from 'effect/Duration';
+
+ declare const LLMProviderType: Schema.Literal<["anthropic", "openai", "ollama", "gemini", "custom"]>;
+ type LLMProvider = Schema.Schema.Type<typeof LLMProviderType>;
+ declare const EmbeddingConfigSchema: Schema.Struct<{
+   model: typeof Schema.String;
+   dimensions: typeof Schema.Number;
+   provider: Schema.Literal<["openai", "ollama"]>;
+   batchSize: Schema.optional<typeof Schema.Number>;
+ }>;
+ type EmbeddingConfig = Schema.Schema.Type<typeof EmbeddingConfigSchema>;
+ declare const DefaultEmbeddingConfig: EmbeddingConfig;
+ declare const ModelConfigSchema: Schema.Struct<{
+   provider: Schema.Literal<["anthropic", "openai", "ollama", "gemini", "custom"]>;
+   model: typeof Schema.String;
+   maxTokens: Schema.optional<typeof Schema.Number>;
+   temperature: Schema.optional<typeof Schema.Number>;
+   topP: Schema.optional<typeof Schema.Number>;
+   stopSequences: Schema.optional<Schema.Array$<typeof Schema.String>>;
+ }>;
+ type ModelConfig = Schema.Schema.Type<typeof ModelConfigSchema>;
+ declare const ModelPresets: {
+   readonly "claude-haiku": {
+     readonly provider: "anthropic";
+     readonly model: "claude-3-5-haiku-20241022";
+     readonly costPer1MInput: 1;
+     readonly costPer1MOutput: 5;
+     readonly maxContext: 200000;
+     readonly quality: 0.6;
+   };
+   readonly "claude-sonnet": {
+     readonly provider: "anthropic";
+     readonly model: "claude-sonnet-4-20250514";
+     readonly costPer1MInput: 3;
+     readonly costPer1MOutput: 15;
+     readonly maxContext: 200000;
+     readonly quality: 0.85;
+   };
+   readonly "claude-sonnet-4-5": {
+     readonly provider: "anthropic";
+     readonly model: "claude-sonnet-4-5-20250929";
+     readonly costPer1MInput: 3;
+     readonly costPer1MOutput: 15;
+     readonly maxContext: 200000;
+     readonly quality: 0.9;
+   };
+   readonly "claude-opus": {
+     readonly provider: "anthropic";
+     readonly model: "claude-opus-4-20250514";
+     readonly costPer1MInput: 15;
+     readonly costPer1MOutput: 75;
+     readonly maxContext: 1000000;
+     readonly quality: 1;
+   };
+   readonly "gpt-4o-mini": {
+     readonly provider: "openai";
+     readonly model: "gpt-4o-mini";
+     readonly costPer1MInput: 0.15;
+     readonly costPer1MOutput: 0.6;
+     readonly maxContext: 128000;
+     readonly quality: 0.55;
+   };
+   readonly "gpt-4o": {
+     readonly provider: "openai";
+     readonly model: "gpt-4o";
+     readonly costPer1MInput: 2.5;
+     readonly costPer1MOutput: 10;
+     readonly maxContext: 128000;
+     readonly quality: 0.8;
+   };
+   readonly "gemini-2.0-flash": {
+     readonly provider: "gemini";
+     readonly model: "gemini-2.0-flash";
+     readonly costPer1MInput: 0.1;
+     readonly costPer1MOutput: 0.4;
+     readonly maxContext: 1000000;
+     readonly quality: 0.75;
+   };
+   readonly "gemini-2.5-pro": {
+     readonly provider: "gemini";
+     readonly model: "gemini-2.5-pro-preview-03-25";
+     readonly costPer1MInput: 1.25;
+     readonly costPer1MOutput: 10;
+     readonly maxContext: 1000000;
+     readonly quality: 0.95;
+   };
+ };
+ type ModelPresetName = keyof typeof ModelPresets;
+ declare const CacheControlSchema: Schema.Struct<{
+   type: Schema.Literal<["ephemeral"]>;
+ }>;
+ type CacheControl = Schema.Schema.Type<typeof CacheControlSchema>;
+ declare const ImageSourceSchema: Schema.Struct<{
+   type: Schema.Literal<["base64", "url"]>;
+   media_type: Schema.Literal<["image/png", "image/jpeg", "image/gif", "image/webp"]>;
+   data: typeof Schema.String;
+ }>;
+ type ImageSource = Schema.Schema.Type<typeof ImageSourceSchema>;
+ declare const TextContentBlockSchema: Schema.Struct<{
+   type: Schema.Literal<["text"]>;
+   text: typeof Schema.String;
+   cache_control: Schema.optional<Schema.Struct<{
+     type: Schema.Literal<["ephemeral"]>;
+   }>>;
+ }>;
+ declare const ImageContentBlockSchema: Schema.Struct<{
+   type: Schema.Literal<["image"]>;
+   source: Schema.Struct<{
+     type: Schema.Literal<["base64", "url"]>;
+     media_type: Schema.Literal<["image/png", "image/jpeg", "image/gif", "image/webp"]>;
+     data: typeof Schema.String;
+   }>;
+ }>;
+ declare const ToolUseContentBlockSchema: Schema.Struct<{
+   type: Schema.Literal<["tool_use"]>;
+   id: typeof Schema.String;
+   name: typeof Schema.String;
+   input: typeof Schema.Unknown;
+ }>;
+ declare const ToolResultContentBlockSchema: Schema.Struct<{
+   type: Schema.Literal<["tool_result"]>;
+   tool_use_id: typeof Schema.String;
+   content: typeof Schema.String;
+ }>;
+ type ContentBlock = {
+   readonly type: "text";
+   readonly text: string;
+   readonly cache_control?: CacheControl;
+ } | {
+   readonly type: "image";
+   readonly source: ImageSource;
+ } | {
+   readonly type: "tool_use";
+   readonly id: string;
+   readonly name: string;
+   readonly input: unknown;
+ } | {
+   readonly type: "tool_result";
+   readonly tool_use_id: string;
+   readonly content: string;
+ };
+ type CacheableContentBlock = {
+   readonly type: "text";
+   readonly text: string;
+   readonly cache_control: CacheControl;
+ };
+ /**
+  * Helper — wrap text in a cacheable content block.
+  * Non-Anthropic providers silently ignore `cache_control`.
+  */
+ declare const makeCacheable: (text: string) => CacheableContentBlock;
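For illustration, a sketch of using `makeCacheable` to mark a large, stable context block for Anthropic prompt caching; the message shape follows the `LLMMessage` and `ContentBlock` types above, and the variable names are hypothetical:

```typescript
import { makeCacheable, type LLMMessage } from "@reactive-agents/llm-provider";

// Hypothetical long, reusable context (e.g. retrieved memory) that is worth caching.
const memoryContext = "...several thousand tokens of retrieved context...";

const message: LLMMessage = {
  role: "user",
  content: [
    makeCacheable(memoryContext), // { type: "text", text, cache_control: { type: "ephemeral" } }
    { type: "text", text: "Given the context above, what changed last week?" },
  ],
};
```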
+ type LLMMessage = {
+   readonly role: "system";
+   readonly content: string;
+ } | {
+   readonly role: "user";
+   readonly content: string | readonly ContentBlock[];
+ } | {
+   readonly role: "assistant";
+   readonly content: string | readonly ContentBlock[];
+ };
+ declare const TokenUsageSchema: Schema.Struct<{
+   inputTokens: typeof Schema.Number;
+   outputTokens: typeof Schema.Number;
+   totalTokens: typeof Schema.Number;
+   estimatedCost: typeof Schema.Number;
+ }>;
+ type TokenUsage = Schema.Schema.Type<typeof TokenUsageSchema>;
+ declare const StopReasonSchema: Schema.Literal<["end_turn", "max_tokens", "stop_sequence", "tool_use"]>;
+ type StopReason = Schema.Schema.Type<typeof StopReasonSchema>;
+ declare const ToolDefinitionSchema: Schema.Struct<{
+   name: typeof Schema.String;
+   description: typeof Schema.String;
+   inputSchema: Schema.Record$<typeof Schema.String, typeof Schema.Unknown>;
+ }>;
+ type ToolDefinition = Schema.Schema.Type<typeof ToolDefinitionSchema>;
+ declare const ToolCallSchema: Schema.Struct<{
+   id: typeof Schema.String;
+   name: typeof Schema.String;
+   input: typeof Schema.Unknown;
+ }>;
+ type ToolCall = Schema.Schema.Type<typeof ToolCallSchema>;
+ type CompletionRequest = {
+   readonly messages: readonly LLMMessage[];
+   readonly model?: ModelConfig;
+   readonly maxTokens?: number;
+   readonly temperature?: number;
+   readonly stopSequences?: readonly string[];
+   readonly tools?: readonly ToolDefinition[];
+   readonly systemPrompt?: string;
+ };
+ declare const CompletionResponseSchema: Schema.Struct<{
+   content: typeof Schema.String;
+   stopReason: Schema.Literal<["end_turn", "max_tokens", "stop_sequence", "tool_use"]>;
+   usage: Schema.Struct<{
+     inputTokens: typeof Schema.Number;
+     outputTokens: typeof Schema.Number;
+     totalTokens: typeof Schema.Number;
+     estimatedCost: typeof Schema.Number;
+   }>;
+   model: typeof Schema.String;
+   toolCalls: Schema.optional<Schema.Array$<Schema.Struct<{
+     id: typeof Schema.String;
+     name: typeof Schema.String;
+     input: typeof Schema.Unknown;
+   }>>>;
+ }>;
+ type CompletionResponse = Schema.Schema.Type<typeof CompletionResponseSchema>;
+ type StreamEvent = {
+   readonly type: "text_delta";
+   readonly text: string;
+ } | {
+   readonly type: "tool_use_start";
+   readonly id: string;
+   readonly name: string;
+ } | {
+   readonly type: "tool_use_delta";
+   readonly input: string;
+ } | {
+   readonly type: "content_complete";
+   readonly content: string;
+ } | {
+   readonly type: "usage";
+   readonly usage: TokenUsage;
+ } | {
+   readonly type: "error";
+   readonly error: string;
+ };
+ type StructuredCompletionRequest<A> = CompletionRequest & {
+   readonly outputSchema: Schema.Schema<A>;
+   readonly retryOnParseFail?: boolean;
+   readonly maxParseRetries?: number;
+ };
+ type TruncationStrategy = "drop-oldest" | "summarize-middle" | "sliding-window" | "importance-based";
+
+ declare const LLMError_base: new <A extends Record<string, any> = {}>(args: effect_Types.Equals<A, {}> extends true ? void : { readonly [P in keyof A as P extends "_tag" ? never : P]: A[P]; }) => effect_Cause.YieldableError & {
+   readonly _tag: "LLMError";
+ } & Readonly<A>;
+ /**
+  * General LLM error — catch-all for unexpected provider failures.
+  */
+ declare class LLMError extends LLMError_base<{
+   readonly message: string;
+   readonly provider: LLMProvider;
+   readonly cause?: unknown;
+ }> {
+ }
+ declare const LLMRateLimitError_base: new <A extends Record<string, any> = {}>(args: effect_Types.Equals<A, {}> extends true ? void : { readonly [P in keyof A as P extends "_tag" ? never : P]: A[P]; }) => effect_Cause.YieldableError & {
+   readonly _tag: "LLMRateLimitError";
+ } & Readonly<A>;
+ /**
+  * Rate limit exceeded — includes retry-after hint.
+  */
+ declare class LLMRateLimitError extends LLMRateLimitError_base<{
+   readonly message: string;
+   readonly provider: LLMProvider;
+   readonly retryAfterMs: number;
+ }> {
+ }
+ declare const LLMTimeoutError_base: new <A extends Record<string, any> = {}>(args: effect_Types.Equals<A, {}> extends true ? void : { readonly [P in keyof A as P extends "_tag" ? never : P]: A[P]; }) => effect_Cause.YieldableError & {
+   readonly _tag: "LLMTimeoutError";
+ } & Readonly<A>;
+ /**
+  * Request timeout.
+  */
+ declare class LLMTimeoutError extends LLMTimeoutError_base<{
+   readonly message: string;
+   readonly provider: LLMProvider;
+   readonly timeoutMs: number;
+ }> {
+ }
+ declare const LLMParseError_base: new <A extends Record<string, any> = {}>(args: effect_Types.Equals<A, {}> extends true ? void : { readonly [P in keyof A as P extends "_tag" ? never : P]: A[P]; }) => effect_Cause.YieldableError & {
+   readonly _tag: "LLMParseError";
+ } & Readonly<A>;
+ /**
+  * Structured output parse failure.
+  */
+ declare class LLMParseError extends LLMParseError_base<{
+   readonly message: string;
+   readonly rawOutput: string;
+   readonly expectedSchema: string;
+ }> {
+ }
+ declare const LLMContextOverflowError_base: new <A extends Record<string, any> = {}>(args: effect_Types.Equals<A, {}> extends true ? void : { readonly [P in keyof A as P extends "_tag" ? never : P]: A[P]; }) => effect_Cause.YieldableError & {
+   readonly _tag: "LLMContextOverflowError";
+ } & Readonly<A>;
+ /**
+  * Context window overflow — too many tokens for the model.
+  */
+ declare class LLMContextOverflowError extends LLMContextOverflowError_base<{
+   readonly message: string;
+   readonly tokenCount: number;
+   readonly maxTokens: number;
+ }> {
+ }
+ /**
+  * Union of all LLM error types.
+  */
+ type LLMErrors = LLMError | LLMRateLimitError | LLMTimeoutError | LLMParseError | LLMContextOverflowError;
+
+ declare const LLMService_base: Context.TagClass<LLMService, "LLMService", {
+   /**
+    * Complete a prompt (non-streaming).
+    * Returns full response after generation completes.
+    */
+   readonly complete: (request: CompletionRequest) => Effect.Effect<CompletionResponse, LLMErrors>;
+   /**
+    * Stream a completion. Returns an Effect that yields a Stream of events.
+    * Use for real-time UI updates (collaborative mode).
+    */
+   readonly stream: (request: CompletionRequest) => Effect.Effect<Stream.Stream<StreamEvent, LLMErrors>, LLMErrors>;
+   /**
+    * Complete with structured output.
+    * Parses LLM response into a typed object using Effect Schema.
+    * Retries with parse error feedback if parsing fails.
+    */
+   readonly completeStructured: <A>(request: StructuredCompletionRequest<A>) => Effect.Effect<A, LLMErrors>;
+   /**
+    * Generate embeddings for text.
+    *
+    * This is the SOLE embedding source for the entire framework.
+    * Anthropic has no embeddings API — routes to OpenAI or Ollama
+    * per LLMConfig.embeddingConfig.
+    */
+   readonly embed: (texts: readonly string[], model?: string) => Effect.Effect<readonly number[][], LLMErrors>;
+   /**
+    * Count tokens for a set of messages.
+    * Used for context window management.
+    */
+   readonly countTokens: (messages: readonly LLMMessage[]) => Effect.Effect<number, LLMErrors>;
+   /**
+    * Get current model configuration.
+    */
+   readonly getModelConfig: () => Effect.Effect<ModelConfig, never>;
+ }>;
+ /**
+  * Core LLM service — all LLM interactions go through this.
+  * Layers 3, 4, 5, and 10 depend on this.
+  */
+ declare class LLMService extends LLMService_base {
+ }
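As a sketch of the structured-output path, the call below pairs a small Effect Schema with `completeStructured`; the schema and prompt are illustrative, not part of the package:

```typescript
import { LLMService } from "@reactive-agents/llm-provider";
import { Effect, Schema } from "effect";

// Illustrative output schema; any Schema.Schema<A> can be used as outputSchema.
const SentimentSchema = Schema.Struct({
  sentiment: Schema.Literal("positive", "neutral", "negative"),
  confidence: Schema.Number,
});

const classify = (text: string) =>
  Effect.gen(function* () {
    const llm = yield* LLMService;
    // Returns a typed value; on parse failure the service re-prompts with feedback.
    return yield* llm.completeStructured({
      messages: [{ role: "user", content: `Classify the sentiment of: ${text}` }],
      outputSchema: SentimentSchema,
      retryOnParseFail: true,
      maxParseRetries: 2,
    });
  });
```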
+
+ declare const LLMConfig_base: Context.TagClass<LLMConfig, "LLMConfig", {
+   readonly defaultProvider: LLMProvider;
+   readonly defaultModel: string;
+   readonly anthropicApiKey?: string;
+   readonly openaiApiKey?: string;
+   readonly googleApiKey?: string;
+   readonly ollamaEndpoint?: string;
+   /**
+    * Embedding configuration. Anthropic has no embeddings API;
+    * embeddings route to OpenAI (default) or Ollama.
+    * This is the SOLE embedding config for the entire framework.
+    */
+   readonly embeddingConfig: EmbeddingConfig;
+   /**
+    * Enable Anthropic prompt caching.
+    * When true, memory context injections are wrapped in
+    * `cache_control: { type: "ephemeral" }` blocks.
+    */
+   readonly supportsPromptCaching: boolean;
+   readonly maxRetries: number;
+   readonly timeoutMs: number;
+   readonly defaultMaxTokens: number;
+   readonly defaultTemperature: number;
+ }>;
+ /**
+  * LLM configuration — provided via environment or config file.
+  */
+ declare class LLMConfig extends LLMConfig_base {
+ }
+ /**
+  * Build LLMConfig from environment variables.
+  */
+ declare const LLMConfigFromEnv: Layer.Layer<LLMConfig, never, never>;
+
+ declare const PromptManager_base: Context.TagClass<PromptManager, "PromptManager", {
+   /**
+    * Build a prompt within token budget.
+    * Automatically truncates conversation history if needed.
+    */
+   readonly buildPrompt: (options: {
+     readonly systemPrompt: string;
+     readonly messages: readonly LLMMessage[];
+     readonly reserveOutputTokens: number;
+     readonly maxContextTokens: number;
+     readonly truncationStrategy: TruncationStrategy;
+   }) => Effect.Effect<readonly LLMMessage[], LLMErrors>;
+   /**
+    * Check if messages fit within context window.
+    */
+   readonly fitsInContext: (messages: readonly LLMMessage[], maxTokens: number) => Effect.Effect<boolean, LLMErrors>;
+ }>;
+ /**
+  * Manages context window budgets.
+  * Ensures prompts don't exceed model limits.
+  * Implements truncation strategies.
+  */
+ declare class PromptManager extends PromptManager_base {
+ }
+ /**
+  * Live PromptManager that uses heuristic token counting
+  * and applies truncation strategies.
+  */
+ declare const PromptManagerLive: Layer.Layer<PromptManager, never, never>;
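A sketch of trimming a long conversation to fit a model's context window with `buildPrompt`; the history and budget numbers are illustrative:

```typescript
import { PromptManager, PromptManagerLive, type LLMMessage } from "@reactive-agents/llm-provider";
import { Effect } from "effect";

const history: LLMMessage[] = [
  { role: "user", content: "First question..." },
  { role: "assistant", content: "First answer..." },
  // ...many more turns
];

const trimmed = await Effect.runPromise(
  Effect.gen(function* () {
    const pm = yield* PromptManager;
    return yield* pm.buildPrompt({
      systemPrompt: "You are a helpful assistant.",
      messages: history,
      reserveOutputTokens: 1024, // leave room for the reply
      maxContextTokens: 128000, // e.g. a 128k-context model
      truncationStrategy: "drop-oldest",
    });
  }).pipe(Effect.provide(PromptManagerLive)),
);
```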
+
+ declare const AnthropicProviderLive: Layer.Layer<LLMService, never, LLMConfig>;
+
+ declare const OpenAIProviderLive: Layer.Layer<LLMService, never, LLMConfig>;
+
+ declare const LocalProviderLive: Layer.Layer<LLMService, never, LLMConfig>;
+
+ declare const GeminiProviderLive: Layer.Layer<LLMService, never, LLMConfig>;
+
+ /**
+  * Create a deterministic test LLM service.
+  * Returns responses based on pattern matching against prompt content.
+  *
+  * Usage:
+  * ```ts
+  * const layer = TestLLMServiceLayer({
+  *   "capital of France": "Paris",
+  *   "plan": '{"goal":"test","steps":[]}',
+  * });
+  * ```
+  */
+ declare const TestLLMService: (responses: Record<string, string>) => typeof LLMService.Service;
+ /**
+  * Create a test Layer for LLMService with optional pattern-matched responses.
+  */
+ declare const TestLLMServiceLayer: (responses?: Record<string, string>) => Layer.Layer<LLMService, never, never>;
+
+ /**
+  * Estimate token count for messages.
+  * Uses a simple heuristic: ~4 characters per token for English text.
+  * This is used as a fallback when the provider's token counting API is unavailable.
+  */
+ declare const estimateTokenCount: (messages: readonly LLMMessage[]) => Effect.Effect<number, never>;
+ /**
+  * Calculate cost in USD given token counts and model name.
+  */
+ declare const calculateCost: (inputTokens: number, outputTokens: number, model: string) => number;
+
+ /**
+  * Retry policy for LLM calls.
+  * Handles rate limits with exponential backoff.
+  * Only retries on rate limit and timeout errors.
+  */
+ declare const retryPolicy: Schedule.Schedule<[number, effect_Duration.Duration], LLMErrors, never>;
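A sketch of wiring the exported `retryPolicy` around a completion call with `Effect.retry`; the surrounding program is illustrative:

```typescript
import { LLMService, retryPolicy } from "@reactive-agents/llm-provider";
import { Effect } from "effect";

const resilientComplete = (prompt: string) =>
  Effect.gen(function* () {
    const llm = yield* LLMService;
    return yield* llm
      .complete({ messages: [{ role: "user", content: prompt }] })
      // Per the policy's docs, only rate-limit and timeout errors are retried,
      // with exponential backoff between attempts.
      .pipe(Effect.retry(retryPolicy));
  });
```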
+
+ /**
+  * Schema for ReAct action parsing.
+  */
+ declare const ReActActionSchema: Schema.Struct<{
+   thought: typeof Schema.String;
+   action: Schema.optional<Schema.Struct<{
+     tool: typeof Schema.String;
+     input: typeof Schema.Unknown;
+   }>>;
+   finalAnswer: Schema.optional<typeof Schema.String>;
+   isComplete: typeof Schema.Boolean;
+ }>;
+ type ReActAction = Schema.Schema.Type<typeof ReActActionSchema>;
+ /**
+  * Schema for plan generation.
+  */
+ declare const PlanSchema: Schema.Struct<{
+   goal: typeof Schema.String;
+   steps: Schema.Array$<Schema.Struct<{
+     id: typeof Schema.Number;
+     description: typeof Schema.String;
+     tool: Schema.optional<typeof Schema.String>;
+     dependsOn: Schema.optional<Schema.Array$<typeof Schema.Number>>;
+     estimatedDuration: Schema.optional<typeof Schema.String>;
+   }>>;
+ }>;
+ type Plan = Schema.Schema.Type<typeof PlanSchema>;
+ /**
+  * Schema for reflection output.
+  */
+ declare const ReflectionSchema: Schema.Struct<{
+   taskAccomplished: typeof Schema.Boolean;
+   confidence: typeof Schema.Number;
+   strengths: Schema.Array$<typeof Schema.String>;
+   weaknesses: Schema.Array$<typeof Schema.String>;
+   needsRefinement: typeof Schema.Boolean;
+   refinementSuggestions: Schema.optional<Schema.Array$<typeof Schema.String>>;
+ }>;
+ type Reflection = Schema.Schema.Type<typeof ReflectionSchema>;
+ /**
+  * Schema for strategy selection.
+  */
+ declare const StrategySelectionSchema: Schema.Struct<{
+   selectedStrategy: typeof Schema.String;
+   reasoning: typeof Schema.String;
+   confidence: typeof Schema.Number;
+   alternativeStrategies: Schema.Array$<Schema.Struct<{
+     strategy: typeof Schema.String;
+     whyNot: typeof Schema.String;
+   }>>;
+ }>;
+ type StrategySelection = Schema.Schema.Type<typeof StrategySelectionSchema>;
+ /**
+  * Schema for thought evaluation (Tree-of-Thought).
+  */
+ declare const ThoughtEvaluationSchema: Schema.Struct<{
+   score: typeof Schema.Number;
+   reasoning: typeof Schema.String;
+   strengths: Schema.Array$<typeof Schema.String>;
+   weaknesses: Schema.Array$<typeof Schema.String>;
+   shouldExpand: typeof Schema.Boolean;
+ }>;
+ type ThoughtEvaluation = Schema.Schema.Type<typeof ThoughtEvaluationSchema>;
+ /**
+  * Schema for task complexity analysis.
+  */
+ declare const ComplexityAnalysisSchema: Schema.Struct<{
+   score: typeof Schema.Number;
+   factors: Schema.Array$<Schema.Struct<{
+     factor: typeof Schema.String;
+     weight: typeof Schema.Number;
+     reasoning: typeof Schema.String;
+   }>>;
+   recommendedStrategy: typeof Schema.String;
+   recommendedModel: typeof Schema.String;
+ }>;
+ type ComplexityAnalysis = Schema.Schema.Type<typeof ComplexityAnalysisSchema>;
+
+ /**
+  * Create the LLM provider layer for a specific provider.
+  * Uses env vars for configuration by default.
+  */
+ declare const createLLMProviderLayer: (provider?: "anthropic" | "openai" | "ollama" | "gemini" | "test", testResponses?: Record<string, string>) => Layer.Layer<LLMService | PromptManager, never, never>;
+ /**
+  * LLM layer with custom config (for programmatic use).
+  */
+ declare const createLLMProviderLayerWithConfig: (config: typeof LLMConfig.Service, provider?: "anthropic" | "openai" | "ollama" | "gemini") => Layer.Layer<LLMService | PromptManager, never, never>;
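A sketch of building the layer from an explicit config instead of environment variables; the field values (endpoint, model name, budgets) are illustrative, while the shape follows the `LLMConfig` service above:

```typescript
import {
  createLLMProviderLayerWithConfig,
  DefaultEmbeddingConfig,
} from "@reactive-agents/llm-provider";

// Programmatic configuration for a local Ollama setup (values are examples).
const layer = createLLMProviderLayerWithConfig(
  {
    defaultProvider: "ollama",
    defaultModel: "llama3.1",
    ollamaEndpoint: "http://localhost:11434",
    embeddingConfig: DefaultEmbeddingConfig,
    supportsPromptCaching: false,
    maxRetries: 3,
    timeoutMs: 60_000,
    defaultMaxTokens: 2048,
    defaultTemperature: 0.7,
  },
  "ollama",
);
```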
+
+ export { AnthropicProviderLive, type CacheControl, CacheControlSchema, type CacheableContentBlock, type CompletionRequest, type CompletionResponse, CompletionResponseSchema, type ComplexityAnalysis, ComplexityAnalysisSchema, type ContentBlock, DefaultEmbeddingConfig, type EmbeddingConfig, EmbeddingConfigSchema, GeminiProviderLive, ImageContentBlockSchema, type ImageSource, ImageSourceSchema, LLMConfig, LLMConfigFromEnv, LLMContextOverflowError, LLMError, type LLMErrors, type LLMMessage, LLMParseError, type LLMProvider, LLMProviderType, LLMRateLimitError, LLMService, LLMTimeoutError, LocalProviderLive, type ModelConfig, ModelConfigSchema, type ModelPresetName, ModelPresets, OpenAIProviderLive, type Plan, PlanSchema, PromptManager, PromptManagerLive, type ReActAction, ReActActionSchema, type Reflection, ReflectionSchema, type StopReason, StopReasonSchema, type StrategySelection, StrategySelectionSchema, type StreamEvent, type StructuredCompletionRequest, TestLLMService, TestLLMServiceLayer, TextContentBlockSchema, type ThoughtEvaluation, ThoughtEvaluationSchema, type TokenUsage, TokenUsageSchema, type ToolCall, ToolCallSchema, type ToolDefinition, ToolDefinitionSchema, ToolResultContentBlockSchema, ToolUseContentBlockSchema, type TruncationStrategy, calculateCost, createLLMProviderLayer, createLLMProviderLayerWithConfig, estimateTokenCount, makeCacheable, retryPolicy };