llmist 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2602 @@
+ import { Logger, ILogObj } from 'tslog';
+ import { ZodTypeAny } from 'zod';
+
+ interface GadgetExecutionResult {
+   gadgetName: string;
+   invocationId: string;
+   parameters: Record<string, unknown>;
+   result?: string;
+   error?: string;
+   executionTimeMs: number;
+   breaksLoop?: boolean;
+ }
+ interface ParsedGadgetCall {
+   gadgetName: string;
+   invocationId: string;
+   parametersYaml: string;
+   parameters?: Record<string, unknown>;
+   parseError?: string;
+ }
+ type StreamEvent = {
+   type: "text";
+   content: string;
+ } | {
+   type: "gadget_call";
+   call: ParsedGadgetCall;
+ } | {
+   type: "gadget_result";
+   result: GadgetExecutionResult;
+ } | {
+   type: "human_input_required";
+   question: string;
+   gadgetName: string;
+   invocationId: string;
+ };
+
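+ /**
+  * Usage sketch (editor's example, not part of the published typings):
+  * dispatching on the StreamEvent discriminated union. Assumes `events` is an
+  * AsyncGenerator<StreamEvent>, e.g. the value returned by `agent.run()`.
+  *
+  * @example
+  * ```typescript
+  * for await (const event of events) {
+  *   switch (event.type) {
+  *     case "text":
+  *       process.stdout.write(event.content);
+  *       break;
+  *     case "gadget_call":
+  *       console.log("calling", event.call.gadgetName);
+  *       break;
+  *     case "gadget_result":
+  *       console.log("result:", event.result.result ?? event.result.error);
+  *       break;
+  *     case "human_input_required":
+  *       console.log("agent asks:", event.question);
+  *       break;
+  *   }
+  * }
+  * ```
+  */
+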
+ type TextOnlyHandler = TextOnlyStrategy | TextOnlyGadgetConfig | TextOnlyCustomHandler;
+ /**
+  * Simple strategies for common cases
+  * - 'terminate': End the loop (default behavior)
+  * - 'acknowledge': Continue to next iteration
+  * - 'wait_for_input': Request human input
+  */
+ type TextOnlyStrategy = "terminate" | "acknowledge" | "wait_for_input";
+ /**
+  * Configuration for triggering a gadget when receiving a text-only response
+  */
+ interface TextOnlyGadgetConfig {
+   type: "gadget";
+   name: string;
+   /**
+    * Optional function to map text to gadget parameters.
+    * If not provided, the text will be passed as { text: string }
+    */
+   parameterMapping?: (text: string) => Record<string, unknown>;
+ }
+ /**
+  * Custom handler for complex text-only response scenarios
+  */
+ interface TextOnlyCustomHandler {
+   type: "custom";
+   handler: (context: TextOnlyContext) => Promise<TextOnlyAction> | TextOnlyAction;
+ }
+ /**
+  * Context provided to custom text-only handlers
+  */
+ interface TextOnlyContext {
+   /** The complete text response from the LLM */
+   text: string;
+   /** Current iteration number */
+   iteration: number;
+   /** Full conversation history */
+   conversation: LLMMessage[];
+   /** Logger instance */
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Actions that can be returned by text-only handlers
+  */
+ type TextOnlyAction = {
+   action: "continue";
+ } | {
+   action: "terminate";
+ } | {
+   action: "wait_for_input";
+   question?: string;
+ } | {
+   action: "trigger_gadget";
+   name: string;
+   parameters: Record<string, unknown>;
+ };
+
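+ /**
+  * Configuration sketch (editor's example): a custom text-only handler that
+  * terminates once the model signals completion and otherwise continues. The
+  * DONE marker and iteration cutoff are hypothetical.
+  *
+  * @example
+  * ```typescript
+  * const textOnlyHandler: TextOnlyCustomHandler = {
+  *   type: "custom",
+  *   handler: (ctx) => {
+  *     if (/\bDONE\b/.test(ctx.text)) return { action: "terminate" };
+  *     if (ctx.iteration >= 5) return { action: "wait_for_input", question: "Keep going?" };
+  *     return { action: "continue" };
+  *   }
+  * };
+  * ```
+  */
+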
+ type ParameterFormat = "json" | "yaml" | "auto";
+ interface StreamParserOptions {
+   startPrefix?: string;
+   endPrefix?: string;
+   /**
+    * Format for parsing gadget parameters.
+    * - 'json': Parse as JSON (more robust, recommended for complex nested data)
+    * - 'yaml': Parse as YAML (backward compatible)
+    * - 'auto': Try JSON first, fall back to YAML
+    * @default 'json'
+    */
+   parameterFormat?: ParameterFormat;
+ }
+ declare class StreamParser {
+   private buffer;
+   private lastReportedTextLength;
+   private readonly startPrefix;
+   private readonly endPrefix;
+   private readonly parameterFormat;
+   private invocationCounter;
+   constructor(options?: StreamParserOptions);
+   private takeTextUntil;
+   /**
+    * Parse parameter string according to configured format
+    */
+   private parseParameters;
+   feed(chunk: string): Generator<StreamEvent>;
+   finalize(): Generator<StreamEvent>;
+   reset(): void;
+ }
+
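+ /**
+  * Usage sketch (editor's example): incrementally parsing streamed LLM output.
+  * The chunk strings are placeholders; real chunks would come from an LLMStream.
+  *
+  * @example
+  * ```typescript
+  * const parser = new StreamParser({ parameterFormat: "auto" });
+  * for (const chunk of ["Hello ", "world"]) {
+  *   for (const event of parser.feed(chunk)) {
+  *     if (event.type === "text") process.stdout.write(event.content);
+  *   }
+  * }
+  * for (const event of parser.finalize()) {
+  *   // handle any events flushed from the remaining buffer
+  * }
+  * parser.reset();
+  * ```
+  */
+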
+ /**
+  * Internal base class for gadgets. Most users should use the `Gadget` class
+  * (formerly TypedGadget) or the `createGadget()` function instead, as they provide
+  * better type safety and simpler APIs.
+  *
+  * @internal
+  */
+ declare abstract class BaseGadget {
+   /**
+    * The name of the gadget. Used for identification when the LLM calls it.
+    * If not provided, defaults to the class name.
+    */
+   name?: string;
+   /**
+    * Human-readable description of what the gadget does.
+    */
+   abstract description: string;
+   /**
+    * Optional Zod schema describing the expected input payload. When provided,
+    * it will be validated before execution and transformed into a JSON Schema
+    * representation that is surfaced to the LLM as part of the instructions.
+    */
+   parameterSchema?: ZodTypeAny;
+   /**
+    * Optional timeout in milliseconds for gadget execution.
+    * If execution exceeds this timeout, a TimeoutException will be thrown.
+    * If not set, the global defaultGadgetTimeoutMs from runtime options will be used.
+    * Set to 0 to disable the timeout for this gadget.
+    */
+   timeoutMs?: number;
+   /**
+    * Execute the gadget with the given parameters.
+    * Can be synchronous or asynchronous.
+    *
+    * @param params - Parameters passed from the LLM
+    * @returns Result as a string
+    */
+   abstract execute(params: Record<string, unknown>): string | Promise<string>;
+   /**
+    * Auto-generated instruction text for the LLM.
+    * Combines name, description, and parameter schema into a formatted instruction.
+    * @deprecated Use getInstruction(format) instead for format-specific schemas
+    */
+   get instruction(): string;
+   /**
+    * Generate instruction text for the LLM with a format-specific schema.
+    * Combines name, description, and parameter schema into a formatted instruction.
+    *
+    * @param format - Format for the schema representation ('json' | 'yaml' | 'auto')
+    * @returns Formatted instruction string
+    */
+   getInstruction(format?: ParameterFormat): string;
+ }
+
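+ /**
+  * Implementation sketch (editor's example): a minimal BaseGadget subclass.
+  * Most users should prefer the higher-level Gadget/createGadget APIs; this
+  * only illustrates the abstract contract declared above.
+  *
+  * @example
+  * ```typescript
+  * import { z } from "zod";
+  *
+  * class Echo extends BaseGadget {
+  *   description = "Echoes back the provided text";
+  *   parameterSchema = z.object({ text: z.string() });
+  *   execute(params: Record<string, unknown>): string {
+  *     return String(params.text);
+  *   }
+  * }
+  * ```
+  */
+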
+ /**
+  * Context provided to prompt template functions for rendering dynamic content.
+  */
+ interface PromptContext {
+   /** The parameter format being used (json or yaml) */
+   parameterFormat: ParameterFormat;
+   /** Custom gadget start prefix */
+   startPrefix: string;
+   /** Custom gadget end prefix */
+   endPrefix: string;
+   /** Number of gadgets being registered */
+   gadgetCount: number;
+   /** Names of all gadgets */
+   gadgetNames: string[];
+ }
+ /**
+  * Template that can be either a static string or a function that renders based on context.
+  */
+ type PromptTemplate = string | ((context: PromptContext) => string);
+ /**
+  * Configuration for customizing all prompts used internally by llmist.
+  *
+  * Each field can be either a string (static text) or a function that receives
+  * context and returns a string (for dynamic content).
+  *
+  * @example
+  * ```typescript
+  * const customConfig: PromptConfig = {
+  *   mainInstruction: "USE ONLY THE GADGET MARKERS BELOW:",
+  *   criticalUsage: "Important: Follow the exact format shown.",
+  *   rules: (ctx) => [
+  *     "Always use the markers to invoke gadgets",
+  *     "Never use function calling",
+  *     `You have ${ctx.gadgetCount} gadgets available`
+  *   ]
+  * };
+  * ```
+  */
+ interface PromptConfig {
+   /**
+    * Main instruction block that appears at the start of the gadget system prompt.
+    * Default emphasizes using text markers instead of function calling.
+    */
+   mainInstruction?: PromptTemplate;
+   /**
+    * Critical usage instruction that appears in the usage section.
+    * Default emphasizes the exact format requirement.
+    */
+   criticalUsage?: PromptTemplate;
+   /**
+    * Format description for YAML parameter format.
+    * Default: "Parameters in YAML format (one per line)"
+    */
+   formatDescriptionYaml?: PromptTemplate;
+   /**
+    * Format description for JSON parameter format.
+    * Default: "Parameters in JSON format (valid JSON object)"
+    */
+   formatDescriptionJson?: PromptTemplate;
+   /**
+    * Rules that appear in the rules section.
+    * Can be an array of strings or a function that returns an array.
+    * Default includes 6 rules about not using function calling.
+    */
+   rules?: PromptTemplate | string[] | ((context: PromptContext) => string[]);
+   /**
+    * Schema label for JSON format.
+    * Default: "\n\nInput Schema (JSON):"
+    */
+   schemaLabelJson?: PromptTemplate;
+   /**
+    * Schema label for YAML format.
+    * Default: "\n\nInput Schema (YAML):"
+    */
+   schemaLabelYaml?: PromptTemplate;
+   /**
+    * Custom examples to show in the examples section.
+    * If provided, replaces the default examples entirely.
+    * Should be a function that returns formatted example strings.
+    */
+   customExamples?: (context: PromptContext) => string;
+ }
+ /**
+  * Default prompt templates used by llmist.
+  * These match the original hardcoded strings.
+  */
+ declare const DEFAULT_PROMPTS: Required<Omit<PromptConfig, "rules" | "customExamples"> & {
+   rules: (context: PromptContext) => string[];
+   customExamples: null;
+ }>;
+ /**
+  * Resolve a prompt template to a string using the given context.
+  */
+ declare function resolvePromptTemplate(template: PromptTemplate | undefined, defaultValue: PromptTemplate, context: PromptContext): string;
+ /**
+  * Resolve rules template to an array of strings.
+  */
+ declare function resolveRulesTemplate(rules: PromptConfig["rules"] | undefined, context: PromptContext): string[];
+
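+ /**
+  * Usage sketch (editor's example): resolving templates against a context.
+  * All context values below are placeholders, including the marker strings.
+  *
+  * @example
+  * ```typescript
+  * const context: PromptContext = {
+  *   parameterFormat: "json",
+  *   startPrefix: "<<<GADGET",
+  *   endPrefix: ">>>",
+  *   gadgetCount: 2,
+  *   gadgetNames: ["Calculator", "Weather"],
+  * };
+  * const text = resolvePromptTemplate(
+  *   (ctx) => `You have ${ctx.gadgetCount} gadgets available`,
+  *   "fallback instruction",
+  *   context
+  * );
+  * ```
+  */
+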
+ type LLMRole = "system" | "user" | "assistant";
+ interface LLMMessage {
+   role: LLMRole;
+   content: string;
+   name?: string;
+   metadata?: Record<string, unknown>;
+ }
+ declare class LLMMessageBuilder {
+   private readonly messages;
+   private startPrefix;
+   private endPrefix;
+   private promptConfig;
+   constructor(promptConfig?: PromptConfig);
+   addSystem(content: string, metadata?: Record<string, unknown>): this;
+   addGadgets(gadgets: BaseGadget[], parameterFormat?: ParameterFormat, options?: {
+     startPrefix?: string;
+     endPrefix?: string;
+   }): this;
+   private buildGadgetsXmlSection;
+   private buildUsageSection;
+   private buildExamplesSection;
+   private buildRulesSection;
+   addUser(content: string, metadata?: Record<string, unknown>): this;
+   addAssistant(content: string, metadata?: Record<string, unknown>): this;
+   addGadgetCall(gadget: string, parameters: Record<string, unknown>, result: string, parameterFormat?: ParameterFormat): this;
+   private formatParameters;
+   build(): LLMMessage[];
+ }
+
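+ /**
+  * Usage sketch (editor's example): composing a conversation with the builder.
+  * The Calculator gadget is hypothetical.
+  *
+  * @example
+  * ```typescript
+  * const messages = new LLMMessageBuilder()
+  *   .addSystem("You are a helpful assistant")
+  *   .addGadgets([new Calculator()], "json")
+  *   .addUser("What is 2+2?")
+  *   .build();
+  * ```
+  */
+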
+ /**
+  * Model Catalog Types
+  *
+  * Type definitions for LLM model specifications including
+  * context windows, pricing, features, and capabilities.
+  */
+ interface ModelPricing {
+   /** Price per 1 million input tokens in USD */
+   input: number;
+   /** Price per 1 million output tokens in USD */
+   output: number;
+   /** Price per 1 million cached input tokens in USD (if supported) */
+   cachedInput?: number;
+ }
+ interface ModelFeatures {
+   /** Supports streaming responses */
+   streaming: boolean;
+   /** Supports function/tool calling */
+   functionCalling: boolean;
+   /** Supports vision/image input */
+   vision: boolean;
+   /** Supports extended thinking/reasoning */
+   reasoning?: boolean;
+   /** Supports structured outputs */
+   structuredOutputs?: boolean;
+   /** Supports fine-tuning */
+   fineTuning?: boolean;
+ }
+ interface ModelSpec {
+   /** Provider identifier (e.g., 'openai', 'anthropic', 'gemini') */
+   provider: string;
+   /** Full model identifier used in API calls */
+   modelId: string;
+   /** Human-readable display name */
+   displayName: string;
+   /** Maximum context window size in tokens */
+   contextWindow: number;
+   /** Maximum output tokens per request */
+   maxOutputTokens: number;
+   /** Pricing per 1M tokens */
+   pricing: ModelPricing;
+   /** Training data knowledge cutoff date (YYYY-MM-DD or description) */
+   knowledgeCutoff: string;
+   /** Supported features and capabilities */
+   features: ModelFeatures;
+   /** Additional metadata */
+   metadata?: {
+     /** Model family/series */
+     family?: string;
+     /** Release date */
+     releaseDate?: string;
+     /** Deprecation date if applicable */
+     deprecationDate?: string;
+     /** Notes or special information */
+     notes?: string;
+     /** Whether manual temperature configuration is supported (defaults to true) */
+     supportsTemperature?: boolean;
+   };
+ }
+ interface ModelLimits {
+   contextWindow: number;
+   maxOutputTokens: number;
+ }
+ interface CostEstimate {
+   inputCost: number;
+   outputCost: number;
+   totalCost: number;
+   currency: "USD";
+ }
+
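+ /**
+  * Worked example (editor's note): with pricing { input: 3, output: 15 } USD
+  * per 1M tokens, a request using 2,000 input and 500 output tokens costs
+  * 2000/1e6 * 3 + 500/1e6 * 15 = 0.006 + 0.0075 = 0.0135 USD, which is what
+  * a CostEstimate would report as totalCost.
+  */
+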
+ interface LLMGenerationOptions {
+   model: string;
+   messages: LLMMessage[];
+   maxTokens?: number;
+   temperature?: number;
+   topP?: number;
+   stopSequences?: string[];
+   responseFormat?: "text";
+   metadata?: Record<string, unknown>;
+   extra?: Record<string, unknown>;
+ }
+ interface TokenUsage {
+   inputTokens: number;
+   outputTokens: number;
+   totalTokens: number;
+ }
+ interface LLMStreamChunk {
+   text: string;
+   /**
+    * Indicates that the provider has finished producing output and includes the reason if available.
+    */
+   finishReason?: string | null;
+   /**
+    * Token usage information, typically available in the final chunk when the stream completes.
+    */
+   usage?: TokenUsage;
+   /**
+    * Provider-specific payload emitted at the same time as the text chunk. This is useful for debugging and tests.
+    */
+   rawEvent?: unknown;
+ }
+ interface LLMStream extends AsyncIterable<LLMStreamChunk> {
+ }
+ type ProviderIdentifier = string;
+ interface ModelDescriptor {
+   provider: string;
+   name: string;
+ }
+ declare class ModelIdentifierParser {
+   private readonly defaultProvider;
+   constructor(defaultProvider?: string);
+   parse(identifier: string): ModelDescriptor;
+ }
+
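+ /**
+  * Usage sketch (editor's example): parsing model identifiers with and without
+  * an explicit provider prefix.
+  *
+  * @example
+  * ```typescript
+  * const parser = new ModelIdentifierParser("openai");
+  * parser.parse("openai:gpt-4"); // { provider: "openai", name: "gpt-4" }
+  * parser.parse("gpt-4");        // default provider applied
+  * ```
+  */
+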
+ interface ProviderAdapter {
+   readonly providerId: string;
+   /**
+    * Optional priority for adapter resolution.
+    * Higher numbers = higher priority (checked first).
+    *
+    * When multiple adapters support the same model descriptor, the adapter
+    * with the highest priority is selected. Adapters with equal priority
+    * maintain their registration order (stable sort).
+    *
+    * Default: 0 (normal priority)
+    * Mock adapters use: 100 (high priority)
+    *
+    * @default 0
+    */
+   readonly priority?: number;
+   supports(model: ModelDescriptor): boolean;
+   stream(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec?: ModelSpec): LLMStream;
+   /**
+    * Optionally provide model specifications for this provider.
+    * This allows the model registry to discover available models and their capabilities.
+    */
+   getModelSpecs?(): ModelSpec[];
+   /**
+    * Count tokens in messages before making an API call.
+    * Uses provider-specific native token counting methods.
+    * @param messages - Array of messages to count tokens for
+    * @param descriptor - Model descriptor
+    * @param spec - Optional model specification
+    * @returns Promise resolving to the number of input tokens
+    */
+   countTokens?(messages: LLMMessage[], descriptor: ModelDescriptor, spec?: ModelSpec): Promise<number>;
+ }
+
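+ /**
+  * Implementation sketch (editor's example): a minimal custom adapter that
+  * claims one provider and yields a fixed response. A real adapter would call
+  * a provider API; everything below is illustrative.
+  *
+  * @example
+  * ```typescript
+  * const echoAdapter: ProviderAdapter = {
+  *   providerId: "echo",
+  *   supports: (model) => model.provider === "echo",
+  *   stream(options) {
+  *     return (async function* () {
+  *       const last = options.messages[options.messages.length - 1];
+  *       yield { text: `echo: ${last?.content}`, finishReason: "stop" };
+  *     })();
+  *   },
+  * };
+  * ```
+  */
+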
+ /**
+  * Model Registry
+  *
+  * Centralized registry for querying LLM model specifications,
+  * validating configurations, and estimating costs.
+  *
+  * Model data is provided by ProviderAdapter implementations and
+  * automatically populated when providers are registered.
+  */
+
+ declare class ModelRegistry {
+   private modelSpecs;
+   private providerMap;
+   /**
+    * Register a provider and collect its model specifications
+    */
+   registerProvider(provider: ProviderAdapter): void;
+   /**
+    * Register a custom model specification at runtime
+    *
+    * Use this to add models that aren't in the built-in catalog, such as:
+    * - Fine-tuned models with custom pricing
+    * - New models not yet supported by llmist
+    * - Custom deployments with different configurations
+    *
+    * @param spec - Complete model specification
+    * @throws {Error} If spec is missing required fields
+    *
+    * @example
+    * ```ts
+    * client.modelRegistry.registerModel({
+    *   provider: "openai",
+    *   modelId: "ft:gpt-4o-2024-08-06:my-org:custom:abc123",
+    *   displayName: "My Fine-tuned GPT-4o",
+    *   contextWindow: 128_000,
+    *   maxOutputTokens: 16_384,
+    *   pricing: { input: 7.5, output: 30.0 },
+    *   knowledgeCutoff: "2024-08",
+    *   features: { streaming: true, functionCalling: true, vision: true }
+    * });
+    * ```
+    */
+   registerModel(spec: ModelSpec): void;
+   /**
+    * Register multiple custom model specifications at once
+    *
+    * @param specs - Array of complete model specifications
+    *
+    * @example
+    * ```ts
+    * client.modelRegistry.registerModels([
+    *   { provider: "openai", modelId: "gpt-5", ... },
+    *   { provider: "openai", modelId: "gpt-5-mini", ... }
+    * ]);
+    * ```
+    */
+   registerModels(specs: ModelSpec[]): void;
+   /**
+    * Get model specification by model ID
+    * @param modelId - Full model identifier (e.g., 'gpt-5', 'claude-sonnet-4-5-20250929')
+    * @returns ModelSpec if found, undefined otherwise
+    */
+   getModelSpec(modelId: string): ModelSpec | undefined;
+   /**
+    * List all models, optionally filtered by provider
+    * @param providerId - Optional provider ID to filter by (e.g., 'openai', 'anthropic')
+    * @returns Array of ModelSpec objects
+    */
+   listModels(providerId?: string): ModelSpec[];
+   /**
+    * Get context window and output limits for a model
+    * @param modelId - Full model identifier
+    * @returns ModelLimits if model found, undefined otherwise
+    */
+   getModelLimits(modelId: string): ModelLimits | undefined;
+   /**
+    * Estimate API cost for a given model and token usage
+    * @param modelId - Full model identifier
+    * @param inputTokens - Number of input tokens
+    * @param outputTokens - Number of output tokens
+    * @param useCachedInput - Whether to use cached input pricing (if supported by provider)
+    * @returns CostEstimate if model found, undefined otherwise
+    */
+   estimateCost(modelId: string, inputTokens: number, outputTokens: number, useCachedInput?: boolean): CostEstimate | undefined;
+   /**
+    * Validate that requested token count fits within model limits
+    * @param modelId - Full model identifier
+    * @param requestedTokens - Total tokens requested (input + output)
+    * @returns true if valid, false if model not found or exceeds limits
+    */
+   validateModelConfig(modelId: string, requestedTokens: number): boolean;
+   /**
+    * Check if a model supports a specific feature
+    * @param modelId - Full model identifier
+    * @param feature - Feature to check ('streaming', 'functionCalling', 'vision', etc.)
+    * @returns true if model supports feature, false otherwise
+    */
+   supportsFeature(modelId: string, feature: keyof ModelSpec["features"]): boolean;
+   /**
+    * Get all models that support a specific feature
+    * @param feature - Feature to filter by
+    * @param providerId - Optional provider ID to filter by
+    * @returns Array of ModelSpec objects that support the feature
+    */
+   getModelsByFeature(feature: keyof ModelSpec["features"], providerId?: string): ModelSpec[];
+   /**
+    * Get the most cost-effective model for a given provider and token budget
+    * @param inputTokens - Expected input tokens
+    * @param outputTokens - Expected output tokens
+    * @param providerId - Optional provider ID to filter by
+    * @returns ModelSpec with lowest total cost, or undefined if no models found
+    */
+   getCheapestModel(inputTokens: number, outputTokens: number, providerId?: string): ModelSpec | undefined;
+ }
+
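+ /**
+  * Usage sketch (editor's example): querying the registry. The model ID is
+  * illustrative and depends on which providers are registered.
+  *
+  * @example
+  * ```typescript
+  * const limits = client.modelRegistry.getModelLimits("gpt-4o-mini");
+  * const cost = client.modelRegistry.estimateCost("gpt-4o-mini", 2_000, 500);
+  * if (cost) console.log(`$${cost.totalCost.toFixed(4)} ${cost.currency}`);
+  * ```
+  */
+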
+ /**
+  * Quick execution methods for simple use cases.
+  *
+  * These methods provide convenient shortcuts for common operations
+  * without requiring full agent setup.
+  *
+  * @example
+  * ```typescript
+  * // Quick completion
+  * const answer = await llmist.complete("What is 2+2?");
+  *
+  * // Quick streaming
+  * for await (const chunk of llmist.stream("Tell me a story")) {
+  *   process.stdout.write(chunk);
+  * }
+  * ```
+  */
+
+ /**
+  * Options for quick execution methods.
+  */
+ interface QuickOptions {
+   /** Model to use (supports aliases like "gpt4", "sonnet", "flash") */
+   model?: string;
+   /** Temperature (0-1) */
+   temperature?: number;
+   /** System prompt */
+   systemPrompt?: string;
+   /** Max tokens to generate */
+   maxTokens?: number;
+ }
+ /**
+  * Quick completion - returns final text response.
+  *
+  * @param client - LLMist client instance
+  * @param prompt - User prompt
+  * @param options - Optional configuration
+  * @returns Complete text response
+  *
+  * @example
+  * ```typescript
+  * const client = new LLMist();
+  * const answer = await complete(client, "What is 2+2?");
+  * console.log(answer); // "4" or "2+2 equals 4"
+  * ```
+  */
+ declare function complete(client: LLMist, prompt: string, options?: QuickOptions): Promise<string>;
+ /**
+  * Quick streaming - returns async generator of text chunks.
+  *
+  * @param client - LLMist client instance
+  * @param prompt - User prompt
+  * @param options - Optional configuration
+  * @returns Async generator yielding text chunks
+  *
+  * @example
+  * ```typescript
+  * const client = new LLMist();
+  *
+  * for await (const chunk of stream(client, "Tell me a story")) {
+  *   process.stdout.write(chunk);
+  * }
+  * ```
+  */
+ declare function stream(client: LLMist, prompt: string, options?: QuickOptions): AsyncGenerator<string>;
+
+ interface LLMistOptions {
+   /**
+    * Provider adapters to register manually.
+    */
+   adapters?: ProviderAdapter[];
+   /**
+    * Default provider prefix applied when a model identifier omits it.
+    */
+   defaultProvider?: string;
+   /**
+    * Automatically discover built-in providers based on environment configuration.
+    * Enabled by default.
+    */
+   autoDiscoverProviders?: boolean;
+   /**
+    * Custom model specifications to register at initialization.
+    * Use this to define models not in the built-in catalog, such as:
+    * - Fine-tuned models with custom pricing
+    * - New models not yet supported by llmist
+    * - Custom deployments with different configurations
+    *
+    * @example
+    * ```ts
+    * new LLMist({
+    *   customModels: [{
+    *     provider: "openai",
+    *     modelId: "ft:gpt-4o-2024-08-06:my-org:custom:abc123",
+    *     displayName: "My Fine-tuned GPT-4o",
+    *     contextWindow: 128_000,
+    *     maxOutputTokens: 16_384,
+    *     pricing: { input: 7.5, output: 30.0 },
+    *     knowledgeCutoff: "2024-08",
+    *     features: { streaming: true, functionCalling: true, vision: true }
+    *   }]
+    * });
+    * ```
+    */
+   customModels?: ModelSpec[];
+ }
+ declare class LLMist {
+   private readonly parser;
+   readonly modelRegistry: ModelRegistry;
+   private readonly adapters;
+   constructor();
+   constructor(adapters: ProviderAdapter[]);
+   constructor(adapters: ProviderAdapter[], defaultProvider: string);
+   constructor(options: LLMistOptions);
+   stream(options: LLMGenerationOptions): LLMStream;
+   /**
+    * Count tokens in messages for a given model.
+    *
+    * Uses provider-specific token counting methods for accurate estimation:
+    * - OpenAI: tiktoken library with model-specific encodings
+    * - Anthropic: Native messages.countTokens() API
+    * - Gemini: SDK's countTokens() method
+    *
+    * Falls back to character-based estimation (4 chars/token) if the provider
+    * doesn't support native token counting or if counting fails.
+    *
+    * This is useful for:
+    * - Pre-request cost estimation
+    * - Context window management
+    * - Request batching optimization
+    *
+    * @param model - Model identifier (e.g., "openai:gpt-4", "anthropic:claude-3-5-sonnet-20241022")
+    * @param messages - Array of messages to count tokens for
+    * @returns Promise resolving to the estimated input token count
+    *
+    * @example
+    * ```typescript
+    * const client = new LLMist();
+    * const messages = [
+    *   { role: 'system', content: 'You are a helpful assistant.' },
+    *   { role: 'user', content: 'Hello!' }
+    * ];
+    *
+    * const tokenCount = await client.countTokens('openai:gpt-4', messages);
+    * console.log(`Estimated tokens: ${tokenCount}`);
+    * ```
+    */
+   countTokens(model: string, messages: LLMMessage[]): Promise<number>;
+   private resolveAdapter;
+   /**
+    * Quick completion - returns final text response.
+    * Convenient for simple queries without needing agent setup.
+    *
+    * @param prompt - User prompt
+    * @param options - Optional configuration
+    * @returns Complete text response
+    *
+    * @example
+    * ```typescript
+    * const answer = await LLMist.complete("What is 2+2?");
+    * console.log(answer); // "4" or "2+2 equals 4"
+    *
+    * const joke = await LLMist.complete("Tell me a joke", {
+    *   model: "sonnet",
+    *   temperature: 0.9
+    * });
+    * ```
+    */
+   static complete(prompt: string, options?: QuickOptions): Promise<string>;
+   /**
+    * Quick streaming - returns async generator of text chunks.
+    * Convenient for streaming responses without needing agent setup.
+    *
+    * @param prompt - User prompt
+    * @param options - Optional configuration
+    * @returns Async generator yielding text chunks
+    *
+    * @example
+    * ```typescript
+    * for await (const chunk of LLMist.stream("Tell me a story")) {
+    *   process.stdout.write(chunk);
+    * }
+    *
+    * // With options
+    * for await (const chunk of LLMist.stream("Generate code", {
+    *   model: "gpt4",
+    *   systemPrompt: "You are a coding assistant"
+    * })) {
+    *   process.stdout.write(chunk);
+    * }
+    * ```
+    */
+   static stream(prompt: string, options?: QuickOptions): AsyncGenerator<string>;
+   /**
+    * Instance method: Quick completion using this client instance.
+    *
+    * @param prompt - User prompt
+    * @param options - Optional configuration
+    * @returns Complete text response
+    */
+   complete(prompt: string, options?: QuickOptions): Promise<string>;
+   /**
+    * Instance method: Quick streaming using this client instance.
+    *
+    * @param prompt - User prompt
+    * @param options - Optional configuration
+    * @returns Async generator yielding text chunks
+    */
+   streamText(prompt: string, options?: QuickOptions): AsyncGenerator<string>;
+   /**
+    * Create a fluent agent builder.
+    * Provides a chainable API for configuring and creating agents.
+    *
+    * @returns AgentBuilder instance for chaining
+    *
+    * @example
+    * ```typescript
+    * const agent = LLMist.createAgent()
+    *   .withModel("sonnet")
+    *   .withSystem("You are a helpful assistant")
+    *   .withGadgets(Calculator, Weather)
+    *   .ask("What's the weather in Paris?");
+    *
+    * for await (const event of agent.run()) {
+    *   // handle events
+    * }
+    * ```
+    *
+    * @example
+    * ```typescript
+    * // Quick one-liner for simple queries
+    * const answer = await LLMist.createAgent()
+    *   .withModel("gpt4-mini")
+    *   .askAndCollect("What is 2+2?");
+    * ```
+    */
+   static createAgent(): AgentBuilder;
+   /**
+    * Create agent builder with this client instance.
+    * Useful when you want to reuse a configured client.
+    *
+    * @returns AgentBuilder instance using this client
+    *
+    * @example
+    * ```typescript
+    * const client = new LLMist({ ... });
+    *
+    * const agent = client.createAgent()
+    *   .withModel("sonnet")
+    *   .ask("Hello");
+    * ```
+    */
+   createAgent(): AgentBuilder;
+ }
+
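+ /**
+  * Usage sketch (editor's example): constructing a client and streaming raw
+  * chunks. Assumes provider credentials are available via the environment so
+  * auto-discovery can register the adapter.
+  *
+  * @example
+  * ```typescript
+  * const client = new LLMist({ defaultProvider: "openai" });
+  * const stream = client.stream({
+  *   model: "openai:gpt-4o-mini",
+  *   messages: [{ role: "user", content: "Hello!" }],
+  * });
+  * for await (const chunk of stream) {
+  *   process.stdout.write(chunk.text);
+  * }
+  * ```
+  */
+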
+ type GadgetClass = new (...args: unknown[]) => BaseGadget;
+ type GadgetOrClass = BaseGadget | GadgetClass;
+ declare class GadgetRegistry {
+   private readonly gadgets;
+   /**
+    * Creates a registry from an array of gadget classes or instances,
+    * or an object mapping names to gadgets.
+    *
+    * @param gadgets - Array of gadgets/classes or object with custom names
+    * @returns New GadgetRegistry with all gadgets registered
+    *
+    * @example
+    * ```typescript
+    * // From array of classes
+    * const registry = GadgetRegistry.from([Calculator, Weather]);
+    *
+    * // From array of instances
+    * const registry = GadgetRegistry.from([new Calculator(), new Weather()]);
+    *
+    * // From object with custom names
+    * const registry = GadgetRegistry.from({
+    *   calc: Calculator,
+    *   weather: new Weather({ apiKey: "..." })
+    * });
+    * ```
+    */
+   static from(gadgets: GadgetOrClass[] | Record<string, GadgetOrClass>): GadgetRegistry;
+   /**
+    * Registers multiple gadgets at once from an array.
+    *
+    * @param gadgets - Array of gadget instances or classes
+    * @returns This registry for chaining
+    *
+    * @example
+    * ```typescript
+    * registry.registerMany([Calculator, Weather, Email]);
+    * registry.registerMany([new Calculator(), new Weather()]);
+    * ```
+    */
+   registerMany(gadgets: GadgetOrClass[]): this;
+   register(name: string, gadget: BaseGadget): void;
+   registerByClass(gadget: BaseGadget): void;
+   get(name: string): BaseGadget | undefined;
+   has(name: string): boolean;
+   getNames(): string[];
+   getAll(): BaseGadget[];
+   unregister(name: string): boolean;
+   clear(): void;
+ }
+
+ /**
+  * Internal key for Agent instantiation.
+  * This Symbol is used to ensure only AgentBuilder can create Agent instances.
+  *
+  * @internal
+  */
+ declare const AGENT_INTERNAL_KEY: unique symbol;
+
+ /**
+  * Event handler sugar for cleaner event processing.
+  *
+  * Instead of verbose if/else chains, use named handlers
+  * for each event type.
+  *
+  * @example
+  * ```typescript
+  * await agent.runWith({
+  *   onText: (content) => console.log("LLM:", content),
+  *   onGadgetResult: (result) => console.log("Result:", result.result),
+  * });
+  * ```
+  */
+
+ /**
+  * Named event handlers for different event types.
+  */
+ interface EventHandlers {
+   /** Called when text is generated by the LLM */
+   onText?: (content: string) => void | Promise<void>;
+   /** Called when a gadget is about to be executed */
+   onGadgetCall?: (call: {
+     gadgetName: string;
+     parameters?: Record<string, unknown>;
+     parametersYaml: string;
+   }) => void | Promise<void>;
+   /** Called when a gadget execution completes */
+   onGadgetResult?: (result: {
+     gadgetName: string;
+     result?: string;
+     error?: string;
+     parameters: Record<string, unknown>;
+   }) => void | Promise<void>;
+   /** Called when human input is required */
+   onHumanInputRequired?: (data: {
+     question: string;
+     gadgetName: string;
+   }) => void | Promise<void>;
+   /** Called for any other event type */
+   onOther?: (event: StreamEvent) => void | Promise<void>;
+ }
+ /**
+  * Helper to run an agent with named event handlers.
+  *
+  * @param agentGenerator - Agent's run() async generator
+  * @param handlers - Named event handlers
+  *
+  * @example
+  * ```typescript
+  * await runWithHandlers(agent.run(), {
+  *   onText: (text) => console.log("LLM:", text),
+  *   onGadgetResult: (result) => console.log("Result:", result.result),
+  * });
+  * ```
+  */
+ declare function runWithHandlers(agentGenerator: AsyncGenerator<StreamEvent>, handlers: EventHandlers): Promise<void>;
+ /**
+  * Helper to collect events by type.
+  *
+  * @param agentGenerator - Agent's run() async generator
+  * @param collect - Object specifying which event types to collect
+  * @returns Object with collected events
+  *
+  * @example
+  * ```typescript
+  * const { text, gadgetResults } = await collectEvents(agent.run(), {
+  *   text: true,
+  *   gadgetResults: true,
+  * });
+  *
+  * console.log("Full response:", text.join(""));
+  * console.log("Gadget calls:", gadgetResults.length);
+  * ```
+  */
+ declare function collectEvents(agentGenerator: AsyncGenerator<StreamEvent>, collect: {
+   text?: boolean;
+   gadgetCalls?: boolean;
+   gadgetResults?: boolean;
+ }): Promise<{
+   text: string[];
+   gadgetCalls: Array<{
+     gadgetName: string;
+     parameters: Record<string, unknown>;
+   }>;
+   gadgetResults: Array<{
+     gadgetName: string;
+     result?: string;
+     error?: string;
+     parameters: Record<string, unknown>;
+   }>;
+ }>;
+ /**
+  * Helper to collect only text from an agent run.
+  *
+  * @param agentGenerator - Agent's run() async generator
+  * @returns Combined text response
+  *
+  * @example
+  * ```typescript
+  * const response = await collectText(agent.run());
+  * console.log(response);
+  * ```
+  */
+ declare function collectText(agentGenerator: AsyncGenerator<StreamEvent>): Promise<string>;
+
+ /**
+  * Clean, world-class hooks system with clear separation of concerns.
+  *
+  * ## Three Distinct Categories
+  *
+  * ### 1. OBSERVERS (Read-Only)
+  * **Purpose:** Logging, metrics, monitoring, analytics
+  * **Characteristics:**
+  * - Cannot modify data, only observe
+  * - Run in parallel (no ordering guarantees)
+  * - Errors are logged but don't crash the system
+  * - Both sync and async supported
+  *
+  * **When to use:** When you need to track what's happening without affecting execution.
+  *
+  * @example
+  * ```typescript
+  * observers: {
+  *   onLLMCallComplete: async (ctx) => {
+  *     metrics.track('llm_call', { tokens: ctx.usage?.totalTokens });
+  *   },
+  *   onGadgetExecutionComplete: async (ctx) => {
+  *     console.log(`${ctx.gadgetName} took ${ctx.executionTimeMs}ms`);
+  *   }
+  * }
+  * ```
+  *
+  * ### 2. INTERCEPTORS (Synchronous Transformations)
+  * **Purpose:** Transform, filter, redact, format data in-flight
+  * **Characteristics:**
+  * - Pure functions: input -> output (or null to suppress)
+  * - Run in sequence (order matters)
+  * - Effect is immediate and visible to subsequent hooks
+  * - Sync only (no async)
+  *
+  * **When to use:** When you need to modify data as it flows through the system.
+  *
+  * @example
+  * ```typescript
+  * interceptors: {
+  *   // Redact sensitive data
+  *   interceptRawChunk: (chunk) =>
+  *     chunk.replace(/api_key=\w+/g, 'api_key=[REDACTED]'),
+  *
+  *   // Suppress certain outputs
+  *   interceptTextChunk: (chunk, ctx) =>
+  *     chunk.includes('[INTERNAL]') ? null : chunk,
+  *
+  *   // Add metadata to results
+  *   interceptGadgetResult: (result, ctx) =>
+  *     `[${ctx.gadgetName}] ${result}`
+  * }
+  * ```
+  *
+  * ### 3. CONTROLLERS (Async Lifecycle Control)
+  * **Purpose:** Control execution flow, skip operations, provide fallbacks
+  * **Characteristics:**
+  * - Async functions returning action objects
+  * - Can skip operations or provide synthetic/fallback responses
+  * - Run at specific decision points in the lifecycle
+  * - Actions are validated at runtime
+  *
+  * **When to use:** When you need to conditionally modify behavior or recover from errors.
+  *
+  * @example
+  * ```typescript
+  * controllers: {
+  *   // Skip LLM call and return cached response
+  *   beforeLLMCall: async (ctx) => {
+  *     const cached = cache.get(ctx.options.messages);
+  *     if (cached) return { action: 'skip', syntheticResponse: cached };
+  *     return { action: 'proceed' };
+  *   },
+  *
+  *   // Recover from LLM errors
+  *   afterLLMError: async (ctx) => ({
+  *     action: 'recover',
+  *     fallbackResponse: 'Sorry, I encountered an error. Please try again.'
+  *   }),
+  *
+  *   // Skip expensive gadgets in certain conditions
+  *   beforeGadgetExecution: async (ctx) => {
+  *     if (ctx.gadgetName === 'SlowSearch' && ctx.iteration > 2) {
+  *       return { action: 'skip', syntheticResult: 'Search skipped to save time' };
+  *     }
+  *     return { action: 'proceed' };
+  *   }
+  * }
+  * ```
+  *
+  * ## Hook Execution Order
+  *
+  * ```
+  * LLM CALL LIFECYCLE:
+  * 1. onLLMCallStart (observer)
+  * 2. beforeLLMCall (controller) - can skip/modify
+  * 3. [LLM API Call]
+  * 4. For each stream chunk:
+  *    a. interceptRawChunk (interceptor)
+  *    b. onStreamChunk (observer)
+  *    c. Parse for gadgets
+  *    d. If gadget found -> GADGET LIFECYCLE
+  *    e. If text -> interceptTextChunk -> emit
+  * 5. afterLLMCall (controller) - can append/modify
+  * 6. interceptAssistantMessage (interceptor)
+  * 7. onLLMCallComplete (observer)
+  *
+  * GADGET LIFECYCLE:
+  * 1. interceptGadgetParameters (interceptor)
+  * 2. beforeGadgetExecution (controller) - can skip
+  * 3. onGadgetExecutionStart (observer)
+  * 4. [Execute gadget]
+  * 5. interceptGadgetResult (interceptor)
+  * 6. afterGadgetExecution (controller) - can recover
+  * 7. onGadgetExecutionComplete (observer)
+  * ```
+  *
+  * @module agent/hooks
+  */
+
+ /**
+  * Context provided when an LLM call starts.
+  * Read-only observation point.
+  */
+ interface ObserveLLMCallContext {
+   iteration: number;
+   options: Readonly<LLMGenerationOptions>;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context provided when an LLM call completes successfully.
+  * Read-only observation point.
+  */
+ interface ObserveLLMCompleteContext {
+   iteration: number;
+   options: Readonly<LLMGenerationOptions>;
+   finishReason: string | null;
+   usage?: {
+     inputTokens: number;
+     outputTokens: number;
+     totalTokens: number;
+   };
+   /** The complete raw response text */
+   rawResponse: string;
+   /** The final message that will be added to history (after interceptors) */
+   finalMessage: string;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context provided when an LLM call fails.
+  * Read-only observation point.
+  */
+ interface ObserveLLMErrorContext {
+   iteration: number;
+   options: Readonly<LLMGenerationOptions>;
+   error: Error;
+   /** Whether the error was recovered by a controller */
+   recovered: boolean;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context provided when a gadget execution starts.
+  * Read-only observation point.
+  */
+ interface ObserveGadgetStartContext {
+   iteration: number;
+   gadgetName: string;
+   invocationId: string;
+   /** Parameters after controller modifications */
+   parameters: Readonly<Record<string, unknown>>;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context provided when a gadget execution completes.
+  * Read-only observation point.
+  */
+ interface ObserveGadgetCompleteContext {
+   iteration: number;
+   gadgetName: string;
+   invocationId: string;
+   parameters: Readonly<Record<string, unknown>>;
+   /** Original result before interceptors */
+   originalResult?: string;
+   /** Final result after interceptors */
+   finalResult?: string;
+   error?: string;
+   executionTimeMs: number;
+   breaksLoop?: boolean;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context provided for each stream chunk.
+  * Read-only observation point.
+  */
+ interface ObserveChunkContext {
+   iteration: number;
+   /** The raw chunk from the LLM */
+   rawChunk: string;
+   /** Accumulated text so far */
+   accumulatedText: string;
+   /** Token usage if available (Anthropic sends input tokens at stream start) */
+   usage?: {
+     inputTokens: number;
+     outputTokens: number;
+     totalTokens: number;
+   };
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Observers: Read-only hooks for side effects.
+  * - Cannot modify data
+  * - Errors are logged but don't crash the system
+  * - Run in parallel (no ordering guarantees)
+  */
+ interface Observers {
+   /** Called when an LLM call starts */
+   onLLMCallStart?: (context: ObserveLLMCallContext) => void | Promise<void>;
+   /** Called when an LLM call completes successfully */
+   onLLMCallComplete?: (context: ObserveLLMCompleteContext) => void | Promise<void>;
+   /** Called when an LLM call fails */
+   onLLMCallError?: (context: ObserveLLMErrorContext) => void | Promise<void>;
+   /** Called when a gadget execution starts */
+   onGadgetExecutionStart?: (context: ObserveGadgetStartContext) => void | Promise<void>;
+   /** Called when a gadget execution completes (success or error) */
+   onGadgetExecutionComplete?: (context: ObserveGadgetCompleteContext) => void | Promise<void>;
+   /** Called for each stream chunk */
+   onStreamChunk?: (context: ObserveChunkContext) => void | Promise<void>;
+ }
+ /**
+  * Context for chunk interception.
+  */
+ interface ChunkInterceptorContext {
+   iteration: number;
+   accumulatedText: string;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context for message interception.
+  */
+ interface MessageInterceptorContext {
+   iteration: number;
+   /** The raw LLM response */
+   rawResponse: string;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context for gadget parameter interception.
+  */
+ interface GadgetParameterInterceptorContext {
+   iteration: number;
+   gadgetName: string;
+   invocationId: string;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Context for gadget result interception.
+  */
+ interface GadgetResultInterceptorContext {
+   iteration: number;
+   gadgetName: string;
+   invocationId: string;
+   parameters: Readonly<Record<string, unknown>>;
+   executionTimeMs: number;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Interceptors: Synchronous transformations with predictable timing.
+  * - Pure functions with clear input -> output
+  * - Run in sequence (order matters)
+  * - Effect is immediate (no confusion about timing)
+  */
+ interface Interceptors {
+   /**
+    * Intercept and transform raw chunks from the LLM stream.
+    * Affects current stream immediately.
+    *
+    * @param chunk - The raw chunk text from the LLM
+    * @param context - Context information including iteration and accumulated text
+    * @returns Transformed chunk text, or null to suppress the chunk entirely
+    */
+   interceptRawChunk?: (chunk: string, context: ChunkInterceptorContext) => string | null;
+   /**
+    * Intercept and transform text chunks before they're displayed.
+    * Affects current output immediately.
+    *
+    * @param chunk - The text chunk to be displayed
+    * @param context - Context information including iteration and accumulated text
+    * @returns Transformed chunk text, or null to suppress the chunk entirely
+    */
+   interceptTextChunk?: (chunk: string, context: ChunkInterceptorContext) => string | null;
+   /**
+    * Intercept and transform the final assistant message before it's added to conversation history.
+    * This is the last chance to modify what gets stored.
+    *
+    * @param message - The final message text
+    * @param context - Context information including raw response
+    * @returns Transformed message text (cannot be suppressed)
+    */
+   interceptAssistantMessage?: (message: string, context: MessageInterceptorContext) => string;
+   /**
+    * Intercept and transform gadget parameters before execution.
+    *
+    * IMPORTANT: The intercepted parameters are used to update the original call object.
+    * This means the modified parameters will be visible in subsequent hooks.
+    *
+    * @param parameters - The original parameters (readonly - create new object if modifying)
+    * @param context - Context information including gadget name and invocation ID
+    * @returns Modified parameters object
+    */
+   interceptGadgetParameters?: (parameters: Readonly<Record<string, unknown>>, context: GadgetParameterInterceptorContext) => Record<string, unknown>;
+   /**
+    * Intercept and transform gadget results after execution.
+    * This affects what gets sent back to the LLM and stored in history.
+    *
+    * @param result - The gadget result text
+    * @param context - Context information including parameters and execution time
+    * @returns Transformed result text (cannot be suppressed)
+    */
+   interceptGadgetResult?: (result: string, context: GadgetResultInterceptorContext) => string;
+ }
+ /**
+  * Context for LLM call controller.
+  */
+ interface LLMCallControllerContext {
+   iteration: number;
+   options: LLMGenerationOptions;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Action returned by beforeLLMCall controller.
+  */
+ type BeforeLLMCallAction = {
+   action: "proceed";
+   modifiedOptions?: Partial<LLMGenerationOptions>;
+ } | {
+   action: "skip";
+   syntheticResponse: string;
+ };
+ /**
+  * Context for after LLM call controller.
+  */
+ interface AfterLLMCallControllerContext {
+   iteration: number;
+   options: Readonly<LLMGenerationOptions>;
+   finishReason: string | null;
+   usage?: {
+     inputTokens: number;
+     outputTokens: number;
+     totalTokens: number;
+   };
+   /** The final message (after interceptors) that will be added to history */
+   finalMessage: string;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Action returned by afterLLMCall controller.
+  */
+ type AfterLLMCallAction = {
+   action: "continue";
+ } | {
+   action: "append_messages";
+   messages: LLMMessage[];
+ } | {
+   action: "modify_and_continue";
+   modifiedMessage: string;
+ } | {
+   action: "append_and_modify";
+   modifiedMessage: string;
+   messages: LLMMessage[];
+ };
+ /**
+  * Context for LLM error controller.
+  */
+ interface LLMErrorControllerContext {
+   iteration: number;
+   options: Readonly<LLMGenerationOptions>;
+   error: Error;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Action returned by LLM error controller.
+  */
+ type AfterLLMErrorAction = {
+   action: "rethrow";
+ } | {
+   action: "recover";
+   fallbackResponse: string;
+ };
+ /**
+  * Context for gadget execution controller.
+  */
+ interface GadgetExecutionControllerContext {
+   iteration: number;
+   gadgetName: string;
+   invocationId: string;
+   /** Parameters after interceptors have run */
+   parameters: Record<string, unknown>;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Action returned by beforeGadgetExecution controller.
+  */
+ type BeforeGadgetExecutionAction = {
+   action: "proceed";
+ } | {
+   action: "skip";
+   syntheticResult: string;
+ };
+ /**
+  * Context for after gadget execution controller.
+  */
+ interface AfterGadgetExecutionControllerContext {
+   iteration: number;
+   gadgetName: string;
+   invocationId: string;
+   parameters: Readonly<Record<string, unknown>>;
+   /** Result after interceptors (if successful) */
+   result?: string;
+   error?: string;
+   executionTimeMs: number;
+   logger: Logger<ILogObj>;
+ }
+ /**
+  * Action returned by afterGadgetExecution controller.
+  */
+ type AfterGadgetExecutionAction = {
+   action: "continue";
+ } | {
+   action: "recover";
+   fallbackResult: string;
+ };
+ /**
+  * Controllers: Async lifecycle hooks that control execution flow.
+  * - Can short-circuit execution
+  * - Can modify options and provide fallbacks
+  * - Run at specific lifecycle points
+  */
+ interface Controllers {
+   /**
+    * Called before making an LLM API call.
+    * Can modify options or skip the call entirely.
+    */
+   beforeLLMCall?: (context: LLMCallControllerContext) => Promise<BeforeLLMCallAction>;
+   /**
+    * Called after a successful LLM call (after interceptors have run).
+    * Can append messages to conversation or modify the final message.
+    */
+   afterLLMCall?: (context: AfterLLMCallControllerContext) => Promise<AfterLLMCallAction>;
+   /**
+    * Called after an LLM call fails.
+    * Can provide a fallback response to recover from the error.
+    */
+   afterLLMError?: (context: LLMErrorControllerContext) => Promise<AfterLLMErrorAction>;
+   /**
+    * Called before executing a gadget (after interceptors have run).
+    * Can skip execution and provide a synthetic result.
+    */
+   beforeGadgetExecution?: (context: GadgetExecutionControllerContext) => Promise<BeforeGadgetExecutionAction>;
+   /**
+    * Called after a gadget execution (success or error).
+    * Can provide a fallback result to recover from errors.
+    */
+   afterGadgetExecution?: (context: AfterGadgetExecutionControllerContext) => Promise<AfterGadgetExecutionAction>;
+ }
+ /**
+  * Clean hooks system with three distinct categories:
+  * - Observers: Read-only, for logging and metrics
+  * - Interceptors: Synchronous transformations with immediate effect
+  * - Controllers: Async lifecycle control with short-circuit capability
+  */
+ interface AgentHooks {
+   /** Read-only observation hooks for logging, metrics, etc. */
+   observers?: Observers;
+   /** Synchronous transformation hooks that affect current execution */
+   interceptors?: Interceptors;
+   /** Async lifecycle control hooks */
+   controllers?: Controllers;
+ }
+
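+ /**
+  * Composition sketch (editor's example): wiring all three hook categories
+  * into a single AgentHooks value. The hook bodies are illustrative.
+  *
+  * @example
+  * ```typescript
+  * const hooks: AgentHooks = {
+  *   observers: {
+  *     onLLMCallComplete: async (ctx) => console.log(ctx.usage?.totalTokens),
+  *   },
+  *   interceptors: {
+  *     interceptTextChunk: (chunk) => (chunk.includes("[INTERNAL]") ? null : chunk),
+  *   },
+  *   controllers: {
+  *     beforeLLMCall: async () => ({ action: "proceed" }),
+  *   },
+  * };
+  * ```
+  */
+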
+ /**
+  * Agent: Lean orchestrator using the clean hooks architecture.
+  *
+  * The Agent delegates ALL stream processing and hook coordination to StreamProcessor,
+  * making it a simple loop orchestrator with clear responsibilities.
+  */
+
+ /**
+  * Configuration options for the Agent.
+  */
+ interface AgentOptions {
+   /** The LLM client */
+   client: LLMist;
+   /** The model ID */
+   model: string;
+   /** System prompt */
+   systemPrompt?: string;
+   /** Initial user prompt (optional if using build()) */
+   userPrompt?: string;
+   /** Maximum iterations */
+   maxIterations?: number;
+   /** Temperature */
+   temperature?: number;
+   /** Gadget registry */
+   registry: GadgetRegistry;
+   /** Logger */
+   logger?: Logger<ILogObj>;
+   /** Clean hooks system */
+   hooks?: AgentHooks;
+   /** Callback for human input */
+   onHumanInputRequired?: (question: string) => Promise<string>;
+   /** Parameter format */
+   parameterFormat?: ParameterFormat;
+   /** Custom gadget start prefix */
+   gadgetStartPrefix?: string;
+   /** Custom gadget end prefix */
+   gadgetEndPrefix?: string;
+   /** Initial messages */
+   initialMessages?: Array<{
+     role: "system" | "user" | "assistant";
+     content: string;
+   }>;
+   /** Text-only handler */
+   textOnlyHandler?: TextOnlyHandler;
+   /** Stop on gadget error */
+   stopOnGadgetError?: boolean;
+   /** Custom error continuation logic */
+   shouldContinueAfterError?: (context: {
+     error: string;
+     gadgetName: string;
+     errorType: "parse" | "validation" | "execution";
+     parameters?: Record<string, unknown>;
+   }) => boolean | Promise<boolean>;
+   /** Default gadget timeout */
+   defaultGadgetTimeoutMs?: number;
+   /** Custom prompt configuration for gadget system prompts */
+   promptConfig?: PromptConfig;
+ }
+ /**
+  * Agent: Lean orchestrator that delegates to StreamProcessor.
+  *
+  * Responsibilities:
+  * - Run the main agent loop
+  * - Call LLM API
+  * - Delegate stream processing to StreamProcessor
+  * - Coordinate conversation management
+  * - Execute top-level lifecycle controllers
+  *
+  * NOT responsible for:
+  * - Stream parsing (StreamProcessor)
+  * - Hook coordination (StreamProcessor)
+  * - Gadget execution (StreamProcessor -> GadgetExecutor)
+  */
+ declare class Agent {
+   private readonly client;
+   private readonly model;
+   private readonly maxIterations;
+   private readonly temperature?;
+   private readonly logger;
+   private readonly hooks;
+   private readonly conversation;
+   private readonly registry;
+   private readonly parameterFormat;
+   private readonly gadgetStartPrefix?;
+   private readonly gadgetEndPrefix?;
+   private readonly onHumanInputRequired?;
+   private readonly textOnlyHandler;
+   private readonly stopOnGadgetError;
+   private readonly shouldContinueAfterError?;
+   private readonly defaultGadgetTimeoutMs?;
+   private readonly defaultMaxTokens?;
+   private userPromptProvided;
+   /**
+    * Creates a new Agent instance.
+    * @internal This constructor is private. Use LLMist.createAgent() or AgentBuilder instead.
+    */
+   constructor(key: typeof AGENT_INTERNAL_KEY, options: AgentOptions);
+   /**
+    * Get the gadget registry for this agent.
+    *
+    * Useful for inspecting registered gadgets in tests or advanced use cases.
+    *
+    * @returns The GadgetRegistry instance
+    *
+    * @example
+    * ```typescript
+    * const agent = new AgentBuilder()
+    *   .withModel("sonnet")
+    *   .withGadgets(Calculator, Weather)
+    *   .build();
+    *
+    * // Inspect registered gadgets
+    * console.log(agent.getRegistry().getNames()); // ['Calculator', 'Weather']
+    * ```
+    */
+   getRegistry(): GadgetRegistry;
+   /**
+    * Run the agent loop.
+    * Clean, simple orchestration - all complexity is in StreamProcessor.
+    *
+    * @throws {Error} If no user prompt was provided (when using build() without ask())
+    */
+   run(): AsyncGenerator<StreamEvent>;
+   /**
+    * Handle LLM error through controller.
+    */
+   private handleLLMError;
+   /**
+    * Handle text-only response (no gadgets called).
+    */
+   private handleTextOnlyResponse;
+   /**
+    * Safely execute an observer, catching and logging any errors.
+    */
+   private safeObserve;
+   /**
+    * Resolve max tokens from model catalog.
+    */
+   private resolveMaxTokensFromCatalog;
+   /**
+    * Run agent with named event handlers (syntactic sugar).
+    *
+    * Instead of verbose if/else chains, use named handlers for cleaner code.
+    *
+    * @param handlers - Named event handlers
+    *
+    * @example
+    * ```typescript
+    * await agent.runWith({
+    *   onText: (text) => console.log("LLM:", text),
+    *   onGadgetResult: (result) => console.log("Result:", result.result),
+    *   onGadgetCall: (call) => console.log("Calling:", call.gadgetName),
+    * });
+    * ```
+    */
+   runWith(handlers: EventHandlers): Promise<void>;
+ }
+
1623
+ /**
1624
+ * Fluent builder for creating agents with delightful DX.
1625
+ *
1626
+ * @example
1627
+ * ```typescript
1628
+ * const agent = LLMist.createAgent()
1629
+ * .withModel("sonnet")
1630
+ * .withSystem("You are a helpful assistant")
1631
+ * .withGadgets(Calculator, Weather)
1632
+ * .withMaxIterations(10)
1633
+ * .ask("What's the weather in Paris?");
1634
+ *
1635
+ * for await (const event of agent.run()) {
1636
+ * // process events
1637
+ * }
1638
+ * ```
1639
+ */
1640
+
1641
+ /**
1642
+ * Message for conversation history.
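+ *
+ * @example
+ * ```typescript
+ * // Each entry is keyed by role, per the union below:
+ * const history: HistoryMessage[] = [
+ *   { user: "Hello" },
+ *   { assistant: "Hi there!" }
+ * ];
+ * ```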
1643
+ */
1644
+ type HistoryMessage = {
1645
+ user: string;
1646
+ } | {
1647
+ assistant: string;
1648
+ } | {
1649
+ system: string;
1650
+ };
1651
+ /**
1652
+ * Fluent builder for creating agents.
1653
+ *
1654
+ * Provides a chainable API for configuring and creating agents,
1655
+ * making the code more expressive and easier to read.
1656
+ */
1657
+ declare class AgentBuilder {
1658
+ private client?;
1659
+ private model?;
1660
+ private systemPrompt?;
1661
+ private temperature?;
1662
+ private maxIterations?;
1663
+ private logger?;
1664
+ private hooks?;
1665
+ private promptConfig?;
1666
+ private gadgets;
1667
+ private initialMessages;
1668
+ private onHumanInputRequired?;
1669
+ private parameterFormat?;
1670
+ private gadgetStartPrefix?;
1671
+ private gadgetEndPrefix?;
1672
+ private textOnlyHandler?;
1673
+ private stopOnGadgetError?;
1674
+ private shouldContinueAfterError?;
1675
+ private defaultGadgetTimeoutMs?;
1676
+ constructor(client?: LLMist);
1677
+ /**
1678
+ * Set the model to use.
1679
+ * Supports aliases like "gpt4", "sonnet", "flash".
1680
+ *
1681
+ * @param model - Model name or alias
1682
+ * @returns This builder for chaining
1683
+ *
1684
+ * @example
1685
+ * ```typescript
1686
+ * .withModel("sonnet") // Alias
1687
+ * .withModel("gpt-5-nano") // Auto-detects provider
1688
+ * .withModel("openai:gpt-5") // Explicit provider
1689
+ * ```
1690
+ */
1691
+ withModel(model: string): this;
1692
+ /**
1693
+ * Set the system prompt.
1694
+ *
1695
+ * @param prompt - System prompt
1696
+ * @returns This builder for chaining
1697
+ */
1698
+ withSystem(prompt: string): this;
1699
+ /**
1700
+ * Set the temperature (0-1).
1701
+ *
1702
+ * @param temperature - Temperature value
1703
+ * @returns This builder for chaining
1704
+ */
1705
+ withTemperature(temperature: number): this;
1706
+ /**
1707
+ * Set maximum iterations.
1708
+ *
1709
+ * @param max - Maximum number of iterations
1710
+ * @returns This builder for chaining
1711
+ */
1712
+ withMaxIterations(max: number): this;
1713
+ /**
1714
+ * Set logger instance.
1715
+ *
1716
+ * @param logger - Logger instance
1717
+ * @returns This builder for chaining
1718
+ */
1719
+ withLogger(logger: Logger<ILogObj>): this;
1720
+ /**
1721
+ * Add hooks for agent lifecycle events.
1722
+ *
1723
+ * @param hooks - Agent hooks configuration
1724
+ * @returns This builder for chaining
1725
+ *
1726
+ * @example
1727
+ * ```typescript
1728
+ * import { HookPresets } from 'llmist/hooks';
1729
+ *
1730
+ * .withHooks(HookPresets.logging())
1731
+ * .withHooks(HookPresets.merge(
1732
+ * HookPresets.logging(),
1733
+ * HookPresets.timing()
1734
+ * ))
1735
+ * ```
1736
+ */
1737
+ withHooks(hooks: AgentHooks): this;
1738
+ /**
1739
+ * Configure custom prompts for gadget system messages.
1740
+ *
1741
+ * @param config - Prompt configuration object
1742
+ * @returns This builder for chaining
1743
+ *
1744
+ * @example
1745
+ * ```typescript
1746
+ * .withPromptConfig({
1747
+ * mainInstruction: "Use the gadget markers below:",
1748
+ * rules: ["Always use markers", "Never use function calling"]
1749
+ * })
1750
+ * ```
1751
+ */
1752
+ withPromptConfig(config: PromptConfig): this;
1753
+ /**
1754
+ * Add gadgets (classes or instances).
1755
+ * Can be called multiple times to add more gadgets.
1756
+ *
1757
+ * @param gadgets - Gadget classes or instances
1758
+ * @returns This builder for chaining
1759
+ *
1760
+ * @example
1761
+ * ```typescript
1762
+ * .withGadgets(Calculator, Weather, Email)
1763
+ * .withGadgets(new Calculator(), new Weather())
1764
+ * .withGadgets(createGadget({ ... }))
1765
+ * ```
1766
+ */
1767
+ withGadgets(...gadgets: GadgetOrClass[]): this;
1768
+ /**
1769
+ * Add conversation history messages.
1770
+ * Useful for continuing previous conversations.
1771
+ *
1772
+ * @param messages - Array of history messages
1773
+ * @returns This builder for chaining
1774
+ *
1775
+ * @example
1776
+ * ```typescript
1777
+ * .withHistory([
1778
+ * { user: "Hello" },
1779
+ * { assistant: "Hi there!" },
1780
+ * { user: "How are you?" },
1781
+ * { assistant: "I'm doing well, thanks!" }
1782
+ * ])
1783
+ * ```
1784
+ */
1785
+ withHistory(messages: HistoryMessage[]): this;
1786
+ /**
1787
+ * Add a single message to the conversation history.
1788
+ *
1789
+ * @param message - Single history message
1790
+ * @returns This builder for chaining
1791
+ *
1792
+ * @example
1793
+ * ```typescript
1794
+ * .addMessage({ user: "Hello" })
1795
+ * .addMessage({ assistant: "Hi there!" })
1796
+ * ```
1797
+ */
1798
+ addMessage(message: HistoryMessage): this;
1799
+ /**
1800
+ * Set the human input handler for interactive conversations.
1801
+ *
1802
+ * @param handler - Function to handle human input requests
1803
+ * @returns This builder for chaining
1804
+ *
1805
+ * @example
1806
+ * ```typescript
1807
+ * .onHumanInput(async (question) => {
1808
+ * return await promptUser(question);
1809
+ * })
1810
+ * ```
1811
+ */
1812
+ onHumanInput(handler: (question: string) => Promise<string>): this;
1813
+ /**
1814
+ * Set the parameter format for gadget calls.
1815
+ *
1816
+ * @param format - Parameter format ("json", "yaml", or "auto")
1817
+ * @returns This builder for chaining
1818
+ *
1819
+ * @example
1820
+ * ```typescript
1821
+ * .withParameterFormat("xml")
1822
+ * ```
1823
+ */
1824
+ withParameterFormat(format: ParameterFormat): this;
1825
+ /**
1826
+ * Set custom gadget marker prefix.
1827
+ *
1828
+ * @param prefix - Custom start prefix for gadget markers
1829
+ * @returns This builder for chaining
1830
+ *
1831
+ * @example
1832
+ * ```typescript
1833
+ * .withGadgetStartPrefix("<<GADGET_START>>")
1834
+ * ```
1835
+ */
1836
+ withGadgetStartPrefix(prefix: string): this;
1837
+ /**
1838
+ * Set custom gadget marker end prefix.
1839
+ *
1840
+ * @param suffix - Custom end prefix for gadget markers
1841
+ * @returns This builder for chaining
1842
+ *
1843
+ * @example
1844
+ * ```typescript
1845
+ * .withGadgetEndPrefix("<<GADGET_END>>")
1846
+ * ```
1847
+ */
1848
+ withGadgetEndPrefix(suffix: string): this;
1849
+ /**
1850
+ * Set the text-only handler strategy.
1851
+ *
1852
+ * Controls what happens when the LLM returns text without calling any gadgets:
1853
+ * - "terminate": End the agent loop (default)
1854
+ * - "acknowledge": Continue the loop for another iteration
1855
+ * - "wait_for_input": Wait for human input
1856
+ * - Custom handler: Provide a function for dynamic behavior
1857
+ *
1858
+ * @param handler - Text-only handler strategy or custom handler
1859
+ * @returns This builder for chaining
1860
+ *
1861
+ * @example
1862
+ * ```typescript
1863
+ * // Simple strategy
1864
+ * .withTextOnlyHandler("acknowledge")
1865
+ *
1866
+ * // Custom handler
1867
+ * .withTextOnlyHandler({
1868
+ * type: "custom",
1869
+ * handler: async (context) => {
1870
+ * if (context.text.includes("?")) {
1871
+ * return { action: "wait_for_input", question: context.text };
1872
+ * }
1873
+ * return { action: "continue" };
1874
+ * }
1875
+ * })
1876
+ * ```
1877
+ */
1878
+ withTextOnlyHandler(handler: TextOnlyHandler): this;
1879
+ /**
1880
+ * Set whether to stop gadget execution on first error.
1881
+ *
1882
+ * When true (default), if a gadget fails:
1883
+ * - Subsequent gadgets in the same response are skipped
1884
+ * - LLM stream is cancelled to save costs
1885
+ * - Agent loop continues with error in context
1886
+ *
1887
+ * When false:
1888
+ * - All gadgets in the response still execute
1889
+ * - LLM stream continues to completion
1890
+ *
1891
+ * @param stop - Whether to stop on gadget error
1892
+ * @returns This builder for chaining
1893
+ *
1894
+ * @example
1895
+ * ```typescript
1896
+ * .withStopOnGadgetError(false)
1897
+ * ```
1898
+ */
1899
+ withStopOnGadgetError(stop: boolean): this;
1900
+ /**
1901
+ * Set custom error handling logic.
1902
+ *
1903
+ * Provides fine-grained control over whether to continue after different types of errors.
1904
+ * Overrides `stopOnGadgetError` when provided.
1905
+ *
1906
+ * **Note:** This builder method configures the underlying `shouldContinueAfterError` option
1907
+ * in `AgentOptions`. The method is named `withErrorHandler` for better developer experience,
1908
+ * but maps to the `shouldContinueAfterError` property internally.
1909
+ *
1910
+ * @param handler - Function that decides whether to continue after an error.
1911
+ * Return `true` to continue execution, `false` to stop.
1912
+ * @returns This builder for chaining
1913
+ *
1914
+ * @example
1915
+ * ```typescript
1916
+ * .withErrorHandler((context) => {
1917
+ * // Stop on parse errors, continue on validation/execution errors
1918
+ * if (context.errorType === "parse") {
1919
+ * return false;
1920
+ * }
1921
+ * if (context.error.includes("CRITICAL")) {
1922
+ * return false;
1923
+ * }
1924
+ * return true;
1925
+ * })
1926
+ * ```
1927
+ */
1928
+ withErrorHandler(handler: (context: {
1929
+ error: string;
1930
+ gadgetName: string;
1931
+ errorType: "parse" | "validation" | "execution";
1932
+ parameters?: Record<string, unknown>;
1933
+ }) => boolean | Promise<boolean>): this;
1934
+ /**
1935
+ * Set default timeout for gadget execution.
1936
+ *
1937
+ * @param timeoutMs - Timeout in milliseconds (must be non-negative)
1938
+ * @returns This builder for chaining
1939
+ * @throws {Error} If timeout is negative
1940
+ *
1941
+ * @example
1942
+ * ```typescript
1943
+ * .withDefaultGadgetTimeout(5000) // 5 second timeout
1944
+ * ```
1945
+ */
1946
+ withDefaultGadgetTimeout(timeoutMs: number): this;
1947
+ /**
1948
+ * Build and create the agent with the given user prompt.
1949
+ * Returns the Agent instance ready to run.
1950
+ *
1951
+ * @param userPrompt - User's question or request
1952
+ * @returns Configured Agent instance
1953
+ *
1954
+ * @example
1955
+ * ```typescript
1956
+ * const agent = LLMist.createAgent()
1957
+ * .withModel("sonnet")
1958
+ * .withGadgets(Calculator)
1959
+ * .ask("What is 2+2?");
1960
+ *
1961
+ * for await (const event of agent.run()) {
1962
+ * // handle events
1963
+ * }
1964
+ * ```
1965
+ */
1966
+ ask(userPrompt: string): Agent;
1967
+ /**
1968
+ * Build, run, and collect only the text response.
1969
+ * Convenient for simple queries where you just want the final answer.
1970
+ *
1971
+ * @param userPrompt - User's question or request
1972
+ * @returns Promise resolving to the complete text response
1973
+ *
1974
+ * @example
1975
+ * ```typescript
1976
+ * const answer = await LLMist.createAgent()
1977
+ * .withModel("gpt4-mini")
1978
+ * .withGadgets(Calculator)
1979
+ * .askAndCollect("What is 42 * 7?");
1980
+ *
1981
+ * console.log(answer); // "294"
1982
+ * ```
1983
+ */
1984
+ askAndCollect(userPrompt: string): Promise<string>;
1985
+ /**
1986
+ * Build and run with event handlers.
1987
+ * Combines agent creation and event handling in one call.
1988
+ *
1989
+ * @param userPrompt - User's question or request
1990
+ * @param handlers - Event handlers
1991
+ *
1992
+ * @example
1993
+ * ```typescript
1994
+ * await LLMist.createAgent()
1995
+ * .withModel("sonnet")
1996
+ * .withGadgets(Calculator)
1997
+ * .askWith("What is 2+2?", {
1998
+ * onText: (text) => console.log("LLM:", text),
1999
+ * onGadgetResult: (result) => console.log("Result:", result.result),
2000
+ * });
2001
+ * ```
2002
+ */
2003
+ askWith(userPrompt: string, handlers: EventHandlers): Promise<void>;
2004
+ /**
2005
+ * Build the agent without a user prompt.
2006
+ *
2007
+ * Returns an Agent instance that can be inspected (e.g., check registered gadgets)
2008
+ * but cannot be run without first calling .ask(prompt).
2009
+ *
2010
+ * This is useful for:
2011
+ * - Testing: Inspect the registry, configuration, etc.
2012
+ * - Advanced use cases: Build agent configuration separately from execution
2013
+ *
2014
+ * @returns Configured Agent instance (without user prompt)
2015
+ *
2016
+ * @example
2017
+ * ```typescript
2018
+ * // Build agent for inspection
2019
+ * const agent = new AgentBuilder()
2020
+ * .withModel("sonnet")
2021
+ * .withGadgets(Calculator, Weather)
2022
+ * .build();
2023
+ *
2024
+ * // Inspect registered gadgets
2025
+ * console.log(agent.getRegistry().getNames()); // ['Calculator', 'Weather']
2026
+ *
2027
+ * // Note: Calling agent.run() will throw an error
2028
+ * // Use .ask(prompt) instead if you want to run the agent
2029
+ * ```
2030
+ */
2031
+ build(): Agent;
2032
+ }
2033
+
2034
+ /**
2035
+ * Context provided to matcher functions to determine if a mock should be used.
2036
+ */
2037
+ interface MockMatcherContext {
2038
+ /** The model descriptor (e.g., "openai:gpt-5") */
2039
+ model: string;
2040
+ /** The provider ID extracted from the model */
2041
+ provider: string;
2042
+ /** The model name without provider prefix */
2043
+ modelName: string;
2044
+ /** The complete LLM generation options */
2045
+ options: LLMGenerationOptions;
2046
+ /** The messages being sent to the LLM */
2047
+ messages: LLMMessage[];
2048
+ }
2049
+ /**
2050
+ * Matcher function that determines if a mock should be used for an LLM call.
2051
+ *
2052
+ * @param context - The context of the LLM call
2053
+ * @returns true if this mock should be used, false otherwise
2054
+ *
2055
+ * @example
2056
+ * // Match any call to GPT-5
2057
+ * const matcher: MockMatcher = (ctx) => ctx.modelName.includes('gpt-5');
2058
+ *
2059
+ * @example
2060
+ * // Match calls with specific message content
2061
+ * const matcher: MockMatcher = (ctx) => {
2062
+ * const lastMessage = ctx.messages[ctx.messages.length - 1];
2063
+ * return lastMessage?.content?.includes('calculate') ?? false;
2064
+ * };
2065
+ *
2066
+ * @example
2067
+ * // Match by provider
2068
+ * const matcher: MockMatcher = (ctx) => ctx.provider === 'anthropic';
2069
+ */
2070
+ type MockMatcher = (context: MockMatcherContext) => boolean | Promise<boolean>;
2071
+ /**
2072
+ * A mock response that will be returned when a matcher succeeds.
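+ *
+ * @example
+ * ```typescript
+ * // A sketch exercising the optional fields defined below:
+ * const response: MockResponse = {
+ *   text: 'The answer is 4.',
+ *   usage: { inputTokens: 12, outputTokens: 6, totalTokens: 18 },
+ *   finishReason: 'stop',
+ *   delayMs: 50,
+ *   streamDelayMs: 5
+ * };
+ * ```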
2073
+ */
2074
+ interface MockResponse {
2075
+ /**
2076
+ * Plain text content to return (will be streamed as text chunks)
2077
+ * Can include gadget markers like \n<GADGET_name>...</GADGET_END>
2078
+ */
2079
+ text?: string;
2080
+ /**
2081
+ * Pre-parsed gadget calls to inject into the response stream
2082
+ * These will be emitted as gadget_call events
2083
+ */
2084
+ gadgetCalls?: Array<{
2085
+ gadgetName: string;
2086
+ parameters: Record<string, unknown>;
2087
+ /** Optional invocationId, will be auto-generated if not provided */
2088
+ invocationId?: string;
2089
+ }>;
2090
+ /**
2091
+ * Simulated token usage statistics
2092
+ */
2093
+ usage?: {
2094
+ inputTokens: number;
2095
+ outputTokens: number;
2096
+ totalTokens: number;
2097
+ };
2098
+ /**
2099
+ * Simulated finish reason
2100
+ */
2101
+ finishReason?: string;
2102
+ /**
2103
+ * Delay in milliseconds before starting to stream the response
2104
+ * Useful for simulating network latency
2105
+ */
2106
+ delayMs?: number;
2107
+ /**
2108
+ * Delay in milliseconds between each chunk when streaming
2109
+ * Useful for simulating realistic streaming behavior
2110
+ */
2111
+ streamDelayMs?: number;
2112
+ }
2113
+ /**
2114
+ * A registered mock configuration combining a matcher with a response.
2115
+ */
2116
+ interface MockRegistration {
2117
+ /** Unique identifier for this mock (auto-generated if not provided) */
2118
+ id: string;
2119
+ /** The matcher function to determine if this mock applies */
2120
+ matcher: MockMatcher;
2121
+ /** The response to return when matched */
2122
+ response: MockResponse | ((context: MockMatcherContext) => MockResponse | Promise<MockResponse>);
2123
+ /** Optional label for debugging */
2124
+ label?: string;
2125
+ /** If true, this mock will only be used once then automatically removed */
2126
+ once?: boolean;
2127
+ }
2128
+ /**
2129
+ * Statistics about mock usage.
2130
+ */
2131
+ interface MockStats {
2132
+ /** Number of times this mock was matched and used */
2133
+ matchCount: number;
2134
+ /** Last time this mock was used */
2135
+ lastUsed?: Date;
2136
+ }
2137
+ /**
2138
+ * Options for configuring the mock system.
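+ *
+ * @example
+ * ```typescript
+ * // Fail fast when a call has no matching mock, with verbose logging:
+ * const options: MockOptions = { strictMode: true, debug: true };
+ * ```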
2139
+ */
2140
+ interface MockOptions {
2141
+ /**
2142
+ * If true, throws an error when no mock matches an LLM call.
2143
+ * If false, logs a warning and returns an empty response.
2144
+ * Default: false
2145
+ */
2146
+ strictMode?: boolean;
2147
+ /**
2148
+ * If true, logs detailed information about mock matching and execution.
2149
+ * Default: false
2150
+ */
2151
+ debug?: boolean;
2152
+ /**
2153
+ * If true, records statistics about mock usage.
2154
+ * Default: true
2155
+ */
2156
+ recordStats?: boolean;
2157
+ }
2158
+
2159
+ /**
2160
+ * Provider adapter that serves mock responses instead of making real LLM API calls.
2161
+ * This is useful for testing applications that use llmist without incurring API costs.
2162
+ *
2163
+ * The MockProviderAdapter has high priority (100) and is always checked before
2164
+ * real providers when both are registered. This enables selective mocking where
2165
+ * some models use mocks while others use real providers. If no matching mock is
2166
+ * found and strictMode is disabled, requests return an empty response.
2167
+ *
2168
+ * @example
2169
+ * ```typescript
2170
+ * import { LLMist, createMockAdapter, mockLLM } from 'llmist/testing';
2171
+ *
2172
+ * // Use with real providers for selective mocking
2173
+ * const client = new LLMist({
2174
+ * adapters: [createMockAdapter()],
2175
+ * autoDiscoverProviders: true // Also loads real OpenAI, Anthropic, etc.
2176
+ * });
2177
+ *
2178
+ * // Register mocks for specific models
2179
+ * mockLLM()
2180
+ * .forModel('gpt-5-nano')
2181
+ * .returns('Test response')
2182
+ * .register();
2183
+ *
2184
+ * // gpt-5-nano uses mock, other models use real providers
2185
+ * const stream = client.stream({
2186
+ * model: 'openai:gpt-5-nano',
2187
+ * messages: [{ role: 'user', content: 'test' }]
2188
+ * });
2189
+ * ```
2190
+ */
2191
+ declare class MockProviderAdapter implements ProviderAdapter {
2192
+ readonly providerId = "mock";
2193
+ readonly priority = 100;
2194
+ private readonly mockManager;
2195
+ constructor(options?: MockOptions);
2196
+ supports(descriptor: ModelDescriptor): boolean;
2197
+ stream(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec?: unknown): LLMStream;
2198
+ private createMockStreamFromContext;
2199
+ }
2200
+ /**
2201
+ * Create a mock provider adapter instance.
2202
+ * This is a convenience factory function.
2203
+ *
2204
+ * @param options - Optional configuration for the mock system
2205
+ * @returns A configured MockProviderAdapter
2206
+ *
2207
+ * @example
2208
+ * ```typescript
2209
+ * const adapter = createMockAdapter({ strictMode: true, debug: true });
2210
+ * const client = new LLMist({ adapters: [adapter] });
2211
+ * ```
2212
+ */
2213
+ declare function createMockAdapter(options?: MockOptions): MockProviderAdapter;
2214
+
2215
+ /**
2216
+ * Fluent builder for creating mock responses and registrations.
2217
+ * Provides a convenient API for common mocking scenarios.
2218
+ *
2219
+ * @example
2220
+ * ```typescript
2221
+ * import { mockLLM } from 'llmist';
2222
+ *
2223
+ * // Simple text mock
2224
+ * mockLLM()
2225
+ * .forModel('gpt-5')
2226
+ * .returns('Hello, world!')
2227
+ * .register();
2228
+ *
2229
+ * // Mock with gadget calls
2230
+ * mockLLM()
2231
+ * .forProvider('anthropic')
2232
+ * .whenMessageContains('calculate')
2233
+ * .returnsGadgetCalls([
2234
+ * { gadgetName: 'calculator', parameters: { operation: 'add', a: 1, b: 2 } }
2235
+ * ])
2236
+ * .register();
2237
+ *
2238
+ * // Complex conditional mock
2239
+ * mockLLM()
2240
+ * .when((ctx) => ctx.messages.length > 5)
2241
+ * .returns('This conversation is getting long!')
2242
+ * .once()
2243
+ * .register();
2244
+ * ```
2245
+ */
2246
+ declare class MockBuilder {
2247
+ private matchers;
2248
+ private response;
2249
+ private label?;
2250
+ private isOnce;
2251
+ private id?;
2252
+ /**
2253
+ * Match calls to a specific model (by name, supports partial matching).
2254
+ *
2255
+ * @example
2256
+ * mockLLM().forModel('gpt-5')
2257
+ * mockLLM().forModel('claude') // matches any Claude model
2258
+ */
2259
+ forModel(modelName: string): this;
2260
+ /**
2261
+ * Match calls to any model.
2262
+ * Useful when you want to mock responses regardless of the model used.
2263
+ *
2264
+ * @example
2265
+ * mockLLM().forAnyModel()
2266
+ */
2267
+ forAnyModel(): this;
2268
+ /**
2269
+ * Match calls to a specific provider.
2270
+ *
2271
+ * @example
2272
+ * mockLLM().forProvider('openai')
2273
+ * mockLLM().forProvider('anthropic')
2274
+ */
2275
+ forProvider(provider: string): this;
2276
+ /**
2277
+ * Match calls to any provider.
2278
+ * Useful when you want to mock responses regardless of the provider used.
2279
+ *
2280
+ * @example
2281
+ * mockLLM().forAnyProvider()
2282
+ */
2283
+ forAnyProvider(): this;
2284
+ /**
2285
+ * Match when any message contains the given text (case-insensitive).
2286
+ *
2287
+ * @example
2288
+ * mockLLM().whenMessageContains('hello')
2289
+ */
2290
+ whenMessageContains(text: string): this;
2291
+ /**
2292
+ * Match when the last message contains the given text (case-insensitive).
2293
+ *
2294
+ * @example
2295
+ * mockLLM().whenLastMessageContains('goodbye')
2296
+ */
2297
+ whenLastMessageContains(text: string): this;
2298
+ /**
2299
+ * Match when any message matches the given regex.
2300
+ *
2301
+ * @example
2302
+ * mockLLM().whenMessageMatches(/calculate \d+/)
2303
+ */
2304
+ whenMessageMatches(regex: RegExp): this;
2305
+ /**
2306
+ * Match when a message with a specific role contains text.
2307
+ *
2308
+ * @example
2309
+ * mockLLM().whenRoleContains('system', 'You are a helpful assistant')
2310
+ */
2311
+ whenRoleContains(role: LLMMessage["role"], text: string): this;
2312
+ /**
2313
+ * Match based on the number of messages in the conversation.
2314
+ *
2315
+ * @example
2316
+ * mockLLM().whenMessageCount((count) => count > 10)
2317
+ */
2318
+ whenMessageCount(predicate: (count: number) => boolean): this;
2319
+ /**
2320
+ * Add a custom matcher function.
2321
+ * This provides full control over matching logic.
2322
+ *
2323
+ * @example
2324
+ * mockLLM().when((ctx) => {
2325
+ * return (ctx.options.temperature ?? 0) > 0.8;
2326
+ * })
2327
+ */
2328
+ when(matcher: MockMatcher): this;
2329
+ /**
2330
+ * Set the text response to return.
2331
+ * Can be a static string or a function that returns a string dynamically.
2332
+ *
2333
+ * @example
2334
+ * mockLLM().returns('Hello, world!')
2335
+ * mockLLM().returns(() => `Response at ${Date.now()}`)
2336
+ * mockLLM().returns((ctx) => `You said: ${ctx.messages[0]?.content}`)
2337
+ */
2338
+ returns(text: string | ((context: MockMatcherContext) => string | Promise<string>)): this;
2339
+ /**
2340
+ * Set gadget calls to include in the response.
2341
+ *
2342
+ * @example
2343
+ * mockLLM().returnsGadgetCalls([
2344
+ * { gadgetName: 'calculator', parameters: { op: 'add', a: 1, b: 2 } }
2345
+ * ])
2346
+ */
2347
+ returnsGadgetCalls(calls: Array<{
2348
+ gadgetName: string;
2349
+ parameters: Record<string, unknown>;
2350
+ invocationId?: string;
2351
+ }>): this;
2352
+ /**
2353
+ * Add a single gadget call to the response.
2354
+ *
2355
+ * @example
2356
+ * mockLLM()
2357
+ * .returnsGadgetCall('calculator', { op: 'add', a: 1, b: 2 })
2358
+ * .returnsGadgetCall('logger', { message: 'Done!' })
2359
+ */
2360
+ returnsGadgetCall(gadgetName: string, parameters: Record<string, unknown>): this;
2361
+ /**
2362
+ * Set the complete mock response object.
2363
+ * This allows full control over all response properties.
2364
+ * Can also be a function that generates the response dynamically based on context.
2365
+ *
2366
+ * @example
2367
+ * // Static response
2368
+ * mockLLM().withResponse({
2369
+ * text: 'Hello',
2370
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
2371
+ * finishReason: 'stop'
2372
+ * })
2373
+ *
2374
+ * @example
2375
+ * // Dynamic response
2376
+ * mockLLM().withResponse((ctx) => ({
2377
+ * text: `You said: ${ctx.messages[ctx.messages.length - 1]?.content}`,
2378
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
2379
+ * }))
2380
+ */
2381
+ withResponse(response: MockResponse | ((context: MockMatcherContext) => MockResponse | Promise<MockResponse>)): this;
2382
+ /**
2383
+ * Set simulated token usage.
2384
+ *
2385
+ * @example
2386
+ * mockLLM().withUsage({ inputTokens: 100, outputTokens: 50, totalTokens: 150 })
2387
+ */
2388
+ withUsage(usage: {
2389
+ inputTokens: number;
2390
+ outputTokens: number;
2391
+ totalTokens: number;
2392
+ }): this;
2393
+ /**
2394
+ * Set the finish reason.
2395
+ *
2396
+ * @example
2397
+ * mockLLM().withFinishReason('stop')
2398
+ * mockLLM().withFinishReason('length')
2399
+ */
2400
+ withFinishReason(reason: string): this;
2401
+ /**
2402
+ * Set initial delay before streaming starts (simulates network latency).
2403
+ *
2404
+ * @example
2405
+ * mockLLM().withDelay(100) // 100ms delay
2406
+ */
2407
+ withDelay(ms: number): this;
2408
+ /**
2409
+ * Set delay between stream chunks (simulates realistic streaming).
2410
+ *
2411
+ * @example
2412
+ * mockLLM().withStreamDelay(10) // 10ms between chunks
2413
+ */
2414
+ withStreamDelay(ms: number): this;
2415
+ /**
2416
+ * Set a label for this mock (useful for debugging).
2417
+ *
2418
+ * @example
2419
+ * mockLLM().withLabel('greeting mock')
2420
+ */
2421
+ withLabel(label: string): this;
2422
+ /**
2423
+ * Set a specific ID for this mock.
2424
+ *
2425
+ * @example
2426
+ * mockLLM().withId('my-custom-mock-id')
2427
+ */
2428
+ withId(id: string): this;
2429
+ /**
2430
+ * Mark this mock as one-time use (will be removed after first match).
2431
+ *
2432
+ * @example
2433
+ * mockLLM().once()
2434
+ */
2435
+ once(): this;
2436
+ /**
2437
+ * Build the mock registration without registering it.
2438
+ * Useful if you want to register it manually later.
2439
+ *
2440
+ * @returns The built MockRegistration object (without id if not specified)
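+ *
+ * @example
+ * ```typescript
+ * // Build now, register later through the global manager:
+ * const registration = mockLLM().forModel('gpt-5').returns('Hi!').build();
+ * const id = getMockManager().register(registration);
+ * ```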
2441
+ */
2442
+ build(): Omit<MockRegistration, "id"> & {
2443
+ id?: string;
2444
+ };
2445
+ /**
2446
+ * Register this mock with the global MockManager.
2447
+ * Returns the ID of the registered mock.
2448
+ *
2449
+ * @example
2450
+ * const mockId = mockLLM().forModel('gpt-5').returns('Hello!').register();
2451
+ * // Later: getMockManager().unregister(mockId);
2452
+ */
2453
+ register(): string;
2454
+ }
2455
+ /**
2456
+ * Create a new MockBuilder instance.
2457
+ * This is the main entry point for the fluent mock API.
2458
+ *
2459
+ * @example
2460
+ * ```typescript
2461
+ * import { mockLLM } from 'llmist';
2462
+ *
2463
+ * mockLLM()
2464
+ * .forModel('gpt-5')
2465
+ * .whenMessageContains('hello')
2466
+ * .returns('Hello there!')
2467
+ * .register();
2468
+ * ```
2469
+ */
2470
+ declare function mockLLM(): MockBuilder;
2471
+
2472
+ /**
2473
+ * Create a preconfigured LLMist client with mock adapter.
2474
+ * This is a convenience function for testing scenarios.
2475
+ *
2476
+ * @param options - Optional configuration for the mock system
2477
+ * @returns A LLMist instance configured to use mocks
2478
+ *
2479
+ * @example
2480
+ * ```typescript
2481
+ * import { createMockClient, getMockManager } from 'llmist';
2482
+ *
2483
+ * // Setup
2484
+ * const client = createMockClient({ strictMode: true });
2485
+ * const mockManager = getMockManager();
2486
+ *
2487
+ * // Register mocks
2488
+ * mockManager.register({
2489
+ * matcher: (ctx) => ctx.modelName === 'gpt-4',
2490
+ * response: { text: 'Mocked response' }
2491
+ * });
2492
+ *
2493
+ * // Use in tests
2494
+ * const stream = client.stream({
2495
+ * model: 'mock:gpt-4',
2496
+ * messages: [{ role: 'user', content: 'test' }]
2497
+ * });
2498
+ * ```
2499
+ */
2500
+ declare function createMockClient(options?: MockOptions): LLMist;
2501
+
2502
+ /**
2503
+ * Global singleton instance for managing LLM mocks.
2504
+ * This allows mocks to be registered once and used across the application.
2505
+ */
2506
+ declare class MockManager {
2507
+ private static instance;
2508
+ private mocks;
2509
+ private stats;
2510
+ private options;
2511
+ private logger;
2512
+ private nextId;
2513
+ private constructor();
2514
+ /**
2515
+ * Get the global MockManager instance.
2516
+ * Creates one if it doesn't exist.
2517
+ */
2518
+ static getInstance(options?: MockOptions): MockManager;
2519
+ /**
2520
+ * Reset the global instance (useful for testing).
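+ *
+ * @example
+ * ```typescript
+ * // e.g. in a jest/vitest-style suite (the hook name is framework-specific):
+ * afterEach(() => MockManager.reset());
+ * ```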
2521
+ */
2522
+ static reset(): void;
2523
+ /**
2524
+ * Register a new mock.
2525
+ *
2526
+ * @param registration - The mock registration configuration
2527
+ * @returns The ID of the registered mock
2528
+ *
2529
+ * @example
2530
+ * const manager = MockManager.getInstance();
2531
+ * const mockId = manager.register({
2532
+ * label: 'GPT-4 mock',
2533
+ * matcher: (ctx) => ctx.modelName.includes('gpt-4'),
2534
+ * response: { text: 'Mocked response' }
2535
+ * });
2536
+ */
2537
+ register(registration: Omit<MockRegistration, "id"> & {
2538
+ id?: string;
2539
+ }): string;
2540
+ /**
2541
+ * Unregister a mock by ID.
2542
+ */
2543
+ unregister(id: string): boolean;
2544
+ /**
2545
+ * Clear all registered mocks.
2546
+ */
2547
+ clear(): void;
2548
+ /**
2549
+ * Find and return a matching mock for the given context.
2550
+ * Returns the mock response if found, null otherwise.
2551
+ */
2552
+ findMatch(context: MockMatcherContext): Promise<MockResponse | null>;
2553
+ /**
2554
+ * Get statistics for a specific mock.
2555
+ */
2556
+ getStats(id: string): MockStats | undefined;
2557
+ /**
2558
+ * Get all registered mock IDs.
2559
+ */
2560
+ getMockIds(): string[];
2561
+ /**
2562
+ * Get the number of registered mocks.
2563
+ */
2564
+ getCount(): number;
2565
+ /**
2566
+ * Update the mock manager options.
2567
+ */
2568
+ setOptions(options: Partial<MockOptions>): void;
2569
+ }
2570
+ /**
2571
+ * Helper function to get the global mock manager instance.
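+ *
+ * @example
+ * ```typescript
+ * const manager = getMockManager();
+ * manager.clear(); // drop all registered mocks between tests
+ * ```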
2572
+ */
2573
+ declare function getMockManager(options?: MockOptions): MockManager;
2574
+
2575
+ /**
2576
+ * Create a mock LLM stream from a mock response.
2577
+ * This simulates the streaming behavior of real LLM providers.
2578
+ *
2579
+ * @param response - The mock response configuration
2580
+ * @returns An async iterable that yields LLMStreamChunks
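+ *
+ * @example
+ * ```typescript
+ * // A sketch combining text and a pre-parsed gadget call:
+ * const stream = createMockStream({
+ *   text: 'Calculating...',
+ *   gadgetCalls: [{ gadgetName: 'calculator', parameters: { a: 1, b: 2 } }],
+ *   streamDelayMs: 10
+ * });
+ * for await (const chunk of stream) {
+ *   // chunks arrive as they would from a real provider
+ * }
+ * ```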
2581
+ */
2582
+ declare function createMockStream(response: MockResponse): LLMStream;
2583
+ /**
2584
+ * Create a simple text-only mock stream.
2585
+ * Convenience helper for quickly creating mock responses.
2586
+ *
2587
+ * @param text - The text to stream
2588
+ * @param options - Optional streaming configuration
2589
+ *
2590
+ * @example
2591
+ * const stream = createTextMockStream('Hello, world!');
2592
+ * for await (const chunk of stream) {
2593
+ * console.log(chunk.text);
2594
+ * }
2595
+ */
2596
+ declare function createTextMockStream(text: string, options?: {
2597
+ delayMs?: number;
2598
+ streamDelayMs?: number;
2599
+ usage?: MockResponse["usage"];
2600
+ }): LLMStream;
2601
+
2602
+ export { type ObserveGadgetCompleteContext as $, type AgentHooks as A, BaseGadget as B, type AfterGadgetExecutionAction as C, type AfterGadgetExecutionControllerContext as D, type EventHandlers as E, type AfterLLMCallAction as F, GadgetRegistry as G, type HistoryMessage as H, type AfterLLMCallControllerContext as I, type AfterLLMErrorAction as J, type AgentOptions as K, type LLMMessage as L, MockProviderAdapter as M, type BeforeGadgetExecutionAction as N, type BeforeLLMCallAction as O, type ParameterFormat as P, type ChunkInterceptorContext as Q, type Controllers as R, type StreamEvent as S, type GadgetExecutionControllerContext as T, type GadgetParameterInterceptorContext as U, type GadgetResultInterceptorContext as V, type Interceptors as W, type LLMCallControllerContext as X, type LLMErrorControllerContext as Y, type MessageInterceptorContext as Z, type ObserveChunkContext as _, MockBuilder as a, type ObserveGadgetStartContext as a0, type ObserveLLMCallContext as a1, type ObserveLLMCompleteContext as a2, type ObserveLLMErrorContext as a3, type Observers as a4, type LLMistOptions as a5, LLMist as a6, type LLMRole as a7, LLMMessageBuilder as a8, type CostEstimate as a9, type ModelFeatures as aa, type ModelLimits as ab, type ModelPricing as ac, ModelRegistry as ad, type ProviderIdentifier as ae, type TokenUsage as af, ModelIdentifierParser as ag, type PromptConfig as ah, type PromptContext as ai, type PromptTemplate as aj, DEFAULT_PROMPTS as ak, resolvePromptTemplate as al, resolveRulesTemplate as am, type QuickOptions as an, complete as ao, stream as ap, StreamParser as aq, type GadgetClass as ar, type GadgetOrClass as as, type TextOnlyAction as at, type TextOnlyContext as au, type TextOnlyCustomHandler as av, type TextOnlyGadgetConfig as aw, type TextOnlyHandler as ax, type TextOnlyStrategy as ay, createMockClient as b, createMockAdapter as c, MockManager as d, createMockStream as e, createTextMockStream as f, getMockManager as g, type MockMatcher as h, type MockMatcherContext as i, type MockOptions as j, type MockRegistration as k, type MockResponse as l, mockLLM as m, type MockStats as n, type LLMStreamChunk as o, type ParsedGadgetCall as p, type GadgetExecutionResult as q, type ProviderAdapter as r, type ModelDescriptor as s, type ModelSpec as t, type LLMGenerationOptions as u, type LLMStream as v, AgentBuilder as w, collectEvents as x, collectText as y, runWithHandlers as z };