@llmist/testing 9.0.0

@@ -0,0 +1,1422 @@
1
+ import { PassThrough, Readable, Writable } from 'node:stream';
2
+ import { LLMMessage, AbstractGadget, LLMGenerationOptions, ImageMimeType, AudioMimeType, ProviderAdapter, ModelDescriptor, LLMStream, ImageGenerationOptions, ImageGenerationResult, SpeechGenerationOptions, SpeechGenerationResult, LLMist, IConversationManager, LLMStreamChunk } from 'llmist';
3
+ import { ZodType } from 'zod';
4
+
5
+ /**
6
+ * CLI testing utilities for llmist.
7
+ * Provides helpers for testing CLI commands without real I/O.
8
+ */
9
+
10
+ /**
11
+ * Options for creating a test environment.
12
+ */
13
+ interface TestEnvironmentOptions {
14
+ /** Input to provide via stdin (string or line array) */
15
+ stdin?: string | string[];
16
+ /** Whether stdin is a TTY (default: false) */
17
+ isTTY?: boolean;
18
+ /** Environment variables to set */
19
+ env?: Record<string, string>;
20
+ /** Command line arguments (default: ["node", "llmist"]) */
21
+ argv?: string[];
22
+ }
23
+ /**
24
+ * A test environment with captured I/O streams.
25
+ */
26
+ interface TestEnvironment {
27
+ /** Stdin readable stream */
28
+ stdin: Readable;
29
+ /** Stdout writable stream (PassThrough for capturing) */
30
+ stdout: PassThrough;
31
+ /** Stderr writable stream (PassThrough for capturing) */
32
+ stderr: PassThrough;
33
+ /** Whether stdin is TTY */
34
+ isTTY: boolean;
35
+ /** Command line arguments */
36
+ argv: string[];
37
+ /** Environment variables */
38
+ env: Record<string, string>;
39
+ /** Exit code if set */
40
+ exitCode?: number;
41
+ /** Function to set exit code */
42
+ setExitCode: (code: number) => void;
43
+ }
44
+ /**
45
+ * Create a test environment with mocked I/O streams.
46
+ *
47
+ * @param options - Configuration options
48
+ * @returns A test environment with captured streams
49
+ *
50
+ * @example
51
+ * ```typescript
52
+ * const env = createTestEnvironment({
53
+ * stdin: '{"param": "value"}',
54
+ * isTTY: false
55
+ * });
56
+ *
57
+ * // Pass to CLI command
58
+ * await executeCommand(env);
59
+ *
60
+ * // Check output
61
+ * const output = await collectOutput(env.stdout);
62
+ * expect(output).toContain("Success");
63
+ * ```
64
+ */
65
+ declare function createTestEnvironment(options?: TestEnvironmentOptions): TestEnvironment;
66
+ /**
67
+ * Create a readable stream from a string or array of lines.
68
+ *
69
+ * @param input - String content or array of lines
70
+ * @returns A Readable stream
71
+ *
72
+ * @example
73
+ * ```typescript
74
+ * const stream = createMockReadable("line1\nline2\n");
75
+ * // or
76
+ * const stream = createMockReadable(["line1", "line2"]);
77
+ * ```
78
+ */
79
+ declare function createMockReadable(input?: string | string[]): Readable;
80
+ /**
81
+ * Create a writable stream that collects all written data.
82
+ *
83
+ * @returns A writable stream with getData() method
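+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: capture writes, then inspect the collected data.
+ * const sink = createMockWritable();
+ * sink.write("hello ");
+ * sink.write("world");
+ * expect(sink.getData()).toBe("hello world");
+ * ```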
84
+ */
85
+ declare function createMockWritable(): Writable & {
86
+ getData(): string;
87
+ };
88
+ /**
89
+ * Collect all output from a PassThrough stream.
90
+ * Waits for the stream to end before returning.
91
+ *
92
+ * @param stream - The stream to collect from
93
+ * @param timeout - Maximum time to wait in ms (default: 5000)
94
+ * @returns All data written to the stream
95
+ *
96
+ * @example
97
+ * ```typescript
98
+ * const output = await collectOutput(env.stdout);
99
+ * expect(output).toContain("Expected text");
100
+ * ```
101
+ */
102
+ declare function collectOutput(stream: PassThrough, timeout?: number): Promise<string>;
103
+ /**
104
+ * Collect output without waiting for stream end.
105
+ * Returns immediately with whatever has been written.
106
+ *
107
+ * @param stream - The stream to read from
108
+ * @returns Currently buffered data
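+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: inspect output mid-test without waiting for the stream to end.
+ * env.stdout.write("partial output");
+ * expect(getBufferedOutput(env.stdout)).toContain("partial output");
+ * ```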
109
+ */
110
+ declare function getBufferedOutput(stream: PassThrough): string;
111
+ /**
112
+ * Create a mock prompt function for testing interactive input.
113
+ *
114
+ * @param responses - Array of responses to return in order
115
+ * @returns A prompt function that returns the next response
116
+ *
117
+ * @example
118
+ * ```typescript
119
+ * const prompt = createMockPrompt(["yes", "no", "maybe"]);
120
+ * expect(await prompt("Question 1?")).toBe("yes");
121
+ * expect(await prompt("Question 2?")).toBe("no");
122
+ * ```
123
+ */
124
+ declare function createMockPrompt(responses: string[]): (question: string) => Promise<string>;
125
+ /**
126
+ * Mock prompt that records questions and returns configured responses.
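+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: responses are returned in order and questions are recorded.
+ * const recorder = new MockPromptRecorder(["yes", "no"]);
+ * expect(await recorder.prompt("Continue?")).toBe("yes");
+ * expect(recorder.getQuestions()).toEqual(["Continue?"]);
+ * expect(recorder.getQuestionCount()).toBe(1);
+ * ```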
127
+ */
128
+ declare class MockPromptRecorder {
129
+ private responses;
130
+ private index;
131
+ private questions;
132
+ constructor(responses: string[]);
133
+ /**
134
+ * The prompt function to use in tests.
135
+ */
136
+ prompt: (question: string) => Promise<string>;
137
+ /**
138
+ * Get all questions that were asked.
139
+ */
140
+ getQuestions(): string[];
141
+ /**
142
+ * Get the number of questions asked.
143
+ */
144
+ getQuestionCount(): number;
145
+ /**
146
+ * Reset the recorder state.
147
+ */
148
+ reset(newResponses?: string[]): void;
149
+ }
150
+ /**
151
+ * Wait for a condition to be true, with timeout.
152
+ * Useful for async testing scenarios.
153
+ *
154
+ * @param condition - Function that returns true when condition is met
155
+ * @param timeout - Maximum time to wait in ms (default: 5000)
156
+ * @param interval - Check interval in ms (default: 50)
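+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: poll until an async flag flips, failing after 1s.
+ * let done = false;
+ * setTimeout(() => { done = true; }, 20);
+ * await waitFor(() => done, 1000, 10);
+ * ```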
157
+ */
158
+ declare function waitFor(condition: () => boolean, timeout?: number, interval?: number): Promise<void>;
159
+
160
+ /**
161
+ * Conversation fixture generators for testing.
162
+ * Provides utilities for creating test conversation data.
163
+ */
164
+
165
+ /**
166
+ * Create a conversation with a specified number of turns.
167
+ * Each turn consists of a user message and an assistant response.
168
+ *
169
+ * @param turnCount - Number of conversation turns to generate
170
+ * @param options - Configuration options
171
+ * @returns Array of LLMMessages representing the conversation
172
+ *
173
+ * @example
174
+ * ```typescript
175
+ * const messages = createConversation(5);
176
+ * // Creates 10 messages: 5 user + 5 assistant
177
+ * ```
178
+ */
179
+ declare function createConversation(turnCount: number, options?: {
180
+ /** Prefix for user messages (default: "User message") */
181
+ userPrefix?: string;
182
+ /** Prefix for assistant messages (default: "Assistant response") */
183
+ assistantPrefix?: string;
184
+ /** Base content length per message (default: 100 chars) */
185
+ contentLength?: number;
186
+ }): LLMMessage[];
187
+ /**
188
+ * Create a conversation with gadget calls interspersed.
189
+ * Simulates an agent conversation with tool usage.
190
+ *
191
+ * @param turnCount - Number of conversation turns
192
+ * @param gadgetCallsPerTurn - Number of gadget calls per assistant turn
193
+ * @returns Array of LLMMessages including gadget call/result pairs
194
+ *
195
+ * @example
196
+ * ```typescript
197
+ * const messages = createConversationWithGadgets(3, 2);
198
+ * // Creates: user, assistant+gadget, gadget-result, assistant+gadget, gadget-result, assistant (per turn)
199
+ * ```
200
+ */
201
+ declare function createConversationWithGadgets(turnCount: number, gadgetCallsPerTurn?: number, options?: {
202
+ /** Gadget names to cycle through (default: ["search", "calculate", "read"]) */
203
+ gadgetNames?: string[];
204
+ /** Content length for messages */
205
+ contentLength?: number;
206
+ }): LLMMessage[];
207
+ /**
208
+ * Estimate token count for a message array.
209
+ * Uses a simple 4-characters-per-token heuristic.
210
+ *
211
+ * @param messages - Messages to estimate tokens for
212
+ * @returns Estimated token count
213
+ *
214
+ * @example
215
+ * ```typescript
216
+ * const messages = createConversation(10);
217
+ * const tokens = estimateTokens(messages);
218
+ * // Returns approximate token count
219
+ * ```
220
+ */
221
+ declare function estimateTokens(messages: LLMMessage[]): number;
222
+ /**
223
+ * Create a single user message.
224
+ */
225
+ declare function createUserMessage(content: string): LLMMessage;
226
+ /**
227
+ * Create a single assistant message.
228
+ */
229
+ declare function createAssistantMessage(content: string): LLMMessage;
230
+ /**
231
+ * Create a system message.
232
+ */
233
+ declare function createSystemMessage(content: string): LLMMessage;
234
+ /**
235
+ * Create a minimal conversation for quick tests.
236
+ * Returns a single turn: one user message and one assistant response.
237
+ */
238
+ declare function createMinimalConversation(): LLMMessage[];
239
+ /**
240
+ * Create a conversation that exceeds a target token count.
241
+ * Useful for testing compaction triggers.
242
+ *
243
+ * @param targetTokens - Minimum token count to exceed
244
+ * @param options - Configuration options
245
+ * @returns Conversation with at least targetTokens tokens
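+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: generate enough turns to cross a compaction threshold.
+ * const messages = createLargeConversation(5000);
+ * expect(estimateTokens(messages)).toBeGreaterThanOrEqual(5000);
+ * ```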
246
+ */
247
+ declare function createLargeConversation(targetTokens: number, options?: {
248
+ /** Average tokens per turn (default: 200) */
249
+ tokensPerTurn?: number;
250
+ }): LLMMessage[];
251
+
252
+ /**
253
+ * Testing utilities for gadgets.
254
+ *
255
+ * Provides helpers for testing gadgets with schema validation without
256
+ * requiring full executor setup.
257
+ *
258
+ * @module testing/gadget-testing
259
+ */
260
+
261
+ /**
262
+ * Result of testing a gadget.
263
+ */
264
+ interface TestGadgetResult {
265
+ /** Result string if execution succeeded */
266
+ result?: string;
267
+ /** Error message if validation or execution failed */
268
+ error?: string;
269
+ /** Parameters after validation and default application */
270
+ validatedParams?: Record<string, unknown>;
271
+ /** Cost reported by the gadget in USD (e.g., 0.001 for $0.001) */
272
+ cost?: number;
273
+ }
274
+ /**
275
+ * Options for testGadget.
276
+ */
277
+ interface TestGadgetOptions {
278
+ /**
279
+ * If true, skip schema validation.
280
+ * Useful for testing gadget behavior with invalid parameters.
281
+ */
282
+ skipValidation?: boolean;
283
+ }
284
+ /**
285
+ * Test a gadget with schema validation and default application.
286
+ *
287
+ * This helper replicates the validation behavior from GadgetExecutor.execute(),
288
+ * making it easy to test gadgets in isolation without setting up a full
289
+ * registry and executor.
290
+ *
291
+ * @param gadget - Gadget instance to test
292
+ * @param params - Raw parameters (before validation)
293
+ * @param options - Test options
294
+ * @returns Promise resolving to test result
295
+ *
296
+ * @example
297
+ * ```typescript
298
+ * import { testGadget } from 'llmist/testing';
299
+ * import { createGadget } from 'llmist';
300
+ * import { z } from 'zod';
301
+ *
302
+ * const calculator = createGadget({
303
+ * description: 'Add numbers',
304
+ * schema: z.object({
305
+ * a: z.number(),
306
+ * b: z.number().default(0),
307
+ * }),
308
+ * execute: ({ a, b }) => String(a + b),
309
+ * });
310
+ *
311
+ * // Test with defaults applied
312
+ * const result = await testGadget(calculator, { a: 5 });
313
+ * expect(result.result).toBe('5');
314
+ * expect(result.validatedParams).toEqual({ a: 5, b: 0 });
315
+ *
316
+ * // Test validation errors
317
+ * const invalid = await testGadget(calculator, { a: 'not a number' });
318
+ * expect(invalid.error).toContain('Invalid parameters');
319
+ *
320
+ * // Test with validation skipped
321
+ * const skipped = await testGadget(calculator, { a: 5 }, { skipValidation: true });
322
+ * expect(skipped.validatedParams).toEqual({ a: 5 }); // No defaults applied
323
+ * ```
324
+ */
325
+ declare function testGadget(gadget: AbstractGadget, params: Record<string, unknown>, options?: TestGadgetOptions): Promise<TestGadgetResult>;
326
+ /**
327
+ * Test multiple parameter sets against a gadget.
328
+ *
329
+ * Convenience helper for running the same gadget with different inputs.
330
+ *
331
+ * @param gadget - Gadget instance to test
332
+ * @param paramSets - Array of parameter sets to test
333
+ * @param options - Test options applied to all tests
334
+ * @returns Promise resolving to array of test results
335
+ *
336
+ * @example
337
+ * ```typescript
338
+ * const results = await testGadgetBatch(calculator, [
339
+ * { a: 1, b: 2 },
340
+ * { a: 5 },
341
+ * { a: 'invalid' },
342
+ * ]);
343
+ *
344
+ * expect(results[0].result).toBe('3');
345
+ * expect(results[1].result).toBe('5');
346
+ * expect(results[2].error).toBeDefined();
347
+ * ```
348
+ */
349
+ declare function testGadgetBatch(gadget: AbstractGadget, paramSets: Record<string, unknown>[], options?: TestGadgetOptions): Promise<TestGadgetResult[]>;
350
+
351
+ /**
352
+ * Context provided to matcher functions to determine if a mock should be used.
353
+ */
354
+ interface MockMatcherContext {
355
+ /** The model descriptor (e.g., "openai:gpt-5") */
356
+ model: string;
357
+ /** The provider ID extracted from the model */
358
+ provider: string;
359
+ /** The model name without provider prefix */
360
+ modelName: string;
361
+ /** The complete LLM generation options */
362
+ options: LLMGenerationOptions;
363
+ /** The messages being sent to the LLM */
364
+ messages: LLMMessage[];
365
+ }
366
+ /**
367
+ * Matcher function that determines if a mock should be used for an LLM call.
368
+ *
369
+ * @param context - The context of the LLM call
370
+ * @returns true if this mock should be used, false otherwise
371
+ *
372
+ * @example
373
+ * // Match any call to GPT-5
374
+ * const matcher: MockMatcher = (ctx) => ctx.modelName.includes('gpt-5');
375
+ *
376
+ * @example
377
+ * // Match calls with specific message content
378
+ * const matcher: MockMatcher = (ctx) => {
379
+ * const lastMessage = ctx.messages[ctx.messages.length - 1];
380
+ * return lastMessage?.content?.includes('calculate') ?? false;
381
+ * };
382
+ *
383
+ * @example
384
+ * // Match by provider
385
+ * const matcher: MockMatcher = (ctx) => ctx.provider === 'anthropic';
386
+ */
387
+ type MockMatcher = (context: MockMatcherContext) => boolean | Promise<boolean>;
388
+ /**
389
+ * Image data in a mock response.
390
+ */
391
+ interface MockImageData {
392
+ /** Base64-encoded image data */
393
+ data: string;
394
+ /** MIME type of the image */
395
+ mimeType: ImageMimeType;
396
+ /** Revised prompt (for image generation responses) */
397
+ revisedPrompt?: string;
398
+ }
399
+ /**
400
+ * Audio data in a mock response.
401
+ */
402
+ interface MockAudioData {
403
+ /** Base64-encoded audio data */
404
+ data: string;
405
+ /** MIME type of the audio */
406
+ mimeType: AudioMimeType;
407
+ }
408
+ /**
409
+ * A mock response that will be returned when a matcher succeeds.
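+ *
+ * @example
+ * ```typescript
+ * // Illustrative shape combining text, a gadget call, and usage (all fields are optional):
+ * const response: MockResponse = {
+ *   text: 'Let me check that.',
+ *   gadgetCalls: [{ gadgetName: 'search', parameters: { query: 'weather' } }],
+ *   usage: { inputTokens: 12, outputTokens: 8, totalTokens: 20 },
+ *   finishReason: 'stop',
+ * };
+ * ```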
410
+ */
411
+ interface MockResponse {
412
+ /**
413
+ * Plain text content to return (will be streamed as text chunks)
414
+ * Can include gadget markers like \n<GADGET_name>...</GADGET_END>
415
+ */
416
+ text?: string;
417
+ /**
418
+ * Pre-parsed gadget calls to inject into the response stream
419
+ * These will be emitted as gadget_call events
420
+ */
421
+ gadgetCalls?: Array<{
422
+ gadgetName: string;
423
+ parameters: Record<string, unknown>;
424
+ /** Optional invocationId; auto-generated if not provided */
425
+ invocationId?: string;
426
+ }>;
427
+ /**
428
+ * Image data to return in the response (e.g., for image generation mocks).
429
+ * Each image will be yielded as a separate chunk in the stream.
430
+ */
431
+ images?: MockImageData[];
432
+ /**
433
+ * Audio data to return in the response (e.g., for speech synthesis mocks).
434
+ * Will be yielded as a chunk in the stream.
435
+ */
436
+ audio?: MockAudioData;
437
+ /**
438
+ * Simulated token usage statistics
439
+ */
440
+ usage?: {
441
+ inputTokens: number;
442
+ outputTokens: number;
443
+ totalTokens: number;
444
+ };
445
+ /**
446
+ * Simulated finish reason
447
+ */
448
+ finishReason?: string;
449
+ /**
450
+ * Delay in milliseconds before starting to stream the response
451
+ * Useful for simulating network latency
452
+ */
453
+ delayMs?: number;
454
+ /**
455
+ * Delay in milliseconds between each chunk when streaming
456
+ * Useful for simulating realistic streaming behavior
457
+ */
458
+ streamDelayMs?: number;
459
+ }
460
+ /**
461
+ * A registered mock configuration combining a matcher with a response.
462
+ */
463
+ interface MockRegistration {
464
+ /** Unique identifier for this mock (auto-generated if not provided) */
465
+ id: string;
466
+ /** The matcher function to determine if this mock applies */
467
+ matcher: MockMatcher;
468
+ /** The response to return when matched */
469
+ response: MockResponse | ((context: MockMatcherContext) => MockResponse | Promise<MockResponse>);
470
+ /** Optional label for debugging */
471
+ label?: string;
472
+ /** If true, this mock will only be used once then automatically removed */
473
+ once?: boolean;
474
+ }
475
+ /**
476
+ * Statistics about mock usage.
477
+ */
478
+ interface MockStats {
479
+ /** Number of times this mock was matched and used */
480
+ matchCount: number;
481
+ /** Last time this mock was used */
482
+ lastUsed?: Date;
483
+ }
484
+ /**
485
+ * Options for configuring the mock system.
486
+ */
487
+ interface MockOptions {
488
+ /**
489
+ * If true, throws an error when no mock matches an LLM call.
490
+ * If false, logs a warning and returns an empty response.
491
+ * Default: false
492
+ */
493
+ strictMode?: boolean;
494
+ /**
495
+ * If true, logs detailed information about mock matching and execution.
496
+ * Default: false
497
+ */
498
+ debug?: boolean;
499
+ /**
500
+ * If true, records statistics about mock usage.
501
+ * Default: true
502
+ */
503
+ recordStats?: boolean;
504
+ }
505
+
506
+ /**
507
+ * Provider adapter that serves mock responses instead of making real LLM API calls.
508
+ * This is useful for testing applications that use llmist without incurring API costs.
509
+ *
510
+ * The MockProviderAdapter has high priority (100) and is always checked before
511
+ * real providers when both are registered. This enables selective mocking where
512
+ * some models use mocks while others use real providers. If no matching mock is
513
+ * found and strictMode is disabled, requests return an empty response.
514
+ *
515
+ * @example
516
+ * ```typescript
517
+ * import { LLMist } from 'llmist';
+ * import { createMockAdapter, mockLLM } from 'llmist/testing';
518
+ *
519
+ * // Use with real providers for selective mocking
520
+ * const client = new LLMist({
521
+ * adapters: [createMockAdapter()],
522
+ * autoDiscoverProviders: true // Also loads real OpenAI, Anthropic, etc.
523
+ * });
524
+ *
525
+ * // Register mocks for specific models
526
+ * mockLLM()
527
+ * .forModel('gpt-5-nano')
528
+ * .returns('Test response')
529
+ * .register();
530
+ *
531
+ * // gpt-5-nano uses mock, other models use real providers
532
+ * const stream = client.stream({
533
+ * model: 'openai:gpt-5-nano',
534
+ * messages: [{ role: 'user', content: 'test' }]
535
+ * });
536
+ * ```
537
+ */
538
+ declare class MockProviderAdapter implements ProviderAdapter {
539
+ readonly providerId = "mock";
540
+ readonly priority = 100;
541
+ private readonly mockManager;
542
+ constructor(options?: MockOptions);
543
+ supports(_descriptor: ModelDescriptor): boolean;
544
+ stream(options: LLMGenerationOptions, descriptor: ModelDescriptor, _spec?: unknown): LLMStream;
545
+ private createMockStreamFromContext;
546
+ /**
547
+ * Check if this adapter supports image generation for a given model.
548
+ * Returns true if there's a registered mock with images for this model.
549
+ */
550
+ supportsImageGeneration(_modelId: string): boolean;
551
+ /**
552
+ * Generate mock images based on registered mocks.
553
+ *
554
+ * @param options - Image generation options
555
+ * @returns Mock image generation result
556
+ */
557
+ generateImage(options: ImageGenerationOptions): Promise<ImageGenerationResult>;
558
+ /**
559
+ * Transform mock response into ImageGenerationResult format.
560
+ *
561
+ * @param options - Original image generation options
562
+ * @param mockResponse - Mock response containing image data
563
+ * @returns ImageGenerationResult with mock data and zero cost
564
+ */
565
+ private createImageResult;
566
+ /**
567
+ * Check if this adapter supports speech generation for a given model.
568
+ * Returns true if there's a registered mock with audio for this model.
569
+ */
570
+ supportsSpeechGeneration(_modelId: string): boolean;
571
+ /**
572
+ * Generate mock speech based on registered mocks.
573
+ *
574
+ * @param options - Speech generation options
575
+ * @returns Mock speech generation result
576
+ */
577
+ generateSpeech(options: SpeechGenerationOptions): Promise<SpeechGenerationResult>;
578
+ /**
579
+ * Transform mock response into SpeechGenerationResult format.
580
+ * Converts base64 audio data to ArrayBuffer.
581
+ *
582
+ * @param options - Original speech generation options
583
+ * @param mockResponse - Mock response containing audio data
584
+ * @returns SpeechGenerationResult with mock data and zero cost
585
+ */
586
+ private createSpeechResult;
587
+ /**
588
+ * Map MIME type to audio format for SpeechGenerationResult.
589
+ * Defaults to "mp3" for unknown MIME types.
590
+ *
591
+ * @param mimeType - Audio MIME type string
592
+ * @returns Audio format identifier
593
+ */
594
+ private mimeTypeToAudioFormat;
595
+ }
596
+ /**
597
+ * Create a mock provider adapter instance.
598
+ * This is a convenience factory function.
599
+ *
600
+ * @param options - Optional configuration for the mock system
601
+ * @returns A configured MockProviderAdapter
602
+ *
603
+ * @example
604
+ * ```typescript
605
+ * const adapter = createMockAdapter({ strictMode: true, debug: true });
606
+ * const client = new LLMist([adapter]);
607
+ * ```
608
+ */
609
+ declare function createMockAdapter(options?: MockOptions): MockProviderAdapter;
610
+
611
+ /**
612
+ * Fluent builder for creating mock responses and registrations.
613
+ * Provides a convenient API for common mocking scenarios.
614
+ *
615
+ * @example
616
+ * ```typescript
617
+ * import { mockLLM } from 'llmist/testing';
618
+ *
619
+ * // Simple text mock
620
+ * mockLLM()
621
+ * .forModel('gpt-5')
622
+ * .returns('Hello, world!')
623
+ * .register();
624
+ *
625
+ * // Mock with gadget calls
626
+ * mockLLM()
627
+ * .forProvider('anthropic')
628
+ * .whenMessageContains('calculate')
629
+ * .returnsGadgetCalls([
630
+ * { gadgetName: 'calculator', parameters: { operation: 'add', a: 1, b: 2 } }
631
+ * ])
632
+ * .register();
633
+ *
634
+ * // Complex conditional mock
635
+ * mockLLM()
636
+ * .when((ctx) => ctx.messages.length > 5)
637
+ * .returns('This conversation is getting long!')
638
+ * .once()
639
+ * .register();
640
+ * ```
641
+ */
642
+ declare class MockBuilder {
643
+ private matchers;
644
+ private response;
645
+ private label?;
646
+ private isOnce;
647
+ private id?;
648
+ /**
649
+ * Match calls to a specific model (by name, supports partial matching).
650
+ *
651
+ * @example
652
+ * mockLLM().forModel('gpt-5')
653
+ * mockLLM().forModel('claude') // matches any Claude model
654
+ */
655
+ forModel(modelName: string): this;
656
+ /**
657
+ * Match calls to any model.
658
+ * Useful when you want to mock responses regardless of the model used.
659
+ *
660
+ * @example
661
+ * mockLLM().forAnyModel()
662
+ */
663
+ forAnyModel(): this;
664
+ /**
665
+ * Match calls to a specific provider.
666
+ *
667
+ * @example
668
+ * mockLLM().forProvider('openai')
669
+ * mockLLM().forProvider('anthropic')
670
+ */
671
+ forProvider(provider: string): this;
672
+ /**
673
+ * Match calls to any provider.
674
+ * Useful when you want to mock responses regardless of the provider used.
675
+ *
676
+ * @example
677
+ * mockLLM().forAnyProvider()
678
+ */
679
+ forAnyProvider(): this;
680
+ /**
681
+ * Match when any message contains the given text (case-insensitive).
682
+ *
683
+ * @example
684
+ * mockLLM().whenMessageContains('hello')
685
+ */
686
+ whenMessageContains(text: string): this;
687
+ /**
688
+ * Match when the last message contains the given text (case-insensitive).
689
+ *
690
+ * @example
691
+ * mockLLM().whenLastMessageContains('goodbye')
692
+ */
693
+ whenLastMessageContains(text: string): this;
694
+ /**
695
+ * Match when any message matches the given regex.
696
+ *
697
+ * @example
698
+ * mockLLM().whenMessageMatches(/calculate \d+/)
699
+ */
700
+ whenMessageMatches(regex: RegExp): this;
701
+ /**
702
+ * Match when a message with a specific role contains text.
703
+ *
704
+ * @example
705
+ * mockLLM().whenRoleContains('system', 'You are a helpful assistant')
706
+ */
707
+ whenRoleContains(role: LLMMessage["role"], text: string): this;
708
+ /**
709
+ * Match based on the number of messages in the conversation.
710
+ *
711
+ * @example
712
+ * mockLLM().whenMessageCount((count) => count > 10)
713
+ */
714
+ whenMessageCount(predicate: (count: number) => boolean): this;
715
+ /**
716
+ * Add a custom matcher function.
717
+ * This provides full control over matching logic.
718
+ *
719
+ * @example
720
+ * mockLLM().when((ctx) => {
721
+ * return ctx.options.temperature > 0.8;
722
+ * })
723
+ */
724
+ when(matcher: MockMatcher): this;
725
+ /**
726
+ * Match when any message contains an image.
727
+ *
728
+ * @example
729
+ * mockLLM().whenMessageHasImage().returns("I see an image of a sunset.")
730
+ */
731
+ whenMessageHasImage(): this;
732
+ /**
733
+ * Match when any message contains audio.
734
+ *
735
+ * @example
736
+ * mockLLM().whenMessageHasAudio().returns("I hear music playing.")
737
+ */
738
+ whenMessageHasAudio(): this;
739
+ /**
740
+ * Match based on the number of images in the last message.
741
+ *
742
+ * @example
743
+ * mockLLM().whenImageCount((n) => n >= 2).returns("Comparing multiple images...")
744
+ */
745
+ whenImageCount(predicate: (count: number) => boolean): this;
746
+ /**
747
+ * Set the text response to return.
748
+ * Can be a static string or a function that returns a string dynamically.
749
+ *
750
+ * @example
751
+ * mockLLM().returns('Hello, world!')
752
+ * mockLLM().returns(() => `Response at ${Date.now()}`)
753
+ * mockLLM().returns((ctx) => `You said: ${ctx.messages[0]?.content}`)
754
+ */
755
+ returns(text: string | ((context: MockMatcherContext) => string | Promise<string>)): this;
756
+ /**
757
+ * Set gadget calls to include in the response.
758
+ *
759
+ * @example
760
+ * mockLLM().returnsGadgetCalls([
761
+ * { gadgetName: 'calculator', parameters: { op: 'add', a: 1, b: 2 } }
762
+ * ])
763
+ */
764
+ returnsGadgetCalls(calls: Array<{
765
+ gadgetName: string;
766
+ parameters: Record<string, unknown>;
767
+ invocationId?: string;
768
+ }>): this;
769
+ /**
770
+ * Add a single gadget call to the response.
771
+ *
772
+ * @example
773
+ * mockLLM()
774
+ * .returnsGadgetCall('calculator', { op: 'add', a: 1, b: 2 })
775
+ * .returnsGadgetCall('logger', { message: 'Done!' })
776
+ */
777
+ returnsGadgetCall(gadgetName: string, parameters: Record<string, unknown>): this;
778
+ /**
779
+ * Return a single image in the response.
780
+ * Useful for mocking image generation endpoints.
781
+ *
782
+ * @param data - Image data (base64 string or Buffer)
783
+ * @param mimeType - MIME type (auto-detected if Buffer provided without type)
784
+ *
785
+ * @example
786
+ * mockLLM()
787
+ * .forModel('dall-e-3')
788
+ * .returnsImage(pngBuffer)
789
+ * .register();
790
+ */
791
+ returnsImage(data: string | Buffer | Uint8Array, mimeType?: ImageMimeType): this;
792
+ /**
793
+ * Return multiple images in the response.
794
+ *
795
+ * @example
796
+ * mockLLM()
797
+ * .forModel('dall-e-3')
798
+ * .returnsImages([
799
+ * { data: pngBuffer1 },
800
+ * { data: pngBuffer2 },
801
+ * ])
802
+ * .register();
803
+ */
804
+ returnsImages(images: Array<{
805
+ data: string | Buffer | Uint8Array;
806
+ mimeType?: ImageMimeType;
807
+ revisedPrompt?: string;
808
+ }>): this;
809
+ /**
810
+ * Return audio data in the response.
811
+ * Useful for mocking speech synthesis endpoints.
812
+ *
813
+ * @param data - Audio data (base64 string or Buffer)
814
+ * @param mimeType - MIME type (auto-detected if Buffer provided without type)
815
+ *
816
+ * @example
817
+ * mockLLM()
818
+ * .forModel('tts-1')
819
+ * .returnsAudio(mp3Buffer)
820
+ * .register();
821
+ */
822
+ returnsAudio(data: string | Buffer | Uint8Array, mimeType?: AudioMimeType): this;
823
+ /**
824
+ * Set the complete mock response object.
825
+ * This allows full control over all response properties.
826
+ * Can also be a function that generates the response dynamically based on context.
827
+ *
828
+ * @example
829
+ * // Static response
830
+ * mockLLM().withResponse({
831
+ * text: 'Hello',
832
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
833
+ * finishReason: 'stop'
834
+ * })
835
+ *
836
+ * @example
837
+ * // Dynamic response
838
+ * mockLLM().withResponse((ctx) => ({
839
+ * text: `You said: ${ctx.messages[ctx.messages.length - 1]?.content}`,
840
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
841
+ * }))
842
+ */
843
+ withResponse(response: MockResponse | ((context: MockMatcherContext) => MockResponse | Promise<MockResponse>)): this;
844
+ /**
845
+ * Set simulated token usage.
846
+ *
847
+ * @example
848
+ * mockLLM().withUsage({ inputTokens: 100, outputTokens: 50, totalTokens: 150 })
849
+ */
850
+ withUsage(usage: {
851
+ inputTokens: number;
852
+ outputTokens: number;
853
+ totalTokens: number;
854
+ }): this;
855
+ /**
856
+ * Set the finish reason.
857
+ *
858
+ * @example
859
+ * mockLLM().withFinishReason('stop')
860
+ * mockLLM().withFinishReason('length')
861
+ */
862
+ withFinishReason(reason: string): this;
863
+ /**
864
+ * Set initial delay before streaming starts (simulates network latency).
865
+ *
866
+ * @example
867
+ * mockLLM().withDelay(100) // 100ms delay
868
+ */
869
+ withDelay(ms: number): this;
870
+ /**
871
+ * Set delay between stream chunks (simulates realistic streaming).
872
+ *
873
+ * @example
874
+ * mockLLM().withStreamDelay(10) // 10ms between chunks
875
+ */
876
+ withStreamDelay(ms: number): this;
877
+ /**
878
+ * Set a label for this mock (useful for debugging).
879
+ *
880
+ * @example
881
+ * mockLLM().withLabel('greeting mock')
882
+ */
883
+ withLabel(label: string): this;
884
+ /**
885
+ * Set a specific ID for this mock.
886
+ *
887
+ * @example
888
+ * mockLLM().withId('my-custom-mock-id')
889
+ */
890
+ withId(id: string): this;
891
+ /**
892
+ * Mark this mock as one-time use (will be removed after first match).
893
+ *
894
+ * @example
895
+ * mockLLM().once()
896
+ */
897
+ once(): this;
898
+ /**
899
+ * Build the mock registration without registering it.
900
+ * Useful if you want to register it manually later.
901
+ *
902
+ * @returns The built MockRegistration object (without id if not specified)
903
+ */
904
+ build(): Omit<MockRegistration, "id"> & {
905
+ id?: string;
906
+ };
907
+ /**
908
+ * Register this mock with the global MockManager.
909
+ * Returns the ID of the registered mock.
910
+ *
911
+ * @example
912
+ * const mockId = mockLLM().forModel('gpt-5').returns('Hello!').register();
913
+ * // Later: getMockManager().unregister(mockId);
914
+ */
915
+ register(): string;
916
+ }
917
+ /**
918
+ * Create a new MockBuilder instance.
919
+ * This is the main entry point for the fluent mock API.
920
+ *
921
+ * @example
922
+ * ```typescript
923
+ * import { mockLLM } from 'llmist/testing';
924
+ *
925
+ * mockLLM()
926
+ * .forModel('gpt-5')
927
+ * .whenMessageContains('hello')
928
+ * .returns('Hello there!')
929
+ * .register();
930
+ * ```
931
+ */
932
+ declare function mockLLM(): MockBuilder;
933
+
934
+ /**
935
+ * Create a preconfigured LLMist client with a mock adapter.
936
+ * This is a convenience function for testing scenarios.
937
+ *
938
+ * @param options - Optional configuration for the mock system
939
+ * @returns An LLMist instance configured to use mocks
940
+ *
941
+ * @example
942
+ * ```typescript
943
+ * import { createMockClient, getMockManager } from 'llmist/testing';
944
+ *
945
+ * // Setup
946
+ * const client = createMockClient({ strictMode: true });
947
+ * const mockManager = getMockManager();
948
+ *
949
+ * // Register mocks
950
+ * mockManager.register({
951
+ * matcher: (ctx) => ctx.modelName === 'gpt-4',
952
+ * response: { text: 'Mocked response' }
953
+ * });
954
+ *
955
+ * // Use in tests
956
+ * const stream = client.stream({
957
+ * model: 'mock:gpt-4',
958
+ * messages: [{ role: 'user', content: 'test' }]
959
+ * });
960
+ * ```
961
+ */
962
+ declare function createMockClient(options?: MockOptions): LLMist;
963
+
964
+ /**
965
+ * Mock ConversationManager for testing compaction and agent components.
966
+ * Implements IConversationManager interface with test-friendly features.
967
+ */
968
+
969
+ /**
970
+ * A mock implementation of IConversationManager for testing.
971
+ * Tracks all operations and allows inspection of state changes.
972
+ *
973
+ * @example
974
+ * ```typescript
975
+ * const mockConvo = new MockConversationManager([
976
+ * { role: "user", content: "Hello" },
977
+ * { role: "assistant", content: "Hi!" }
978
+ * ]);
979
+ *
980
+ * // Use in compaction tests
981
+ * compactionManager.checkAndCompact(mockConvo, 1);
982
+ *
983
+ * // Assert on state changes
984
+ * expect(mockConvo.wasReplaceHistoryCalled()).toBe(true);
985
+ * expect(mockConvo.getReplacementHistory()).toHaveLength(2);
986
+ * ```
987
+ */
988
+ declare class MockConversationManager implements IConversationManager {
989
+ private history;
990
+ private readonly baseMessages;
991
+ private replacementHistory;
992
+ private replaceHistoryCallCount;
993
+ private addedMessages;
994
+ constructor(history?: LLMMessage[], baseMessages?: LLMMessage[]);
995
+ addUserMessage(content: string): void;
996
+ addAssistantMessage(content: string): void;
997
+ addGadgetCallResult(gadgetName: string, parameters: Record<string, unknown>, result: string, invocationId: string): void;
998
+ getMessages(): LLMMessage[];
999
+ getHistoryMessages(): LLMMessage[];
1000
+ getBaseMessages(): LLMMessage[];
1001
+ getConversationHistory(): LLMMessage[];
1002
+ replaceHistory(newHistory: LLMMessage[]): void;
1003
+ /**
1004
+ * Check if replaceHistory was called.
1005
+ */
1006
+ wasReplaceHistoryCalled(): boolean;
1007
+ /**
1008
+ * Get the number of times replaceHistory was called.
1009
+ */
1010
+ getReplaceHistoryCallCount(): number;
1011
+ /**
1012
+ * Get the most recent history passed to replaceHistory.
1013
+ * Returns undefined if replaceHistory was never called.
1014
+ */
1015
+ getReplacementHistory(): LLMMessage[] | undefined;
1016
+ /**
1017
+ * Get all messages that were added via add* methods.
1018
+ */
1019
+ getAddedMessages(): LLMMessage[];
1020
+ /**
1021
+ * Reset all tracking state while preserving the conversation.
1022
+ */
1023
+ resetTracking(): void;
1024
+ /**
1025
+ * Completely reset the mock to initial state.
1026
+ * Note: baseMessages cannot be changed after construction.
1027
+ */
1028
+ reset(history?: LLMMessage[]): void;
1029
+ /**
1030
+ * Set the history directly (for test setup).
1031
+ */
1032
+ setHistory(messages: LLMMessage[]): void;
1033
+ /**
1034
+ * Get the current history length.
1035
+ */
1036
+ getHistoryLength(): number;
1037
+ /**
1038
+ * Get total message count (base + history).
1039
+ */
1040
+ getTotalMessageCount(): number;
1041
+ }
1042
+ /**
1043
+ * Create a mock conversation manager with a pre-populated conversation.
1044
+ *
1045
+ * @param turnCount - Number of conversation turns
1046
+ * @param baseMessages - Optional base messages (system prompts)
1047
+ * @returns Configured MockConversationManager
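+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: pre-populate three turns plus a system base message,
+ * // then verify that newly added messages are tracked.
+ * const convo = createMockConversationManager(3, [createSystemMessage("You are helpful.")]);
+ * convo.addUserMessage("One more question");
+ * expect(convo.getAddedMessages()).toHaveLength(1);
+ * ```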
1048
+ */
1049
+ declare function createMockConversationManager(turnCount: number, baseMessages?: LLMMessage[]): MockConversationManager;
1050
+
1051
+ /**
1052
+ * Mock gadget utilities for testing.
1053
+ *
1054
+ * Provides helpers for creating mock gadgets with configurable behavior
1055
+ * and call tracking.
1056
+ *
1057
+ * @module testing/mock-gadget
1058
+ */
1059
+
1060
+ /**
1061
+ * Recorded gadget call for tracking.
1062
+ */
1063
+ interface RecordedCall {
1064
+ /** Parameters passed to execute() */
1065
+ params: Record<string, unknown>;
1066
+ /** When the call was made */
1067
+ timestamp: number;
1068
+ }
1069
+ /**
1070
+ * Mock gadget with call tracking capabilities.
1071
+ */
1072
+ interface MockGadget extends AbstractGadget {
1073
+ /** Get all recorded calls */
1074
+ getCalls(): RecordedCall[];
1075
+ /** Get number of times the gadget was executed */
1076
+ getCallCount(): number;
1077
+ /** Reset call history */
1078
+ resetCalls(): void;
1079
+ /** Check if gadget was called with specific params (partial match) */
1080
+ wasCalledWith(params: Partial<Record<string, unknown>>): boolean;
1081
+ /** Get the last call's parameters */
1082
+ getLastCall(): RecordedCall | undefined;
1083
+ }
1084
+ /**
1085
+ * Configuration for creating a mock gadget.
1086
+ */
1087
+ interface MockGadgetConfig<TSchema extends ZodType = ZodType> {
1088
+ /** Gadget name (required) */
1089
+ name: string;
1090
+ /** Gadget description */
1091
+ description?: string;
1092
+ /** Parameter schema */
1093
+ schema?: TSchema;
1094
+ /** Static result to return */
1095
+ result?: string;
1096
+ /** Dynamic result based on parameters */
1097
+ resultFn?: (params: Record<string, unknown>) => string | Promise<string>;
1098
+ /** Error to throw on execution */
1099
+ error?: Error | string;
1100
+ /** Enable call tracking (default: true) */
1101
+ trackCalls?: boolean;
1102
+ /** Execution delay in ms */
1103
+ delayMs?: number;
1104
+ /** Gadget timeout setting */
1105
+ timeoutMs?: number;
1106
+ }
1107
+ /**
1108
+ * Create a mock gadget for testing.
1109
+ *
1110
+ * @param config - Mock gadget configuration
1111
+ * @returns MockGadget instance with call tracking
1112
+ *
1113
+ * @example
1114
+ * ```typescript
1115
+ * import { createMockGadget } from 'llmist/testing';
1116
+ * import { z } from 'zod';
1117
+ *
1118
+ * const calculator = createMockGadget({
1119
+ * name: 'Calculator',
1120
+ * schema: z.object({ a: z.number(), b: z.number() }),
1121
+ * resultFn: ({ a, b }) => String(Number(a) + Number(b)),
1122
+ * });
1123
+ *
1124
+ * // Use in tests
1125
+ * const registry = new GadgetRegistry();
1126
+ * registry.registerByClass(calculator);
1127
+ *
1128
+ * // After running agent...
1129
+ * expect(calculator.getCallCount()).toBe(1);
1130
+ * expect(calculator.wasCalledWith({ a: 5 })).toBe(true);
1131
+ * ```
1132
+ */
1133
+ declare function createMockGadget<TSchema extends ZodType>(config: MockGadgetConfig<TSchema>): MockGadget;
1134
+ /**
1135
+ * Fluent builder for creating mock gadgets.
1136
+ *
1137
+ * @example
1138
+ * ```typescript
1139
+ * import { mockGadget } from 'llmist/testing';
1140
+ * import { z } from 'zod';
1141
+ *
1142
+ * const mock = mockGadget()
1143
+ * .withName('Weather')
1144
+ * .withDescription('Get weather for a city')
1145
+ * .withSchema(z.object({ city: z.string() }))
1146
+ * .returns('Sunny, 72F')
1147
+ * .trackCalls()
1148
+ * .build();
1149
+ *
1150
+ * // Or for error testing
1151
+ * const errorMock = mockGadget()
1152
+ * .withName('Unstable')
1153
+ * .throws('Service unavailable')
1154
+ * .build();
1155
+ * ```
1156
+ */
1157
+ declare class MockGadgetBuilder {
1158
+ private config;
1159
+ /**
1160
+ * Set the gadget name.
1161
+ */
1162
+ withName(name: string): this;
1163
+ /**
1164
+ * Set the gadget description.
1165
+ */
1166
+ withDescription(description: string): this;
1167
+ /**
1168
+ * Set the parameter schema.
1169
+ */
1170
+ withSchema<T extends ZodType>(schema: T): MockGadgetBuilder;
1171
+ /**
1172
+ * Set a static result to return.
1173
+ */
1174
+ returns(result: string): this;
1175
+ /**
1176
+ * Set a dynamic result function.
1177
+ */
1178
+ returnsAsync(resultFn: (params: Record<string, unknown>) => string | Promise<string>): this;
1179
+ /**
1180
+ * Make the gadget throw an error on execution.
1181
+ */
1182
+ throws(error: Error | string): this;
1183
+ /**
1184
+ * Add execution delay.
1185
+ */
1186
+ withDelay(ms: number): this;
1187
+ /**
1188
+ * Set timeout for the gadget.
1189
+ */
1190
+ withTimeout(ms: number): this;
1191
+ /**
1192
+ * Enable call tracking (enabled by default).
1193
+ */
1194
+ trackCalls(): this;
1195
+ /**
1196
+ * Disable call tracking.
1197
+ */
1198
+ noTracking(): this;
1199
+ /**
1200
+ * Build the mock gadget.
1201
+ */
1202
+ build(): MockGadget;
1203
+ }
1204
+ /**
1205
+ * Create a fluent builder for mock gadgets.
1206
+ *
1207
+ * @returns New MockGadgetBuilder instance
1208
+ *
1209
+ * @example
1210
+ * ```typescript
1211
+ * const mock = mockGadget()
1212
+ * .withName('Search')
1213
+ * .withSchema(z.object({ query: z.string() }))
1214
+ * .returnsAsync(async ({ query }) => {
1215
+ * return `Results for: ${query}`;
1216
+ * })
1217
+ * .build();
1218
+ * ```
1219
+ */
1220
+ declare function mockGadget(): MockGadgetBuilder;
1221
+
1222
+ /**
1223
+ * Global singleton instance for managing LLM mocks.
1224
+ * This allows mocks to be registered once and used across the application.
1225
+ */
1226
+ declare class MockManager {
1227
+ private static instance;
1228
+ private mocks;
1229
+ private stats;
1230
+ private options;
1231
+ private logger;
1232
+ private nextId;
1233
+ private constructor();
1234
+ /**
1235
+ * Get the global MockManager instance.
1236
+ * Creates one if it doesn't exist.
1237
+ */
1238
+ static getInstance(options?: MockOptions): MockManager;
1239
+ /**
1240
+ * Reset the global instance (useful for testing).
1241
+ */
1242
+ static reset(): void;
1243
+ /**
1244
+ * Register a new mock.
1245
+ *
1246
+ * @param registration - The mock registration configuration
1247
+ * @returns The ID of the registered mock
1248
+ *
1249
+ * @example
1250
+ * const manager = MockManager.getInstance();
1251
+ * const mockId = manager.register({
1252
+ * label: 'GPT-4 mock',
1253
+ * matcher: (ctx) => ctx.modelName.includes('gpt-4'),
1254
+ * response: { text: 'Mocked response' }
1255
+ * });
1256
+ */
1257
+ register(registration: Omit<MockRegistration, "id"> & {
1258
+ id?: string;
1259
+ }): string;
1260
+ /**
1261
+ * Unregister a mock by ID.
1262
+ */
1263
+ unregister(id: string): boolean;
1264
+ /**
1265
+ * Clear all registered mocks.
1266
+ */
1267
+ clear(): void;
1268
+ /**
1269
+ * Find and return a matching mock for the given context.
1270
+ * Returns the mock response if found, null otherwise.
1271
+ */
1272
+ findMatch(context: MockMatcherContext): Promise<MockResponse | null>;
1273
+ /**
1274
+ * Get statistics for a specific mock.
1275
+ */
1276
+ getStats(id: string): MockStats | undefined;
1277
+ /**
1278
+ * Get all registered mock IDs.
1279
+ */
1280
+ getMockIds(): string[];
1281
+ /**
1282
+ * Get the number of registered mocks.
1283
+ */
1284
+ getCount(): number;
1285
+ /**
1286
+ * Update the mock manager options.
1287
+ */
1288
+ setOptions(options: Partial<MockOptions>): void;
1289
+ }
1290
+ /**
1291
+ * Helper function to get the global mock manager instance.
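+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: clear registered mocks between tests.
+ * const manager = getMockManager();
+ * manager.clear();
+ * expect(manager.getCount()).toBe(0);
+ * ```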
1292
+ */
1293
+ declare function getMockManager(options?: MockOptions): MockManager;
1294
+
1295
+ /**
1296
+ * Create a mock LLM stream from a mock response.
1297
+ * This simulates the streaming behavior of real LLM providers.
1298
+ *
1299
+ * @param response - The mock response configuration
1300
+ * @returns An async iterable that yields LLMStreamChunks
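+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: stream a mock response and collect its text.
+ * const stream = createMockStream({
+ *   text: 'Hello!',
+ *   usage: { inputTokens: 3, outputTokens: 2, totalTokens: 5 },
+ *   finishReason: 'stop',
+ * });
+ * expect(await collectStreamText(stream)).toBe('Hello!');
+ * ```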
1301
+ */
1302
+ declare function createMockStream(response: MockResponse): LLMStream;
1303
+ /**
1304
+ * Create a simple text-only mock stream.
1305
+ * Convenience helper for quickly creating mock responses.
1306
+ *
1307
+ * @param text - The text to stream
1308
+ * @param options - Optional streaming configuration
1309
+ *
1310
+ * @example
1311
+ * const stream = createTextMockStream('Hello, world!');
1312
+ * for await (const chunk of stream) {
1313
+ * console.log(chunk.text);
1314
+ * }
1315
+ */
1316
+ declare function createTextMockStream(text: string, options?: {
1317
+ delayMs?: number;
1318
+ streamDelayMs?: number;
1319
+ usage?: MockResponse["usage"];
1320
+ }): LLMStream;
1321
+
1322
+ /**
1323
+ * Stream testing utilities for llmist.
1324
+ * Provides helpers for creating and consuming test streams.
1325
+ */
1326
+
1327
+ /**
1328
+ * Create an async iterable stream from an array of chunks.
1329
+ * Useful for creating deterministic test streams.
1330
+ *
1331
+ * @param chunks - Array of chunks to yield
1332
+ * @returns An async iterable that yields the chunks in order
1333
+ *
1334
+ * @example
1335
+ * ```typescript
1336
+ * const stream = createTestStream([
1337
+ * { text: "Hello " },
1338
+ * { text: "world", finishReason: "stop", usage: { inputTokens: 10, outputTokens: 5 } }
1339
+ * ]);
1340
+ * ```
1341
+ */
1342
+ declare function createTestStream(chunks: LLMStreamChunk[]): LLMStream;
1343
+ /**
1344
+ * Create a stream that yields text in specified chunks.
1345
+ * Automatically adds finishReason and usage to the final chunk.
1346
+ *
1347
+ * @param text - The full text to stream
1348
+ * @param options - Configuration options
1349
+ * @returns An async iterable stream
1350
+ *
1351
+ * @example
1352
+ * ```typescript
1353
+ * const stream = createTextStream("Hello, world!", { chunkSize: 5 });
1354
+ * // Yields: "Hello", ", wor", "ld!"
1355
+ * ```
1356
+ */
1357
+ declare function createTextStream(text: string, options?: {
1358
+ /** Size of each chunk (default: entire text as one chunk) */
1359
+ chunkSize?: number;
1360
+ /** Delay before starting the stream in ms */
1361
+ delayMs?: number;
1362
+ /** Delay between chunks in ms */
1363
+ chunkDelayMs?: number;
1364
+ /** Custom usage stats */
1365
+ usage?: {
1366
+ inputTokens: number;
1367
+ outputTokens: number;
1368
+ totalTokens: number;
1369
+ };
1370
+ /** Custom finish reason (default: "stop") */
1371
+ finishReason?: string;
1372
+ }): LLMStream;
1373
+ /**
1374
+ * Collect all chunks from a stream into an array.
1375
+ * Useful for asserting on stream output in tests.
1376
+ *
1377
+ * @param stream - The stream to collect from
1378
+ * @returns Array of all chunks from the stream
1379
+ *
1380
+ * @example
1381
+ * ```typescript
1382
+ * const chunks = await collectStream(myStream);
1383
+ * expect(chunks).toHaveLength(3);
1384
+ * expect(chunks[2].finishReason).toBe("stop");
1385
+ * ```
1386
+ */
1387
+ declare function collectStream(stream: LLMStream): Promise<LLMStreamChunk[]>;
1388
+ /**
1389
+ * Collect all text from a stream into a single string.
1390
+ *
1391
+ * @param stream - The stream to collect from
1392
+ * @returns Concatenated text from all chunks
1393
+ *
1394
+ * @example
1395
+ * ```typescript
1396
+ * const text = await collectStreamText(myStream);
1397
+ * expect(text).toBe("Hello, world!");
1398
+ * ```
1399
+ */
1400
+ declare function collectStreamText(stream: LLMStream): Promise<string>;
1401
+ /**
1402
+ * Get the final chunk from a stream (containing finishReason and usage).
1403
+ *
1404
+ * @param stream - The stream to consume
1405
+ * @returns The final chunk from the stream
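+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: createTextStream adds finishReason ("stop" by default) to the last chunk.
+ * const stream = createTextStream("Hello!", { chunkSize: 3 });
+ * const finalChunk = await getStreamFinalChunk(stream);
+ * expect(finalChunk?.finishReason).toBe("stop");
+ * ```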
1406
+ */
1407
+ declare function getStreamFinalChunk(stream: LLMStream): Promise<LLMStreamChunk | undefined>;
1408
+ /**
1409
+ * Create an empty stream that yields nothing.
1410
+ * Useful for testing edge cases.
1411
+ */
1412
+ declare function createEmptyStream(): LLMStream;
1413
+ /**
1414
+ * Create a stream that throws an error after yielding some chunks.
1415
+ * Useful for testing error handling.
1416
+ *
1417
+ * @param chunksBeforeError - Chunks to yield before throwing
1418
+ * @param error - The error to throw
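+ *
+ * @example
+ * ```typescript
+ * // Illustrative sketch: the error surfaces when the stream is consumed.
+ * const stream = createErrorStream([{ text: "partial " }], new Error("boom"));
+ * await expect(collectStream(stream)).rejects.toThrow("boom");
+ * ```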
1419
+ */
1420
+ declare function createErrorStream(chunksBeforeError: LLMStreamChunk[], error: Error): LLMStream;
1421
+
1422
+ export { type MockAudioData, MockBuilder, MockConversationManager, type MockGadget, MockGadgetBuilder, type MockGadgetConfig, type MockImageData, MockManager, type MockMatcher, type MockMatcherContext, type MockOptions, MockPromptRecorder, MockProviderAdapter, type MockRegistration, type MockResponse, type MockStats, type RecordedCall, type TestEnvironment, type TestEnvironmentOptions, type TestGadgetOptions, type TestGadgetResult, collectOutput, collectStream, collectStreamText, createAssistantMessage, createConversation, createConversationWithGadgets, createEmptyStream, createErrorStream, createLargeConversation, createMinimalConversation, createMockAdapter, createMockClient, createMockConversationManager, createMockGadget, createMockPrompt, createMockReadable, createMockStream, createMockWritable, createSystemMessage, createTestEnvironment, createTestStream, createTextMockStream, createTextStream, createUserMessage, estimateTokens, getBufferedOutput, getMockManager, getStreamFinalChunk, mockGadget, mockLLM, testGadget, testGadgetBatch, waitFor };