@llmist/testing 9.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1658 @@
1
+ // src/cli-helpers.ts
2
+ import { PassThrough, Readable, Writable } from "stream";
3
/**
 * Build an isolated stdio environment for CLI tests.
 * stdin is a pre-filled mock readable; stdout/stderr are capturable
 * PassThrough streams; exitCode is tracked via a getter/setter pair.
 */
function createTestEnvironment(options = {}) {
  const input = createMockReadable(options.stdin);
  const output = new PassThrough();
  const errorOutput = new PassThrough();
  let storedExitCode;
  return {
    stdin: input,
    stdout: output,
    stderr: errorOutput,
    isTTY: options.isTTY ?? false,
    argv: options.argv ?? ["node", "llmist"],
    // Real (defined) env vars first; caller-supplied overrides win.
    env: { ...filterDefinedEnv(process.env), ...options.env },
    get exitCode() {
      return storedExitCode;
    },
    setExitCode: (code) => {
      storedExitCode = code;
    },
  };
}
23
/**
 * Create a Readable pre-loaded with the given input and already ended.
 * Arrays are joined with newlines and get a trailing newline; with no
 * input the stream yields EOF immediately.
 */
function createMockReadable(input) {
  // No-op _read: all data is pushed eagerly below.
  const stream = new Readable({ read() {} });
  if (input) {
    const payload = Array.isArray(input) ? `${input.join("\n")}\n` : input;
    stream.push(payload);
  }
  stream.push(null); // signal end-of-stream
  return stream;
}
38
/**
 * Create a Writable that buffers everything written to it.
 * The captured output is exposed via `getData()` as a UTF-8 string.
 */
function createMockWritable() {
  const collected = [];
  const stream = new Writable({
    write(chunk, _encoding, callback) {
      collected.push(Buffer.from(chunk));
      callback();
    },
  });
  // Attach accessor for tests to read back what was written.
  stream.getData = () => Buffer.concat(collected).toString("utf8");
  return stream;
}
49
/**
 * Collect everything a stream emits until it ends, errors, or the
 * timeout elapses (timeout resolves with whatever was collected so far).
 *
 * Fix: the original left its data/end/error listeners attached after the
 * timeout resolved, so they kept firing and buffering chunks on a
 * long-lived stream — a listener/memory leak. All listeners and the timer
 * are now removed on every settle path.
 *
 * @param stream - Readable/EventEmitter emitting Buffer or string chunks
 * @param timeout - max wait in ms (default 5000)
 * @returns collected output as a UTF-8 string
 */
async function collectOutput(stream, timeout = 5e3) {
  return new Promise((resolve, reject) => {
    const chunks = [];
    const onData = (chunk) => {
      chunks.push(Buffer.from(chunk));
    };
    const cleanup = () => {
      clearTimeout(timeoutId);
      stream.off("data", onData);
      stream.off("end", onEnd);
      stream.off("error", onError);
    };
    const onEnd = () => {
      cleanup();
      resolve(Buffer.concat(chunks).toString("utf8"));
    };
    const onError = (err) => {
      cleanup();
      reject(err);
    };
    const timeoutId = setTimeout(() => {
      // Best-effort: resolve with partial output rather than hanging.
      cleanup();
      resolve(Buffer.concat(chunks).toString("utf8"));
    }, timeout);
    stream.on("data", onData);
    stream.on("end", onEnd);
    stream.on("error", onError);
  });
}
68
/**
 * Synchronously drain whatever is currently buffered in a readable
 * stream and return it as a UTF-8 string. Does not wait for more data.
 */
function getBufferedOutput(stream) {
  const parts = [];
  let piece = stream.read();
  while (piece !== null) {
    parts.push(piece);
    piece = stream.read();
  }
  return Buffer.concat(parts).toString("utf8");
}
77
/**
 * Create a prompt function that replays canned responses in order and
 * throws once the supply is exhausted.
 */
function createMockPrompt(responses) {
  let cursor = 0;
  return async (_question) => {
    if (cursor < responses.length) {
      return responses[cursor++];
    }
    // cursor equals the 0-based question index here, so +1 is 1-based.
    throw new Error(`Mock prompt exhausted: no response for question ${cursor + 1}`);
  };
}
86
/**
 * Replays canned prompt responses while recording every question asked,
 * so tests can assert on the interaction afterwards.
 */
var MockPromptRecorder = class {
  responses;
  index = 0;
  questions = [];
  constructor(responses) {
    this.responses = responses;
  }
  /**
   * The prompt function to use in tests.
   * Declared as an arrow property so `this` stays bound when passed around.
   */
  prompt = async (question) => {
    this.questions.push(question);
    if (this.index >= this.responses.length) {
      throw new Error(`Mock prompt exhausted after ${this.index} questions`);
    }
    const answer = this.responses[this.index];
    this.index += 1;
    return answer;
  };
  /**
   * Get all questions that were asked (defensive copy).
   */
  getQuestions() {
    return this.questions.slice();
  }
  /**
   * Get the number of questions asked.
   */
  getQuestionCount() {
    return this.questions.length;
  }
  /**
   * Reset the recorder state, optionally swapping in new responses.
   */
  reset(newResponses) {
    this.index = 0;
    this.questions = [];
    if (newResponses) {
      this.responses = newResponses;
    }
  }
};
126
/**
 * Poll a condition until it returns true, or throw after `timeout` ms.
 * Checks every `interval` ms.
 */
async function waitFor(condition, timeout = 5e3, interval = 50) {
  const deadline = Date.now() + timeout;
  while (!condition()) {
    if (Date.now() > deadline) {
      throw new Error(`waitFor timed out after ${timeout}ms`);
    }
    await new Promise((resolve) => setTimeout(resolve, interval));
  }
}
135
/** Promise-based delay of `ms` milliseconds. */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
138
/**
 * Return a copy of an env-like object with all `undefined` values
 * dropped (process.env typings allow undefined entries).
 */
function filterDefinedEnv(env) {
  return Object.fromEntries(
    Object.entries(env).filter(([, value]) => value !== void 0)
  );
}
147
+
148
+ // src/conversation-fixtures.ts
149
/**
 * Build a synthetic user/assistant conversation with `turnCount` turns.
 * Each message is padded toward roughly `contentLength` characters.
 */
function createConversation(turnCount, options) {
  const userPrefix = options?.userPrefix ?? "User message";
  const assistantPrefix = options?.assistantPrefix ?? "Assistant response";
  const contentLength = options?.contentLength ?? 100;
  // Padding is identical for every message, so compute it once.
  const padding = " ".repeat(Math.max(0, contentLength - 30));
  const messages = [];
  for (let turn = 1; turn <= turnCount; turn++) {
    messages.push(
      {
        role: "user",
        content: `${userPrefix} ${turn}: This is turn ${turn} of the conversation.${padding}`,
      },
      {
        role: "assistant",
        content: `${assistantPrefix} ${turn}: I acknowledge turn ${turn}.${padding}`,
      },
    );
  }
  return messages;
}
167
/**
 * Build a conversation where each turn contains `gadgetCallsPerTurn`
 * gadget invocation/result pairs, cycling through `gadgetNames`.
 */
function createConversationWithGadgets(turnCount, gadgetCallsPerTurn = 1, options) {
  const gadgetNames = options?.gadgetNames ?? ["search", "calculate", "read"];
  const contentLength = options?.contentLength ?? 50;
  const messages = [];
  let callCounter = 0; // global counter so names cycle across turns
  for (let turn = 0; turn < turnCount; turn++) {
    messages.push({
      role: "user",
      content: `User request ${turn + 1}${"x".repeat(contentLength)}`,
    });
    for (let g = 0; g < gadgetCallsPerTurn; g++) {
      const name = gadgetNames[callCounter % gadgetNames.length];
      callCounter += 1;
      // Gadget call in the block protocol format, then its result.
      messages.push({
        role: "assistant",
        content: `!!!GADGET_START:${name}\n!!!ARG:query\ntest query ${turn}-${g}\n!!!GADGET_END`,
      });
      messages.push({
        role: "user",
        content: `Result: Gadget ${name} returned result for query ${turn}-${g}`,
      });
    }
    messages.push({
      role: "assistant",
      content: `Final response for turn ${turn + 1}${"y".repeat(contentLength)}`,
    });
  }
  return messages;
}
199
/** Rough token estimate: total content characters divided by 4, rounded up. */
function estimateTokens(messages) {
  let totalChars = 0;
  for (const msg of messages) {
    totalChars += msg.content?.length ?? 0;
  }
  return Math.ceil(totalChars / 4);
}
202
/** Wrap text as a user-role chat message. */
function createUserMessage(content) {
  return { role: "user", content };
}
/** Wrap text as an assistant-role chat message. */
function createAssistantMessage(content) {
  return { role: "assistant", content };
}
/** Wrap text as a system-role chat message. */
function createSystemMessage(content) {
  return { role: "system", content };
}
/** Smallest valid conversation: one user turn and one assistant reply. */
function createMinimalConversation() {
  const greeting = createUserMessage("Hello");
  const reply = createAssistantMessage("Hi there!");
  return [greeting, reply];
}
217
/**
 * Build a conversation sized to roughly `targetTokens` tokens.
 * Assumes ~4 chars per token and two messages per turn.
 */
function createLargeConversation(targetTokens, options) {
  const tokensPerTurn = options?.tokensPerTurn ?? 200;
  const turnsNeeded = Math.ceil(targetTokens / tokensPerTurn);
  // Split a turn's character budget across its two messages.
  const charsPerMessage = Math.floor((tokensPerTurn * 4) / 2);
  return createConversation(turnsNeeded, { contentLength: charsPerMessage });
}
225
+
226
+ // src/gadget-testing.ts
227
+ import { validateGadgetParams } from "llmist";
228
/**
 * Execute a gadget against the given params, optionally validating them
 * first. Never throws: validation failures and execution errors are
 * reported via the `error` field of the result object.
 */
async function testGadget(gadget, params, options) {
  let effectiveParams = params;
  if (!options?.skipValidation) {
    const validation = validateGadgetParams(gadget, params);
    if (!validation.success) {
      // Validation failed: report the raw params back unchanged.
      return { error: validation.error, validatedParams: params };
    }
    effectiveParams = validation.data;
  }
  try {
    const raw = await Promise.resolve(gadget.execute(effectiveParams));
    // Gadgets may return a bare string or a { result, cost } object.
    if (typeof raw === "string") {
      return { result: raw, validatedParams: effectiveParams, cost: 0 };
    }
    return {
      result: raw.result,
      validatedParams: effectiveParams,
      cost: raw.cost ?? 0,
    };
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
    return { error: message, validatedParams: effectiveParams };
  }
}
261
/** Run testGadget for every param set in parallel and collect the results. */
async function testGadgetBatch(gadget, paramSets, options) {
  const runs = paramSets.map((params) => testGadget(gadget, params, options));
  return Promise.all(runs);
}
264
+
265
+ // src/mock-manager.ts
266
+ import { createLogger } from "llmist";
267
// Process-wide singleton registry of LLM mocks. Mocks are matched in
// registration order (Map iteration order); the first matcher that
// returns true wins.
var MockManager = class _MockManager {
  static instance = null;
  // id -> { id, matcher, response, label, once }
  mocks = /* @__PURE__ */ new Map();
  // id -> { matchCount, lastUsed } (only populated when recordStats is on)
  stats = /* @__PURE__ */ new Map();
  options;
  logger;
  // Monotonic counter used to synthesize ids for unnamed registrations.
  nextId = 1;
  constructor(options = {}) {
    this.options = {
      strictMode: options.strictMode ?? false,
      debug: options.debug ?? false,
      recordStats: options.recordStats ?? true
    };
    // debug lowers minLevel from 3 to 2 (more verbose logging).
    this.logger = createLogger({ name: "MockManager", minLevel: this.options.debug ? 2 : 3 });
  }
  /**
   * Get the global MockManager instance.
   * Creates one if it doesn't exist. Options passed after the instance
   * already exists are ignored (a warning is printed).
   */
  static getInstance(options) {
    if (!_MockManager.instance) {
      _MockManager.instance = new _MockManager(options);
    } else if (options) {
      console.warn(
        "MockManager.getInstance() called with options, but instance already exists. Options are ignored. Use setOptions() to update options or reset() to reinitialize."
      );
    }
    return _MockManager.instance;
  }
  /**
   * Reset the global instance (useful for testing).
   */
  static reset() {
    _MockManager.instance = null;
  }
  /**
   * Register a new mock.
   *
   * @param registration - The mock registration configuration
   * @returns The ID of the registered mock (generated as `mock-N` if absent)
   *
   * @example
   * const manager = MockManager.getInstance();
   * const mockId = manager.register({
   *   label: 'GPT-4 mock',
   *   matcher: (ctx) => ctx.modelName.includes('gpt-4'),
   *   response: { text: 'Mocked response' }
   * });
   */
  register(registration) {
    const id = registration.id ?? `mock-${this.nextId++}`;
    const mock = {
      id,
      matcher: registration.matcher,
      response: registration.response,
      label: registration.label,
      once: registration.once
    };
    this.mocks.set(id, mock);
    if (this.options.recordStats) {
      this.stats.set(id, { matchCount: 0 });
    }
    this.logger.debug(
      `Registered mock: ${id}${mock.label ? ` (${mock.label})` : ""}${mock.once ? " [once]" : ""}`
    );
    return id;
  }
  /**
   * Unregister a mock by ID.
   * @returns true if a mock with that ID existed and was removed.
   */
  unregister(id) {
    const deleted = this.mocks.delete(id);
    if (deleted) {
      this.stats.delete(id);
      this.logger.debug(`Unregistered mock: ${id}`);
    }
    return deleted;
  }
  /**
   * Clear all registered mocks (and their stats).
   */
  clear() {
    this.mocks.clear();
    this.stats.clear();
    this.logger.debug("Cleared all mocks");
  }
  /**
   * Find and return a matching mock for the given context.
   * Returns the mock response if found; with no match it throws in
   * strictMode, otherwise returns an empty "stop" response.
   */
  async findMatch(context) {
    this.logger.debug(
      `Finding match for: ${context.provider}:${context.modelName} (${this.mocks.size} mocks registered)`
    );
    for (const [id, mock] of this.mocks.entries()) {
      let matches = false;
      try {
        // Matchers may be sync or async.
        matches = await Promise.resolve(mock.matcher(context));
      } catch (error) {
        // A throwing matcher is skipped (or fatal in strictMode).
        this.logger.warn(`Error in matcher ${id}:`, error);
        if (this.options.strictMode) {
          throw new Error(`Matcher error in mock ${id}: ${error}`);
        }
        continue;
      }
      if (matches) {
        this.logger.debug(`Mock matched: ${id}${mock.label ? ` (${mock.label})` : ""}`);
        if (this.options.recordStats) {
          // Record usage BEFORE a potential one-time removal below.
          const stats = this.stats.get(id);
          if (stats) {
            stats.matchCount++;
            stats.lastUsed = /* @__PURE__ */ new Date();
          }
        }
        if (mock.once) {
          // One-shot mocks are consumed on first match.
          this.mocks.delete(id);
          this.stats.delete(id);
          this.logger.debug(`Removed one-time mock: ${id}`);
        }
        // Responses may be static objects or (possibly async) factories.
        const response = typeof mock.response === "function" ? await Promise.resolve(mock.response(context)) : mock.response;
        return response;
      }
    }
    this.logger.debug("No mock matched");
    if (this.options.strictMode) {
      throw new Error(
        `No mock registered for ${context.provider}:${context.modelName}. Register a mock using MockManager.getInstance().register() or disable strictMode.`
      );
    }
    return {
      text: "",
      usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
      finishReason: "stop"
    };
  }
  /**
   * Get statistics for a specific mock (undefined if unknown or stats off).
   */
  getStats(id) {
    return this.stats.get(id);
  }
  /**
   * Get all registered mock IDs.
   */
  getMockIds() {
    return Array.from(this.mocks.keys());
  }
  /**
   * Get the number of registered mocks.
   */
  getCount() {
    return this.mocks.size;
  }
  /**
   * Update the mock manager options; the logger is rebuilt so a changed
   * `debug` flag takes effect.
   */
  setOptions(options) {
    this.options = { ...this.options, ...options };
    this.logger = createLogger({ name: "MockManager", minLevel: this.options.debug ? 2 : 3 });
  }
};
428
// Convenience accessor for the global MockManager singleton; options are
// only honored on first creation (see MockManager.getInstance).
function getMockManager(options) {
  return MockManager.getInstance(options);
}
431
+
432
+ // src/mock-stream.ts
433
+ import { GADGET_ARG_PREFIX, GADGET_END_PREFIX, GADGET_START_PREFIX } from "llmist";
434
/** Promise-based delay of `ms` milliseconds (local copy for this module). */
function sleep2(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
437
/** Build a loosely unique invocation id: timestamp plus base-36 randomness. */
function generateInvocationId() {
  const randomPart = Math.random().toString(36).substring(2, 9);
  return `inv-${Date.now()}-${randomPart}`;
}
440
/**
 * Split text into randomly sized chunks between minChunkSize and
 * maxChunkSize characters, preferring to break after a space so the
 * chunks look like natural streaming output. Chunks rejoin to the input.
 */
function splitIntoChunks(text, minChunkSize = 5, maxChunkSize = 30) {
  const pieces = [];
  let rest = text;
  const span = maxChunkSize - minChunkSize + 1;
  while (rest.length > 0) {
    const size = Math.min(
      minChunkSize + Math.floor(Math.random() * span),
      rest.length
    );
    let piece = rest.substring(0, size);
    if (size < rest.length) {
      // Prefer a word boundary unless it would make the chunk too small.
      const lastSpace = piece.lastIndexOf(" ");
      if (lastSpace > minChunkSize / 2) {
        piece = piece.substring(0, lastSpace + 1);
      }
    }
    pieces.push(piece);
    rest = rest.substring(piece.length);
  }
  return pieces;
}
465
// Serialize a (possibly nested) parameter object into the gadget block
// protocol: one `GADGET_ARG_PREFIX<json-pointer>\n<value>\n` entry per
// leaf value. Nesting is flattened into slash-separated pointers
// (`a/b/0/c`); null/undefined leaves are omitted entirely.
function serializeToBlockFormat(obj, prefix = "") {
  let result = "";
  for (const [key, value] of Object.entries(obj)) {
    const pointer = prefix ? `${prefix}/${key}` : key;
    if (value === null || value === void 0) {
      continue;
    }
    if (Array.isArray(value)) {
      for (let i = 0; i < value.length; i++) {
        const item = value[i];
        const itemPointer = `${pointer}/${i}`;
        if (typeof item === "object" && item !== null && !Array.isArray(item)) {
          // Object array element: recurse with the indexed pointer.
          result += serializeToBlockFormat(item, itemPointer);
        } else if (Array.isArray(item)) {
          // Nested array: flatten one more level of indices.
          // NOTE(review): only handles two levels of array nesting; a
          // 3-deep array element would be stringified — confirm intent.
          for (let j = 0; j < item.length; j++) {
            result += `${GADGET_ARG_PREFIX}${itemPointer}/${j}
${String(item[j])}
`;
          }
        } else {
          result += `${GADGET_ARG_PREFIX}${itemPointer}
${String(item)}
`;
        }
      }
    } else if (typeof value === "object") {
      // Nested plain object: recurse with the extended pointer.
      result += serializeToBlockFormat(value, pointer);
    } else {
      // Scalar leaf: emit a single arg entry.
      result += `${GADGET_ARG_PREFIX}${pointer}
${String(value)}
`;
    }
  }
  return result;
}
500
// Render a list of gadget calls into protocol text
// (`\nGADGET_START_PREFIX<name>\n<args>GADGET_END_PREFIX` per call) and
// return the text plus the { name, invocationId } pairs that were emitted.
function formatGadgetCalls(gadgetCalls) {
  let text = "";
  const calls = [];
  for (const call of gadgetCalls) {
    // Generate an id when the caller did not pin one.
    const invocationId = call.invocationId ?? generateInvocationId();
    calls.push({ name: call.gadgetName, invocationId });
    const blockParams = serializeToBlockFormat(call.parameters);
    text += `
${GADGET_START_PREFIX}${call.gadgetName}
${blockParams}${GADGET_END_PREFIX}`;
  }
  return { text, calls };
}
513
/**
 * Turn a mock response description into an async stream of chunks.
 * Optional initial latency (delayMs), optional per-chunk delay
 * (streamDelayMs, which also enables random chunking). finishReason and
 * usage are attached only to the final chunk.
 */
async function* createMockStream(response) {
  if (response.delayMs) {
    await sleep2(response.delayMs);
  }
  const interChunkDelay = response.streamDelayMs ?? 0;
  let body = response.text ?? "";
  if (response.gadgetCalls && response.gadgetCalls.length > 0) {
    body += formatGadgetCalls(response.gadgetCalls).text;
  }
  if (body.length === 0) {
    // Nothing to stream: emit one terminal chunk with defaults.
    yield {
      text: "",
      finishReason: response.finishReason ?? "stop",
      usage: response.usage ?? { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
    };
    return;
  }
  // Only bother splitting when the caller asked for streaming pauses.
  const pieces = interChunkDelay > 0 ? splitIntoChunks(body) : [body];
  for (let i = 0; i < pieces.length; i++) {
    const isLast = i === pieces.length - 1;
    const chunk = { text: pieces[i] };
    if (isLast) {
      if (response.finishReason !== void 0) {
        chunk.finishReason = response.finishReason;
      }
      if (response.usage) {
        chunk.usage = response.usage;
      }
    }
    yield chunk;
    if (!isLast && interChunkDelay > 0) {
      await sleep2(interChunkDelay);
    }
  }
}
551
/** Shorthand for a plain-text mock stream that always finishes with "stop". */
function createTextMockStream(text, options) {
  const response = {
    text,
    delayMs: options?.delayMs,
    streamDelayMs: options?.streamDelayMs,
    usage: options?.usage,
    finishReason: "stop"
  };
  return createMockStream(response);
}
560
+
561
+ // src/mock-adapter.ts
562
// Provider adapter that serves responses from the global MockManager
// instead of a real LLM backend. Claims support for every model so it
// intercepts all requests when registered.
var MockProviderAdapter = class {
  providerId = "mock";
  priority = 100;
  // High priority: check mocks before real providers
  mockManager;
  constructor(options) {
    // Shares the process-wide MockManager singleton.
    this.mockManager = getMockManager(options);
  }
  // Accepts every model descriptor.
  supports(_descriptor) {
    return true;
  }
  // Build a match context from the request and delegate to the generator.
  stream(options, descriptor, _spec) {
    const context = {
      model: options.model,
      provider: descriptor.provider,
      modelName: descriptor.name,
      options,
      messages: options.messages
    };
    return this.createMockStreamFromContext(context);
  }
  // Resolve a mock for the context and stream it; with no mock response,
  // emit a single empty "stop" chunk.
  async *createMockStreamFromContext(context) {
    const mockResponse = await this.mockManager.findMatch(context);
    if (!mockResponse) {
      yield {
        text: "",
        finishReason: "stop",
        usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
      };
      return;
    }
    yield* createMockStream(mockResponse);
  }
  // ==========================================================================
  // Image Generation Support
  // ==========================================================================
  /**
   * Check if this adapter supports image generation for a given model.
   * Always returns true; the actual check happens in generateImage.
   */
  supportsImageGeneration(_modelId) {
    return true;
  }
  /**
   * Generate mock images based on registered mocks.
   * Throws if no matching mock carries image data.
   *
   * @param options - Image generation options
   * @returns Mock image generation result
   */
  async generateImage(options) {
    // The prompt is wrapped as a single user message so text matchers work.
    const context = {
      model: options.model,
      provider: "mock",
      modelName: options.model,
      options: {
        model: options.model,
        messages: [{ role: "user", content: options.prompt }]
      },
      messages: [{ role: "user", content: options.prompt }]
    };
    const mockResponse = await this.mockManager.findMatch(context);
    if (!mockResponse?.images || mockResponse.images.length === 0) {
      throw new Error(
        `No mock registered for image generation with model "${options.model}". Use mockLLM().forModel("${options.model}").returnsImage(...).register() to add one.`
      );
    }
    return this.createImageResult(options, mockResponse);
  }
  /**
   * Transform mock response into ImageGenerationResult format.
   *
   * @param options - Original image generation options
   * @param mockResponse - Mock response containing image data
   * @returns ImageGenerationResult with mock data and zero cost
   */
  createImageResult(options, mockResponse) {
    const images = mockResponse.images ?? [];
    return {
      images: images.map((img) => ({
        b64Json: img.data,
        revisedPrompt: img.revisedPrompt
      })),
      model: options.model,
      usage: {
        imagesGenerated: images.length,
        size: options.size ?? "1024x1024",
        quality: options.quality ?? "standard"
      },
      cost: 0
      // Mock cost is always 0
    };
  }
  // ==========================================================================
  // Speech Generation Support
  // ==========================================================================
  /**
   * Check if this adapter supports speech generation for a given model.
   * Always returns true; the actual check happens in generateSpeech.
   */
  supportsSpeechGeneration(_modelId) {
    return true;
  }
  /**
   * Generate mock speech based on registered mocks.
   * Throws if no matching mock carries audio data.
   *
   * @param options - Speech generation options
   * @returns Mock speech generation result
   */
  async generateSpeech(options) {
    // The input text is wrapped as a user message so text matchers work.
    const context = {
      model: options.model,
      provider: "mock",
      modelName: options.model,
      options: {
        model: options.model,
        messages: [{ role: "user", content: options.input }]
      },
      messages: [{ role: "user", content: options.input }]
    };
    const mockResponse = await this.mockManager.findMatch(context);
    if (!mockResponse?.audio) {
      throw new Error(
        `No mock registered for speech generation with model "${options.model}". Use mockLLM().forModel("${options.model}").returnsAudio(...).register() to add one.`
      );
    }
    return this.createSpeechResult(options, mockResponse);
  }
  /**
   * Transform mock response into SpeechGenerationResult format.
   * Converts base64 audio data to an ArrayBuffer byte-by-byte.
   *
   * @param options - Original speech generation options
   * @param mockResponse - Mock response containing audio data
   * @returns SpeechGenerationResult with mock data and zero cost
   */
  createSpeechResult(options, mockResponse) {
    const audio = mockResponse.audio;
    // Decode base64 to raw bytes (atob yields a binary string).
    const binaryString = atob(audio.data);
    const bytes = new Uint8Array(binaryString.length);
    for (let i = 0; i < binaryString.length; i++) {
      bytes[i] = binaryString.charCodeAt(i);
    }
    const format = this.mimeTypeToAudioFormat(audio.mimeType);
    return {
      audio: bytes.buffer,
      model: options.model,
      usage: {
        characterCount: options.input.length
      },
      cost: 0,
      // Mock cost is always 0
      format
    };
  }
  /**
   * Map MIME type to audio format for SpeechGenerationResult.
   * Defaults to "mp3" for unknown MIME types.
   *
   * @param mimeType - Audio MIME type string
   * @returns Audio format identifier
   */
  mimeTypeToAudioFormat(mimeType) {
    const mapping = {
      "audio/mp3": "mp3",
      "audio/mpeg": "mp3",
      "audio/wav": "wav",
      "audio/webm": "opus",
      "audio/ogg": "opus"
    };
    return mapping[mimeType] ?? "mp3";
  }
};
734
// Factory for a MockProviderAdapter backed by the global MockManager.
function createMockAdapter(options) {
  return new MockProviderAdapter(options);
}
737
+
738
+ // src/mock-builder.ts
739
+ import {
740
+ detectAudioMimeType,
741
+ detectImageMimeType,
742
+ isAudioPart,
743
+ isImagePart,
744
+ toBase64
745
+ } from "llmist";
746
+ import { extractMessageText } from "llmist";
747
/** True when a message content array contains at least one image part. */
function hasImageContent(content) {
  if (typeof content === "string") {
    return false;
  }
  return content.some((entry) => isImagePart(entry));
}
/** True when a message content array contains at least one audio part. */
function hasAudioContent(content) {
  if (typeof content === "string") {
    return false;
  }
  return content.some((entry) => isAudioPart(entry));
}
/** Count the image parts in a message content array (0 for plain strings). */
function countImages(content) {
  if (typeof content === "string") {
    return 0;
  }
  let total = 0;
  for (const entry of content) {
    if (isImagePart(entry)) {
      total += 1;
    }
  }
  return total;
}
759
+ var MockBuilder = class {
760
+ matchers = [];
761
+ response = {};
762
+ label;
763
+ isOnce = false;
764
+ id;
765
+ /**
766
+ * Match calls to a specific model (by name, supports partial matching).
767
+ *
768
+ * @example
769
+ * mockLLM().forModel('gpt-5')
770
+ * mockLLM().forModel('claude') // matches any Claude model
771
+ */
772
+ forModel(modelName) {
773
+ if (!modelName || modelName.trim() === "") {
774
+ throw new Error("Model name cannot be empty");
775
+ }
776
+ this.matchers.push((ctx) => ctx.modelName.includes(modelName));
777
+ return this;
778
+ }
779
+ /**
780
+ * Match calls to any model.
781
+ * Useful when you want to mock responses regardless of the model used.
782
+ *
783
+ * @example
784
+ * mockLLM().forAnyModel()
785
+ */
786
+ forAnyModel() {
787
+ this.matchers.push(() => true);
788
+ return this;
789
+ }
790
+ /**
791
+ * Match calls to a specific provider.
792
+ *
793
+ * @example
794
+ * mockLLM().forProvider('openai')
795
+ * mockLLM().forProvider('anthropic')
796
+ */
797
+ forProvider(provider) {
798
+ if (!provider || provider.trim() === "") {
799
+ throw new Error("Provider name cannot be empty");
800
+ }
801
+ this.matchers.push((ctx) => ctx.provider === provider);
802
+ return this;
803
+ }
804
+ /**
805
+ * Match calls to any provider.
806
+ * Useful when you want to mock responses regardless of the provider used.
807
+ *
808
+ * @example
809
+ * mockLLM().forAnyProvider()
810
+ */
811
+ forAnyProvider() {
812
+ this.matchers.push(() => true);
813
+ return this;
814
+ }
815
+ /**
816
+ * Match when any message contains the given text (case-insensitive).
817
+ *
818
+ * @example
819
+ * mockLLM().whenMessageContains('hello')
820
+ */
821
+ whenMessageContains(text) {
822
+ this.matchers.push(
823
+ (ctx) => ctx.messages.some(
824
+ (msg) => extractMessageText(msg.content).toLowerCase().includes(text.toLowerCase())
825
+ )
826
+ );
827
+ return this;
828
+ }
829
+ /**
830
+ * Match when the last message contains the given text (case-insensitive).
831
+ *
832
+ * @example
833
+ * mockLLM().whenLastMessageContains('goodbye')
834
+ */
835
+ whenLastMessageContains(text) {
836
+ this.matchers.push((ctx) => {
837
+ const lastMsg = ctx.messages[ctx.messages.length - 1];
838
+ if (!lastMsg) return false;
839
+ return extractMessageText(lastMsg.content).toLowerCase().includes(text.toLowerCase());
840
+ });
841
+ return this;
842
+ }
843
+ /**
844
+ * Match when any message matches the given regex.
845
+ *
846
+ * @example
847
+ * mockLLM().whenMessageMatches(/calculate \d+/)
848
+ */
849
+ whenMessageMatches(regex) {
850
+ this.matchers.push((ctx) => ctx.messages.some((msg) => regex.test(extractMessageText(msg.content))));
851
+ return this;
852
+ }
853
+ /**
854
+ * Match when a message with a specific role contains text.
855
+ *
856
+ * @example
857
+ * mockLLM().whenRoleContains('system', 'You are a helpful assistant')
858
+ */
859
+ whenRoleContains(role, text) {
860
+ this.matchers.push(
861
+ (ctx) => ctx.messages.some(
862
+ (msg) => msg.role === role && extractMessageText(msg.content).toLowerCase().includes(text.toLowerCase())
863
+ )
864
+ );
865
+ return this;
866
+ }
867
+ /**
868
+ * Match based on the number of messages in the conversation.
869
+ *
870
+ * @example
871
+ * mockLLM().whenMessageCount((count) => count > 10)
872
+ */
873
+ whenMessageCount(predicate) {
874
+ this.matchers.push((ctx) => predicate(ctx.messages.length));
875
+ return this;
876
+ }
877
+ /**
878
+ * Add a custom matcher function.
879
+ * This provides full control over matching logic.
880
+ *
881
+ * @example
882
+ * mockLLM().when((ctx) => {
883
+ * return ctx.options.temperature > 0.8;
884
+ * })
885
+ */
886
+ when(matcher) {
887
+ this.matchers.push(matcher);
888
+ return this;
889
+ }
890
+ // ==========================================================================
891
+ // Multimodal Matchers
892
+ // ==========================================================================
893
+ /**
894
+ * Match when any message contains an image.
895
+ *
896
+ * @example
897
+ * mockLLM().whenMessageHasImage().returns("I see an image of a sunset.")
898
+ */
899
+ whenMessageHasImage() {
900
+ this.matchers.push((ctx) => ctx.messages.some((msg) => hasImageContent(msg.content)));
901
+ return this;
902
+ }
903
+ /**
904
+ * Match when any message contains audio.
905
+ *
906
+ * @example
907
+ * mockLLM().whenMessageHasAudio().returns("I hear music playing.")
908
+ */
909
+ whenMessageHasAudio() {
910
+ this.matchers.push((ctx) => ctx.messages.some((msg) => hasAudioContent(msg.content)));
911
+ return this;
912
+ }
913
+ /**
914
+ * Match based on the number of images in the last message.
915
+ *
916
+ * @example
917
+ * mockLLM().whenImageCount((n) => n >= 2).returns("Comparing multiple images...")
918
+ */
919
+ whenImageCount(predicate) {
920
+ this.matchers.push((ctx) => {
921
+ const lastMsg = ctx.messages[ctx.messages.length - 1];
922
+ if (!lastMsg) return false;
923
+ return predicate(countImages(lastMsg.content));
924
+ });
925
+ return this;
926
+ }
927
+ /**
928
+ * Set the text response to return.
929
+ * Can be a static string or a function that returns a string dynamically.
930
+ *
931
+ * @example
932
+ * mockLLM().returns('Hello, world!')
933
+ * mockLLM().returns(() => `Response at ${Date.now()}`)
934
+ * mockLLM().returns((ctx) => `You said: ${ctx.messages[0]?.content}`)
935
+ */
936
+ returns(text) {
937
+ if (typeof text === "function") {
938
+ this.response = async (ctx) => {
939
+ const resolvedText = await Promise.resolve().then(() => text(ctx));
940
+ return { text: resolvedText };
941
+ };
942
+ } else {
943
+ if (typeof this.response === "function") {
944
+ throw new Error("Cannot use returns() after withResponse() with a function");
945
+ }
946
+ this.response.text = text;
947
+ }
948
+ return this;
949
+ }
950
+ /**
951
+ * Set gadget calls to include in the response.
952
+ *
953
+ * @example
954
+ * mockLLM().returnsGadgetCalls([
955
+ * { gadgetName: 'calculator', parameters: { op: 'add', a: 1, b: 2 } }
956
+ * ])
957
+ */
958
+ returnsGadgetCalls(calls) {
959
+ if (typeof this.response === "function") {
960
+ throw new Error("Cannot use returnsGadgetCalls() after withResponse() with a function");
961
+ }
962
+ this.response.gadgetCalls = calls;
963
+ return this;
964
+ }
965
+ /**
966
+ * Add a single gadget call to the response.
967
+ *
968
+ * @example
969
+ * mockLLM()
970
+ * .returnsGadgetCall('calculator', { op: 'add', a: 1, b: 2 })
971
+ * .returnsGadgetCall('logger', { message: 'Done!' })
972
+ */
973
+ returnsGadgetCall(gadgetName, parameters) {
974
+ if (typeof this.response === "function") {
975
+ throw new Error("Cannot use returnsGadgetCall() after withResponse() with a function");
976
+ }
977
+ if (!this.response.gadgetCalls) {
978
+ this.response.gadgetCalls = [];
979
+ }
980
+ this.response.gadgetCalls.push({ gadgetName, parameters });
981
+ return this;
982
+ }
983
+ // ==========================================================================
984
+ // Multimodal Response Helpers
985
+ // ==========================================================================
986
+ /**
987
+ * Return a single image in the response.
988
+ * Useful for mocking image generation endpoints.
989
+ *
990
+ * @param data - Image data (base64 string or Buffer)
991
+ * @param mimeType - MIME type (auto-detected if Buffer provided without type)
992
+ *
993
+ * @example
994
+ * mockLLM()
995
+ * .forModel('dall-e-3')
996
+ * .returnsImage(pngBuffer)
997
+ * .register();
998
+ */
999
+ returnsImage(data, mimeType) {
1000
+ if (typeof this.response === "function") {
1001
+ throw new Error("Cannot use returnsImage() after withResponse() with a function");
1002
+ }
1003
+ let imageData;
1004
+ let imageMime;
1005
+ if (typeof data === "string") {
1006
+ imageData = data;
1007
+ if (!mimeType) {
1008
+ throw new Error("MIME type is required when providing base64 string data");
1009
+ }
1010
+ imageMime = mimeType;
1011
+ } else {
1012
+ imageData = toBase64(data);
1013
+ const detected = mimeType ?? detectImageMimeType(data);
1014
+ if (!detected) {
1015
+ throw new Error(
1016
+ "Could not detect image MIME type. Please provide the mimeType parameter explicitly."
1017
+ );
1018
+ }
1019
+ imageMime = detected;
1020
+ }
1021
+ if (!this.response.images) {
1022
+ this.response.images = [];
1023
+ }
1024
+ this.response.images.push({ data: imageData, mimeType: imageMime });
1025
+ return this;
1026
+ }
1027
+ /**
1028
+ * Return multiple images in the response.
1029
+ *
1030
+ * @example
1031
+ * mockLLM()
1032
+ * .forModel('dall-e-3')
1033
+ * .returnsImages([
1034
+ * { data: pngBuffer1 },
1035
+ * { data: pngBuffer2 },
1036
+ * ])
1037
+ * .register();
1038
+ */
1039
+ returnsImages(images) {
1040
+ for (const img of images) {
1041
+ this.returnsImage(img.data, img.mimeType);
1042
+ if (img.revisedPrompt && this.response && typeof this.response !== "function") {
1043
+ const lastImage = this.response.images?.[this.response.images.length - 1];
1044
+ if (lastImage) {
1045
+ lastImage.revisedPrompt = img.revisedPrompt;
1046
+ }
1047
+ }
1048
+ }
1049
+ return this;
1050
+ }
1051
+ /**
1052
+ * Return audio data in the response.
1053
+ * Useful for mocking speech synthesis endpoints.
1054
+ *
1055
+ * @param data - Audio data (base64 string or Buffer)
1056
+ * @param mimeType - MIME type (auto-detected if Buffer provided without type)
1057
+ *
1058
+ * @example
1059
+ * mockLLM()
1060
+ * .forModel('tts-1')
1061
+ * .returnsAudio(mp3Buffer)
1062
+ * .register();
1063
+ */
1064
+ returnsAudio(data, mimeType) {
1065
+ if (typeof this.response === "function") {
1066
+ throw new Error("Cannot use returnsAudio() after withResponse() with a function");
1067
+ }
1068
+ let audioData;
1069
+ let audioMime;
1070
+ if (typeof data === "string") {
1071
+ audioData = data;
1072
+ if (!mimeType) {
1073
+ throw new Error("MIME type is required when providing base64 string data");
1074
+ }
1075
+ audioMime = mimeType;
1076
+ } else {
1077
+ audioData = toBase64(data);
1078
+ const detected = mimeType ?? detectAudioMimeType(data);
1079
+ if (!detected) {
1080
+ throw new Error(
1081
+ "Could not detect audio MIME type. Please provide the mimeType parameter explicitly."
1082
+ );
1083
+ }
1084
+ audioMime = detected;
1085
+ }
1086
+ this.response.audio = { data: audioData, mimeType: audioMime };
1087
+ return this;
1088
+ }
1089
+ /**
1090
+ * Set the complete mock response object.
1091
+ * This allows full control over all response properties.
1092
+ * Can also be a function that generates the response dynamically based on context.
1093
+ *
1094
+ * @example
1095
+ * // Static response
1096
+ * mockLLM().withResponse({
1097
+ * text: 'Hello',
1098
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
1099
+ * finishReason: 'stop'
1100
+ * })
1101
+ *
1102
+ * @example
1103
+ * // Dynamic response
1104
+ * mockLLM().withResponse((ctx) => ({
1105
+ * text: `You said: ${ctx.messages[ctx.messages.length - 1]?.content}`,
1106
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
1107
+ * }))
1108
+ */
1109
+ withResponse(response) {
1110
+ this.response = response;
1111
+ return this;
1112
+ }
1113
+ /**
1114
+ * Set simulated token usage.
1115
+ *
1116
+ * @example
1117
+ * mockLLM().withUsage({ inputTokens: 100, outputTokens: 50, totalTokens: 150 })
1118
+ */
1119
+ withUsage(usage) {
1120
+ if (typeof this.response === "function") {
1121
+ throw new Error("Cannot use withUsage() after withResponse() with a function");
1122
+ }
1123
+ if (usage.inputTokens < 0 || usage.outputTokens < 0 || usage.totalTokens < 0) {
1124
+ throw new Error("Token counts cannot be negative");
1125
+ }
1126
+ if (usage.totalTokens !== usage.inputTokens + usage.outputTokens) {
1127
+ throw new Error("totalTokens must equal inputTokens + outputTokens");
1128
+ }
1129
+ this.response.usage = usage;
1130
+ return this;
1131
+ }
1132
+ /**
1133
+ * Set the finish reason.
1134
+ *
1135
+ * @example
1136
+ * mockLLM().withFinishReason('stop')
1137
+ * mockLLM().withFinishReason('length')
1138
+ */
1139
+ withFinishReason(reason) {
1140
+ if (typeof this.response === "function") {
1141
+ throw new Error("Cannot use withFinishReason() after withResponse() with a function");
1142
+ }
1143
+ this.response.finishReason = reason;
1144
+ return this;
1145
+ }
1146
+ /**
1147
+ * Set initial delay before streaming starts (simulates network latency).
1148
+ *
1149
+ * @example
1150
+ * mockLLM().withDelay(100) // 100ms delay
1151
+ */
1152
+ withDelay(ms) {
1153
+ if (typeof this.response === "function") {
1154
+ throw new Error("Cannot use withDelay() after withResponse() with a function");
1155
+ }
1156
+ if (ms < 0) {
1157
+ throw new Error("Delay must be non-negative");
1158
+ }
1159
+ this.response.delayMs = ms;
1160
+ return this;
1161
+ }
1162
+ /**
1163
+ * Set delay between stream chunks (simulates realistic streaming).
1164
+ *
1165
+ * @example
1166
+ * mockLLM().withStreamDelay(10) // 10ms between chunks
1167
+ */
1168
+ withStreamDelay(ms) {
1169
+ if (typeof this.response === "function") {
1170
+ throw new Error("Cannot use withStreamDelay() after withResponse() with a function");
1171
+ }
1172
+ if (ms < 0) {
1173
+ throw new Error("Stream delay must be non-negative");
1174
+ }
1175
+ this.response.streamDelayMs = ms;
1176
+ return this;
1177
+ }
1178
+ /**
1179
+ * Set a label for this mock (useful for debugging).
1180
+ *
1181
+ * @example
1182
+ * mockLLM().withLabel('greeting mock')
1183
+ */
1184
+ withLabel(label) {
1185
+ this.label = label;
1186
+ return this;
1187
+ }
1188
+ /**
1189
+ * Set a specific ID for this mock.
1190
+ *
1191
+ * @example
1192
+ * mockLLM().withId('my-custom-mock-id')
1193
+ */
1194
+ withId(id) {
1195
+ this.id = id;
1196
+ return this;
1197
+ }
1198
+ /**
1199
+ * Mark this mock as one-time use (will be removed after first match).
1200
+ *
1201
+ * @example
1202
+ * mockLLM().once()
1203
+ */
1204
+ once() {
1205
+ this.isOnce = true;
1206
+ return this;
1207
+ }
1208
+ /**
1209
+ * Build the mock registration without registering it.
1210
+ * Useful if you want to register it manually later.
1211
+ *
1212
+ * @returns The built MockRegistration object (without id if not specified)
1213
+ */
1214
+ build() {
1215
+ if (this.matchers.length === 0) {
1216
+ throw new Error(
1217
+ "Mock must have at least one matcher. Use .when(), .forModel(), .forProvider(), etc."
1218
+ );
1219
+ }
1220
+ const combinedMatcher = async (ctx) => {
1221
+ for (const matcher of this.matchers) {
1222
+ const matches = await Promise.resolve(matcher(ctx));
1223
+ if (!matches) return false;
1224
+ }
1225
+ return true;
1226
+ };
1227
+ return {
1228
+ id: this.id,
1229
+ matcher: combinedMatcher,
1230
+ response: this.response,
1231
+ label: this.label,
1232
+ once: this.isOnce
1233
+ };
1234
+ }
1235
+ /**
1236
+ * Register this mock with the global MockManager.
1237
+ * Returns the ID of the registered mock.
1238
+ *
1239
+ * @example
1240
+ * const mockId = mockLLM().forModel('gpt-5').returns('Hello!').register();
1241
+ * // Later: getMockManager().unregister(mockId);
1242
+ */
1243
+ register() {
1244
+ const mockManager = getMockManager();
1245
+ const registration = this.build();
1246
+ return mockManager.register(registration);
1247
+ }
1248
+ };
1249
+ function mockLLM() {
1250
+ return new MockBuilder();
1251
+ }
1252
+
1253
+ // src/mock-client.ts
1254
+ import { LLMist } from "llmist";
1255
+ function createMockClient(options) {
1256
+ return new LLMist({
1257
+ adapters: [new MockProviderAdapter(options)],
1258
+ autoDiscoverProviders: false,
1259
+ defaultProvider: "mock"
1260
+ });
1261
+ }
1262
+
1263
+ // src/mock-conversation.ts
1264
+ var MockConversationManager = class {
1265
+ history;
1266
+ baseMessages;
1267
+ replacementHistory;
1268
+ replaceHistoryCallCount = 0;
1269
+ addedMessages = [];
1270
+ constructor(history = [], baseMessages = []) {
1271
+ this.history = [...history];
1272
+ this.baseMessages = [...baseMessages];
1273
+ }
1274
+ addUserMessage(content) {
1275
+ const msg = { role: "user", content };
1276
+ this.history.push(msg);
1277
+ this.addedMessages.push(msg);
1278
+ }
1279
+ addAssistantMessage(content) {
1280
+ const msg = { role: "assistant", content };
1281
+ this.history.push(msg);
1282
+ this.addedMessages.push(msg);
1283
+ }
1284
+ addGadgetCallResult(gadgetName, parameters, result, invocationId) {
1285
+ const assistantMsg = {
1286
+ role: "assistant",
1287
+ content: `!!!GADGET_START:${gadgetName}:${invocationId}
1288
+ ${JSON.stringify(parameters)}
1289
+ !!!GADGET_END`
1290
+ };
1291
+ const resultMsg = {
1292
+ role: "user",
1293
+ content: `Result (${invocationId}): ${result}`
1294
+ };
1295
+ this.history.push(assistantMsg);
1296
+ this.history.push(resultMsg);
1297
+ this.addedMessages.push(assistantMsg);
1298
+ this.addedMessages.push(resultMsg);
1299
+ }
1300
+ getMessages() {
1301
+ return [...this.baseMessages, ...this.history];
1302
+ }
1303
+ getHistoryMessages() {
1304
+ return [...this.history];
1305
+ }
1306
+ getBaseMessages() {
1307
+ return [...this.baseMessages];
1308
+ }
1309
+ getConversationHistory() {
1310
+ return [...this.history];
1311
+ }
1312
+ replaceHistory(newHistory) {
1313
+ this.replacementHistory = [...newHistory];
1314
+ this.history = [...newHistory];
1315
+ this.replaceHistoryCallCount++;
1316
+ }
1317
+ // ============================================
1318
+ // Test Helper Methods
1319
+ // ============================================
1320
+ /**
1321
+ * Check if replaceHistory was called.
1322
+ */
1323
+ wasReplaceHistoryCalled() {
1324
+ return this.replaceHistoryCallCount > 0;
1325
+ }
1326
+ /**
1327
+ * Get the number of times replaceHistory was called.
1328
+ */
1329
+ getReplaceHistoryCallCount() {
1330
+ return this.replaceHistoryCallCount;
1331
+ }
1332
+ /**
1333
+ * Get the most recent history passed to replaceHistory.
1334
+ * Returns undefined if replaceHistory was never called.
1335
+ */
1336
+ getReplacementHistory() {
1337
+ return this.replacementHistory;
1338
+ }
1339
+ /**
1340
+ * Get all messages that were added via add* methods.
1341
+ */
1342
+ getAddedMessages() {
1343
+ return [...this.addedMessages];
1344
+ }
1345
+ /**
1346
+ * Reset all tracking state while preserving the conversation.
1347
+ */
1348
+ resetTracking() {
1349
+ this.replacementHistory = void 0;
1350
+ this.replaceHistoryCallCount = 0;
1351
+ this.addedMessages = [];
1352
+ }
1353
+ /**
1354
+ * Completely reset the mock to initial state.
1355
+ * Note: baseMessages cannot be changed after construction.
1356
+ */
1357
+ reset(history = []) {
1358
+ this.history = [...history];
1359
+ this.resetTracking();
1360
+ }
1361
+ /**
1362
+ * Set the history directly (for test setup).
1363
+ */
1364
+ setHistory(messages) {
1365
+ this.history = [...messages];
1366
+ }
1367
+ /**
1368
+ * Get the current history length.
1369
+ */
1370
+ getHistoryLength() {
1371
+ return this.history.length;
1372
+ }
1373
+ /**
1374
+ * Get total message count (base + history).
1375
+ */
1376
+ getTotalMessageCount() {
1377
+ return this.baseMessages.length + this.history.length;
1378
+ }
1379
+ };
1380
+ function createMockConversationManager(turnCount, baseMessages = []) {
1381
+ const history = [];
1382
+ for (let i = 0; i < turnCount; i++) {
1383
+ history.push({
1384
+ role: "user",
1385
+ content: `User message ${i + 1}: This is turn ${i + 1} of the conversation.`
1386
+ });
1387
+ history.push({
1388
+ role: "assistant",
1389
+ content: `Assistant response ${i + 1}: I acknowledge turn ${i + 1}.`
1390
+ });
1391
+ }
1392
+ return new MockConversationManager(history, baseMessages);
1393
+ }
1394
+
1395
+ // src/mock-gadget.ts
1396
+ import { AbstractGadget } from "llmist";
1397
+ var MockGadgetImpl = class extends AbstractGadget {
1398
+ name;
1399
+ description;
1400
+ parameterSchema;
1401
+ timeoutMs;
1402
+ calls = [];
1403
+ resultValue;
1404
+ resultFn;
1405
+ errorToThrow;
1406
+ delayMs;
1407
+ shouldTrackCalls;
1408
+ constructor(config) {
1409
+ super();
1410
+ this.name = config.name;
1411
+ this.description = config.description ?? `Mock gadget: ${config.name}`;
1412
+ this.parameterSchema = config.schema;
1413
+ this.resultValue = config.result;
1414
+ this.resultFn = config.resultFn;
1415
+ this.delayMs = config.delayMs ?? 0;
1416
+ this.shouldTrackCalls = config.trackCalls ?? true;
1417
+ this.timeoutMs = config.timeoutMs;
1418
+ if (config.error) {
1419
+ this.errorToThrow = typeof config.error === "string" ? new Error(config.error) : config.error;
1420
+ }
1421
+ }
1422
+ async execute(params) {
1423
+ if (this.shouldTrackCalls) {
1424
+ this.calls.push({ params: { ...params }, timestamp: Date.now() });
1425
+ }
1426
+ if (this.delayMs > 0) {
1427
+ await new Promise((resolve) => setTimeout(resolve, this.delayMs));
1428
+ }
1429
+ if (this.errorToThrow) {
1430
+ throw this.errorToThrow;
1431
+ }
1432
+ if (this.resultFn) {
1433
+ return this.resultFn(params);
1434
+ }
1435
+ return this.resultValue ?? "mock result";
1436
+ }
1437
+ getCalls() {
1438
+ return [...this.calls];
1439
+ }
1440
+ getCallCount() {
1441
+ return this.calls.length;
1442
+ }
1443
+ resetCalls() {
1444
+ this.calls = [];
1445
+ }
1446
+ wasCalledWith(params) {
1447
+ return this.calls.some(
1448
+ (call) => Object.entries(params).every(([key, value]) => call.params[key] === value)
1449
+ );
1450
+ }
1451
+ getLastCall() {
1452
+ return this.calls.length > 0 ? this.calls[this.calls.length - 1] : void 0;
1453
+ }
1454
+ };
1455
+ function createMockGadget(config) {
1456
+ return new MockGadgetImpl(config);
1457
+ }
1458
+ var MockGadgetBuilder = class {
1459
+ config = { name: "MockGadget" };
1460
+ /**
1461
+ * Set the gadget name.
1462
+ */
1463
+ withName(name) {
1464
+ this.config.name = name;
1465
+ return this;
1466
+ }
1467
+ /**
1468
+ * Set the gadget description.
1469
+ */
1470
+ withDescription(description) {
1471
+ this.config.description = description;
1472
+ return this;
1473
+ }
1474
+ /**
1475
+ * Set the parameter schema.
1476
+ */
1477
+ withSchema(schema) {
1478
+ this.config.schema = schema;
1479
+ return this;
1480
+ }
1481
+ /**
1482
+ * Set a static result to return.
1483
+ */
1484
+ returns(result) {
1485
+ this.config.result = result;
1486
+ this.config.resultFn = void 0;
1487
+ return this;
1488
+ }
1489
+ /**
1490
+ * Set a dynamic result function.
1491
+ */
1492
+ returnsAsync(resultFn) {
1493
+ this.config.resultFn = resultFn;
1494
+ this.config.result = void 0;
1495
+ return this;
1496
+ }
1497
+ /**
1498
+ * Make the gadget throw an error on execution.
1499
+ */
1500
+ throws(error) {
1501
+ this.config.error = error;
1502
+ return this;
1503
+ }
1504
+ /**
1505
+ * Add execution delay.
1506
+ */
1507
+ withDelay(ms) {
1508
+ this.config.delayMs = ms;
1509
+ return this;
1510
+ }
1511
+ /**
1512
+ * Set timeout for the gadget.
1513
+ */
1514
+ withTimeout(ms) {
1515
+ this.config.timeoutMs = ms;
1516
+ return this;
1517
+ }
1518
+ /**
1519
+ * Enable call tracking (enabled by default).
1520
+ */
1521
+ trackCalls() {
1522
+ this.config.trackCalls = true;
1523
+ return this;
1524
+ }
1525
+ /**
1526
+ * Disable call tracking.
1527
+ */
1528
+ noTracking() {
1529
+ this.config.trackCalls = false;
1530
+ return this;
1531
+ }
1532
+ /**
1533
+ * Build the mock gadget.
1534
+ */
1535
+ build() {
1536
+ return createMockGadget(this.config);
1537
+ }
1538
+ };
1539
+ function mockGadget() {
1540
+ return new MockGadgetBuilder();
1541
+ }
1542
+
1543
+ // src/stream-helpers.ts
1544
+ function createTestStream(chunks) {
1545
+ return (async function* () {
1546
+ for (const chunk of chunks) {
1547
+ yield chunk;
1548
+ }
1549
+ })();
1550
+ }
1551
+ function createTextStream(text, options) {
1552
+ return (async function* () {
1553
+ if (options?.delayMs) {
1554
+ await sleep3(options.delayMs);
1555
+ }
1556
+ const chunkSize = options?.chunkSize ?? text.length;
1557
+ const chunks = [];
1558
+ for (let i = 0; i < text.length; i += chunkSize) {
1559
+ chunks.push(text.slice(i, i + chunkSize));
1560
+ }
1561
+ for (let i = 0; i < chunks.length; i++) {
1562
+ const isLast = i === chunks.length - 1;
1563
+ const chunk = { text: chunks[i] };
1564
+ if (isLast) {
1565
+ chunk.finishReason = options?.finishReason ?? "stop";
1566
+ const inputTokens = Math.ceil(text.length / 4);
1567
+ const outputTokens = Math.ceil(text.length / 4);
1568
+ chunk.usage = options?.usage ?? {
1569
+ inputTokens,
1570
+ outputTokens,
1571
+ totalTokens: inputTokens + outputTokens
1572
+ };
1573
+ }
1574
+ yield chunk;
1575
+ if (options?.chunkDelayMs && !isLast) {
1576
+ await sleep3(options.chunkDelayMs);
1577
+ }
1578
+ }
1579
+ })();
1580
+ }
1581
+ async function collectStream(stream) {
1582
+ const chunks = [];
1583
+ for await (const chunk of stream) {
1584
+ chunks.push(chunk);
1585
+ }
1586
+ return chunks;
1587
+ }
1588
+ async function collectStreamText(stream) {
1589
+ let text = "";
1590
+ for await (const chunk of stream) {
1591
+ text += chunk.text ?? "";
1592
+ }
1593
+ return text;
1594
+ }
1595
+ async function getStreamFinalChunk(stream) {
1596
+ let lastChunk;
1597
+ for await (const chunk of stream) {
1598
+ lastChunk = chunk;
1599
+ }
1600
+ return lastChunk;
1601
+ }
1602
+ function createEmptyStream() {
1603
+ return (async function* () {
1604
+ })();
1605
+ }
1606
+ function createErrorStream(chunksBeforeError, error) {
1607
+ return (async function* () {
1608
+ for (const chunk of chunksBeforeError) {
1609
+ yield chunk;
1610
+ }
1611
+ throw error;
1612
+ })();
1613
+ }
1614
+ function sleep3(ms) {
1615
+ return new Promise((resolve) => setTimeout(resolve, ms));
1616
+ }
1617
+ export {
1618
+ MockBuilder,
1619
+ MockConversationManager,
1620
+ MockGadgetBuilder,
1621
+ MockManager,
1622
+ MockPromptRecorder,
1623
+ MockProviderAdapter,
1624
+ collectOutput,
1625
+ collectStream,
1626
+ collectStreamText,
1627
+ createAssistantMessage,
1628
+ createConversation,
1629
+ createConversationWithGadgets,
1630
+ createEmptyStream,
1631
+ createErrorStream,
1632
+ createLargeConversation,
1633
+ createMinimalConversation,
1634
+ createMockAdapter,
1635
+ createMockClient,
1636
+ createMockConversationManager,
1637
+ createMockGadget,
1638
+ createMockPrompt,
1639
+ createMockReadable,
1640
+ createMockStream,
1641
+ createMockWritable,
1642
+ createSystemMessage,
1643
+ createTestEnvironment,
1644
+ createTestStream,
1645
+ createTextMockStream,
1646
+ createTextStream,
1647
+ createUserMessage,
1648
+ estimateTokens,
1649
+ getBufferedOutput,
1650
+ getMockManager,
1651
+ getStreamFinalChunk,
1652
+ mockGadget,
1653
+ mockLLM,
1654
+ testGadget,
1655
+ testGadgetBatch,
1656
+ waitFor
1657
+ };
1658
+ //# sourceMappingURL=index.js.map