@llmist/testing 9.0.0

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/dist/index.cjs ADDED
@@ -0,0 +1,1717 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
+ MockBuilder: () => MockBuilder,
24
+ MockConversationManager: () => MockConversationManager,
25
+ MockGadgetBuilder: () => MockGadgetBuilder,
26
+ MockManager: () => MockManager,
27
+ MockPromptRecorder: () => MockPromptRecorder,
28
+ MockProviderAdapter: () => MockProviderAdapter,
29
+ collectOutput: () => collectOutput,
30
+ collectStream: () => collectStream,
31
+ collectStreamText: () => collectStreamText,
32
+ createAssistantMessage: () => createAssistantMessage,
33
+ createConversation: () => createConversation,
34
+ createConversationWithGadgets: () => createConversationWithGadgets,
35
+ createEmptyStream: () => createEmptyStream,
36
+ createErrorStream: () => createErrorStream,
37
+ createLargeConversation: () => createLargeConversation,
38
+ createMinimalConversation: () => createMinimalConversation,
39
+ createMockAdapter: () => createMockAdapter,
40
+ createMockClient: () => createMockClient,
41
+ createMockConversationManager: () => createMockConversationManager,
42
+ createMockGadget: () => createMockGadget,
43
+ createMockPrompt: () => createMockPrompt,
44
+ createMockReadable: () => createMockReadable,
45
+ createMockStream: () => createMockStream,
46
+ createMockWritable: () => createMockWritable,
47
+ createSystemMessage: () => createSystemMessage,
48
+ createTestEnvironment: () => createTestEnvironment,
49
+ createTestStream: () => createTestStream,
50
+ createTextMockStream: () => createTextMockStream,
51
+ createTextStream: () => createTextStream,
52
+ createUserMessage: () => createUserMessage,
53
+ estimateTokens: () => estimateTokens,
54
+ getBufferedOutput: () => getBufferedOutput,
55
+ getMockManager: () => getMockManager,
56
+ getStreamFinalChunk: () => getStreamFinalChunk,
57
+ mockGadget: () => mockGadget,
58
+ mockLLM: () => mockLLM,
59
+ testGadget: () => testGadget,
60
+ testGadgetBatch: () => testGadgetBatch,
61
+ waitFor: () => waitFor
62
+ });
63
+ module.exports = __toCommonJS(index_exports);
64
+
65
+ // src/cli-helpers.ts
66
+ var import_node_stream = require("stream");
67
+ function createTestEnvironment(options = {}) {
68
+ const stdin = createMockReadable(options.stdin);
69
+ const stdout = new import_node_stream.PassThrough();
70
+ const stderr = new import_node_stream.PassThrough();
71
+ let exitCode;
72
+ return {
73
+ stdin,
74
+ stdout,
75
+ stderr,
76
+ isTTY: options.isTTY ?? false,
77
+ argv: options.argv ?? ["node", "llmist"],
78
+ env: { ...filterDefinedEnv(process.env), ...options.env },
79
+ get exitCode() {
80
+ return exitCode;
81
+ },
82
+ setExitCode: (code) => {
83
+ exitCode = code;
84
+ }
85
+ };
86
+ }
87
+ function createMockReadable(input) {
88
+ if (!input) {
89
+ const stream2 = new import_node_stream.Readable({ read() {
90
+ } });
91
+ stream2.push(null);
92
+ return stream2;
93
+ }
94
+ const content = Array.isArray(input) ? `${input.join("\n")}
95
+ ` : input;
96
+ const stream = new import_node_stream.Readable({ read() {
97
+ } });
98
+ stream.push(content);
99
+ stream.push(null);
100
+ return stream;
101
+ }
102
+ function createMockWritable() {
103
+ const chunks = [];
104
+ const stream = new import_node_stream.Writable({
105
+ write(chunk, _encoding, callback) {
106
+ chunks.push(Buffer.from(chunk));
107
+ callback();
108
+ }
109
+ });
110
+ stream.getData = () => Buffer.concat(chunks).toString("utf8");
111
+ return stream;
112
+ }
113
+ async function collectOutput(stream, timeout = 5e3) {
114
+ return new Promise((resolve, reject) => {
115
+ const chunks = [];
116
+ const timeoutId = setTimeout(() => {
117
+ resolve(Buffer.concat(chunks).toString("utf8"));
118
+ }, timeout);
119
+ stream.on("data", (chunk) => {
120
+ chunks.push(Buffer.from(chunk));
121
+ });
122
+ stream.on("end", () => {
123
+ clearTimeout(timeoutId);
124
+ resolve(Buffer.concat(chunks).toString("utf8"));
125
+ });
126
+ stream.on("error", (err) => {
127
+ clearTimeout(timeoutId);
128
+ reject(err);
129
+ });
130
+ });
131
+ }
132
+ function getBufferedOutput(stream) {
133
+ const chunks = [];
134
+ for (; ; ) {
135
+ const chunk = stream.read();
136
+ if (chunk === null) break;
137
+ chunks.push(chunk);
138
+ }
139
+ return Buffer.concat(chunks).toString("utf8");
140
+ }
141
+ function createMockPrompt(responses) {
142
+ let index = 0;
143
+ return async (_question) => {
144
+ if (index >= responses.length) {
145
+ throw new Error(`Mock prompt exhausted: no response for question ${index + 1}`);
146
+ }
147
+ return responses[index++];
148
+ };
149
+ }
150
+ var MockPromptRecorder = class {
151
+ responses;
152
+ index = 0;
153
+ questions = [];
154
+ constructor(responses) {
155
+ this.responses = responses;
156
+ }
157
+ /**
158
+ * The prompt function to use in tests.
159
+ */
160
+ prompt = async (question) => {
161
+ this.questions.push(question);
162
+ if (this.index >= this.responses.length) {
163
+ throw new Error(`Mock prompt exhausted after ${this.index} questions`);
164
+ }
165
+ return this.responses[this.index++];
166
+ };
167
+ /**
168
+ * Get all questions that were asked.
169
+ */
170
+ getQuestions() {
171
+ return [...this.questions];
172
+ }
173
+ /**
174
+ * Get the number of questions asked.
175
+ */
176
+ getQuestionCount() {
177
+ return this.questions.length;
178
+ }
179
+ /**
180
+ * Reset the recorder state.
181
+ */
182
+ reset(newResponses) {
183
+ this.index = 0;
184
+ this.questions = [];
185
+ if (newResponses) {
186
+ this.responses = newResponses;
187
+ }
188
+ }
189
+ };
190
+ async function waitFor(condition, timeout = 5e3, interval = 50) {
191
+ const startTime = Date.now();
192
+ while (!condition()) {
193
+ if (Date.now() - startTime > timeout) {
194
+ throw new Error(`waitFor timed out after ${timeout}ms`);
195
+ }
196
+ await sleep(interval);
197
+ }
198
+ }
199
+ function sleep(ms) {
200
+ return new Promise((resolve) => setTimeout(resolve, ms));
201
+ }
202
+ function filterDefinedEnv(env) {
203
+ const result = {};
204
+ for (const [key, value] of Object.entries(env)) {
205
+ if (value !== void 0) {
206
+ result[key] = value;
207
+ }
208
+ }
209
+ return result;
210
+ }
211
+
212
+ // src/conversation-fixtures.ts
213
+ function createConversation(turnCount, options) {
214
+ const messages = [];
215
+ const userPrefix = options?.userPrefix ?? "User message";
216
+ const assistantPrefix = options?.assistantPrefix ?? "Assistant response";
217
+ const contentLength = options?.contentLength ?? 100;
218
+ for (let i = 0; i < turnCount; i++) {
219
+ const padding = " ".repeat(Math.max(0, contentLength - 30));
220
+ messages.push({
221
+ role: "user",
222
+ content: `${userPrefix} ${i + 1}: This is turn ${i + 1} of the conversation.${padding}`
223
+ });
224
+ messages.push({
225
+ role: "assistant",
226
+ content: `${assistantPrefix} ${i + 1}: I acknowledge turn ${i + 1}.${padding}`
227
+ });
228
+ }
229
+ return messages;
230
+ }
231
+ function createConversationWithGadgets(turnCount, gadgetCallsPerTurn = 1, options) {
232
+ const messages = [];
233
+ const gadgetNames = options?.gadgetNames ?? ["search", "calculate", "read"];
234
+ const contentLength = options?.contentLength ?? 50;
235
+ let gadgetIndex = 0;
236
+ for (let turn = 0; turn < turnCount; turn++) {
237
+ messages.push({
238
+ role: "user",
239
+ content: `User request ${turn + 1}${"x".repeat(contentLength)}`
240
+ });
241
+ for (let g = 0; g < gadgetCallsPerTurn; g++) {
242
+ const gadgetName = gadgetNames[gadgetIndex % gadgetNames.length];
243
+ gadgetIndex++;
244
+ messages.push({
245
+ role: "assistant",
246
+ content: `!!!GADGET_START:${gadgetName}
247
+ !!!ARG:query
248
+ test query ${turn}-${g}
249
+ !!!GADGET_END`
250
+ });
251
+ messages.push({
252
+ role: "user",
253
+ content: `Result: Gadget ${gadgetName} returned result for query ${turn}-${g}`
254
+ });
255
+ }
256
+ messages.push({
257
+ role: "assistant",
258
+ content: `Final response for turn ${turn + 1}${"y".repeat(contentLength)}`
259
+ });
260
+ }
261
+ return messages;
262
+ }
263
+ function estimateTokens(messages) {
264
+ return Math.ceil(messages.reduce((sum, msg) => sum + (msg.content?.length ?? 0), 0) / 4);
265
+ }
266
+ function createUserMessage(content) {
267
+ return { role: "user", content };
268
+ }
269
+ function createAssistantMessage(content) {
270
+ return { role: "assistant", content };
271
+ }
272
+ function createSystemMessage(content) {
273
+ return { role: "system", content };
274
+ }
275
+ function createMinimalConversation() {
276
+ return [
277
+ { role: "user", content: "Hello" },
278
+ { role: "assistant", content: "Hi there!" }
279
+ ];
280
+ }
281
+ function createLargeConversation(targetTokens, options) {
282
+ const tokensPerTurn = options?.tokensPerTurn ?? 200;
283
+ const turnsNeeded = Math.ceil(targetTokens / tokensPerTurn);
284
+ const charsPerMessage = Math.floor(tokensPerTurn * 4 / 2);
285
+ return createConversation(turnsNeeded, {
286
+ contentLength: charsPerMessage
287
+ });
288
+ }
289
+
290
+ // src/gadget-testing.ts
291
+ var import_llmist = require("llmist");
292
+ async function testGadget(gadget, params, options) {
293
+ let validatedParams = params;
294
+ if (!options?.skipValidation) {
295
+ const validationResult = (0, import_llmist.validateGadgetParams)(gadget, params);
296
+ if (!validationResult.success) {
297
+ return {
298
+ error: validationResult.error,
299
+ validatedParams: params
300
+ };
301
+ }
302
+ validatedParams = validationResult.data;
303
+ }
304
+ try {
305
+ const rawResult = await Promise.resolve(gadget.execute(validatedParams));
306
+ if (typeof rawResult === "string") {
307
+ return {
308
+ result: rawResult,
309
+ validatedParams,
310
+ cost: 0
311
+ };
312
+ }
313
+ return {
314
+ result: rawResult.result,
315
+ validatedParams,
316
+ cost: rawResult.cost ?? 0
317
+ };
318
+ } catch (error) {
319
+ return {
320
+ error: error instanceof Error ? error.message : String(error),
321
+ validatedParams
322
+ };
323
+ }
324
+ }
325
+ async function testGadgetBatch(gadget, paramSets, options) {
326
+ return Promise.all(paramSets.map((params) => testGadget(gadget, params, options)));
327
+ }
328
+
329
+ // src/mock-manager.ts
330
+ var import_llmist2 = require("llmist");
331
+ var MockManager = class _MockManager {
332
+ static instance = null;
333
+ mocks = /* @__PURE__ */ new Map();
334
+ stats = /* @__PURE__ */ new Map();
335
+ options;
336
+ logger;
337
+ nextId = 1;
338
+ constructor(options = {}) {
339
+ this.options = {
340
+ strictMode: options.strictMode ?? false,
341
+ debug: options.debug ?? false,
342
+ recordStats: options.recordStats ?? true
343
+ };
344
+ this.logger = (0, import_llmist2.createLogger)({ name: "MockManager", minLevel: this.options.debug ? 2 : 3 });
345
+ }
346
+ /**
347
+ * Get the global MockManager instance.
348
+ * Creates one if it doesn't exist.
349
+ */
350
+ static getInstance(options) {
351
+ if (!_MockManager.instance) {
352
+ _MockManager.instance = new _MockManager(options);
353
+ } else if (options) {
354
+ console.warn(
355
+ "MockManager.getInstance() called with options, but instance already exists. Options are ignored. Use setOptions() to update options or reset() to reinitialize."
356
+ );
357
+ }
358
+ return _MockManager.instance;
359
+ }
360
+ /**
361
+ * Reset the global instance (useful for testing).
362
+ */
363
+ static reset() {
364
+ _MockManager.instance = null;
365
+ }
366
+ /**
367
+ * Register a new mock.
368
+ *
369
+ * @param registration - The mock registration configuration
370
+ * @returns The ID of the registered mock
371
+ *
372
+ * @example
373
+ * const manager = MockManager.getInstance();
374
+ * const mockId = manager.register({
375
+ * label: 'GPT-4 mock',
376
+ * matcher: (ctx) => ctx.modelName.includes('gpt-4'),
377
+ * response: { text: 'Mocked response' }
378
+ * });
379
+ */
380
+ register(registration) {
381
+ const id = registration.id ?? `mock-${this.nextId++}`;
382
+ const mock = {
383
+ id,
384
+ matcher: registration.matcher,
385
+ response: registration.response,
386
+ label: registration.label,
387
+ once: registration.once
388
+ };
389
+ this.mocks.set(id, mock);
390
+ if (this.options.recordStats) {
391
+ this.stats.set(id, { matchCount: 0 });
392
+ }
393
+ this.logger.debug(
394
+ `Registered mock: ${id}${mock.label ? ` (${mock.label})` : ""}${mock.once ? " [once]" : ""}`
395
+ );
396
+ return id;
397
+ }
398
+ /**
399
+ * Unregister a mock by ID.
400
+ */
401
+ unregister(id) {
402
+ const deleted = this.mocks.delete(id);
403
+ if (deleted) {
404
+ this.stats.delete(id);
405
+ this.logger.debug(`Unregistered mock: ${id}`);
406
+ }
407
+ return deleted;
408
+ }
409
+ /**
410
+ * Clear all registered mocks.
411
+ */
412
+ clear() {
413
+ this.mocks.clear();
414
+ this.stats.clear();
415
+ this.logger.debug("Cleared all mocks");
416
+ }
417
+ /**
418
+ * Find and return a matching mock for the given context.
419
+ * Returns the mock response if found, null otherwise.
420
+ */
421
+ async findMatch(context) {
422
+ this.logger.debug(
423
+ `Finding match for: ${context.provider}:${context.modelName} (${this.mocks.size} mocks registered)`
424
+ );
425
+ for (const [id, mock] of this.mocks.entries()) {
426
+ let matches = false;
427
+ try {
428
+ matches = await Promise.resolve(mock.matcher(context));
429
+ } catch (error) {
430
+ this.logger.warn(`Error in matcher ${id}:`, error);
431
+ if (this.options.strictMode) {
432
+ throw new Error(`Matcher error in mock ${id}: ${error}`);
433
+ }
434
+ continue;
435
+ }
436
+ if (matches) {
437
+ this.logger.debug(`Mock matched: ${id}${mock.label ? ` (${mock.label})` : ""}`);
438
+ if (this.options.recordStats) {
439
+ const stats = this.stats.get(id);
440
+ if (stats) {
441
+ stats.matchCount++;
442
+ stats.lastUsed = /* @__PURE__ */ new Date();
443
+ }
444
+ }
445
+ if (mock.once) {
446
+ this.mocks.delete(id);
447
+ this.stats.delete(id);
448
+ this.logger.debug(`Removed one-time mock: ${id}`);
449
+ }
450
+ const response = typeof mock.response === "function" ? await Promise.resolve(mock.response(context)) : mock.response;
451
+ return response;
452
+ }
453
+ }
454
+ this.logger.debug("No mock matched");
455
+ if (this.options.strictMode) {
456
+ throw new Error(
457
+ `No mock registered for ${context.provider}:${context.modelName}. Register a mock using MockManager.getInstance().register() or disable strictMode.`
458
+ );
459
+ }
460
+ return {
461
+ text: "",
462
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
463
+ finishReason: "stop"
464
+ };
465
+ }
466
+ /**
467
+ * Get statistics for a specific mock.
468
+ */
469
+ getStats(id) {
470
+ return this.stats.get(id);
471
+ }
472
+ /**
473
+ * Get all registered mock IDs.
474
+ */
475
+ getMockIds() {
476
+ return Array.from(this.mocks.keys());
477
+ }
478
+ /**
479
+ * Get the number of registered mocks.
480
+ */
481
+ getCount() {
482
+ return this.mocks.size;
483
+ }
484
+ /**
485
+ * Update the mock manager options.
486
+ */
487
+ setOptions(options) {
488
+ this.options = { ...this.options, ...options };
489
+ this.logger = (0, import_llmist2.createLogger)({ name: "MockManager", minLevel: this.options.debug ? 2 : 3 });
490
+ }
491
+ };
492
+ function getMockManager(options) {
493
+ return MockManager.getInstance(options);
494
+ }
495
+
496
+ // src/mock-stream.ts
497
+ var import_llmist3 = require("llmist");
498
+ function sleep2(ms) {
499
+ return new Promise((resolve) => setTimeout(resolve, ms));
500
+ }
501
+ function generateInvocationId() {
502
+ return `inv-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`;
503
+ }
504
+ function splitIntoChunks(text, minChunkSize = 5, maxChunkSize = 30) {
505
+ const chunks = [];
506
+ let remaining = text;
507
+ while (remaining.length > 0) {
508
+ const chunkSize = Math.min(
509
+ Math.floor(Math.random() * (maxChunkSize - minChunkSize + 1)) + minChunkSize,
510
+ remaining.length
511
+ );
512
+ let chunk;
513
+ if (chunkSize < remaining.length) {
514
+ const substr = remaining.substring(0, chunkSize);
515
+ const lastSpace = substr.lastIndexOf(" ");
516
+ if (lastSpace > minChunkSize / 2) {
517
+ chunk = substr.substring(0, lastSpace + 1);
518
+ } else {
519
+ chunk = substr;
520
+ }
521
+ } else {
522
+ chunk = remaining;
523
+ }
524
+ chunks.push(chunk);
525
+ remaining = remaining.substring(chunk.length);
526
+ }
527
+ return chunks;
528
+ }
529
+ function serializeToBlockFormat(obj, prefix = "") {
530
+ let result = "";
531
+ for (const [key, value] of Object.entries(obj)) {
532
+ const pointer = prefix ? `${prefix}/${key}` : key;
533
+ if (value === null || value === void 0) {
534
+ continue;
535
+ }
536
+ if (Array.isArray(value)) {
537
+ for (let i = 0; i < value.length; i++) {
538
+ const item = value[i];
539
+ const itemPointer = `${pointer}/${i}`;
540
+ if (typeof item === "object" && item !== null && !Array.isArray(item)) {
541
+ result += serializeToBlockFormat(item, itemPointer);
542
+ } else if (Array.isArray(item)) {
543
+ for (let j = 0; j < item.length; j++) {
544
+ result += `${import_llmist3.GADGET_ARG_PREFIX}${itemPointer}/${j}
545
+ ${String(item[j])}
546
+ `;
547
+ }
548
+ } else {
549
+ result += `${import_llmist3.GADGET_ARG_PREFIX}${itemPointer}
550
+ ${String(item)}
551
+ `;
552
+ }
553
+ }
554
+ } else if (typeof value === "object") {
555
+ result += serializeToBlockFormat(value, pointer);
556
+ } else {
557
+ result += `${import_llmist3.GADGET_ARG_PREFIX}${pointer}
558
+ ${String(value)}
559
+ `;
560
+ }
561
+ }
562
+ return result;
563
+ }
564
+ function formatGadgetCalls(gadgetCalls) {
565
+ let text = "";
566
+ const calls = [];
567
+ for (const call of gadgetCalls) {
568
+ const invocationId = call.invocationId ?? generateInvocationId();
569
+ calls.push({ name: call.gadgetName, invocationId });
570
+ const blockParams = serializeToBlockFormat(call.parameters);
571
+ text += `
572
+ ${import_llmist3.GADGET_START_PREFIX}${call.gadgetName}
573
+ ${blockParams}${import_llmist3.GADGET_END_PREFIX}`;
574
+ }
575
+ return { text, calls };
576
+ }
577
+ async function* createMockStream(response) {
578
+ if (response.delayMs) {
579
+ await sleep2(response.delayMs);
580
+ }
581
+ const streamDelay = response.streamDelayMs ?? 0;
582
+ let fullText = response.text ?? "";
583
+ if (response.gadgetCalls && response.gadgetCalls.length > 0) {
584
+ const { text: gadgetText } = formatGadgetCalls(response.gadgetCalls);
585
+ fullText += gadgetText;
586
+ }
587
+ if (fullText.length > 0) {
588
+ const chunks = streamDelay > 0 ? splitIntoChunks(fullText) : [fullText];
589
+ for (let i = 0; i < chunks.length; i++) {
590
+ const isLast = i === chunks.length - 1;
591
+ const chunk = {
592
+ text: chunks[i]
593
+ };
594
+ if (isLast) {
595
+ if (response.finishReason !== void 0) {
596
+ chunk.finishReason = response.finishReason;
597
+ }
598
+ if (response.usage) {
599
+ chunk.usage = response.usage;
600
+ }
601
+ }
602
+ yield chunk;
603
+ if (streamDelay > 0 && !isLast) {
604
+ await sleep2(streamDelay);
605
+ }
606
+ }
607
+ } else {
608
+ yield {
609
+ text: "",
610
+ finishReason: response.finishReason ?? "stop",
611
+ usage: response.usage ?? { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
612
+ };
613
+ }
614
+ }
615
+ function createTextMockStream(text, options) {
616
+ return createMockStream({
617
+ text,
618
+ delayMs: options?.delayMs,
619
+ streamDelayMs: options?.streamDelayMs,
620
+ usage: options?.usage,
621
+ finishReason: "stop"
622
+ });
623
+ }
624
+
625
+ // src/mock-adapter.ts
626
+ var MockProviderAdapter = class {
627
+ providerId = "mock";
628
+ priority = 100;
629
+ // High priority: check mocks before real providers
630
+ mockManager;
631
+ constructor(options) {
632
+ this.mockManager = getMockManager(options);
633
+ }
634
+ supports(_descriptor) {
635
+ return true;
636
+ }
637
+ stream(options, descriptor, _spec) {
638
+ const context = {
639
+ model: options.model,
640
+ provider: descriptor.provider,
641
+ modelName: descriptor.name,
642
+ options,
643
+ messages: options.messages
644
+ };
645
+ return this.createMockStreamFromContext(context);
646
+ }
647
+ async *createMockStreamFromContext(context) {
648
+ const mockResponse = await this.mockManager.findMatch(context);
649
+ if (!mockResponse) {
650
+ yield {
651
+ text: "",
652
+ finishReason: "stop",
653
+ usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }
654
+ };
655
+ return;
656
+ }
657
+ yield* createMockStream(mockResponse);
658
+ }
659
+ // ==========================================================================
660
+ // Image Generation Support
661
+ // ==========================================================================
662
+ /**
663
+ * Check if this adapter supports image generation for a given model.
664
+ * Returns true if there's a registered mock with images for this model.
665
+ */
666
+ supportsImageGeneration(_modelId) {
667
+ return true;
668
+ }
669
+ /**
670
+ * Generate mock images based on registered mocks.
671
+ *
672
+ * @param options - Image generation options
673
+ * @returns Mock image generation result
674
+ */
675
+ async generateImage(options) {
676
+ const context = {
677
+ model: options.model,
678
+ provider: "mock",
679
+ modelName: options.model,
680
+ options: {
681
+ model: options.model,
682
+ messages: [{ role: "user", content: options.prompt }]
683
+ },
684
+ messages: [{ role: "user", content: options.prompt }]
685
+ };
686
+ const mockResponse = await this.mockManager.findMatch(context);
687
+ if (!mockResponse?.images || mockResponse.images.length === 0) {
688
+ throw new Error(
689
+ `No mock registered for image generation with model "${options.model}". Use mockLLM().forModel("${options.model}").returnsImage(...).register() to add one.`
690
+ );
691
+ }
692
+ return this.createImageResult(options, mockResponse);
693
+ }
694
+ /**
695
+ * Transform mock response into ImageGenerationResult format.
696
+ *
697
+ * @param options - Original image generation options
698
+ * @param mockResponse - Mock response containing image data
699
+ * @returns ImageGenerationResult with mock data and zero cost
700
+ */
701
+ createImageResult(options, mockResponse) {
702
+ const images = mockResponse.images ?? [];
703
+ return {
704
+ images: images.map((img) => ({
705
+ b64Json: img.data,
706
+ revisedPrompt: img.revisedPrompt
707
+ })),
708
+ model: options.model,
709
+ usage: {
710
+ imagesGenerated: images.length,
711
+ size: options.size ?? "1024x1024",
712
+ quality: options.quality ?? "standard"
713
+ },
714
+ cost: 0
715
+ // Mock cost is always 0
716
+ };
717
+ }
718
+ // ==========================================================================
719
+ // Speech Generation Support
720
+ // ==========================================================================
721
+ /**
722
+ * Check if this adapter supports speech generation for a given model.
723
+ * Returns true if there's a registered mock with audio for this model.
724
+ */
725
+ supportsSpeechGeneration(_modelId) {
726
+ return true;
727
+ }
728
+ /**
729
+ * Generate mock speech based on registered mocks.
730
+ *
731
+ * @param options - Speech generation options
732
+ * @returns Mock speech generation result
733
+ */
734
+ async generateSpeech(options) {
735
+ const context = {
736
+ model: options.model,
737
+ provider: "mock",
738
+ modelName: options.model,
739
+ options: {
740
+ model: options.model,
741
+ messages: [{ role: "user", content: options.input }]
742
+ },
743
+ messages: [{ role: "user", content: options.input }]
744
+ };
745
+ const mockResponse = await this.mockManager.findMatch(context);
746
+ if (!mockResponse?.audio) {
747
+ throw new Error(
748
+ `No mock registered for speech generation with model "${options.model}". Use mockLLM().forModel("${options.model}").returnsAudio(...).register() to add one.`
749
+ );
750
+ }
751
+ return this.createSpeechResult(options, mockResponse);
752
+ }
753
+ /**
754
+ * Transform mock response into SpeechGenerationResult format.
755
+ * Converts base64 audio data to ArrayBuffer.
756
+ *
757
+ * @param options - Original speech generation options
758
+ * @param mockResponse - Mock response containing audio data
759
+ * @returns SpeechGenerationResult with mock data and zero cost
760
+ */
761
+ createSpeechResult(options, mockResponse) {
762
+ const audio = mockResponse.audio;
763
+ const binaryString = atob(audio.data);
764
+ const bytes = new Uint8Array(binaryString.length);
765
+ for (let i = 0; i < binaryString.length; i++) {
766
+ bytes[i] = binaryString.charCodeAt(i);
767
+ }
768
+ const format = this.mimeTypeToAudioFormat(audio.mimeType);
769
+ return {
770
+ audio: bytes.buffer,
771
+ model: options.model,
772
+ usage: {
773
+ characterCount: options.input.length
774
+ },
775
+ cost: 0,
776
+ // Mock cost is always 0
777
+ format
778
+ };
779
+ }
780
+ /**
781
+ * Map MIME type to audio format for SpeechGenerationResult.
782
+ * Defaults to "mp3" for unknown MIME types.
783
+ *
784
+ * @param mimeType - Audio MIME type string
785
+ * @returns Audio format identifier
786
+ */
787
+ mimeTypeToAudioFormat(mimeType) {
788
+ const mapping = {
789
+ "audio/mp3": "mp3",
790
+ "audio/mpeg": "mp3",
791
+ "audio/wav": "wav",
792
+ "audio/webm": "opus",
793
+ "audio/ogg": "opus"
794
+ };
795
+ return mapping[mimeType] ?? "mp3";
796
+ }
797
+ };
798
+ function createMockAdapter(options) {
799
+ return new MockProviderAdapter(options);
800
+ }
801
+
802
+ // src/mock-builder.ts
803
+ var import_llmist4 = require("llmist");
804
+ var import_llmist5 = require("llmist");
805
+ function hasImageContent(content) {
806
+ if (typeof content === "string") return false;
807
+ return content.some((part) => (0, import_llmist4.isImagePart)(part));
808
+ }
809
+ function hasAudioContent(content) {
810
+ if (typeof content === "string") return false;
811
+ return content.some((part) => (0, import_llmist4.isAudioPart)(part));
812
+ }
813
+ function countImages(content) {
814
+ if (typeof content === "string") return 0;
815
+ return content.filter((part) => (0, import_llmist4.isImagePart)(part)).length;
816
+ }
817
+ var MockBuilder = class {
818
+ matchers = [];
819
+ response = {};
820
+ label;
821
+ isOnce = false;
822
+ id;
823
+ /**
824
+ * Match calls to a specific model (by name, supports partial matching).
825
+ *
826
+ * @example
827
+ * mockLLM().forModel('gpt-5')
828
+ * mockLLM().forModel('claude') // matches any Claude model
829
+ */
830
+ forModel(modelName) {
831
+ if (!modelName || modelName.trim() === "") {
832
+ throw new Error("Model name cannot be empty");
833
+ }
834
+ this.matchers.push((ctx) => ctx.modelName.includes(modelName));
835
+ return this;
836
+ }
837
+ /**
838
+ * Match calls to any model.
839
+ * Useful when you want to mock responses regardless of the model used.
840
+ *
841
+ * @example
842
+ * mockLLM().forAnyModel()
843
+ */
844
+ forAnyModel() {
845
+ this.matchers.push(() => true);
846
+ return this;
847
+ }
848
+ /**
849
+ * Match calls to a specific provider.
850
+ *
851
+ * @example
852
+ * mockLLM().forProvider('openai')
853
+ * mockLLM().forProvider('anthropic')
854
+ */
855
+ forProvider(provider) {
856
+ if (!provider || provider.trim() === "") {
857
+ throw new Error("Provider name cannot be empty");
858
+ }
859
+ this.matchers.push((ctx) => ctx.provider === provider);
860
+ return this;
861
+ }
862
+ /**
863
+ * Match calls to any provider.
864
+ * Useful when you want to mock responses regardless of the provider used.
865
+ *
866
+ * @example
867
+ * mockLLM().forAnyProvider()
868
+ */
869
+ forAnyProvider() {
870
+ this.matchers.push(() => true);
871
+ return this;
872
+ }
873
+ /**
874
+ * Match when any message contains the given text (case-insensitive).
875
+ *
876
+ * @example
877
+ * mockLLM().whenMessageContains('hello')
878
+ */
879
+ whenMessageContains(text) {
880
+ this.matchers.push(
881
+ (ctx) => ctx.messages.some(
882
+ (msg) => (0, import_llmist5.extractMessageText)(msg.content).toLowerCase().includes(text.toLowerCase())
883
+ )
884
+ );
885
+ return this;
886
+ }
887
+ /**
888
+ * Match when the last message contains the given text (case-insensitive).
889
+ *
890
+ * @example
891
+ * mockLLM().whenLastMessageContains('goodbye')
892
+ */
893
+ whenLastMessageContains(text) {
894
+ this.matchers.push((ctx) => {
895
+ const lastMsg = ctx.messages[ctx.messages.length - 1];
896
+ if (!lastMsg) return false;
897
+ return (0, import_llmist5.extractMessageText)(lastMsg.content).toLowerCase().includes(text.toLowerCase());
898
+ });
899
+ return this;
900
+ }
901
+ /**
902
+ * Match when any message matches the given regex.
903
+ *
904
+ * @example
905
+ * mockLLM().whenMessageMatches(/calculate \d+/)
906
+ */
907
+ whenMessageMatches(regex) {
908
+ this.matchers.push((ctx) => ctx.messages.some((msg) => regex.test((0, import_llmist5.extractMessageText)(msg.content))));
909
+ return this;
910
+ }
911
+ /**
912
+ * Match when a message with a specific role contains text.
913
+ *
914
+ * @example
915
+ * mockLLM().whenRoleContains('system', 'You are a helpful assistant')
916
+ */
917
+ whenRoleContains(role, text) {
918
+ this.matchers.push(
919
+ (ctx) => ctx.messages.some(
920
+ (msg) => msg.role === role && (0, import_llmist5.extractMessageText)(msg.content).toLowerCase().includes(text.toLowerCase())
921
+ )
922
+ );
923
+ return this;
924
+ }
925
+ /**
926
+ * Match based on the number of messages in the conversation.
927
+ *
928
+ * @example
929
+ * mockLLM().whenMessageCount((count) => count > 10)
930
+ */
931
+ whenMessageCount(predicate) {
932
+ this.matchers.push((ctx) => predicate(ctx.messages.length));
933
+ return this;
934
+ }
935
+ /**
936
+ * Add a custom matcher function.
937
+ * This provides full control over matching logic.
938
+ *
939
+ * @example
940
+ * mockLLM().when((ctx) => {
941
+ * return ctx.options.temperature > 0.8;
942
+ * })
943
+ */
944
+ when(matcher) {
945
+ this.matchers.push(matcher);
946
+ return this;
947
+ }
948
+ // ==========================================================================
949
+ // Multimodal Matchers
950
+ // ==========================================================================
951
+ /**
952
+ * Match when any message contains an image.
953
+ *
954
+ * @example
955
+ * mockLLM().whenMessageHasImage().returns("I see an image of a sunset.")
956
+ */
957
+ whenMessageHasImage() {
958
+ this.matchers.push((ctx) => ctx.messages.some((msg) => hasImageContent(msg.content)));
959
+ return this;
960
+ }
961
+ /**
962
+ * Match when any message contains audio.
963
+ *
964
+ * @example
965
+ * mockLLM().whenMessageHasAudio().returns("I hear music playing.")
966
+ */
967
+ whenMessageHasAudio() {
968
+ this.matchers.push((ctx) => ctx.messages.some((msg) => hasAudioContent(msg.content)));
969
+ return this;
970
+ }
971
+ /**
972
+ * Match based on the number of images in the last message.
973
+ *
974
+ * @example
975
+ * mockLLM().whenImageCount((n) => n >= 2).returns("Comparing multiple images...")
976
+ */
977
+ whenImageCount(predicate) {
978
+ this.matchers.push((ctx) => {
979
+ const lastMsg = ctx.messages[ctx.messages.length - 1];
980
+ if (!lastMsg) return false;
981
+ return predicate(countImages(lastMsg.content));
982
+ });
983
+ return this;
984
+ }
985
+ /**
986
+ * Set the text response to return.
987
+ * Can be a static string or a function that returns a string dynamically.
988
+ *
989
+ * @example
990
+ * mockLLM().returns('Hello, world!')
991
+ * mockLLM().returns(() => `Response at ${Date.now()}`)
992
+ * mockLLM().returns((ctx) => `You said: ${ctx.messages[0]?.content}`)
993
+ */
994
+ returns(text) {
995
+ if (typeof text === "function") {
996
+ this.response = async (ctx) => {
997
+ const resolvedText = await Promise.resolve().then(() => text(ctx));
998
+ return { text: resolvedText };
999
+ };
1000
+ } else {
1001
+ if (typeof this.response === "function") {
1002
+ throw new Error("Cannot use returns() after withResponse() with a function");
1003
+ }
1004
+ this.response.text = text;
1005
+ }
1006
+ return this;
1007
+ }
1008
+ /**
1009
+ * Set gadget calls to include in the response.
1010
+ *
1011
+ * @example
1012
+ * mockLLM().returnsGadgetCalls([
1013
+ * { gadgetName: 'calculator', parameters: { op: 'add', a: 1, b: 2 } }
1014
+ * ])
1015
+ */
1016
+ returnsGadgetCalls(calls) {
1017
+ if (typeof this.response === "function") {
1018
+ throw new Error("Cannot use returnsGadgetCalls() after withResponse() with a function");
1019
+ }
1020
+ this.response.gadgetCalls = calls;
1021
+ return this;
1022
+ }
1023
+ /**
1024
+ * Add a single gadget call to the response.
1025
+ *
1026
+ * @example
1027
+ * mockLLM()
1028
+ * .returnsGadgetCall('calculator', { op: 'add', a: 1, b: 2 })
1029
+ * .returnsGadgetCall('logger', { message: 'Done!' })
1030
+ */
1031
+ returnsGadgetCall(gadgetName, parameters) {
1032
+ if (typeof this.response === "function") {
1033
+ throw new Error("Cannot use returnsGadgetCall() after withResponse() with a function");
1034
+ }
1035
+ if (!this.response.gadgetCalls) {
1036
+ this.response.gadgetCalls = [];
1037
+ }
1038
+ this.response.gadgetCalls.push({ gadgetName, parameters });
1039
+ return this;
1040
+ }
1041
+ // ==========================================================================
1042
+ // Multimodal Response Helpers
1043
+ // ==========================================================================
1044
+ /**
1045
+ * Return a single image in the response.
1046
+ * Useful for mocking image generation endpoints.
1047
+ *
1048
+ * @param data - Image data (base64 string or Buffer)
1049
+ * @param mimeType - MIME type (auto-detected if Buffer provided without type)
1050
+ *
1051
+ * @example
1052
+ * mockLLM()
1053
+ * .forModel('dall-e-3')
1054
+ * .returnsImage(pngBuffer)
1055
+ * .register();
1056
+ */
1057
+ returnsImage(data, mimeType) {
1058
+ if (typeof this.response === "function") {
1059
+ throw new Error("Cannot use returnsImage() after withResponse() with a function");
1060
+ }
1061
+ let imageData;
1062
+ let imageMime;
1063
+ if (typeof data === "string") {
1064
+ imageData = data;
1065
+ if (!mimeType) {
1066
+ throw new Error("MIME type is required when providing base64 string data");
1067
+ }
1068
+ imageMime = mimeType;
1069
+ } else {
1070
+ imageData = (0, import_llmist4.toBase64)(data);
1071
+ const detected = mimeType ?? (0, import_llmist4.detectImageMimeType)(data);
1072
+ if (!detected) {
1073
+ throw new Error(
1074
+ "Could not detect image MIME type. Please provide the mimeType parameter explicitly."
1075
+ );
1076
+ }
1077
+ imageMime = detected;
1078
+ }
1079
+ if (!this.response.images) {
1080
+ this.response.images = [];
1081
+ }
1082
+ this.response.images.push({ data: imageData, mimeType: imageMime });
1083
+ return this;
1084
+ }
1085
+ /**
1086
+ * Return multiple images in the response.
1087
+ *
1088
+ * @example
1089
+ * mockLLM()
1090
+ * .forModel('dall-e-3')
1091
+ * .returnsImages([
1092
+ * { data: pngBuffer1 },
1093
+ * { data: pngBuffer2 },
1094
+ * ])
1095
+ * .register();
1096
+ */
1097
+ returnsImages(images) {
1098
+ for (const img of images) {
1099
+ this.returnsImage(img.data, img.mimeType);
1100
+ if (img.revisedPrompt && this.response && typeof this.response !== "function") {
1101
+ const lastImage = this.response.images?.[this.response.images.length - 1];
1102
+ if (lastImage) {
1103
+ lastImage.revisedPrompt = img.revisedPrompt;
1104
+ }
1105
+ }
1106
+ }
1107
+ return this;
1108
+ }
1109
+ /**
1110
+ * Return audio data in the response.
1111
+ * Useful for mocking speech synthesis endpoints.
1112
+ *
1113
+ * @param data - Audio data (base64 string or Buffer)
1114
+ * @param mimeType - MIME type (auto-detected if Buffer provided without type)
1115
+ *
1116
+ * @example
1117
+ * mockLLM()
1118
+ * .forModel('tts-1')
1119
+ * .returnsAudio(mp3Buffer)
1120
+ * .register();
1121
+ */
1122
+ returnsAudio(data, mimeType) {
1123
+ if (typeof this.response === "function") {
1124
+ throw new Error("Cannot use returnsAudio() after withResponse() with a function");
1125
+ }
1126
+ let audioData;
1127
+ let audioMime;
1128
+ if (typeof data === "string") {
1129
+ audioData = data;
1130
+ if (!mimeType) {
1131
+ throw new Error("MIME type is required when providing base64 string data");
1132
+ }
1133
+ audioMime = mimeType;
1134
+ } else {
1135
+ audioData = (0, import_llmist4.toBase64)(data);
1136
+ const detected = mimeType ?? (0, import_llmist4.detectAudioMimeType)(data);
1137
+ if (!detected) {
1138
+ throw new Error(
1139
+ "Could not detect audio MIME type. Please provide the mimeType parameter explicitly."
1140
+ );
1141
+ }
1142
+ audioMime = detected;
1143
+ }
1144
+ this.response.audio = { data: audioData, mimeType: audioMime };
1145
+ return this;
1146
+ }
1147
+ /**
1148
+ * Set the complete mock response object.
1149
+ * This allows full control over all response properties.
1150
+ * Can also be a function that generates the response dynamically based on context.
1151
+ *
1152
+ * @example
1153
+ * // Static response
1154
+ * mockLLM().withResponse({
1155
+ * text: 'Hello',
1156
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 },
1157
+ * finishReason: 'stop'
1158
+ * })
1159
+ *
1160
+ * @example
1161
+ * // Dynamic response
1162
+ * mockLLM().withResponse((ctx) => ({
1163
+ * text: `You said: ${ctx.messages[ctx.messages.length - 1]?.content}`,
1164
+ * usage: { inputTokens: 10, outputTokens: 5, totalTokens: 15 }
1165
+ * }))
1166
+ */
1167
+ withResponse(response) {
1168
+ this.response = response;
1169
+ return this;
1170
+ }
1171
+ /**
1172
+ * Set simulated token usage.
1173
+ *
1174
+ * @example
1175
+ * mockLLM().withUsage({ inputTokens: 100, outputTokens: 50, totalTokens: 150 })
1176
+ */
1177
+ withUsage(usage) {
1178
+ if (typeof this.response === "function") {
1179
+ throw new Error("Cannot use withUsage() after withResponse() with a function");
1180
+ }
1181
+ if (usage.inputTokens < 0 || usage.outputTokens < 0 || usage.totalTokens < 0) {
1182
+ throw new Error("Token counts cannot be negative");
1183
+ }
1184
+ if (usage.totalTokens !== usage.inputTokens + usage.outputTokens) {
1185
+ throw new Error("totalTokens must equal inputTokens + outputTokens");
1186
+ }
1187
+ this.response.usage = usage;
1188
+ return this;
1189
+ }
1190
+ /**
1191
+ * Set the finish reason.
1192
+ *
1193
+ * @example
1194
+ * mockLLM().withFinishReason('stop')
1195
+ * mockLLM().withFinishReason('length')
1196
+ */
1197
+ withFinishReason(reason) {
1198
+ if (typeof this.response === "function") {
1199
+ throw new Error("Cannot use withFinishReason() after withResponse() with a function");
1200
+ }
1201
+ this.response.finishReason = reason;
1202
+ return this;
1203
+ }
1204
+ /**
1205
+ * Set initial delay before streaming starts (simulates network latency).
1206
+ *
1207
+ * @example
1208
+ * mockLLM().withDelay(100) // 100ms delay
1209
+ */
1210
+ withDelay(ms) {
1211
+ if (typeof this.response === "function") {
1212
+ throw new Error("Cannot use withDelay() after withResponse() with a function");
1213
+ }
1214
+ if (ms < 0) {
1215
+ throw new Error("Delay must be non-negative");
1216
+ }
1217
+ this.response.delayMs = ms;
1218
+ return this;
1219
+ }
1220
+ /**
1221
+ * Set delay between stream chunks (simulates realistic streaming).
1222
+ *
1223
+ * @example
1224
+ * mockLLM().withStreamDelay(10) // 10ms between chunks
1225
+ */
1226
+ withStreamDelay(ms) {
1227
+ if (typeof this.response === "function") {
1228
+ throw new Error("Cannot use withStreamDelay() after withResponse() with a function");
1229
+ }
1230
+ if (ms < 0) {
1231
+ throw new Error("Stream delay must be non-negative");
1232
+ }
1233
+ this.response.streamDelayMs = ms;
1234
+ return this;
1235
+ }
1236
+ /**
1237
+ * Set a label for this mock (useful for debugging).
1238
+ *
1239
+ * @example
1240
+ * mockLLM().withLabel('greeting mock')
1241
+ */
1242
+ withLabel(label) {
1243
+ this.label = label;
1244
+ return this;
1245
+ }
1246
+ /**
1247
+ * Set a specific ID for this mock.
1248
+ *
1249
+ * @example
1250
+ * mockLLM().withId('my-custom-mock-id')
1251
+ */
1252
+ withId(id) {
1253
+ this.id = id;
1254
+ return this;
1255
+ }
1256
+ /**
1257
+ * Mark this mock as one-time use (will be removed after first match).
1258
+ *
1259
+ * @example
1260
+ * mockLLM().once()
1261
+ */
1262
+ once() {
1263
+ this.isOnce = true;
1264
+ return this;
1265
+ }
1266
+ /**
1267
+ * Build the mock registration without registering it.
1268
+ * Useful if you want to register it manually later.
1269
+ *
1270
+ * @returns The built MockRegistration object (without id if not specified)
1271
+ */
1272
+ build() {
1273
+ if (this.matchers.length === 0) {
1274
+ throw new Error(
1275
+ "Mock must have at least one matcher. Use .when(), .forModel(), .forProvider(), etc."
1276
+ );
1277
+ }
1278
+ const combinedMatcher = async (ctx) => {
1279
+ for (const matcher of this.matchers) {
1280
+ const matches = await Promise.resolve(matcher(ctx));
1281
+ if (!matches) return false;
1282
+ }
1283
+ return true;
1284
+ };
1285
+ return {
1286
+ id: this.id,
1287
+ matcher: combinedMatcher,
1288
+ response: this.response,
1289
+ label: this.label,
1290
+ once: this.isOnce
1291
+ };
1292
+ }
1293
+ /**
1294
+ * Register this mock with the global MockManager.
1295
+ * Returns the ID of the registered mock.
1296
+ *
1297
+ * @example
1298
+ * const mockId = mockLLM().forModel('gpt-5').returns('Hello!').register();
1299
+ * // Later: getMockManager().unregister(mockId);
1300
+ */
1301
+ register() {
1302
+ const mockManager = getMockManager();
1303
+ const registration = this.build();
1304
+ return mockManager.register(registration);
1305
+ }
1306
+ };
1307
+ function mockLLM() {
1308
+ return new MockBuilder();
1309
+ }
1310
+
1311
+ // src/mock-client.ts
1312
+ var import_llmist6 = require("llmist");
1313
+ function createMockClient(options) {
1314
+ return new import_llmist6.LLMist({
1315
+ adapters: [new MockProviderAdapter(options)],
1316
+ autoDiscoverProviders: false,
1317
+ defaultProvider: "mock"
1318
+ });
1319
+ }
1320
+
1321
+ // src/mock-conversation.ts
1322
+ var MockConversationManager = class {
1323
+ history;
1324
+ baseMessages;
1325
+ replacementHistory;
1326
+ replaceHistoryCallCount = 0;
1327
+ addedMessages = [];
1328
+ constructor(history = [], baseMessages = []) {
1329
+ this.history = [...history];
1330
+ this.baseMessages = [...baseMessages];
1331
+ }
1332
+ addUserMessage(content) {
1333
+ const msg = { role: "user", content };
1334
+ this.history.push(msg);
1335
+ this.addedMessages.push(msg);
1336
+ }
1337
+ addAssistantMessage(content) {
1338
+ const msg = { role: "assistant", content };
1339
+ this.history.push(msg);
1340
+ this.addedMessages.push(msg);
1341
+ }
1342
+ addGadgetCallResult(gadgetName, parameters, result, invocationId) {
1343
+ const assistantMsg = {
1344
+ role: "assistant",
1345
+ content: `!!!GADGET_START:${gadgetName}:${invocationId}
1346
+ ${JSON.stringify(parameters)}
1347
+ !!!GADGET_END`
1348
+ };
1349
+ const resultMsg = {
1350
+ role: "user",
1351
+ content: `Result (${invocationId}): ${result}`
1352
+ };
1353
+ this.history.push(assistantMsg);
1354
+ this.history.push(resultMsg);
1355
+ this.addedMessages.push(assistantMsg);
1356
+ this.addedMessages.push(resultMsg);
1357
+ }
1358
+ getMessages() {
1359
+ return [...this.baseMessages, ...this.history];
1360
+ }
1361
+ getHistoryMessages() {
1362
+ return [...this.history];
1363
+ }
1364
+ getBaseMessages() {
1365
+ return [...this.baseMessages];
1366
+ }
1367
+ getConversationHistory() {
1368
+ return [...this.history];
1369
+ }
1370
+ replaceHistory(newHistory) {
1371
+ this.replacementHistory = [...newHistory];
1372
+ this.history = [...newHistory];
1373
+ this.replaceHistoryCallCount++;
1374
+ }
1375
+ // ============================================
1376
+ // Test Helper Methods
1377
+ // ============================================
1378
+ /**
1379
+ * Check if replaceHistory was called.
1380
+ */
1381
+ wasReplaceHistoryCalled() {
1382
+ return this.replaceHistoryCallCount > 0;
1383
+ }
1384
+ /**
1385
+ * Get the number of times replaceHistory was called.
1386
+ */
1387
+ getReplaceHistoryCallCount() {
1388
+ return this.replaceHistoryCallCount;
1389
+ }
1390
+ /**
1391
+ * Get the most recent history passed to replaceHistory.
1392
+ * Returns undefined if replaceHistory was never called.
1393
+ */
1394
+ getReplacementHistory() {
1395
+ return this.replacementHistory;
1396
+ }
1397
+ /**
1398
+ * Get all messages that were added via add* methods.
1399
+ */
1400
+ getAddedMessages() {
1401
+ return [...this.addedMessages];
1402
+ }
1403
+ /**
1404
+ * Reset all tracking state while preserving the conversation.
1405
+ */
1406
+ resetTracking() {
1407
+ this.replacementHistory = void 0;
1408
+ this.replaceHistoryCallCount = 0;
1409
+ this.addedMessages = [];
1410
+ }
1411
+ /**
1412
+ * Completely reset the mock to initial state.
1413
+ * Note: baseMessages cannot be changed after construction.
1414
+ */
1415
+ reset(history = []) {
1416
+ this.history = [...history];
1417
+ this.resetTracking();
1418
+ }
1419
+ /**
1420
+ * Set the history directly (for test setup).
1421
+ */
1422
+ setHistory(messages) {
1423
+ this.history = [...messages];
1424
+ }
1425
+ /**
1426
+ * Get the current history length.
1427
+ */
1428
+ getHistoryLength() {
1429
+ return this.history.length;
1430
+ }
1431
+ /**
1432
+ * Get total message count (base + history).
1433
+ */
1434
+ getTotalMessageCount() {
1435
+ return this.baseMessages.length + this.history.length;
1436
+ }
1437
+ };
1438
+ function createMockConversationManager(turnCount, baseMessages = []) {
1439
+ const history = [];
1440
+ for (let i = 0; i < turnCount; i++) {
1441
+ history.push({
1442
+ role: "user",
1443
+ content: `User message ${i + 1}: This is turn ${i + 1} of the conversation.`
1444
+ });
1445
+ history.push({
1446
+ role: "assistant",
1447
+ content: `Assistant response ${i + 1}: I acknowledge turn ${i + 1}.`
1448
+ });
1449
+ }
1450
+ return new MockConversationManager(history, baseMessages);
1451
+ }
1452
+
1453
+ // src/mock-gadget.ts
1454
+ var import_llmist7 = require("llmist");
1455
+ var MockGadgetImpl = class extends import_llmist7.AbstractGadget {
1456
+ name;
1457
+ description;
1458
+ parameterSchema;
1459
+ timeoutMs;
1460
+ calls = [];
1461
+ resultValue;
1462
+ resultFn;
1463
+ errorToThrow;
1464
+ delayMs;
1465
+ shouldTrackCalls;
1466
+ constructor(config) {
1467
+ super();
1468
+ this.name = config.name;
1469
+ this.description = config.description ?? `Mock gadget: ${config.name}`;
1470
+ this.parameterSchema = config.schema;
1471
+ this.resultValue = config.result;
1472
+ this.resultFn = config.resultFn;
1473
+ this.delayMs = config.delayMs ?? 0;
1474
+ this.shouldTrackCalls = config.trackCalls ?? true;
1475
+ this.timeoutMs = config.timeoutMs;
1476
+ if (config.error) {
1477
+ this.errorToThrow = typeof config.error === "string" ? new Error(config.error) : config.error;
1478
+ }
1479
+ }
1480
+ async execute(params) {
1481
+ if (this.shouldTrackCalls) {
1482
+ this.calls.push({ params: { ...params }, timestamp: Date.now() });
1483
+ }
1484
+ if (this.delayMs > 0) {
1485
+ await new Promise((resolve) => setTimeout(resolve, this.delayMs));
1486
+ }
1487
+ if (this.errorToThrow) {
1488
+ throw this.errorToThrow;
1489
+ }
1490
+ if (this.resultFn) {
1491
+ return this.resultFn(params);
1492
+ }
1493
+ return this.resultValue ?? "mock result";
1494
+ }
1495
+ getCalls() {
1496
+ return [...this.calls];
1497
+ }
1498
+ getCallCount() {
1499
+ return this.calls.length;
1500
+ }
1501
+ resetCalls() {
1502
+ this.calls = [];
1503
+ }
1504
+ wasCalledWith(params) {
1505
+ return this.calls.some(
1506
+ (call) => Object.entries(params).every(([key, value]) => call.params[key] === value)
1507
+ );
1508
+ }
1509
+ getLastCall() {
1510
+ return this.calls.length > 0 ? this.calls[this.calls.length - 1] : void 0;
1511
+ }
1512
+ };
1513
+ function createMockGadget(config) {
1514
+ return new MockGadgetImpl(config);
1515
+ }
1516
+ var MockGadgetBuilder = class {
1517
+ config = { name: "MockGadget" };
1518
+ /**
1519
+ * Set the gadget name.
1520
+ */
1521
+ withName(name) {
1522
+ this.config.name = name;
1523
+ return this;
1524
+ }
1525
+ /**
1526
+ * Set the gadget description.
1527
+ */
1528
+ withDescription(description) {
1529
+ this.config.description = description;
1530
+ return this;
1531
+ }
1532
+ /**
1533
+ * Set the parameter schema.
1534
+ */
1535
+ withSchema(schema) {
1536
+ this.config.schema = schema;
1537
+ return this;
1538
+ }
1539
+ /**
1540
+ * Set a static result to return.
1541
+ */
1542
+ returns(result) {
1543
+ this.config.result = result;
1544
+ this.config.resultFn = void 0;
1545
+ return this;
1546
+ }
1547
+ /**
1548
+ * Set a dynamic result function.
1549
+ */
1550
+ returnsAsync(resultFn) {
1551
+ this.config.resultFn = resultFn;
1552
+ this.config.result = void 0;
1553
+ return this;
1554
+ }
1555
+ /**
1556
+ * Make the gadget throw an error on execution.
1557
+ */
1558
+ throws(error) {
1559
+ this.config.error = error;
1560
+ return this;
1561
+ }
1562
+ /**
1563
+ * Add execution delay.
1564
+ */
1565
+ withDelay(ms) {
1566
+ this.config.delayMs = ms;
1567
+ return this;
1568
+ }
1569
+ /**
1570
+ * Set timeout for the gadget.
1571
+ */
1572
+ withTimeout(ms) {
1573
+ this.config.timeoutMs = ms;
1574
+ return this;
1575
+ }
1576
+ /**
1577
+ * Enable call tracking (enabled by default).
1578
+ */
1579
+ trackCalls() {
1580
+ this.config.trackCalls = true;
1581
+ return this;
1582
+ }
1583
+ /**
1584
+ * Disable call tracking.
1585
+ */
1586
+ noTracking() {
1587
+ this.config.trackCalls = false;
1588
+ return this;
1589
+ }
1590
+ /**
1591
+ * Build the mock gadget.
1592
+ */
1593
+ build() {
1594
+ return createMockGadget(this.config);
1595
+ }
1596
+ };
1597
+ function mockGadget() {
1598
+ return new MockGadgetBuilder();
1599
+ }
1600
+
1601
+ // src/stream-helpers.ts
1602
+ function createTestStream(chunks) {
1603
+ return (async function* () {
1604
+ for (const chunk of chunks) {
1605
+ yield chunk;
1606
+ }
1607
+ })();
1608
+ }
1609
+ function createTextStream(text, options) {
1610
+ return (async function* () {
1611
+ if (options?.delayMs) {
1612
+ await sleep3(options.delayMs);
1613
+ }
1614
+ const chunkSize = options?.chunkSize ?? text.length;
1615
+ const chunks = [];
1616
+ for (let i = 0; i < text.length; i += chunkSize) {
1617
+ chunks.push(text.slice(i, i + chunkSize));
1618
+ }
1619
+ for (let i = 0; i < chunks.length; i++) {
1620
+ const isLast = i === chunks.length - 1;
1621
+ const chunk = { text: chunks[i] };
1622
+ if (isLast) {
1623
+ chunk.finishReason = options?.finishReason ?? "stop";
1624
+ const inputTokens = Math.ceil(text.length / 4);
1625
+ const outputTokens = Math.ceil(text.length / 4);
1626
+ chunk.usage = options?.usage ?? {
1627
+ inputTokens,
1628
+ outputTokens,
1629
+ totalTokens: inputTokens + outputTokens
1630
+ };
1631
+ }
1632
+ yield chunk;
1633
+ if (options?.chunkDelayMs && !isLast) {
1634
+ await sleep3(options.chunkDelayMs);
1635
+ }
1636
+ }
1637
+ })();
1638
+ }
1639
+ async function collectStream(stream) {
1640
+ const chunks = [];
1641
+ for await (const chunk of stream) {
1642
+ chunks.push(chunk);
1643
+ }
1644
+ return chunks;
1645
+ }
1646
+ async function collectStreamText(stream) {
1647
+ let text = "";
1648
+ for await (const chunk of stream) {
1649
+ text += chunk.text ?? "";
1650
+ }
1651
+ return text;
1652
+ }
1653
+ async function getStreamFinalChunk(stream) {
1654
+ let lastChunk;
1655
+ for await (const chunk of stream) {
1656
+ lastChunk = chunk;
1657
+ }
1658
+ return lastChunk;
1659
+ }
1660
+ function createEmptyStream() {
1661
+ return (async function* () {
1662
+ })();
1663
+ }
1664
+ function createErrorStream(chunksBeforeError, error) {
1665
+ return (async function* () {
1666
+ for (const chunk of chunksBeforeError) {
1667
+ yield chunk;
1668
+ }
1669
+ throw error;
1670
+ })();
1671
+ }
1672
+ function sleep3(ms) {
1673
+ return new Promise((resolve) => setTimeout(resolve, ms));
1674
+ }
1675
+ // Annotate the CommonJS export names for ESM import in node:
1676
+ 0 && (module.exports = {
1677
+ MockBuilder,
1678
+ MockConversationManager,
1679
+ MockGadgetBuilder,
1680
+ MockManager,
1681
+ MockPromptRecorder,
1682
+ MockProviderAdapter,
1683
+ collectOutput,
1684
+ collectStream,
1685
+ collectStreamText,
1686
+ createAssistantMessage,
1687
+ createConversation,
1688
+ createConversationWithGadgets,
1689
+ createEmptyStream,
1690
+ createErrorStream,
1691
+ createLargeConversation,
1692
+ createMinimalConversation,
1693
+ createMockAdapter,
1694
+ createMockClient,
1695
+ createMockConversationManager,
1696
+ createMockGadget,
1697
+ createMockPrompt,
1698
+ createMockReadable,
1699
+ createMockStream,
1700
+ createMockWritable,
1701
+ createSystemMessage,
1702
+ createTestEnvironment,
1703
+ createTestStream,
1704
+ createTextMockStream,
1705
+ createTextStream,
1706
+ createUserMessage,
1707
+ estimateTokens,
1708
+ getBufferedOutput,
1709
+ getMockManager,
1710
+ getStreamFinalChunk,
1711
+ mockGadget,
1712
+ mockLLM,
1713
+ testGadget,
1714
+ testGadgetBatch,
1715
+ waitFor
1716
+ });
1717
+ //# sourceMappingURL=index.cjs.map