llm-mock-server 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/.github/dependabot.yml +11 -0
  2. package/.github/workflows/test.yml +34 -0
  3. package/.markdownlint.jsonc +11 -0
  4. package/.node-version +1 -0
  5. package/.oxlintrc.json +35 -0
  6. package/ARCHITECTURE.md +125 -0
  7. package/LICENCE +21 -0
  8. package/README.md +448 -0
  9. package/package.json +55 -0
  10. package/src/cli-validators.ts +56 -0
  11. package/src/cli.ts +128 -0
  12. package/src/formats/anthropic/index.ts +14 -0
  13. package/src/formats/anthropic/parse.ts +48 -0
  14. package/src/formats/anthropic/schema.ts +133 -0
  15. package/src/formats/anthropic/serialize.ts +91 -0
  16. package/src/formats/openai/index.ts +14 -0
  17. package/src/formats/openai/parse.ts +34 -0
  18. package/src/formats/openai/schema.ts +147 -0
  19. package/src/formats/openai/serialize.ts +92 -0
  20. package/src/formats/parse-helpers.ts +79 -0
  21. package/src/formats/responses/index.ts +14 -0
  22. package/src/formats/responses/parse.ts +56 -0
  23. package/src/formats/responses/schema.ts +143 -0
  24. package/src/formats/responses/serialize.ts +129 -0
  25. package/src/formats/types.ts +17 -0
  26. package/src/history.ts +66 -0
  27. package/src/index.ts +44 -0
  28. package/src/loader.ts +213 -0
  29. package/src/logger.ts +58 -0
  30. package/src/mock-server.ts +237 -0
  31. package/src/route-handler.ts +113 -0
  32. package/src/rule-engine.ts +119 -0
  33. package/src/sse-writer.ts +35 -0
  34. package/src/types/index.ts +4 -0
  35. package/src/types/reply.ts +49 -0
  36. package/src/types/request.ts +45 -0
  37. package/src/types/rule.ts +74 -0
  38. package/src/types.ts +5 -0
  39. package/test/cli-validators.test.ts +131 -0
  40. package/test/formats/anthropic-schema.test.ts +192 -0
  41. package/test/formats/anthropic.test.ts +260 -0
  42. package/test/formats/openai-schema.test.ts +105 -0
  43. package/test/formats/openai.test.ts +243 -0
  44. package/test/formats/responses-schema.test.ts +114 -0
  45. package/test/formats/responses.test.ts +299 -0
  46. package/test/loader.test.ts +314 -0
  47. package/test/mock-server.test.ts +565 -0
  48. package/test/rule-engine.test.ts +213 -0
  49. package/tsconfig.json +26 -0
  50. package/tsconfig.test.json +11 -0
  51. package/vitest.config.ts +18 -0
@@ -0,0 +1,565 @@
1
+ import { describe, it, expect, beforeEach, afterEach } from "vitest";
2
+ import { createMock, MockServer } from "../src/index.js";
3
+
4
+ interface OpenAIResponse {
5
+ choices: { message: { role: string; content: string }; finish_reason: string }[];
6
+ error?: { type: string; message: string };
7
+ }
8
+
9
+ interface AnthropicResponse {
10
+ content: { type: string; text?: string; thinking?: string }[];
11
+ error?: { type: string; message: string };
12
+ }
13
+
14
+ interface ResponsesAPIResponse {
15
+ output: { type: string; content: { type: string; text: string }[] }[];
16
+ }
17
+
18
+ describe("MockServer (end-to-end)", () => {
19
+ let server: MockServer;
20
+
21
+ beforeEach(async () => {
22
+ server = await createMock({ port: 0 });
23
+ });
24
+
25
+ afterEach(async () => {
26
+ await server.stop();
27
+ });
28
+
29
+ async function post(path: string, body: unknown): Promise<Response> {
30
+ return fetch(`${server.url}${path}`, {
31
+ method: "POST",
32
+ headers: { "Content-Type": "application/json" },
33
+ body: JSON.stringify(body),
34
+ });
35
+ }
36
+
37
+ async function postOpenAI(content: string, opts: Record<string, unknown> = {}): Promise<OpenAIResponse> {
38
+ const res = await post("/v1/chat/completions", {
39
+ model: "gpt-5.4",
40
+ messages: [{ role: "user", content }],
41
+ stream: false,
42
+ ...opts,
43
+ });
44
+ return res.json() as Promise<OpenAIResponse>;
45
+ }
46
+
47
+ async function postAnthropic(content: string, opts: Record<string, unknown> = {}): Promise<AnthropicResponse> {
48
+ const res = await post("/v1/messages", {
49
+ model: "claude-sonnet-4-6",
50
+ messages: [{ role: "user", content }],
51
+ max_tokens: 100,
52
+ stream: false,
53
+ ...opts,
54
+ });
55
+ return res.json() as Promise<AnthropicResponse>;
56
+ }
57
+
58
+ async function postResponses(input: string, opts: Record<string, unknown> = {}): Promise<ResponsesAPIResponse> {
59
+ const res = await post("/v1/responses", {
60
+ model: "codex-mini",
61
+ input,
62
+ stream: false,
63
+ ...opts,
64
+ });
65
+ return res.json() as Promise<ResponsesAPIResponse>;
66
+ }
67
+
68
+ async function readSSE(res: Response): Promise<string[]> {
69
+ const text = await res.text();
70
+ return text
71
+ .split("\n")
72
+ .filter((line) => line.startsWith("data: "))
73
+ .map((line) => line.slice(6));
74
+ }
75
+
76
+ describe("shared rules across endpoints", () => {
77
+ it("same rule matches on all three endpoints", async () => {
78
+ server.when("hello").reply("Hi there!");
79
+
80
+ const openai = await postOpenAI("hello");
81
+ expect(openai.choices[0]!.message.content).toBe("Hi there!");
82
+
83
+ const anthropic = await postAnthropic("hello");
84
+ expect(anthropic.content[0]!.text).toBe("Hi there!");
85
+
86
+ const responses = await postResponses("hello");
87
+ expect(responses.output[0]!.content[0]!.text).toBe("Hi there!");
88
+ });
89
+ });
90
+
91
+ describe("OpenAI streaming", () => {
92
+ it("streams SSE chunks ending with [DONE]", async () => {
93
+ server.when("hello").reply("Hi!");
94
+ const res = await post("/v1/chat/completions", {
95
+ model: "gpt-5.4",
96
+ messages: [{ role: "user", content: "hello" }],
97
+ });
98
+ expect(res.headers.get("content-type")).toBe("text/event-stream");
99
+ const data = await readSSE(res);
100
+ expect(data.at(-1)).toBe("[DONE]");
101
+
102
+ const contentChunk = JSON.parse(data[1]!);
103
+ expect(contentChunk.choices[0].delta.content).toBe("Hi!");
104
+ });
105
+ });
106
+
107
+ describe("Anthropic streaming", () => {
108
+ it("streams named SSE events", async () => {
109
+ server.when("hello").reply("Hi!");
110
+ const res = await post("/v1/messages", {
111
+ model: "claude-sonnet-4-6",
112
+ messages: [{ role: "user", content: "hello" }],
113
+ max_tokens: 100,
114
+ });
115
+ const text = await res.text();
116
+ expect(text).toContain("event: message_start");
117
+ expect(text).toContain("event: content_block_delta");
118
+ expect(text).toContain("event: message_stop");
119
+ });
120
+ });
121
+
122
+ describe("Responses API streaming", () => {
123
+ it("streams response events", async () => {
124
+ server.when("hello").reply("Hi!");
125
+ const res = await post("/v1/responses", {
126
+ model: "codex-mini",
127
+ input: "hello",
128
+ });
129
+ const data = await readSSE(res);
130
+ const types = data.map((d) => JSON.parse(d).type);
131
+ expect(types).toContain("response.created");
132
+ expect(types).toContain("response.output_text.delta");
133
+ expect(types).toContain("response.completed");
134
+ });
135
+ });
136
+
137
+ describe("regex match", () => {
138
+ it("matches regex against last user message", async () => {
139
+ server.when(/explain (\w+)/i).reply("Here is an explanation.");
140
+ const json = await postOpenAI("Can you explain recursion?");
141
+ expect(json.choices[0]!.message.content).toBe("Here is an explanation.");
142
+ });
143
+ });
144
+
145
+ describe("dynamic resolver", () => {
146
+ it("calls resolver function with MockRequest", async () => {
147
+ server.when("hello").reply((req) => `You said: ${req.lastMessage}`);
148
+ const json = await postOpenAI("hello");
149
+ expect(json.choices[0]!.message.content).toBe("You said: hello");
150
+ });
151
+ });
152
+
153
+ describe("async resolver", () => {
154
+ it("supports async resolver functions", async () => {
155
+ server.when("async").reply(async () => {
156
+ return { text: "async result" };
157
+ });
158
+ const json = await postOpenAI("async");
159
+ expect(json.choices[0]!.message.content).toBe("async result");
160
+ });
161
+ });
162
+
163
+ describe("structured reply (text + reasoning)", () => {
164
+ it("sends text and reasoning in Anthropic format", async () => {
165
+ server.when("think").reply({ text: "42", reasoning: "Deep thought..." });
166
+ const json = await postAnthropic("think");
167
+ expect(json.content[0]!.type).toBe("thinking");
168
+ expect(json.content[0]!.thinking).toBe("Deep thought...");
169
+ expect(json.content[1]!.type).toBe("text");
170
+ expect(json.content[1]!.text).toBe("42");
171
+ });
172
+ });
173
+
174
+ describe("tool call reply", () => {
175
+ it("returns tool calls in OpenAI format", async () => {
176
+ server.when("read").reply({
177
+ tools: [{ name: "read_file", args: { path: "/tmp/foo" } }],
178
+ });
179
+ const json = await postOpenAI("read the file");
180
+ expect(json.choices[0]!.finish_reason).toBe("tool_calls");
181
+ });
182
+ });
183
+
184
+ describe("times()", () => {
185
+ it("rule is consumed after N matches", async () => {
186
+ server.when("once").reply("First time!").times(1);
187
+ server.fallback("Fallback.");
188
+
189
+ const j1 = await postOpenAI("once");
190
+ expect(j1.choices[0]!.message.content).toBe("First time!");
191
+
192
+ const j2 = await postOpenAI("once");
193
+ expect(j2.choices[0]!.message.content).toBe("Fallback.");
194
+ });
195
+ });
196
+
197
+ describe("fallback", () => {
198
+ it("uses fallback when no rule matches", async () => {
199
+ server.fallback("I don't understand.");
200
+ const json = await postOpenAI("something random");
201
+ expect(json.choices[0]!.message.content).toBe("I don't understand.");
202
+ });
203
+ });
204
+
205
+ describe("history", () => {
206
+ it("records requests with matched rule info", async () => {
207
+ server.when("hello").reply("Hi!");
208
+ await postOpenAI("hello");
209
+
210
+ expect(server.history.count()).toBe(1);
211
+ expect(server.history.last()?.request.lastMessage).toBe("hello");
212
+ expect(server.history.first()?.rule).toBe('"hello"');
213
+ });
214
+
215
+ it("captures request headers and path", async () => {
216
+ server.when("hello").reply("Hi!");
217
+ await fetch(`${server.url}/v1/chat/completions`, {
218
+ method: "POST",
219
+ headers: { "Content-Type": "application/json", "X-Custom": "test-value" },
220
+ body: JSON.stringify({
221
+ model: "gpt-5.4",
222
+ messages: [{ role: "user", content: "hello" }],
223
+ stream: false,
224
+ }),
225
+ });
226
+
227
+ const entry = server.history.last()!;
228
+ expect(entry.request.path).toBe("/v1/chat/completions");
229
+ expect(entry.request.headers["x-custom"]).toBe("test-value");
230
+ });
231
+ });
232
+
233
+ describe("request metadata in predicates", () => {
234
+ it("matches on headers", async () => {
235
+ server.when({ predicate: (req) => req.headers["x-team"] === "alpha" }).reply("Alpha team!");
236
+ server.when("hello").reply("Default");
237
+
238
+ const res = await fetch(`${server.url}/v1/chat/completions`, {
239
+ method: "POST",
240
+ headers: { "Content-Type": "application/json", "X-Team": "alpha" },
241
+ body: JSON.stringify({
242
+ model: "gpt-5.4",
243
+ messages: [{ role: "user", content: "hello" }],
244
+ stream: false,
245
+ }),
246
+ });
247
+
248
+ expect(await res.json()).toMatchObject({
249
+ choices: [{ message: { content: "Alpha team!" } }],
250
+ });
251
+ });
252
+ });
253
+
254
+ describe("rules", () => {
255
+ it("returns summaries of registered rules", () => {
256
+ server.when("hello").reply("Hi!");
257
+ server.when(/bye/i).reply("Goodbye!").times(3);
258
+
259
+ expect(server.rules).toEqual([
260
+ { description: '"hello"', remaining: Infinity },
261
+ { description: "/bye/i", remaining: 3 },
262
+ ]);
263
+ });
264
+ });
265
+
266
+ describe("isDone()", () => {
267
+ it("returns true when all limited rules consumed", async () => {
268
+ server.when("hello").reply("Hi!").times(1);
269
+ expect(server.isDone()).toBe(false);
270
+
271
+ await postOpenAI("hello");
272
+ expect(server.isDone()).toBe(true);
273
+ });
274
+ });
275
+
276
+ describe("replySequence()", () => {
277
+ it("advances through the sequence and then stops matching", async () => {
278
+ server.when("step").replySequence(["First.", "Second.", "Third."]);
279
+ server.fallback("Done.");
280
+
281
+ const results: string[] = [];
282
+ for (let i = 0; i < 4; i++) {
283
+ const json = await postOpenAI("step");
284
+ results.push(json.choices[0]!.message.content);
285
+ }
286
+
287
+ expect(results).toEqual(["First.", "Second.", "Third.", "Done."]);
288
+ });
289
+
290
+ it("supports per-step options", async () => {
291
+ server.when("step").replySequence([
292
+ "Plain.",
293
+ { reply: { text: "With options." }, options: { chunkSize: 5 } },
294
+ ]);
295
+
296
+ const json = await postOpenAI("step");
297
+ expect(json.choices[0]!.message.content).toBe("Plain.");
298
+ });
299
+
300
+ it("throws on empty sequence", () => {
301
+ expect(() => server.when("step").replySequence([])).toThrow(
302
+ "replySequence requires at least one entry",
303
+ );
304
+ });
305
+ });
306
+
307
+ describe("request validation", () => {
308
+ it("returns 400 for invalid request body", async () => {
309
+ const res = await post("/v1/chat/completions", { invalid: true });
310
+ expect(res.status).toBe(400);
311
+ const json = (await res.json()) as OpenAIResponse;
312
+ expect(json.error?.type).toBe("invalid_request_error");
313
+ });
314
+
315
+ it("returns 400 for Anthropic request missing max_tokens", async () => {
316
+ const res = await post("/v1/messages", {
317
+ model: "claude-sonnet-4-6",
318
+ messages: [{ role: "user", content: "hi" }],
319
+ });
320
+ expect(res.status).toBe(400);
321
+ });
322
+ });
323
+
324
+ describe("history fluent API", () => {
325
+ it("at() returns entry by index", async () => {
326
+ server.when("a").reply("A");
327
+ server.when("b").reply("B");
328
+ await postOpenAI("a");
329
+ await postOpenAI("b");
330
+
331
+ expect(server.history.at(0)?.request.lastMessage).toBe("a");
332
+ expect(server.history.at(1)?.request.lastMessage).toBe("b");
333
+ expect(server.history.at(99)).toBeUndefined();
334
+ });
335
+
336
+ it("all returns readonly array of entries", async () => {
337
+ server.when("hello").reply("Hi!");
338
+ await postOpenAI("hello");
339
+
340
+ expect(server.history.all).toHaveLength(1);
341
+ expect(server.history.all[0]?.request.lastMessage).toBe("hello");
342
+ });
343
+
344
+ it("is iterable with for...of", async () => {
345
+ server.when("hello").reply("Hi!");
346
+ await postOpenAI("hello");
347
+
348
+ const messages: string[] = [];
349
+ for (const entry of server.history) {
350
+ messages.push(entry.request.lastMessage);
351
+ }
352
+ expect(messages).toEqual(["hello"]);
353
+ });
354
+ });
355
+
356
+ describe("reset()", () => {
357
+ it("clears rules and history", async () => {
358
+ server.when("hello").reply("Hi!");
359
+ await postOpenAI("hello");
360
+ expect(server.history.count()).toBe(1);
361
+
362
+ server.reset();
363
+ expect(server.history.count()).toBe(0);
364
+ expect(server.ruleCount).toBe(0);
365
+ });
366
+ });
367
+
368
+ describe("model match", () => {
369
+ it("matches on model name", async () => {
370
+ server.when({ model: "gpt-5.4" }).reply("I'm GPT-5.4.");
371
+ server.when({ model: /claude/ }).reply("I'm Claude.");
372
+
373
+ const openai = await postOpenAI("who are you");
374
+ expect(openai.choices[0]!.message.content).toBe("I'm GPT-5.4.");
375
+
376
+ const anthropic = await postAnthropic("who are you");
377
+ expect(anthropic.content[0]!.text).toBe("I'm Claude.");
378
+ });
379
+ });
380
+
381
+ describe("url property", () => {
382
+ it("returns the base URL", () => {
383
+ expect(server.url).toMatch(/^http:\/\/127\.0\.0\.1:\d+$/);
384
+ });
385
+ });
386
+
387
+ describe("error injection", () => {
388
+ it("nextError returns a one-shot error response", async () => {
389
+ server.nextError(429, "Rate limited", "rate_limit_error");
390
+ server.when("hello").reply("Hi!");
391
+
392
+ const r1 = await post("/v1/chat/completions", {
393
+ model: "gpt-5.4",
394
+ messages: [{ role: "user", content: "hello" }],
395
+ stream: false,
396
+ });
397
+ expect(r1.status).toBe(429);
398
+ const err = (await r1.json()) as OpenAIResponse;
399
+ expect(err.error?.message).toBe("Rate limited");
400
+
401
+ const r2 = await post("/v1/chat/completions", {
402
+ model: "gpt-5.4",
403
+ messages: [{ role: "user", content: "hello" }],
404
+ stream: false,
405
+ });
406
+ expect(r2.status).toBe(200);
407
+ });
408
+
409
+ it("error reply works as a normal rule", async () => {
410
+ server.when("fail").reply({ error: { status: 500, message: "Internal error" } });
411
+ server.when("hello").reply("Hi!");
412
+
413
+ const r1 = await post("/v1/chat/completions", {
414
+ model: "gpt-5.4",
415
+ messages: [{ role: "user", content: "fail please" }],
416
+ stream: false,
417
+ });
418
+ expect(r1.status).toBe(500);
419
+
420
+ const r2 = await post("/v1/chat/completions", {
421
+ model: "gpt-5.4",
422
+ messages: [{ role: "user", content: "hello" }],
423
+ stream: false,
424
+ });
425
+ expect(r2.status).toBe(200);
426
+ });
427
+ });
428
+
429
+ describe("chunkSize", () => {
430
+ it("splits text into multiple SSE delta chunks", async () => {
431
+ server.when("hello").reply("Hello, world!", { chunkSize: 5 });
432
+ const res = await post("/v1/chat/completions", {
433
+ model: "gpt-5.4",
434
+ messages: [{ role: "user", content: "hello" }],
435
+ });
436
+ const data = await readSSE(res);
437
+ const contentDeltas = data
438
+ .filter((d) => d !== "[DONE]")
439
+ .map((d) => JSON.parse(d))
440
+ .filter((d: { choices?: { delta?: { content?: string } }[] }) =>
441
+ d.choices?.[0]?.delta?.content !== undefined,
442
+ )
443
+ .map((d: { choices: { delta: { content: string } }[] }) =>
444
+ d.choices[0]!.delta.content,
445
+ );
446
+ expect(contentDeltas.length).toBe(3);
447
+ expect(contentDeltas.join("")).toBe("Hello, world!");
448
+ });
449
+ });
450
+
451
+ describe("whenTool()", () => {
452
+ it("matches when request has the specified tool", async () => {
453
+ server.whenTool("get_weather").reply("Weather tool detected!");
454
+ server.fallback("No match.");
455
+
456
+ const j1 = await postOpenAI("what's the weather?", {
457
+ tools: [{ type: "function", function: { name: "get_weather", parameters: {} } }],
458
+ });
459
+ expect(j1.choices[0]!.message.content).toBe("Weather tool detected!");
460
+
461
+ const j2 = await postOpenAI("what's the weather?");
462
+ expect(j2.choices[0]!.message.content).toBe("No match.");
463
+ });
464
+ });
465
+
466
+ describe("whenToolResult()", () => {
467
+ it("matches when request has a tool result with the specified id", async () => {
468
+ server.whenToolResult("call_abc").reply("Got the tool result!");
469
+ server.fallback("No match.");
470
+
471
+ const json = await postOpenAI("use the tool", {
472
+ messages: [
473
+ { role: "user", content: "use the tool" },
474
+ { role: "assistant", content: null, tool_calls: [{ id: "call_abc", type: "function", function: { name: "test", arguments: "{}" } }] },
475
+ { role: "tool", tool_call_id: "call_abc", content: "result data" },
476
+ ],
477
+ });
478
+ expect(json.choices[0]!.message.content).toBe("Got the tool result!");
479
+ });
480
+ });
481
+
482
+ describe(".first()", () => {
483
+ it("moves a rule to the front of the match list", async () => {
484
+ server.when("hello").reply("First rule");
485
+ server.when("hello").reply("Second rule").first();
486
+
487
+ const json = await postOpenAI("hello");
488
+ expect(json.choices[0]!.message.content).toBe("Second rule");
489
+ });
490
+ });
491
+
492
+ describe(".times() chaining", () => {
493
+ it("returns RuleHandle for chaining with .first()", async () => {
494
+ server.when("hello").reply("Normal");
495
+ server.when("hello").reply("Priority one-shot").times(1).first();
496
+ server.fallback("Fallback.");
497
+
498
+ const j1 = await postOpenAI("hello");
499
+ expect(j1.choices[0]!.message.content).toBe("Priority one-shot");
500
+
501
+ const j2 = await postOpenAI("hello");
502
+ expect(j2.choices[0]!.message.content).toBe("Normal");
503
+ });
504
+ });
505
+
506
+ describe("resolver error handling", () => {
507
+ it("falls back when resolver throws", async () => {
508
+ server.when("boom").reply(() => { throw new Error("resolver failed"); });
509
+ server.fallback("Safe fallback.");
510
+
511
+ const json = await postOpenAI("boom");
512
+ expect(json.choices[0]!.message.content).toBe("Safe fallback.");
513
+ });
514
+ });
515
+
516
+ describe("async dispose", () => {
517
+ it("stops the server via Symbol.asyncDispose", async () => {
518
+ const s = await createMock({ port: 0 });
519
+ const url = s.url;
520
+ await s[Symbol.asyncDispose]();
521
+ await expect(fetch(`${url}/v1/chat/completions`)).rejects.toThrow();
522
+ });
523
+ });
524
+
525
+ describe("logging", () => {
526
+ it("exercises all log levels including warn and error paths", async () => {
527
+ const s = await createMock({ port: 0, logLevel: "debug" });
528
+
529
+ s.when("test").reply("ok");
530
+ await fetch(`${s.url}/v1/chat/completions`, {
531
+ method: "POST",
532
+ headers: { "Content-Type": "application/json" },
533
+ body: JSON.stringify({ model: "gpt-5.4", messages: [{ role: "user", content: "test" }], stream: false }),
534
+ });
535
+
536
+ await fetch(`${s.url}/v1/chat/completions`, {
537
+ method: "POST",
538
+ headers: { "Content-Type": "application/json" },
539
+ body: JSON.stringify({ model: "gpt-5.4", messages: [{ role: "user", content: "unmatched" }], stream: false }),
540
+ });
541
+
542
+ s.when("throw").reply(() => { throw new Error("boom"); });
543
+ await fetch(`${s.url}/v1/chat/completions`, {
544
+ method: "POST",
545
+ headers: { "Content-Type": "application/json" },
546
+ body: JSON.stringify({ model: "gpt-5.4", messages: [{ role: "user", content: "throw" }], stream: false }),
547
+ });
548
+
549
+ await s.stop();
550
+ });
551
+ });
552
+
553
+ describe("streaming with latency", () => {
554
+ it("streams with latency between chunks", async () => {
555
+ server.when("hello").reply("Hi!", { latency: 5 });
556
+ const res = await post("/v1/chat/completions", {
557
+ model: "gpt-5.4",
558
+ messages: [{ role: "user", content: "hello" }],
559
+ });
560
+ expect(res.headers.get("content-type")).toBe("text/event-stream");
561
+ const data = await readSSE(res);
562
+ expect(data.at(-1)).toBe("[DONE]");
563
+ });
564
+ });
565
+ });