@durable-streams/client-conformance-tests 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +451 -0
  2. package/dist/adapters/typescript-adapter.d.ts +1 -0
  3. package/dist/adapters/typescript-adapter.js +586 -0
  4. package/dist/benchmark-runner-C_Yghc8f.js +1333 -0
  5. package/dist/cli.d.ts +1 -0
  6. package/dist/cli.js +265 -0
  7. package/dist/index.d.ts +508 -0
  8. package/dist/index.js +4 -0
  9. package/dist/protocol-DyEvTHPF.d.ts +472 -0
  10. package/dist/protocol-qb83AeUH.js +120 -0
  11. package/dist/protocol.d.ts +2 -0
  12. package/dist/protocol.js +3 -0
  13. package/package.json +53 -0
  14. package/src/adapters/typescript-adapter.ts +848 -0
  15. package/src/benchmark-runner.ts +860 -0
  16. package/src/benchmark-scenarios.ts +311 -0
  17. package/src/cli.ts +294 -0
  18. package/src/index.ts +50 -0
  19. package/src/protocol.ts +656 -0
  20. package/src/runner.ts +1191 -0
  21. package/src/test-cases.ts +475 -0
  22. package/test-cases/consumer/cache-headers.yaml +150 -0
  23. package/test-cases/consumer/error-handling.yaml +108 -0
  24. package/test-cases/consumer/message-ordering.yaml +209 -0
  25. package/test-cases/consumer/offset-handling.yaml +209 -0
  26. package/test-cases/consumer/offset-resumption.yaml +197 -0
  27. package/test-cases/consumer/read-catchup.yaml +173 -0
  28. package/test-cases/consumer/read-longpoll.yaml +132 -0
  29. package/test-cases/consumer/read-sse.yaml +145 -0
  30. package/test-cases/consumer/retry-resilience.yaml +160 -0
  31. package/test-cases/consumer/streaming-equivalence.yaml +226 -0
  32. package/test-cases/lifecycle/dynamic-headers.yaml +147 -0
  33. package/test-cases/lifecycle/headers-params.yaml +117 -0
  34. package/test-cases/lifecycle/stream-lifecycle.yaml +148 -0
  35. package/test-cases/producer/append-data.yaml +142 -0
  36. package/test-cases/producer/batching.yaml +112 -0
  37. package/test-cases/producer/create-stream.yaml +87 -0
  38. package/test-cases/producer/error-handling.yaml +90 -0
  39. package/test-cases/producer/sequence-ordering.yaml +148 -0
package/src/benchmark-scenarios.ts ADDED
@@ -0,0 +1,311 @@
+ /**
+  * Benchmark scenario definitions.
+  *
+  * Each scenario defines what to measure, how many iterations,
+  * and success criteria for the benchmark.
+  */
+
+ import type { BenchmarkOperation } from "./protocol.js"
+
+ // =============================================================================
+ // Types
+ // =============================================================================
+
+ export interface BenchmarkScenario {
+   /** Unique scenario ID */
+   id: string
+   /** Human-readable name */
+   name: string
+   /** Description */
+   description: string
+   /** Category for grouping */
+   category: `latency` | `throughput` | `streaming`
+   /** Required client features */
+   requires?: Array<`batching` | `sse` | `longPoll` | `streaming`>
+   /** Scenario configuration */
+   config: BenchmarkScenarioConfig
+   /** Success criteria */
+   criteria?: BenchmarkCriteria
+   /** Factory to create benchmark operations for each iteration */
+   createOperation: (ctx: ScenarioContext) => BenchmarkOperation
+   /** Optional setup before running iterations */
+   setup?: (ctx: ScenarioContext) => Promise<SetupResult>
+   /** Optional cleanup after running iterations */
+   cleanup?: (ctx: ScenarioContext) => Promise<void>
+ }
+
+ export interface BenchmarkScenarioConfig {
+   /** Number of warmup iterations (not measured) */
+   warmupIterations: number
+   /** Number of measured iterations */
+   measureIterations: number
+   /** Message size in bytes (where applicable) */
+   messageSize: number
+   /** Concurrency level (for throughput tests) */
+   concurrency?: number
+ }
+
+ export interface BenchmarkCriteria {
+   /** Maximum acceptable p50 latency in ms */
+   maxP50Ms?: number
+   /** Maximum acceptable p99 latency in ms */
+   maxP99Ms?: number
+   /** Minimum throughput in operations/second */
+   minOpsPerSecond?: number
+   /** Minimum throughput in MB/second */
+   minMBPerSecond?: number
+ }
+
+ export interface ScenarioContext {
+   /** Base path for streams (unique per scenario run) */
+   basePath: string
+   /** Current iteration number (0-based) */
+   iteration: number
+   /** Stored values from setup */
+   setupData: Record<string, unknown>
+ }
+
+ export interface SetupResult {
+   /** Data to pass to each iteration */
+   data?: Record<string, unknown>
+ }
+
+ // =============================================================================
+ // Latency Scenarios
+ // =============================================================================
+
+ export const appendLatencyScenario: BenchmarkScenario = {
+   id: `latency-append`,
+   name: `Append Latency`,
+   description: `Measure time to complete a single append operation`,
+   category: `latency`,
+   config: {
+     warmupIterations: 10,
+     measureIterations: 100,
+     messageSize: 100, // 100 bytes
+   },
+   criteria: {
+     maxP50Ms: 20,
+     maxP99Ms: 100,
+   },
+   createOperation: (ctx) => ({
+     op: `append`,
+     path: `${ctx.basePath}/stream`,
+     size: 100,
+   }),
+   setup: (ctx) => {
+     ctx.setupData.streamPath = `${ctx.basePath}/stream`
+     return Promise.resolve({})
+   },
+ }
+
+ export const readLatencyScenario: BenchmarkScenario = {
+   id: `latency-read`,
+   name: `Read Latency`,
+   description: `Measure time to complete a single read operation`,
+   category: `latency`,
+   config: {
+     warmupIterations: 10,
+     measureIterations: 100,
+     messageSize: 100,
+   },
+   criteria: {
+     maxP50Ms: 20,
+     maxP99Ms: 100,
+   },
+   createOperation: (ctx) => ({
+     op: `read`,
+     path: `${ctx.basePath}/stream`,
+     offset: ctx.setupData.offset as string | undefined,
+   }),
+   setup: (ctx) => {
+     ctx.setupData.streamPath = `${ctx.basePath}/stream`
+     return Promise.resolve({})
+   },
+ }
+
+ export const roundtripLatencyScenario: BenchmarkScenario = {
+   id: `latency-roundtrip`,
+   name: `Roundtrip Latency`,
+   description: `Measure time to append and immediately read back via long-poll`,
+   category: `latency`,
+   requires: [`longPoll`],
+   config: {
+     warmupIterations: 5,
+     measureIterations: 50,
+     messageSize: 100,
+   },
+   criteria: {
+     maxP50Ms: 50,
+     maxP99Ms: 200,
+   },
+   createOperation: (ctx) => ({
+     op: `roundtrip`,
+     path: `${ctx.basePath}/roundtrip-${ctx.iteration}`,
+     size: 100,
+     live: `long-poll`,
+   }),
+ }
+
+ export const createLatencyScenario: BenchmarkScenario = {
+   id: `latency-create`,
+   name: `Create Latency`,
+   description: `Measure time to create a new stream`,
+   category: `latency`,
+   config: {
+     warmupIterations: 5,
+     measureIterations: 50,
+     messageSize: 0,
+   },
+   criteria: {
+     maxP50Ms: 30,
+     maxP99Ms: 150,
+   },
+   createOperation: (ctx) => ({
+     op: `create`,
+     path: `${ctx.basePath}/create-${ctx.iteration}`,
+     contentType: `application/octet-stream`,
+   }),
+ }
+
+ // =============================================================================
+ // Throughput Scenarios
+ // =============================================================================
+
+ export const smallMessageThroughputScenario: BenchmarkScenario = {
+   id: `throughput-small-messages`,
+   name: `Small Message Throughput`,
+   description: `Measure throughput for 100-byte messages at high concurrency`,
+   category: `throughput`,
+   requires: [`batching`],
+   config: {
+     warmupIterations: 2,
+     measureIterations: 10,
+     messageSize: 100,
+     concurrency: 200,
+   },
+   criteria: {
+     minOpsPerSecond: 1000,
+   },
+   createOperation: (ctx) => ({
+     op: `throughput_append`,
+     path: `${ctx.basePath}/throughput-small`,
+     count: 10000,
+     size: 100,
+     concurrency: 200,
+   }),
+ }
+
+ export const largeMessageThroughputScenario: BenchmarkScenario = {
+   id: `throughput-large-messages`,
+   name: `Large Message Throughput`,
+   description: `Measure throughput for 1MB messages`,
+   category: `throughput`,
+   requires: [`batching`],
+   config: {
+     warmupIterations: 1,
+     measureIterations: 5,
+     messageSize: 1024 * 1024, // 1MB
+     concurrency: 10,
+   },
+   criteria: {
+     minOpsPerSecond: 20,
+   },
+   createOperation: (ctx) => ({
+     op: `throughput_append`,
+     path: `${ctx.basePath}/throughput-large`,
+     count: 50,
+     size: 1024 * 1024,
+     concurrency: 10,
+   }),
+ }
+
+ export const readThroughputScenario: BenchmarkScenario = {
+   id: `throughput-read`,
+   name: `Read Throughput`,
+   description: `Measure JSON parsing and iteration speed reading back messages`,
+   category: `throughput`,
+   config: {
+     warmupIterations: 1,
+     measureIterations: 5,
+     messageSize: 100, // ~100 bytes per JSON message
+   },
+   criteria: {
+     minMBPerSecond: 3, // Python is slower, so use lower threshold
+   },
+   createOperation: (ctx) => ({
+     op: `throughput_read`,
+     path: `${ctx.basePath}/throughput-read`,
+     expectedCount: ctx.setupData.expectedCount as number | undefined,
+   }),
+   setup: (ctx) => {
+     // Expecting 10000 JSON messages to be pre-populated
+     ctx.setupData.expectedCount = 10000
+     return Promise.resolve({ data: { expectedCount: 10000 } })
+   },
+ }
+
+ // =============================================================================
+ // Streaming Scenarios
+ // =============================================================================
+
+ export const sseLatencyScenario: BenchmarkScenario = {
+   id: `streaming-sse-latency`,
+   name: `SSE First Event Latency`,
+   description: `Measure time to receive first event via SSE`,
+   category: `streaming`,
+   requires: [`sse`],
+   config: {
+     warmupIterations: 3,
+     measureIterations: 20,
+     messageSize: 100,
+   },
+   criteria: {
+     maxP50Ms: 100,
+     maxP99Ms: 500,
+   },
+   createOperation: (ctx) => ({
+     op: `roundtrip`,
+     path: `${ctx.basePath}/sse-latency-${ctx.iteration}`,
+     size: 100,
+     live: `sse`,
+     contentType: `application/json`, // SSE requires JSON-compatible content type
+   }),
+ }
+
+ // =============================================================================
+ // All Scenarios
+ // =============================================================================
+
+ export const allScenarios: Array<BenchmarkScenario> = [
+   // Latency
+   appendLatencyScenario,
+   readLatencyScenario,
+   roundtripLatencyScenario,
+   createLatencyScenario,
+   // Throughput
+   smallMessageThroughputScenario,
+   largeMessageThroughputScenario,
+   readThroughputScenario,
+   // Streaming
+   sseLatencyScenario,
+ ]
+
+ export const scenariosByCategory: Record<
+   `latency` | `throughput` | `streaming`,
+   Array<BenchmarkScenario>
+ > = {
+   latency: allScenarios.filter((s) => s.category === `latency`),
+   throughput: allScenarios.filter((s) => s.category === `throughput`),
+   streaming: allScenarios.filter((s) => s.category === `streaming`),
+ }
+
+ export function getScenarioById(id: string): BenchmarkScenario | undefined {
+   return allScenarios.find((s) => s.id === id)
+ }
+
+ export function getScenariosByCategory(
+   category: `latency` | `throughput` | `streaming`
+ ): Array<BenchmarkScenario> {
+   return scenariosByCategory[category]
+ }
package/src/cli.ts ADDED
@@ -0,0 +1,294 @@
+ #!/usr/bin/env node
+ /**
+  * CLI for running client conformance tests and benchmarks.
+  *
+  * Usage:
+  *   npx @durable-streams/client-conformance-tests --run ts
+  *   npx @durable-streams/client-conformance-tests --run ./my-python-client
+  *   npx @durable-streams/client-conformance-tests --run ./client --suite producer
+  *   npx @durable-streams/client-conformance-tests --bench ts
+  */
+
+ import { runConformanceTests } from "./runner.js"
+ import { runBenchmarks } from "./benchmark-runner.js"
+ import type { RunnerOptions } from "./runner.js"
+ import type { BenchmarkRunnerOptions } from "./benchmark-runner.js"
+
+ const HELP = `
+ Durable Streams Client Conformance Test Suite
+
+ Usage:
+   npx @durable-streams/client-conformance-tests --run <adapter> [options]
+   npx @durable-streams/client-conformance-tests --bench <adapter> [options]
+
+ Arguments:
+   <adapter>          Path to client adapter executable, or "ts" for built-in TypeScript adapter
+
+ Conformance Test Options:
+   --run <adapter>    Run conformance tests with the specified adapter
+   --suite <name>     Run only specific suite(s): producer, consumer, lifecycle
+                      Can be specified multiple times
+   --tag <name>       Run only tests with specific tag(s)
+                      Can be specified multiple times
+   --fail-fast        Stop on first test failure
+   --timeout <ms>     Timeout for each test in milliseconds (default: 30000)
+
+ Benchmark Options:
+   --bench <adapter>  Run benchmarks with the specified adapter
+   --scenario <id>    Run only specific scenario(s) by ID
+                      Can be specified multiple times
+   --category <name>  Run only scenarios in category: latency, throughput, streaming
+                      Can be specified multiple times
+   --format <fmt>     Output format: console, json, markdown (default: console)
+
+ Common Options:
+   --verbose          Show detailed output for each operation
+   --port <port>      Port for reference server (default: random)
+   --help, -h         Show this help message
+
+ Conformance Test Examples:
+   # Test the TypeScript client
+   npx @durable-streams/client-conformance-tests --run ts
+
+   # Test a Python client adapter
+   npx @durable-streams/client-conformance-tests --run ./adapters/python_adapter.py
+
+   # Test only producer functionality
+   npx @durable-streams/client-conformance-tests --run ts --suite producer
+
+   # Test with verbose output and stop on first failure
+   npx @durable-streams/client-conformance-tests --run ts --verbose --fail-fast
+
+ Benchmark Examples:
+   # Run all benchmarks with TypeScript client
+   npx @durable-streams/client-conformance-tests --bench ts
+
+   # Run only latency benchmarks
+   npx @durable-streams/client-conformance-tests --bench ts --category latency
+
+   # Run specific scenario
+   npx @durable-streams/client-conformance-tests --bench ts --scenario latency-append
+
+   # Output as JSON for CI
+   npx @durable-streams/client-conformance-tests --bench ts --format json
+
+ Implementing a Client Adapter:
+   A client adapter is an executable that communicates via stdin/stdout using
+   a JSON-line protocol. See the documentation for the protocol specification
+   and examples in different languages.
+
+   The adapter receives JSON commands on stdin (one per line) and responds
+   with JSON results on stdout (one per line).
+
+   Commands: init, create, connect, append, read, head, delete, shutdown, benchmark
+
+   Example flow:
+     Runner -> Client: {"type":"init","serverUrl":"http://localhost:3000"}
+     Client -> Runner: {"type":"init","success":true,"clientName":"my-client","clientVersion":"1.0.0"}
+     Runner -> Client: {"type":"create","path":"/test-stream"}
+     Client -> Runner: {"type":"create","success":true,"status":201}
+     ...
+ `
+
+ type ParsedOptions =
+   | { mode: `conformance`; options: RunnerOptions }
+   | { mode: `benchmark`; options: BenchmarkRunnerOptions }
+   | null
+
+ function parseArgs(args: Array<string>): ParsedOptions {
+   let mode: `conformance` | `benchmark` | null = null
+   let clientAdapter = ``
+
+   // Conformance-specific options
+   const suites: Array<`producer` | `consumer` | `lifecycle`> = []
+   const tags: Array<string> = []
+   let failFast = false
+   let testTimeout = 30000
+
+   // Benchmark-specific options
+   const scenarios: Array<string> = []
+   const categories: Array<`latency` | `throughput` | `streaming`> = []
+   let format: `console` | `json` | `markdown` = `console`
+
+   // Common options
+   let verbose = false
+   let serverPort = 0
+
+   let i = 0
+   while (i < args.length) {
+     const arg = args[i]!
+
+     if (arg === `--help` || arg === `-h`) {
+       console.log(HELP)
+       process.exit(0)
+     }
+
+     if (arg === `--run`) {
+       mode = `conformance`
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --run requires an adapter path`)
+         return null
+       }
+       clientAdapter = args[i]!
+     } else if (arg === `--bench`) {
+       mode = `benchmark`
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --bench requires an adapter path`)
+         return null
+       }
+       clientAdapter = args[i]!
+     } else if (arg === `--suite`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --suite requires a suite name`)
+         return null
+       }
+       const suite = args[i] as `producer` | `consumer` | `lifecycle`
+       if (![`producer`, `consumer`, `lifecycle`].includes(suite)) {
+         console.error(
+           `Error: Invalid suite "${suite}". Must be: producer, consumer, lifecycle`
+         )
+         return null
+       }
+       suites.push(suite)
+     } else if (arg === `--tag`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --tag requires a tag name`)
+         return null
+       }
+       tags.push(args[i]!)
+     } else if (arg === `--scenario`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --scenario requires a scenario ID`)
+         return null
+       }
+       scenarios.push(args[i]!)
+     } else if (arg === `--category`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --category requires a category name`)
+         return null
+       }
+       const category = args[i] as `latency` | `throughput` | `streaming`
+       if (![`latency`, `throughput`, `streaming`].includes(category)) {
+         console.error(
+           `Error: Invalid category "${category}". Must be: latency, throughput, streaming`
+         )
+         return null
+       }
+       categories.push(category)
+     } else if (arg === `--format`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --format requires a format name`)
+         return null
+       }
+       const fmt = args[i] as `console` | `json` | `markdown`
+       if (![`console`, `json`, `markdown`].includes(fmt)) {
+         console.error(
+           `Error: Invalid format "${fmt}". Must be: console, json, markdown`
+         )
+         return null
+       }
+       format = fmt
+     } else if (arg === `--verbose`) {
+       verbose = true
+     } else if (arg === `--fail-fast`) {
+       failFast = true
+     } else if (arg === `--timeout`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --timeout requires a value in milliseconds`)
+         return null
+       }
+       testTimeout = parseInt(args[i]!, 10)
+       if (isNaN(testTimeout)) {
+         console.error(`Error: --timeout must be a number`)
+         return null
+       }
+     } else if (arg === `--port`) {
+       i++
+       if (i >= args.length) {
+         console.error(`Error: --port requires a port number`)
+         return null
+       }
+       serverPort = parseInt(args[i]!, 10)
+       if (isNaN(serverPort)) {
+         console.error(`Error: --port must be a number`)
+         return null
+       }
+     } else if (arg.startsWith(`-`)) {
+       console.error(`Error: Unknown option "${arg}"`)
+       return null
+     }
+
+     i++
+   }
+
+   // Validate required options
+   if (!mode || !clientAdapter) {
+     console.error(`Error: --run <adapter> or --bench <adapter> is required`)
+     console.log(`\nRun with --help for usage information`)
+     return null
+   }
+
+   if (mode === `conformance`) {
+     const options: RunnerOptions = {
+       clientAdapter,
+       verbose,
+       failFast,
+       testTimeout,
+       serverPort,
+     }
+     if (suites.length > 0) options.suites = suites
+     if (tags.length > 0) options.tags = tags
+     return { mode: `conformance`, options }
+   } else {
+     const options: BenchmarkRunnerOptions = {
+       clientAdapter,
+       verbose,
+       serverPort,
+       format,
+     }
+     if (scenarios.length > 0) options.scenarios = scenarios
+     if (categories.length > 0) options.categories = categories
+     return { mode: `benchmark`, options }
+   }
+ }
+
+ async function main(): Promise<void> {
+   const args = process.argv.slice(2)
+
+   if (args.length === 0) {
+     console.log(HELP)
+     process.exit(0)
+   }
+
+   const parsed = parseArgs(args)
+   if (!parsed) {
+     process.exit(1)
+   }
+
+   try {
+     if (parsed.mode === `conformance`) {
+       const summary = await runConformanceTests(parsed.options)
+       if (summary.failed > 0) {
+         process.exit(1)
+       }
+     } else {
+       const summary = await runBenchmarks(parsed.options)
+       if (summary.failed > 0) {
+         process.exit(1)
+       }
+     }
+   } catch (err) {
+     console.error(`Error running ${parsed.mode}:`, err)
+     process.exit(1)
+   }
+ }
+
+ main()
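
As an illustration of the stdin/stdout protocol the HELP text describes, here is a minimal adapter skeleton (an editorial addition). Only the `type`, `serverUrl`, `path`, `success`, `status`, `clientName`, and `clientVersion` fields come from the example flow above; the shutdown response shape and the error field are assumptions, and the full message contract lives in `package/src/protocol.ts`:

```ts
#!/usr/bin/env node
// Minimal adapter skeleton for the JSON-line protocol sketched in HELP above.
// Assumption: every command carries a "type"; fields beyond the example flow
// are placeholders, not the real protocol (see src/protocol.ts for the types).
import { createInterface } from "node:readline"

function respond(msg: Record<string, unknown>): void {
  process.stdout.write(JSON.stringify(msg) + "\n")
}

const rl = createInterface({ input: process.stdin })

rl.on("line", (line) => {
  const cmd = JSON.parse(line) as { type: string; [key: string]: unknown }
  switch (cmd.type) {
    case "init":
      respond({
        type: "init",
        success: true,
        clientName: "skeleton",
        clientVersion: "0.0.0",
      })
      break
    case "shutdown":
      respond({ type: "shutdown", success: true }) // response shape assumed
      process.exit(0)
    default:
      // A real adapter would run create/append/read/... against cmd.path here.
      respond({ type: cmd.type, success: false, error: "not implemented" })
  }
})
```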
package/src/index.ts ADDED
@@ -0,0 +1,50 @@
+ /**
+  * Client Conformance Test Suite for Durable Streams
+  *
+  * This package provides a comprehensive test suite to verify that a client
+  * correctly implements the Durable Streams protocol for both producers and consumers,
+  * along with performance benchmarking capabilities.
+  *
+  * @packageDocumentation
+  */
+
+ // Conformance testing
+ export {
+   runConformanceTests,
+   loadEmbeddedTestSuites,
+   filterByCategory,
+   countTests,
+   type RunnerOptions,
+   type TestRunResult,
+   type RunSummary,
+ } from "./runner.js"
+
+ export {
+   type TestSuite,
+   type TestCase,
+   type TestOperation,
+   type ClientFeature,
+   loadTestSuites,
+ } from "./test-cases.js"
+
+ // Benchmarking
+ export {
+   runBenchmarks,
+   allScenarios,
+   getScenarioById,
+   type BenchmarkRunnerOptions,
+   type ScenarioResult,
+   type BenchmarkSummary,
+ } from "./benchmark-runner.js"
+
+ export {
+   type BenchmarkScenario,
+   type BenchmarkScenarioConfig,
+   type BenchmarkCriteria,
+   type ScenarioContext,
+   getScenariosByCategory,
+   scenariosByCategory,
+ } from "./benchmark-scenarios.js"
+
+ // Re-export protocol types for adapter implementers
+ export * from "./protocol.js"
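
For reference, these exports also allow running the suite programmatically instead of via cli.ts. A minimal sketch (an editorial addition, for an ESM context with top-level await): the option fields mirror exactly what cli.ts constructs for `--run ts --suite producer`, while the precise `RunnerOptions` contract lives in `package/src/runner.ts`:

```ts
import { runConformanceTests } from "@durable-streams/client-conformance-tests"

// Run the producer suite against the built-in TypeScript adapter and
// fail the process on any test failure, as cli.ts does.
const summary = await runConformanceTests({
  clientAdapter: `ts`,
  suites: [`producer`],
  verbose: false,
  failFast: false,
  testTimeout: 30_000,
  serverPort: 0, // 0 = random port, per the --port default in HELP
})

if (summary.failed > 0) process.exit(1)
```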