@durable-streams/client-conformance-tests 0.1.5 → 0.1.7

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (30)
  1. package/dist/adapters/typescript-adapter.cjs +75 -2
  2. package/dist/adapters/typescript-adapter.js +76 -3
  3. package/dist/{benchmark-runner-C_Yghc8f.js → benchmark-runner-CrE6JkbX.js} +106 -12
  4. package/dist/{benchmark-runner-CLAR9oLd.cjs → benchmark-runner-Db4he452.cjs} +107 -12
  5. package/dist/cli.cjs +1 -1
  6. package/dist/cli.js +1 -1
  7. package/dist/index.cjs +1 -1
  8. package/dist/index.d.cts +126 -11
  9. package/dist/index.d.ts +126 -11
  10. package/dist/index.js +1 -1
  11. package/dist/{protocol-3cf94Xyb.d.cts → protocol-D37G3c4e.d.cts} +80 -4
  12. package/dist/{protocol-DyEvTHPF.d.ts → protocol-Mcbiq3nQ.d.ts} +80 -4
  13. package/dist/protocol.d.cts +2 -2
  14. package/dist/protocol.d.ts +2 -2
  15. package/package.json +3 -3
  16. package/src/adapters/typescript-adapter.ts +127 -5
  17. package/src/protocol.ts +85 -1
  18. package/src/runner.ts +202 -17
  19. package/src/test-cases.ts +130 -8
  20. package/test-cases/consumer/error-handling.yaml +42 -0
  21. package/test-cases/consumer/fault-injection.yaml +202 -0
  22. package/test-cases/consumer/offset-handling.yaml +209 -0
  23. package/test-cases/producer/idempotent/autoclaim.yaml +214 -0
  24. package/test-cases/producer/idempotent/batching.yaml +98 -0
  25. package/test-cases/producer/idempotent/concurrent-requests.yaml +100 -0
  26. package/test-cases/producer/idempotent/epoch-management.yaml +333 -0
  27. package/test-cases/producer/idempotent/error-handling.yaml +194 -0
  28. package/test-cases/producer/idempotent/multi-producer.yaml +322 -0
  29. package/test-cases/producer/idempotent/sequence-validation.yaml +339 -0
  30. package/test-cases/producer/idempotent-json-batching.yaml +134 -0
package/test-cases/consumer/fault-injection.yaml
@@ -0,0 +1,202 @@
+id: consumer-fault-injection
+name: Advanced Fault Injection
+description: Tests for client resilience against various network and server faults
+category: consumer
+tags:
+  - fault-injection
+  - resilience
+  - chaos
+  - advanced
+
+tests:
+  - id: delay-recovery
+    name: Client handles delayed responses
+    description: Client should successfully read data even with server delays
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "delayed-data"
+    operations:
+      # Inject a 500ms delay
+      - action: inject-error
+        path: ${streamPath}
+        delayMs: 500
+        count: 1
+      # Client should wait and succeed
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "delayed-data"
+    cleanup:
+      - action: clear-errors
+
+  - id: delay-with-jitter
+    name: Client handles delayed responses with jitter
+    description: Client should handle variable delays (delay + random jitter)
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "jittery-data"
+    operations:
+      # Inject delay with jitter (300-600ms total)
+      - action: inject-error
+        path: ${streamPath}
+        delayMs: 300
+        jitterMs: 300
+        count: 1
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "jittery-data"
+    cleanup:
+      - action: clear-errors
+
+  - id: connection-drop-recovery
+    name: Client recovers from dropped connection
+    description: Client should retry when connection is dropped mid-request
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "persistent-data"
+    operations:
+      # Drop connection once
+      - action: inject-error
+        path: ${streamPath}
+        dropConnection: true
+        count: 1
+      # Client should retry and eventually succeed
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "persistent-data"
+    cleanup:
+      - action: clear-errors
+
+  - id: multiple-connection-drops
+    name: Client recovers from multiple dropped connections
+    description: Client should retry through multiple connection failures
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "resilient-data"
+    operations:
+      # Drop connection twice
+      - action: inject-error
+        path: ${streamPath}
+        dropConnection: true
+        count: 2
+      # Client should retry multiple times and succeed
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "resilient-data"
+    cleanup:
+      - action: clear-errors
+
+  - id: method-specific-fault
+    name: Fault only affects specific HTTP method
+    description: Fault should only trigger for specified method
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      # Inject fault only for POST (append)
+      - action: inject-error
+        path: ${streamPath}
+        status: 503
+        method: POST
+        count: 1
+      # GET should work fine (no fault)
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: ""
+      # POST (append) should fail first then succeed on retry
+      - action: append
+        path: ${streamPath}
+        data: "method-filtered"
+      # Verify data was appended
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "method-filtered"
+    cleanup:
+      - action: clear-errors
+
+  - id: delay-then-error
+    name: Delay followed by error
+    description: Combined delay and error response
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "combined-fault-data"
+    operations:
+      # Delay 200ms then return 503
+      - action: inject-error
+        path: ${streamPath}
+        delayMs: 200
+        status: 503
+        count: 1
+      # Client should wait, get error, retry, and succeed
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "combined-fault-data"
+    cleanup:
+      - action: clear-errors
+
+  - id: append-with-delay
+    name: Append succeeds with server delay
+    description: Append should complete successfully even with server-side delay
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      # Add 500ms delay on first append
+      - action: inject-error
+        path: ${streamPath}
+        delayMs: 500
+        count: 1
+      # Append should wait and succeed
+      - action: append
+        path: ${streamPath}
+        data: "delayed-append"
+      # Verify data was appended
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "delayed-append"
+    cleanup:
+      - action: clear-errors
+
+  - id: delay-under-timeout
+    name: Client succeeds with delay under timeout
+    description: Client should succeed when delay is less than client timeout
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "within-timeout"
+    operations:
+      # 1 second delay (should be under most client timeouts)
+      - action: inject-error
+        path: ${streamPath}
+        delayMs: 1000
+        count: 1
+      - action: read
+        path: ${streamPath}
+        expect:
+          data: "within-timeout"
+    cleanup:
+      - action: clear-errors
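
Note: the fault-injection cases above assume a client that retries reads and appends through transient faults (injected delays, dropped connections, 503s). The TypeScript below is a minimal sketch of that retry shape only; the function name, URL shape, backoff policy, and error classification are assumptions for illustration, not the @durable-streams client's actual implementation.

async function readWithRetry(
  streamUrl: string,
  maxAttempts = 5,
  baseDelayMs = 100,
): Promise<string> {
  let lastError: unknown = new Error("readWithRetry: no attempts made");
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    if (attempt > 0) {
      // Exponential backoff between attempts.
      await new Promise((resolve) => setTimeout(resolve, baseDelayMs * 2 ** (attempt - 1)));
    }
    let res: Response;
    try {
      res = await fetch(streamUrl);
    } catch (err) {
      lastError = err; // a dropped connection surfaces as a rejected fetch -> retry
      continue;
    }
    if (res.ok) {
      return await res.text(); // an injected delay simply makes this resolve later
    }
    if (res.status >= 500) {
      lastError = new Error(`server fault: HTTP ${res.status}`); // e.g. injected 503 -> retry
      continue;
    }
    throw new Error(`non-retryable status: HTTP ${res.status}`);
  }
  throw lastError;
}

// Hypothetical usage; base URL and stream path are placeholders:
// const body = await readWithRetry("http://localhost:4437/streams/conformance-test");

A real client would also bound total elapsed time and honor per-request timeouts, which is what the delay-under-timeout case exercises.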
package/test-cases/consumer/offset-handling.yaml
@@ -30,6 +30,215 @@ tests:
         expect:
           data: "beginningmiddleend"
 
+  - id: offset-now-skips-historical
+    name: Offset 'now' skips historical data
+    description: Using offset=now should return empty body with tail offset
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "historical"
+      - action: append
+        path: ${streamPath}
+        data: "data"
+    operations:
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        expect:
+          chunkCount: 0
+          upToDate: true
+          storeOffsetAs: nowOffset
+      # Verify we can resume from the offset
+      - action: append
+        path: ${streamPath}
+        data: "future"
+      - action: read
+        path: ${streamPath}
+        offset: ${nowOffset}
+        expect:
+          data: "future"
+
+  - id: offset-now-empty-stream
+    name: Offset 'now' on empty stream
+    description: Using offset=now on an empty stream should work correctly
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        expect:
+          chunkCount: 0
+          upToDate: true
+          storeOffsetAs: startOffset
+      # Verify the offset works for future data
+      - action: append
+        path: ${streamPath}
+        data: "firstdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${startOffset}
+        expect:
+          data: "firstdata"
+
+  - id: offset-now-empty-stream-longpoll
+    name: Offset 'now' on empty stream with long-poll
+    description: Using offset=now with long-poll on an empty stream should wait for data
+    requires:
+      - longPoll
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      # Long-poll with offset=now on empty stream should timeout
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: long-poll
+        timeoutMs: 1000
+        expect:
+          status: 204
+          upToDate: true
+          storeOffsetAs: pollOffset
+      # Verify the offset is valid and works for future data
+      - action: append
+        path: ${streamPath}
+        data: "firstdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${pollOffset}
+        expect:
+          data: "firstdata"
+
+  - id: offset-now-empty-stream-sse
+    name: Offset 'now' on empty stream with SSE
+    description: Using offset=now with SSE on an empty stream should provide valid offset
+    requires:
+      - sse
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      # SSE with offset=now on empty stream
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: sse
+        waitForUpToDate: true
+        expect:
+          chunkCount: 0
+          upToDate: true
+          storeOffsetAs: sseOffset
+      # Verify the offset is valid and works for future data
+      - action: append
+        path: ${streamPath}
+        data: "firstdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${sseOffset}
+        expect:
+          data: "firstdata"
+
+  - id: offset-now-matches-tail
+    name: Offset 'now' returns same offset as tail
+    description: The offset from offset=now should match the stream tail offset
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "somedata"
+    operations:
+      # Get tail offset via normal read
+      - action: read
+        path: ${streamPath}
+        expect:
+          storeOffsetAs: tailOffset
+      # Get offset via offset=now
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        expect:
+          storeOffsetAs: nowOffset
+      # Append and verify both offsets work identically
+      - action: append
+        path: ${streamPath}
+        data: "newdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${tailOffset}
+        expect:
+          data: "newdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${nowOffset}
+        expect:
+          data: "newdata"
+
+  - id: offset-now-longpoll-waits
+    name: Offset 'now' with long-poll immediately waits for data
+    description: Using offset=now with long-poll should immediately start waiting for new data (no round trip)
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "existingdata"
+    operations:
+      # Long-poll with offset=now should timeout with 204 since no new data arrives
+      # This proves it immediately started waiting (skipping historical data)
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: long-poll
+        timeoutMs: 1000
+        expect:
+          status: 204
+          upToDate: true
+          storeOffsetAs: pollOffset
+      # Verify the offset works for future data
+      - action: append
+        path: ${streamPath}
+        data: "afterpoll"
+      - action: read
+        path: ${streamPath}
+        offset: ${pollOffset}
+        expect:
+          data: "afterpoll"
+
+  - id: offset-now-sse
+    name: Offset 'now' with SSE mode
+    description: Using offset=now with SSE should skip historical data and provide correct offset
+    requires:
+      - sse
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "historicalsse"
+    operations:
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: sse
+        waitForUpToDate: true
+        expect:
+          chunkCount: 0
+          storeOffsetAs: sseOffset
+      # Verify offset works
+      - action: append
+        path: ${streamPath}
+        data: "futuredata"
+      - action: read
+        path: ${streamPath}
+        offset: ${sseOffset}
+        expect:
+          data: "futuredata"
+
   - id: offset-empty-stream
     name: Read from empty stream
     description: Reading from an empty stream should return no data and up-to-date
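
Note: the offset-handling cases above all revolve around the offset=now read: it skips historical data, reports up-to-date, and hands back a tail offset that later reads can resume from. The TypeScript below sketches that flow against a hypothetical HTTP surface; the `offset` query parameter, the `X-Stream-Offset` response header, and the function name are invented for the sketch and are not taken from the protocol.

async function tailThenResume(streamUrl: string): Promise<string> {
  // 1. offset=now: skip all historical data and learn the current tail offset.
  const nowRes = await fetch(`${streamUrl}?offset=now`);
  if (!nowRes.ok) {
    throw new Error(`unexpected status: HTTP ${nowRes.status}`);
  }
  const resumeOffset = nowRes.headers.get("X-Stream-Offset"); // assumed header name
  if (resumeOffset === null) throw new Error("no resume offset returned");

  // 2. A later read from that offset sees only data appended after the first call,
  //    e.g. "future" in offset-now-skips-historical.
  const nextRes = await fetch(`${streamUrl}?offset=${encodeURIComponent(resumeOffset)}`);
  return await nextRes.text();
}

The long-poll and SSE variants exercise the same contract: even when the first response carries no chunks (a 204 timeout or an empty SSE stream), the returned offset must still be valid for resuming.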
package/test-cases/producer/idempotent/autoclaim.yaml
@@ -0,0 +1,214 @@
+id: idempotent-autoclaim
+name: Idempotent Producer - AutoClaim
+description: |
+  Tests for the autoClaim feature that enables ephemeral producers.
+  When autoClaim=true, the client automatically claims the next epoch
+  when fenced (403) and blocks pipelining until epoch is known.
+category: producer
+tags:
+  - idempotent
+  - autoclaim
+  - ephemeral
+
+tests:
+  - id: autoclaim-with-pipelining
+    name: autoClaim works with maxInFlight > 1
+    description: |
+      When autoClaim is true, the client should block pipelining until the first batch
+      completes and the epoch is known. After that, full pipelining should be enabled.
+      This test verifies that autoClaim + maxInFlight > 1 works correctly.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Send 20 messages with autoClaim=true and maxInFlight=5
+      # The first batch should block until epoch is claimed, then pipelining kicks in
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: autoclaim-producer
+        autoClaim: true
+        maxInFlight: 5
+        items:
+          - data: "ac-00"
+          - data: "ac-01"
+          - data: "ac-02"
+          - data: "ac-03"
+          - data: "ac-04"
+          - data: "ac-05"
+          - data: "ac-06"
+          - data: "ac-07"
+          - data: "ac-08"
+          - data: "ac-09"
+          - data: "ac-10"
+          - data: "ac-11"
+          - data: "ac-12"
+          - data: "ac-13"
+          - data: "ac-14"
+          - data: "ac-15"
+          - data: "ac-16"
+          - data: "ac-17"
+          - data: "ac-18"
+          - data: "ac-19"
+        expect:
+          allSucceed: true
+      # Verify all messages are in the stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "ac-00"
+            - "ac-05"
+            - "ac-10"
+            - "ac-15"
+            - "ac-19"
+          upToDate: true
+
+  - id: autoclaim-with-pipelining-json
+    name: autoClaim with pipelining works for JSON streams
+    description: Same test with JSON content type
+    setup:
+      - action: create
+        as: streamPath
+        contentType: application/json
+    operations:
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: autoclaim-json-producer
+        autoClaim: true
+        maxInFlight: 5
+        items:
+          - data: '{"msg": 0}'
+          - data: '{"msg": 1}'
+          - data: '{"msg": 2}'
+          - data: '{"msg": 3}'
+          - data: '{"msg": 4}'
+          - data: '{"msg": 5}'
+          - data: '{"msg": 6}'
+          - data: '{"msg": 7}'
+          - data: '{"msg": 8}'
+          - data: '{"msg": 9}'
+        expect:
+          allSucceed: true
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - '"msg":0'
+            - '"msg":5'
+            - '"msg":9'
+          upToDate: true
+
+  - id: autoclaim-recovers-from-stale-epoch
+    name: autoClaim recovers when fenced by newer epoch
+    description: |
+      If a producer with autoClaim gets 403 (stale epoch) because another instance
+      claimed a higher epoch, it should automatically bump its epoch and retry.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Establish epoch=5 via raw server-append (simulating another producer instance)
+      - action: server-append
+        path: ${streamPath}
+        data: "from-epoch-5"
+        producerId: recovery-producer
+        producerEpoch: 5
+        producerSeq: 0
+        expect:
+          status: 200
+      # Client with autoClaim starts at epoch=0, should get 403 then recover with epoch=6
+      - action: idempotent-append
+        path: ${streamPath}
+        producerId: recovery-producer
+        epoch: 0
+        autoClaim: true
+        data: "recovered"
+        expect:
+          success: true
+      # Verify both messages are in the stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "from-epoch-5"
+            - "recovered"
+          upToDate: true
+
+  - id: autoclaim-recovers-from-403
+    name: Client with autoClaim recovers from 403 fencing
+    description: |
+      When a producer with autoClaim=true gets fenced (403), it should
+      automatically claim the next epoch and retry successfully.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Another producer claims epoch=5
+      - action: server-append
+        path: ${streamPath}
+        data: "fencer"
+        producerId: autoclaim-producer
+        producerEpoch: 5
+        producerSeq: 0
+        expect:
+          status: 200
+      # Now use idempotent-append with autoClaim starting at epoch=0
+      # It should get 403, then retry with epoch=6
+      - action: idempotent-append
+        path: ${streamPath}
+        producerId: autoclaim-producer
+        epoch: 0
+        autoClaim: true
+        data: "recovered"
+        expect:
+          success: true
+      # Verify both messages are in stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "fencer"
+            - "recovered"
+          upToDate: true
+
+  - id: autoclaim-batch-recovers-from-403
+    name: Batch with autoClaim recovers from 403 fencing
+    description: Same test for batch operations
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Another producer claims epoch=3
+      - action: server-append
+        path: ${streamPath}
+        data: "blocker"
+        producerId: batch-autoclaim
+        producerEpoch: 3
+        producerSeq: 0
+        expect:
+          status: 200
+      # Batch with autoClaim should recover
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: batch-autoclaim
+        autoClaim: true
+        maxInFlight: 1
+        items:
+          - data: "batch-msg-0"
+          - data: "batch-msg-1"
+          - data: "batch-msg-2"
+        expect:
+          allSucceed: true
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "blocker"
+            - "batch-msg-0"
+            - "batch-msg-1"
+            - "batch-msg-2"
+          upToDate: true
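
Note: the autoclaim cases above describe the recovery loop an ephemeral producer is expected to run: append with its current epoch, and on a 403 fence claim the next epoch and retry. The TypeScript below is a minimal sketch of that loop only; the header names, the error payload carrying the fencing epoch, and the function name are assumptions for illustration, not the conformance protocol's actual wire format.

interface AppendResult {
  epoch: number;
}

async function idempotentAppendWithAutoClaim(
  streamUrl: string,
  producerId: string,
  data: string,
  startEpoch = 0,
): Promise<AppendResult> {
  let epoch = startEpoch;
  for (let attempt = 0; attempt < 3; attempt++) {
    const res = await fetch(streamUrl, {
      method: "POST",
      headers: {
        // Assumed header names for the idempotent-producer metadata.
        "Producer-Id": producerId,
        "Producer-Epoch": String(epoch),
        "Producer-Seq": "0",
      },
      body: data,
    });
    if (res.ok) return { epoch };
    if (res.status === 403) {
      // Fenced: another instance holds a newer epoch. With autoClaim, bump past it and retry.
      // (Assumes the error payload reports the fencing epoch, e.g. { currentEpoch: 5 }.)
      const body = (await res.json()) as { currentEpoch?: number };
      epoch = (body.currentEpoch ?? epoch) + 1;
      continue;
    }
    throw new Error(`append failed: HTTP ${res.status}`);
  }
  throw new Error("could not claim an epoch after repeated fencing");
}

With autoClaim plus maxInFlight > 1, a batch sender would hold further requests until this first append resolves and the claimed epoch is known, then pipeline the rest, which is what autoclaim-with-pipelining verifies.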