@durable-streams/client-conformance-tests 0.1.6 → 0.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/typescript-adapter.cjs +75 -3
- package/dist/adapters/typescript-adapter.js +76 -4
- package/dist/{benchmark-runner-D-YSAvRy.js → benchmark-runner-CrE6JkbX.js} +86 -8
- package/dist/{benchmark-runner-BlKqhoXE.cjs → benchmark-runner-Db4he452.cjs} +87 -8
- package/dist/cli.cjs +1 -1
- package/dist/cli.js +1 -1
- package/dist/index.cjs +1 -1
- package/dist/index.d.cts +106 -6
- package/dist/index.d.ts +106 -6
- package/dist/index.js +1 -1
- package/dist/{protocol-3cf94Xyb.d.cts → protocol-D37G3c4e.d.cts} +80 -4
- package/dist/{protocol-DyEvTHPF.d.ts → protocol-Mcbiq3nQ.d.ts} +80 -4
- package/dist/protocol.d.cts +2 -2
- package/dist/protocol.d.ts +2 -2
- package/package.json +3 -3
- package/src/adapters/typescript-adapter.ts +127 -6
- package/src/protocol.ts +85 -1
- package/src/runner.ts +178 -13
- package/src/test-cases.ts +110 -3
- package/test-cases/consumer/error-handling.yaml +42 -0
- package/test-cases/consumer/offset-handling.yaml +209 -0
- package/test-cases/producer/idempotent/autoclaim.yaml +214 -0
- package/test-cases/producer/idempotent/batching.yaml +98 -0
- package/test-cases/producer/idempotent/concurrent-requests.yaml +100 -0
- package/test-cases/producer/idempotent/epoch-management.yaml +333 -0
- package/test-cases/producer/idempotent/error-handling.yaml +194 -0
- package/test-cases/producer/idempotent/multi-producer.yaml +322 -0
- package/test-cases/producer/idempotent/sequence-validation.yaml +339 -0
- package/test-cases/producer/idempotent-json-batching.yaml +134 -0
package/test-cases/consumer/error-handling.yaml
@@ -56,6 +56,48 @@ tests:
           status: 400
           errorCode: INVALID_OFFSET

+  - id: read-offset-now-nonexistent
+    name: Read with offset=now on non-existent stream
+    description: offset=now should not mask a missing stream - must return 404
+    operations:
+      - action: read
+        path: /nonexistent-offset-now-stream
+        offset: "now"
+        live: false
+        expect:
+          status: 404
+          errorCode: NOT_FOUND
+
+  - id: read-offset-now-nonexistent-longpoll
+    name: Long-poll with offset=now on non-existent stream
+    description: offset=now with long-poll should not mask a missing stream - must return 404
+    requires:
+      - longPoll
+    operations:
+      - action: read
+        path: /nonexistent-offset-now-longpoll
+        offset: "now"
+        live: long-poll
+        timeoutMs: 1000
+        expect:
+          status: 404
+          errorCode: NOT_FOUND
+
+  - id: read-offset-now-nonexistent-sse
+    name: SSE with offset=now on non-existent stream
+    description: offset=now with SSE should not mask a missing stream - must return 404
+    requires:
+      - sse
+    operations:
+      - action: read
+        path: /nonexistent-offset-now-sse
+        offset: "now"
+        live: sse
+        timeoutMs: 1000
+        expect:
+          status: 404
+          errorCode: NOT_FOUND
+
   - id: read-future-offset
     name: Read with future offset
     description: Client should handle offset beyond stream end
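Note: all three new cases pin down the same ordering rule: the server must check stream existence before resolving offset=now, so a missing stream surfaces as 404 in every live mode rather than an empty success. A minimal TypeScript sketch of the client-side expectation, assuming the offset travels as an offset query parameter (the real parameter and header names are defined in package/src/protocol.ts):

// Sketch only: the `offset` query parameter is an assumption for
// illustration; see src/protocol.ts for the actual wire protocol.
async function readFromNow(baseUrl: string, path: string): Promise<Response> {
  const res = await fetch(`${baseUrl}${path}?offset=now`);
  if (res.status === 404) {
    // offset=now must not mask a missing stream, in any live mode.
    throw new Error(`stream not found: ${path}`);
  }
  return res;
}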
package/test-cases/consumer/offset-handling.yaml
@@ -30,6 +30,215 @@ tests:
         expect:
           data: "beginningmiddleend"

+  - id: offset-now-skips-historical
+    name: Offset 'now' skips historical data
+    description: Using offset=now should return empty body with tail offset
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "historical"
+      - action: append
+        path: ${streamPath}
+        data: "data"
+    operations:
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        expect:
+          chunkCount: 0
+          upToDate: true
+          storeOffsetAs: nowOffset
+      # Verify we can resume from the offset
+      - action: append
+        path: ${streamPath}
+        data: "future"
+      - action: read
+        path: ${streamPath}
+        offset: ${nowOffset}
+        expect:
+          data: "future"
+
+  - id: offset-now-empty-stream
+    name: Offset 'now' on empty stream
+    description: Using offset=now on an empty stream should work correctly
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        expect:
+          chunkCount: 0
+          upToDate: true
+          storeOffsetAs: startOffset
+      # Verify the offset works for future data
+      - action: append
+        path: ${streamPath}
+        data: "firstdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${startOffset}
+        expect:
+          data: "firstdata"
+
+  - id: offset-now-empty-stream-longpoll
+    name: Offset 'now' on empty stream with long-poll
+    description: Using offset=now with long-poll on an empty stream should wait for data
+    requires:
+      - longPoll
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      # Long-poll with offset=now on empty stream should timeout
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: long-poll
+        timeoutMs: 1000
+        expect:
+          status: 204
+          upToDate: true
+          storeOffsetAs: pollOffset
+      # Verify the offset is valid and works for future data
+      - action: append
+        path: ${streamPath}
+        data: "firstdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${pollOffset}
+        expect:
+          data: "firstdata"
+
+  - id: offset-now-empty-stream-sse
+    name: Offset 'now' on empty stream with SSE
+    description: Using offset=now with SSE on an empty stream should provide valid offset
+    requires:
+      - sse
+    setup:
+      - action: create
+        as: streamPath
+    operations:
+      # SSE with offset=now on empty stream
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: sse
+        waitForUpToDate: true
+        expect:
+          chunkCount: 0
+          upToDate: true
+          storeOffsetAs: sseOffset
+      # Verify the offset is valid and works for future data
+      - action: append
+        path: ${streamPath}
+        data: "firstdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${sseOffset}
+        expect:
+          data: "firstdata"
+
+  - id: offset-now-matches-tail
+    name: Offset 'now' returns same offset as tail
+    description: The offset from offset=now should match the stream tail offset
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "somedata"
+    operations:
+      # Get tail offset via normal read
+      - action: read
+        path: ${streamPath}
+        expect:
+          storeOffsetAs: tailOffset
+      # Get offset via offset=now
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        expect:
+          storeOffsetAs: nowOffset
+      # Append and verify both offsets work identically
+      - action: append
+        path: ${streamPath}
+        data: "newdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${tailOffset}
+        expect:
+          data: "newdata"
+      - action: read
+        path: ${streamPath}
+        offset: ${nowOffset}
+        expect:
+          data: "newdata"
+
+  - id: offset-now-longpoll-waits
+    name: Offset 'now' with long-poll immediately waits for data
+    description: Using offset=now with long-poll should immediately start waiting for new data (no round trip)
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "existingdata"
+    operations:
+      # Long-poll with offset=now should timeout with 204 since no new data arrives
+      # This proves it immediately started waiting (skipping historical data)
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: long-poll
+        timeoutMs: 1000
+        expect:
+          status: 204
+          upToDate: true
+          storeOffsetAs: pollOffset
+      # Verify the offset works for future data
+      - action: append
+        path: ${streamPath}
+        data: "afterpoll"
+      - action: read
+        path: ${streamPath}
+        offset: ${pollOffset}
+        expect:
+          data: "afterpoll"
+
+  - id: offset-now-sse
+    name: Offset 'now' with SSE mode
+    description: Using offset=now with SSE should skip historical data and provide correct offset
+    requires:
+      - sse
+    setup:
+      - action: create
+        as: streamPath
+      - action: append
+        path: ${streamPath}
+        data: "historicalsse"
+    operations:
+      - action: read
+        path: ${streamPath}
+        offset: "now"
+        live: sse
+        waitForUpToDate: true
+        expect:
+          chunkCount: 0
+          storeOffsetAs: sseOffset
+      # Verify offset works
+      - action: append
+        path: ${streamPath}
+        data: "futuredata"
+      - action: read
+        path: ${streamPath}
+        offset: ${sseOffset}
+        expect:
+          data: "futuredata"
+
   - id: offset-empty-stream
     name: Read from empty stream
     description: Reading from an empty stream should return no data and up-to-date
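Note: the contract these cases share is that a read with offset=now yields zero chunks but a resumable tail offset, identical to what a normal read reports at the tail. A TypeScript sketch of that flow; the Stream-Next-Offset response header and POST as the append verb are hypothetical names used for illustration, not the package's wire format:

// Illustrative only: header name and append verb are assumed here;
// the real names live in src/protocol.ts.
async function tailThenResume(baseUrl: string, path: string): Promise<string> {
  // offset=now: skip all historical data, but learn the tail offset.
  const now = await fetch(`${baseUrl}${path}?offset=now`);
  const tail = now.headers.get("Stream-Next-Offset");
  if (tail === null) throw new Error("expected a tail offset");
  // Anything appended after this point...
  await fetch(`${baseUrl}${path}`, { method: "POST", body: "future" });
  // ...is exactly what a read resumed from the stored offset returns.
  const resumed = await fetch(`${baseUrl}${path}?offset=${encodeURIComponent(tail)}`);
  return resumed.text(); // "future"
}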
package/test-cases/producer/idempotent/autoclaim.yaml
@@ -0,0 +1,214 @@
+id: idempotent-autoclaim
+name: Idempotent Producer - AutoClaim
+description: |
+  Tests for the autoClaim feature that enables ephemeral producers.
+  When autoClaim=true, the client automatically claims the next epoch
+  when fenced (403) and blocks pipelining until epoch is known.
+category: producer
+tags:
+  - idempotent
+  - autoclaim
+  - ephemeral
+
+tests:
+  - id: autoclaim-with-pipelining
+    name: autoClaim works with maxInFlight > 1
+    description: |
+      When autoClaim is true, the client should block pipelining until the first batch
+      completes and the epoch is known. After that, full pipelining should be enabled.
+      This test verifies that autoClaim + maxInFlight > 1 works correctly.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Send 20 messages with autoClaim=true and maxInFlight=5
+      # The first batch should block until epoch is claimed, then pipelining kicks in
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: autoclaim-producer
+        autoClaim: true
+        maxInFlight: 5
+        items:
+          - data: "ac-00"
+          - data: "ac-01"
+          - data: "ac-02"
+          - data: "ac-03"
+          - data: "ac-04"
+          - data: "ac-05"
+          - data: "ac-06"
+          - data: "ac-07"
+          - data: "ac-08"
+          - data: "ac-09"
+          - data: "ac-10"
+          - data: "ac-11"
+          - data: "ac-12"
+          - data: "ac-13"
+          - data: "ac-14"
+          - data: "ac-15"
+          - data: "ac-16"
+          - data: "ac-17"
+          - data: "ac-18"
+          - data: "ac-19"
+        expect:
+          allSucceed: true
+      # Verify all messages are in the stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "ac-00"
+            - "ac-05"
+            - "ac-10"
+            - "ac-15"
+            - "ac-19"
+          upToDate: true
+
+  - id: autoclaim-with-pipelining-json
+    name: autoClaim with pipelining works for JSON streams
+    description: Same test with JSON content type
+    setup:
+      - action: create
+        as: streamPath
+        contentType: application/json
+    operations:
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: autoclaim-json-producer
+        autoClaim: true
+        maxInFlight: 5
+        items:
+          - data: '{"msg": 0}'
+          - data: '{"msg": 1}'
+          - data: '{"msg": 2}'
+          - data: '{"msg": 3}'
+          - data: '{"msg": 4}'
+          - data: '{"msg": 5}'
+          - data: '{"msg": 6}'
+          - data: '{"msg": 7}'
+          - data: '{"msg": 8}'
+          - data: '{"msg": 9}'
+        expect:
+          allSucceed: true
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - '"msg":0'
+            - '"msg":5'
+            - '"msg":9'
+          upToDate: true
+
+  - id: autoclaim-recovers-from-stale-epoch
+    name: autoClaim recovers when fenced by newer epoch
+    description: |
+      If a producer with autoClaim gets 403 (stale epoch) because another instance
+      claimed a higher epoch, it should automatically bump its epoch and retry.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Establish epoch=5 via raw server-append (simulating another producer instance)
+      - action: server-append
+        path: ${streamPath}
+        data: "from-epoch-5"
+        producerId: recovery-producer
+        producerEpoch: 5
+        producerSeq: 0
+        expect:
+          status: 200
+      # Client with autoClaim starts at epoch=0, should get 403 then recover with epoch=6
+      - action: idempotent-append
+        path: ${streamPath}
+        producerId: recovery-producer
+        epoch: 0
+        autoClaim: true
+        data: "recovered"
+        expect:
+          success: true
+      # Verify both messages are in the stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "from-epoch-5"
+            - "recovered"
+          upToDate: true
+
+  - id: autoclaim-recovers-from-403
+    name: Client with autoClaim recovers from 403 fencing
+    description: |
+      When a producer with autoClaim=true gets fenced (403), it should
+      automatically claim the next epoch and retry successfully.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Another producer claims epoch=5
+      - action: server-append
+        path: ${streamPath}
+        data: "fencer"
+        producerId: autoclaim-producer
+        producerEpoch: 5
+        producerSeq: 0
+        expect:
+          status: 200
+      # Now use idempotent-append with autoClaim starting at epoch=0
+      # It should get 403, then retry with epoch=6
+      - action: idempotent-append
+        path: ${streamPath}
+        producerId: autoclaim-producer
+        epoch: 0
+        autoClaim: true
+        data: "recovered"
+        expect:
+          success: true
+      # Verify both messages are in stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "fencer"
+            - "recovered"
+          upToDate: true
+
+  - id: autoclaim-batch-recovers-from-403
+    name: Batch with autoClaim recovers from 403 fencing
+    description: Same test for batch operations
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Another producer claims epoch=3
+      - action: server-append
+        path: ${streamPath}
+        data: "blocker"
+        producerId: batch-autoclaim
+        producerEpoch: 3
+        producerSeq: 0
+        expect:
+          status: 200
+      # Batch with autoClaim should recover
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: batch-autoclaim
+        autoClaim: true
+        maxInFlight: 1
+        items:
+          - data: "batch-msg-0"
+          - data: "batch-msg-1"
+          - data: "batch-msg-2"
+        expect:
+          allSucceed: true
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "blocker"
+            - "batch-msg-0"
+            - "batch-msg-1"
+            - "batch-msg-2"
+          upToDate: true
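Note: the recovery cases fix the epoch-bump rule: a 403 means some other instance holds a newer epoch (5 in the fixtures), and an autoClaim producer must claim the one above it (6) and retry. A TypeScript sketch of that loop; the Producer-* header names and the 403 body's currentEpoch field are assumptions for illustration, not the package's actual wire format (see package/src/protocol.ts):

// Sketch of the autoClaim recovery loop; header names and the 403 body
// shape (currentEpoch) are illustrative assumptions.
async function appendWithAutoClaim(
  url: string,
  producerId: string,
  seq: number,
  data: string,
): Promise<void> {
  let epoch = 0;
  for (let attempt = 0; attempt < 5; attempt++) {
    const res = await fetch(url, {
      method: "POST",
      headers: {
        "Producer-Id": producerId,
        "Producer-Epoch": String(epoch),
        "Producer-Seq": String(seq),
      },
      body: data,
    });
    if (res.status !== 403) return; // 200 = appended, 204 = duplicate
    // Fenced: another instance holds a newer epoch. Claim the next one.
    const body = (await res.json()) as { currentEpoch: number };
    epoch = body.currentEpoch + 1;
  }
  throw new Error("could not claim an epoch after repeated fencing");
}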
package/test-cases/producer/idempotent/batching.yaml
@@ -0,0 +1,98 @@
+id: idempotent-batching
+name: Idempotent Producer - Batching
+description: |
+  Tests for producer batching behavior and header interactions.
+  Batches are atomic - the entire batch shares a single sequence number.
+category: producer
+tags:
+  - idempotent
+  - batching
+
+tests:
+  - id: batch-retry-deduplication
+    name: Retrying entire batch returns 204 for all
+    description: If client retries an entire JSON batch, server should deduplicate
+    setup:
+      - action: create
+        as: streamPath
+        contentType: application/json
+    operations:
+      # Send a batch of 3 items
+      - action: server-append
+        path: ${streamPath}
+        data: '[{"id":1},{"id":2},{"id":3}]'
+        producerId: batch-retry-producer
+        producerEpoch: 0
+        producerSeq: 0
+        expect:
+          status: 200
+      # Retry the exact same batch - should get 204
+      - action: server-append
+        path: ${streamPath}
+        data: '[{"id":1},{"id":2},{"id":3}]'
+        producerId: batch-retry-producer
+        producerEpoch: 0
+        producerSeq: 0
+        expect:
+          status: 204
+          duplicate: true
+      # Verify only 3 items in stream (not 6)
+      - action: read
+        path: ${streamPath}
+        expect:
+          upToDate: true
+      # Next seq should still work
+      - action: server-append
+        path: ${streamPath}
+        data: '[{"id":4}]'
+        producerId: batch-retry-producer
+        producerEpoch: 0
+        producerSeq: 1
+        expect:
+          status: 200
+
+  - id: producer-with-stream-seq
+    name: Producer headers work with Stream-Seq header
+    description: Both producer deduplication and writer coordination should work together
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      - action: server-append
+        path: ${streamPath}
+        data: "msg"
+        producerId: test-producer
+        producerEpoch: 0
+        producerSeq: 0
+        headers:
+          Stream-Seq: "app-seq-001"
+        expect:
+          status: 200
+
+  - id: duplicate-json-different-payload
+    name: Duplicate JSON with different payload returns 204
+    description: Same test for JSON content type - dedup is by headers only
+    setup:
+      - action: create
+        as: streamPath
+        contentType: application/json
+    operations:
+      - action: server-append
+        path: ${streamPath}
+        data: '{"key": "original"}'
+        producerId: test-producer
+        producerEpoch: 0
+        producerSeq: 0
+        expect:
+          status: 200
+      # Retry with different JSON payload
+      - action: server-append
+        path: ${streamPath}
+        data: '{"key": "different", "extra": true}'
+        producerId: test-producer
+        producerEpoch: 0
+        producerSeq: 0
+        expect:
+          status: 204
+          duplicate: true
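Note: taken together, these cases define the dedup rule: the server keys on (producerId, producerEpoch, producerSeq) alone and never compares payload bytes, and a whole batch consumes a single sequence number. A TypeScript sketch of the resulting server-side decision; the state shape and the new-epoch branch are illustrative inferences from the fixtures, not the package's implementation:

// Illustrative dedup decision keyed on headers only, so a retry with a
// different payload still gets 204.
interface ProducerState {
  epoch: number;   // highest epoch seen for this producerId
  lastSeq: number; // last accepted sequence number in that epoch
}

function classifyAppend(s: ProducerState, epoch: number, seq: number): number {
  if (epoch < s.epoch) return 403;       // stale epoch: producer is fenced
  if (epoch > s.epoch) return 200;       // newer epoch claims the producer id
  if (seq <= s.lastSeq) return 204;      // duplicate: ack without re-appending
  if (seq === s.lastSeq + 1) return 200; // next batch in order
  return 409;                            // gap: an earlier request has not landed
}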
package/test-cases/producer/idempotent/concurrent-requests.yaml
@@ -0,0 +1,100 @@
+id: idempotent-concurrent-requests
+name: Idempotent Producer - Concurrent Requests
+description: |
+  Tests for client handling of concurrent HTTP requests (pipelining).
+  When maxInFlight > 1, requests may arrive out of order causing 409 errors.
+  The client must retry and ensure all messages are delivered.
+category: producer
+tags:
+  - idempotent
+  - concurrent
+  - pipelining
+  - retry
+
+tests:
+  - id: concurrent-batches-all-delivered
+    name: All messages delivered with high concurrency
+    description: |
+      When using maxInFlight > 1, HTTP requests may arrive out of order causing 409 errors.
+      The client should retry and ensure all messages are delivered.
+    setup:
+      - action: create
+        as: streamPath
+        contentType: text/plain
+    operations:
+      # Send 20 messages with maxInFlight=5
+      # This forces concurrent batches that may arrive out of order
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: test-producer
+        epoch: 0
+        maxInFlight: 5
+        items:
+          - data: "msg-00"
+          - data: "msg-01"
+          - data: "msg-02"
+          - data: "msg-03"
+          - data: "msg-04"
+          - data: "msg-05"
+          - data: "msg-06"
+          - data: "msg-07"
+          - data: "msg-08"
+          - data: "msg-09"
+          - data: "msg-10"
+          - data: "msg-11"
+          - data: "msg-12"
+          - data: "msg-13"
+          - data: "msg-14"
+          - data: "msg-15"
+          - data: "msg-16"
+          - data: "msg-17"
+          - data: "msg-18"
+          - data: "msg-19"
+        expect:
+          allSucceed: true
+      # Verify all messages are in the stream
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - "msg-00"
+            - "msg-05"
+            - "msg-10"
+            - "msg-15"
+            - "msg-19"
+          upToDate: true
+
+  - id: concurrent-json-batches-all-delivered
+    name: All JSON messages delivered with high concurrency
+    description: Same test with JSON content type to verify JSON batching works with concurrency
+    setup:
+      - action: create
+        as: streamPath
+        contentType: application/json
+    operations:
+      - action: idempotent-append-batch
+        path: ${streamPath}
+        producerId: test-producer
+        epoch: 0
+        maxInFlight: 5
+        items:
+          - data: '{"id": 0}'
+          - data: '{"id": 1}'
+          - data: '{"id": 2}'
+          - data: '{"id": 3}'
+          - data: '{"id": 4}'
+          - data: '{"id": 5}'
+          - data: '{"id": 6}'
+          - data: '{"id": 7}'
+          - data: '{"id": 8}'
+          - data: '{"id": 9}'
+        expect:
+          allSucceed: true
+      - action: read
+        path: ${streamPath}
+        expect:
+          dataContainsAll:
+            - '"id":0'
+            - '"id":5'
+            - '"id":9'
+          upToDate: true