@s2-dev/streamstore 0.16.0 → 0.16.2
This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/LICENSE +201 -0
- package/README.md +1 -2
- package/dist/accessTokens.d.ts +37 -0
- package/dist/accessTokens.d.ts.map +1 -0
- package/dist/accessTokens.js +74 -0
- package/dist/accessTokens.js.map +1 -0
- package/dist/basin.d.ts +26 -0
- package/dist/basin.d.ts.map +1 -0
- package/dist/basin.js +34 -0
- package/dist/basin.js.map +1 -0
- package/dist/basins.d.ts +53 -0
- package/dist/basins.d.ts.map +1 -0
- package/dist/basins.js +115 -0
- package/dist/basins.js.map +1 -0
- package/dist/common.d.ts +44 -0
- package/dist/common.d.ts.map +1 -0
- package/dist/common.js +2 -0
- package/dist/common.js.map +1 -0
- package/dist/error.d.ts +28 -0
- package/dist/error.d.ts.map +1 -0
- package/dist/error.js +43 -0
- package/dist/error.js.map +1 -0
- package/dist/generated/client/client.gen.d.ts +3 -0
- package/dist/generated/client/client.gen.d.ts.map +1 -0
- package/dist/generated/client/client.gen.js +205 -0
- package/dist/generated/client/client.gen.js.map +1 -0
- package/dist/generated/client/index.d.ts +9 -0
- package/dist/generated/client/index.d.ts.map +1 -0
- package/dist/generated/client/index.js +7 -0
- package/dist/generated/client/index.js.map +1 -0
- package/dist/generated/client/types.gen.d.ts +125 -0
- package/dist/generated/client/types.gen.d.ts.map +1 -0
- package/dist/generated/client/types.gen.js +3 -0
- package/dist/generated/client/types.gen.js.map +1 -0
- package/dist/generated/client/utils.gen.d.ts +34 -0
- package/dist/generated/client/utils.gen.d.ts.map +1 -0
- package/dist/generated/client/utils.gen.js +231 -0
- package/dist/generated/client/utils.gen.js.map +1 -0
- package/{src/generated/client.gen.ts → dist/generated/client.gen.d.ts} +3 -8
- package/dist/generated/client.gen.d.ts.map +1 -0
- package/dist/generated/client.gen.js +6 -0
- package/dist/generated/client.gen.js.map +1 -0
- package/dist/generated/core/auth.gen.d.ts +19 -0
- package/dist/generated/core/auth.gen.d.ts.map +1 -0
- package/dist/generated/core/auth.gen.js +15 -0
- package/dist/generated/core/auth.gen.js.map +1 -0
- package/dist/generated/core/bodySerializer.gen.d.ts +18 -0
- package/dist/generated/core/bodySerializer.gen.d.ts.map +1 -0
- package/dist/generated/core/bodySerializer.gen.js +58 -0
- package/dist/generated/core/bodySerializer.gen.js.map +1 -0
- package/dist/generated/core/params.gen.d.ts +34 -0
- package/dist/generated/core/params.gen.d.ts.map +1 -0
- package/dist/generated/core/params.gen.js +89 -0
- package/dist/generated/core/params.gen.js.map +1 -0
- package/dist/generated/core/pathSerializer.gen.d.ts +34 -0
- package/dist/generated/core/pathSerializer.gen.d.ts.map +1 -0
- package/dist/generated/core/pathSerializer.gen.js +115 -0
- package/dist/generated/core/pathSerializer.gen.js.map +1 -0
- package/dist/generated/core/queryKeySerializer.gen.d.ts +19 -0
- package/dist/generated/core/queryKeySerializer.gen.d.ts.map +1 -0
- package/dist/generated/core/queryKeySerializer.gen.js +100 -0
- package/dist/generated/core/queryKeySerializer.gen.js.map +1 -0
- package/dist/generated/core/serverSentEvents.gen.d.ts +72 -0
- package/dist/generated/core/serverSentEvents.gen.d.ts.map +1 -0
- package/dist/generated/core/serverSentEvents.gen.js +136 -0
- package/dist/generated/core/serverSentEvents.gen.js.map +1 -0
- package/dist/generated/core/types.gen.d.ts +79 -0
- package/dist/generated/core/types.gen.d.ts.map +1 -0
- package/dist/generated/core/types.gen.js +3 -0
- package/dist/generated/core/types.gen.js.map +1 -0
- package/dist/generated/core/utils.gen.d.ts +20 -0
- package/dist/generated/core/utils.gen.d.ts.map +1 -0
- package/dist/generated/core/utils.gen.js +88 -0
- package/dist/generated/core/utils.gen.js.map +1 -0
- package/dist/generated/index.d.ts +3 -0
- package/dist/generated/index.d.ts.map +1 -0
- package/{src/generated/index.ts → dist/generated/index.js} +1 -2
- package/dist/generated/index.js.map +1 -0
- package/dist/generated/sdk.gen.d.ts +100 -0
- package/dist/generated/sdk.gen.d.ts.map +1 -0
- package/dist/generated/sdk.gen.js +350 -0
- package/dist/generated/sdk.gen.js.map +1 -0
- package/{src/generated/types.gen.ts → dist/generated/types.gen.d.ts} +1 -158
- package/dist/generated/types.gen.d.ts.map +1 -0
- package/dist/generated/types.gen.js +3 -0
- package/dist/generated/types.gen.js.map +1 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/event-stream.d.ts +26 -0
- package/dist/lib/event-stream.d.ts.map +1 -0
- package/dist/lib/event-stream.js +138 -0
- package/dist/lib/event-stream.js.map +1 -0
- package/dist/metrics.d.ts +44 -0
- package/dist/metrics.d.ts.map +1 -0
- package/dist/metrics.js +83 -0
- package/dist/metrics.js.map +1 -0
- package/dist/s2.d.ts +38 -0
- package/dist/s2.d.ts.map +1 -0
- package/dist/s2.js +56 -0
- package/dist/s2.js.map +1 -0
- package/dist/stream.d.ts +156 -0
- package/dist/stream.d.ts.map +1 -0
- package/dist/stream.js +598 -0
- package/dist/stream.js.map +1 -0
- package/dist/streams.d.ts +52 -0
- package/dist/streams.d.ts.map +1 -0
- package/dist/streams.js +114 -0
- package/dist/streams.js.map +1 -0
- package/dist/utils.d.ts +20 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +52 -0
- package/dist/utils.js.map +1 -0
- package/package.json +13 -4
- package/.changeset/README.md +0 -8
- package/.changeset/config.json +0 -11
- package/.claude/settings.local.json +0 -9
- package/.github/workflows/ci.yml +0 -59
- package/.github/workflows/publish.yml +0 -35
- package/CHANGELOG.md +0 -7
- package/biome.json +0 -30
- package/bun.lock +0 -598
- package/examples/append.ts +0 -84
- package/examples/kitchen-sink.ts +0 -73
- package/examples/read.ts +0 -30
- package/openapi-ts.config.ts +0 -7
- package/src/accessTokens.ts +0 -100
- package/src/basin.ts +0 -43
- package/src/basins.ts +0 -154
- package/src/common.ts +0 -45
- package/src/error.ts +0 -58
- package/src/generated/client/client.gen.ts +0 -268
- package/src/generated/client/index.ts +0 -26
- package/src/generated/client/types.gen.ts +0 -268
- package/src/generated/client/utils.gen.ts +0 -331
- package/src/generated/core/auth.gen.ts +0 -42
- package/src/generated/core/bodySerializer.gen.ts +0 -92
- package/src/generated/core/params.gen.ts +0 -153
- package/src/generated/core/pathSerializer.gen.ts +0 -181
- package/src/generated/core/queryKeySerializer.gen.ts +0 -136
- package/src/generated/core/serverSentEvents.gen.ts +0 -264
- package/src/generated/core/types.gen.ts +0 -118
- package/src/generated/core/utils.gen.ts +0 -143
- package/src/generated/sdk.gen.ts +0 -387
- package/src/index.ts +0 -66
- package/src/lib/event-stream.ts +0 -167
- package/src/metrics.ts +0 -106
- package/src/s2.ts +0 -65
- package/src/stream.ts +0 -791
- package/src/streams.ts +0 -156
- package/src/tests/appendSession.test.ts +0 -149
- package/src/tests/batcher-session.integration.test.ts +0 -80
- package/src/tests/batcher.test.ts +0 -216
- package/src/tests/index.test.ts +0 -7
- package/src/utils.ts +0 -80
- package/tsconfig.build.json +0 -10
- package/tsconfig.json +0 -31
package/src/stream.ts
DELETED
@@ -1,791 +0,0 @@
-import { Base64 } from "js-base64";
-import type { S2RequestOptions } from "./common";
-import { S2Error } from "./error";
-import {
-  type AppendAck,
-  append,
-  checkTail,
-  type AppendInput as GeneratedAppendInput,
-  type AppendRecord as GeneratedAppendRecord,
-  type ReadBatch as GeneratedReadBatch,
-  type SequencedRecord as GeneratedSequencedRecord,
-  type ReadData,
-  read,
-  type StreamPosition,
-} from "./generated";
-import type { Client } from "./generated/client/types.gen";
-import { EventStream } from "./lib/event-stream";
-
-export class S2Stream {
-  private readonly client: Client;
-
-  public readonly name: string;
-
-  constructor(name: string, client: Client) {
-    this.name = name;
-    this.client = client;
-  }
-
-  /**
-   * Check the tail of the stream.
-   *
-   * Returns the next sequence number and timestamp to be assigned (`tail`).
-   */
-  public async checkTail(options?: S2RequestOptions) {
-    const response = await checkTail({
-      client: this.client,
-      path: {
-        stream: this.name,
-      },
-      ...options,
-    });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
-  }
-
-  /**
-   * Read records from the stream.
-   *
-   * - When `as: "bytes"` is provided, bodies and headers are decoded from base64 to `Uint8Array`.
-   * - Supports starting position by `seq_num`, `timestamp`, or `tail_offset` and can clamp to the tail.
-   * - Non-streaming reads are bounded by `count` and `bytes` (defaults 1000 and 1 MiB).
-   * - Use `readSession` for streaming reads.
-   */
-  public async read<Format extends "string" | "bytes" = "string">(
-    args?: ReadArgs<Format>,
-    options?: S2RequestOptions,
-  ): Promise<ReadBatch<Format>> {
-    const { as, ...queryParams } = args ?? {};
-    const response = await read({
-      client: this.client,
-      path: {
-        stream: this.name,
-      },
-      headers: {
-        ...(as === "bytes" ? { "s2-format": "base64" } : {}),
-      },
-      query: queryParams,
-      ...options,
-    });
-    if (response.error) {
-      if ("message" in response.error) {
-        throw new S2Error({
-          message: response.error.message,
-          code: response.error.code ?? undefined,
-          status: response.response.status,
-        });
-      } else {
-        // special case for 416 - Range Not Satisfiable
-        throw new S2Error({
-          message:
-            "Range not satisfiable: requested position is beyond the stream tail. Use 'clamp: true' to start from the tail instead.",
-          status: response.response.status,
-          data: response.error,
-        });
-      }
-    }
-
-    if (args?.as === "bytes") {
-      const res: ReadBatch<"bytes"> = {
-        ...response.data,
-        records: response.data.records?.map((record) => ({
-          ...record,
-          body: record.body ? Base64.toUint8Array(record.body) : undefined,
-          headers: record.headers?.map(
-            (header) =>
-              header.map((h) => Base64.toUint8Array(h)) as [
-                Uint8Array,
-                Uint8Array,
-              ],
-          ),
-        })),
-      };
-      return res as ReadBatch<Format>;
-    } else {
-      const res: ReadBatch<"string"> = response.data;
-      return res as ReadBatch<Format>;
-    }
-  }
-  /**
-   * Append one or more records to the stream.
-   *
-   * - Automatically base64-encodes when any body or header is a `Uint8Array`.
-   * - Supports conditional appends via `fencing_token` and `match_seq_num`.
-   * - Returns the acknowledged range and the stream tail after the append.
-   */
-  public async append(
-    records: AppendRecord | AppendRecord[],
-    args?: Omit<AppendArgs, "records">,
-    options?: S2RequestOptions,
-  ): Promise<AppendAck> {
-    const recordsArray = Array.isArray(records) ? records : [records];
-    const normalizeHeaders = (
-      headers: AppendRecord["headers"],
-    ): [string | Uint8Array, string | Uint8Array][] | undefined => {
-      if (headers === undefined) {
-        return undefined;
-      } else if (Array.isArray(headers)) {
-        return headers;
-      } else {
-        return Object.entries(headers).map(([key, value]) => [key, value]);
-      }
-    };
-
-    const recordsWithNormalizedHeaders = recordsArray.map((record) => ({
-      ...record,
-      headers: normalizeHeaders(record.headers),
-    }));
-
-    const hasBytes =
-      recordsWithNormalizedHeaders.some(
-        (record) => record.body instanceof Uint8Array,
-      ) ||
-      recordsWithNormalizedHeaders.some((record) =>
-        record.headers?.some(
-          (header) =>
-            header[0] instanceof Uint8Array || header[1] instanceof Uint8Array,
-        ),
-      );
-
-    const encodedRecords = recordsWithNormalizedHeaders.map((record) => ({
-      ...record,
-      body:
-        record.body instanceof Uint8Array
-          ? Base64.fromUint8Array(record.body)
-          : hasBytes && record.body
-            ? Base64.fromUint8Array(new TextEncoder().encode(record.body))
-            : record.body,
-      headers: record.headers?.map(
-        (header) =>
-          header.map((h) =>
-            h instanceof Uint8Array
-              ? Base64.fromUint8Array(h)
-              : hasBytes
-                ? Base64.fromUint8Array(new TextEncoder().encode(h))
-                : h,
-          ) as [string, string],
-      ),
-    }));
-
-    const response = await append({
-      client: this.client,
-      path: {
-        stream: this.name,
-      },
-      body: {
-        ...args,
-        records: encodedRecords,
-      },
-      headers: {
-        ...(hasBytes ? { "s2-format": "base64" } : {}),
-      },
-      ...options,
-    });
-    if (response.error) {
-      if ("message" in response.error) {
-        throw new S2Error({
-          message: response.error.message,
-          code: response.error.code ?? undefined,
-          status: response.response.status,
-        });
-      } else {
-        // special case for 412
-        throw new S2Error({
-          message: "Append condition failed",
-          status: response.response.status,
-          data: response.error,
-        });
-      }
-    }
-    return response.data;
-  }
-  /**
-   * Open a streaming read session.
-   *
-   * Use the returned session as an async iterable or as a readable stream.
-   * When `as: "bytes"` is provided, bodies and headers are decoded to `Uint8Array`.
-   */
-  public async readSession<Format extends "string" | "bytes" = "string">(
-    args?: ReadArgs<Format>,
-    options?: S2RequestOptions,
-  ): Promise<ReadSession<Format>> {
-    return await ReadSession.create(this.client, this.name, args, options);
-  }
-  /**
-   * Create an append session that guarantees ordering of submissions.
-   *
-   * Use this to coordinate high-throughput, sequential appends with backpressure.
-   */
-  public async appendSession(
-    options?: S2RequestOptions,
-  ): Promise<AppendSession> {
-    return await AppendSession.create(this, options);
-  }
-}
-
-export type Header<Format extends "string" | "bytes" = "string"> =
-  Format extends "string" ? [string, string] : [Uint8Array, Uint8Array];
-
-export type ReadBatch<Format extends "string" | "bytes" = "string"> = Omit<
-  GeneratedReadBatch,
-  "records"
-> & {
-  records?: Array<SequencedRecord<Format>>;
-};
-
-export type SequencedRecord<Format extends "string" | "bytes" = "string"> =
-  Omit<GeneratedSequencedRecord, "body" | "headers"> & {
-    body?: Format extends "string" ? string : Uint8Array;
-    headers?: Array<Header<Format>>;
-  };
-
-export type ReadArgs<Format extends "string" | "bytes" = "string"> =
-  ReadData["query"] & {
-    as?: Format;
-  };
-
-export type AppendRecord = Omit<GeneratedAppendRecord, "body" | "headers"> & {
-  body?: string | Uint8Array;
-  headers?:
-    | Array<[string | Uint8Array, string | Uint8Array]>
-    | Record<string, string | Uint8Array>;
-};
-
-export type AppendArgs = Omit<GeneratedAppendInput, "records"> & {
-  records: Array<AppendRecord>;
-};
-
-class ReadSession<
-  Format extends "string" | "bytes" = "string",
-> extends EventStream<SequencedRecord<Format>> {
-  static async create<Format extends "string" | "bytes" = "string">(
-    client: Client,
-    name: string,
-    args?: ReadArgs<Format>,
-    options?: S2RequestOptions,
-  ) {
-    const { as, ...queryParams } = args ?? {};
-    const response = await read({
-      client,
-      path: {
-        stream: name,
-      },
-      headers: {
-        accept: "text/event-stream",
-        ...(as === "bytes" ? { "s2-format": "base64" } : {}),
-      },
-      query: queryParams,
-      parseAs: "stream",
-      ...options,
-    });
-    if (response.error) {
-      if ("message" in response.error) {
-        throw new S2Error({
-          message: response.error.message,
-          code: response.error.code ?? undefined,
-          status: response.response.status,
-        });
-      } else {
-        // special case for 416 - Range Not Satisfiable
-        throw new S2Error({
-          message:
-            "Range not satisfiable: requested position is beyond the stream tail. Use 'clamp: true' to start from the tail instead.",
-          status: response.response.status,
-          data: response.error,
-        });
-      }
-    }
-    if (!response.response.body) {
-      throw new S2Error({
-        message: "No body in SSE response",
-      });
-    }
-    return new ReadSession(response.response.body, args?.as ?? "string");
-  }
-
-  private _streamPosition: StreamPosition | undefined = undefined;
-
-  private constructor(stream: ReadableStream<Uint8Array>, format: Format) {
-    super(stream, (msg) => {
-      // Parse SSE events according to the S2 protocol
-      if (msg.event === "batch" && msg.data) {
-        const batch: ReadBatch<Format> = JSON.parse(msg.data);
-        // If format is bytes, decode base64 to Uint8Array
-        if (format === "bytes") {
-          for (const record of batch.records ?? []) {
-            if (record.body && typeof record.body === "string") {
-              (record as any).body = Base64.toUint8Array(record.body);
-            }
-            if (record.headers) {
-              (record as any).headers = record.headers.map((header) =>
-                header.map((h) =>
-                  typeof h === "string" ? Base64.toUint8Array(h) : h,
-                ),
-              );
-            }
-          }
-        }
-        if (batch.tail) {
-          this._streamPosition = batch.tail;
-        }
-        return { done: false, batch: true, value: batch.records ?? [] };
-      }
-      if (msg.event === "error") {
-        // Handle error events
-        throw new S2Error({ message: msg.data ?? "Unknown error" });
-      }
-
-      // Skip ping events and other events
-      return { done: false };
-    });
-  }
-
-  public get streamPosition() {
-    return this._streamPosition;
-  }
-}
-
-class AcksStream extends ReadableStream<AppendAck> implements AsyncDisposable {
-  constructor(
-    setController: (
-      controller: ReadableStreamDefaultController<AppendAck>,
-    ) => void,
-  ) {
-    super({
-      start: (controller) => {
-        setController(controller);
-      },
-    });
-  }
-
-  async [Symbol.asyncDispose]() {
-    await this.cancel("disposed");
-  }
-
-  // Polyfill for older browsers
-  [Symbol.asyncIterator](): AsyncIterableIterator<AppendAck> {
-    const fn = (ReadableStream.prototype as any)[Symbol.asyncIterator];
-    if (typeof fn === "function") return fn.call(this);
-    const reader = this.getReader();
-    return {
-      next: async () => {
-        const r = await reader.read();
-        if (r.done) {
-          reader.releaseLock();
-          return { done: true, value: undefined };
-        }
-        return { done: false, value: r.value };
-      },
-      throw: async (e) => {
-        await reader.cancel(e);
-        reader.releaseLock();
-        return { done: true, value: undefined };
-      },
-      return: async () => {
-        await reader.cancel("done");
-        reader.releaseLock();
-        return { done: true, value: undefined };
-      },
-      [Symbol.asyncIterator]() {
-        return this;
-      },
-    };
-  }
-}
-
-interface BatcherArgs {
-  /** Duration in milliseconds to wait before flushing a batch (default: 5ms) */
-  lingerDuration?: number;
-  /** Maximum number of records in a batch (default: 1000) */
-  maxBatchSize?: number;
-  /** Optional fencing token to enforce (remains static across batches) */
-  fencing_token?: string;
-  /** Optional sequence number to match for first batch (auto-increments for subsequent batches) */
-  match_seq_num?: number;
-}
-
-/**
- * Batches individual records and submits them to an AppendSession.
- * Handles linger duration, batch size limits, and auto-incrementing match_seq_num.
- */
-class Batcher
-  extends WritableStream<AppendRecord | AppendRecord[]>
-  implements AsyncDisposable
-{
-  private session: AppendSession;
-  private currentBatch: AppendRecord[] = [];
-  private currentBatchResolvers: Array<{
-    resolve: (ack: AppendAck) => void;
-    reject: (error: any) => void;
-  }> = [];
-  private lingerTimer: ReturnType<typeof setTimeout> | null = null;
-  private closed = false;
-  private readonly maxBatchSize: number;
-  private readonly lingerDuration: number;
-  private readonly fencing_token?: string;
-  private next_match_seq_num?: number;
-
-  constructor(session: AppendSession, args?: BatcherArgs) {
-    let writableController: WritableStreamDefaultController;
-
-    super({
-      start: (controller) => {
-        writableController = controller;
-      },
-      write: (chunk) => {
-        const records = Array.isArray(chunk) ? chunk : [chunk];
-        this.submit(records);
-      },
-      close: () => {
-        this.closed = true;
-        this.flush();
-        this.cleanup();
-      },
-      abort: (reason) => {
-        this.closed = true;
-
-        // Reject all pending promises in the current batch
-        const error = new S2Error({
-          message: `Batcher was aborted: ${reason}`,
-        });
-        for (const resolver of this.currentBatchResolvers) {
-          resolver.reject(error);
-        }
-
-        this.currentBatch = [];
-        this.currentBatchResolvers = [];
-        this.cleanup();
-      },
-    });
-
-    this.session = session;
-    this.maxBatchSize = args?.maxBatchSize ?? 1000;
-    this.lingerDuration = args?.lingerDuration ?? 5;
-    this.fencing_token = args?.fencing_token;
-    this.next_match_seq_num = args?.match_seq_num;
-  }
-
-  async [Symbol.asyncDispose]() {
-    await this.close();
-  }
-
-  /**
-   * Submit one or more records to be batched.
-   * For array submits, the entire array is treated as an atomic unit and will never be split across batches.
-   * If it doesn't fit in the current batch, the current batch is flushed and the array is queued in the next batch.
-   * Returns a promise that resolves when the batch containing these records is acknowledged.
-   */
-  submit(records: AppendRecord | AppendRecord[]): Promise<AppendAck> {
-    if (this.closed) {
-      return Promise.reject(new S2Error({ message: "Batcher is closed" }));
-    }
-
-    return new Promise((resolve, reject) => {
-      const recordsArray = Array.isArray(records) ? records : [records];
-      const isArraySubmit = Array.isArray(records) && records.length > 1;
-
-      // Start linger timer on first record added to an empty batch
-      if (this.currentBatch.length === 0 && this.lingerDuration > 0) {
-        this.startLingerTimer();
-      }
-
-      if (isArraySubmit) {
-        // Treat the entire array as atomic: if it doesn't fit, flush current batch first
-        if (
-          this.currentBatch.length > 0 &&
-          this.currentBatch.length + recordsArray.length > this.maxBatchSize
-        ) {
-          this.flush();
-          // After flush, if linger is enabled, restart the timer for the new batch
-          if (this.lingerDuration > 0) {
-            this.startLingerTimer();
-          }
-        }
-
-        // Add the entire array (even if it exceeds maxBatchSize) as a single batch unit
-        this.currentBatch.push(...recordsArray);
-        this.currentBatchResolvers.push({ resolve, reject });
-        // Do not auto-flush here; allow linger timer or explicit flush to send the batch
-      } else {
-        // Single record submit — normal behavior
-        if (this.currentBatch.length >= this.maxBatchSize) {
-          this.flush();
-          if (this.lingerDuration > 0) {
-            this.startLingerTimer();
-          }
-        }
-        this.currentBatch.push(recordsArray[0]!);
-        this.currentBatchResolvers.push({ resolve, reject });
-        if (this.currentBatch.length >= this.maxBatchSize) {
-          this.flush();
-        }
-      }
-    });
-  }
-
-  /**
-   * Flush the current batch to the session.
-   */
-  flush(): void {
-    this.cancelLingerTimer();
-
-    if (this.currentBatch.length === 0) {
-      return;
-    }
-
-    const args: AppendArgs = {
-      records: this.currentBatch,
-      fencing_token: this.fencing_token,
-      match_seq_num: this.next_match_seq_num,
-    };
-
-    // Auto-increment match_seq_num for next batch
-    if (this.next_match_seq_num !== undefined) {
-      this.next_match_seq_num += this.currentBatch.length;
-    }
-
-    // Capture resolvers for this batch
-    const batchResolvers = this.currentBatchResolvers;
-    this.currentBatchResolvers = [];
-    this.currentBatch = [];
-
-    // Submit to session and handle promise
-    const promise = this.session.submit(args.records, {
-      fencing_token: args.fencing_token,
-      match_seq_num: args.match_seq_num,
-    });
-
-    // Resolve/reject all resolvers for this batch when the ack comes back
-    promise.then(
-      (ack) => {
-        for (const resolver of batchResolvers) {
-          resolver.resolve(ack);
-        }
-      },
-      (error) => {
-        for (const resolver of batchResolvers) {
-          resolver.reject(error);
-        }
-      },
-    );
-  }
-
-  private startLingerTimer(): void {
-    this.cancelLingerTimer();
-
-    this.lingerTimer = setTimeout(() => {
-      this.lingerTimer = null;
-      if (!this.closed && this.currentBatch.length > 0) {
-        this.flush();
-      }
-    }, this.lingerDuration);
-  }
-
-  private cancelLingerTimer(): void {
-    if (this.lingerTimer) {
-      clearTimeout(this.lingerTimer);
-      this.lingerTimer = null;
-    }
-  }
-
-  private cleanup(): void {
-    this.cancelLingerTimer();
-  }
-}
-
-/**
- * Session for appending records to a stream.
- * Queues append requests and ensures only one is in-flight at a time.
- */
-class AppendSession
-  extends WritableStream<AppendArgs>
-  implements AsyncDisposable
-{
-  private _lastSeenPosition: AppendAck | undefined = undefined;
-  private queue: AppendArgs[] = [];
-  private pendingResolvers: Array<{
-    resolve: (ack: AppendAck) => void;
-    reject: (error: any) => void;
-  }> = [];
-  private inFlight = false;
-  private readonly options?: S2RequestOptions;
-  private readonly stream: S2Stream;
-  private acksController:
-    | ReadableStreamDefaultController<AppendAck>
-    | undefined;
-  private _acksStream: AcksStream | undefined;
-  private closed = false;
-  private processingPromise: Promise<void> | null = null;
-
-  static async create(stream: S2Stream, options?: S2RequestOptions) {
-    return new AppendSession(stream, options);
-  }
-
-  private constructor(stream: S2Stream, options?: S2RequestOptions) {
-    let writableController: WritableStreamDefaultController;
-
-    super({
-      start: (controller) => {
-        writableController = controller;
-      },
-      write: (chunk) => {
-        this.submit(chunk.records, {
-          fencing_token: chunk.fencing_token,
-          match_seq_num: chunk.match_seq_num,
-        });
-      },
-      close: async () => {
-        this.closed = true;
-        await this.waitForDrain();
-      },
-      abort: async (reason) => {
-        this.closed = true;
-        this.queue = [];
-
-        // Reject all pending promises
-        const error = new S2Error({
-          message: `AppendSession was aborted: ${reason}`,
-        });
-        for (const resolver of this.pendingResolvers) {
-          resolver.reject(error);
-        }
-        this.pendingResolvers = [];
-      },
-    });
-    this.options = options;
-    this.stream = stream;
-  }
-
-  async [Symbol.asyncDispose]() {
-    await this.close();
-  }
-
-  /**
-   * Create a batcher that batches individual records and submits them to this session.
-   */
-  makeBatcher(args?: BatcherArgs): Batcher {
-    return new Batcher(this, args);
-  }
-
-  /**
-   * Get a stream of acknowledgements for appends.
-   */
-  acks(): AcksStream {
-    if (!this._acksStream) {
-      this._acksStream = new AcksStream((controller) => {
-        this.acksController = controller;
-      });
-    }
-    return this._acksStream;
-  }
-
-  /**
-   * Submit an append request to the session.
-   * The request will be queued and sent when no other request is in-flight.
-   * Returns a promise that resolves when the append is acknowledged or rejects on error.
-   */
-  submit(
-    records: AppendRecord | AppendRecord[],
-    args?: Omit<AppendArgs, "records">,
-  ): Promise<AppendAck> {
-    if (this.closed) {
-      return Promise.reject(
-        new S2Error({ message: "AppendSession is closed" }),
-      );
-    }
-
-    return new Promise((resolve, reject) => {
-      this.queue.push({
-        records: Array.isArray(records) ? records : [records],
-        ...args,
-      });
-      this.pendingResolvers.push({ resolve, reject });
-
-      // Start processing if not already running
-      if (!this.processingPromise) {
-        this.processingPromise = this.processLoop();
-      }
-    });
-  }
-
-  /**
-   * Main processing loop that sends queued requests one at a time.
-   */
-  private async processLoop(): Promise<void> {
-    while (!this.closed && this.queue.length > 0) {
-      this.inFlight = true;
-      const args = this.queue.shift()!;
-      const resolver = this.pendingResolvers.shift()!;
-
-      try {
-        const ack = await this.stream.append(
-          args.records,
-          {
-            fencing_token: args.fencing_token,
-            match_seq_num: args.match_seq_num,
-          },
-          this.options,
-        );
-        this._lastSeenPosition = ack;
-
-        // Emit ack to the acks stream if it exists
-        if (this.acksController) {
-          this.acksController.enqueue(ack);
-        }
-
-        // Resolve the promise for this request
-        resolver.resolve(ack);
-      } catch (error) {
-        this.inFlight = false;
-        this.processingPromise = null;
-
-        // Reject the promise for this request
-        resolver.reject(error);
-
-        // Reject all remaining pending promises
-        for (const pendingResolver of this.pendingResolvers) {
-          pendingResolver.reject(error);
-        }
-        this.pendingResolvers = [];
-
-        // Clear the queue
-        this.queue = [];
-
-        // Do not rethrow here to avoid unhandled rejection; callers already received rejection
-      }
-
-      this.inFlight = false;
-    }
-
-    this.processingPromise = null;
-  }
-
-  private async waitForDrain(): Promise<void> {
-    // Wait for processing to complete
-    if (this.processingPromise) {
-      await this.processingPromise;
-    }
-
-    // Wait until queue is empty and nothing is in flight
-    while (this.queue.length > 0 || this.inFlight) {
-      await new Promise((resolve) => setTimeout(resolve, 10));
-    }
-
-    // Close the acks stream if it exists
-    if (this.acksController) {
-      this.acksController.close();
-    }
-  }
-
-  get lastSeenPosition() {
-    return this._lastSeenPosition;
-  }
-}
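
For orientation: the S2Stream class deleted from src/ above still ships compiled (see dist/stream.js and dist/stream.d.ts in the file list), so the API it defines remains the package's public surface. A minimal usage sketch follows; the entry-point import path and the Client setup are assumptions, since this diff does not show the package's export map:

// Sketch only, not part of the diff. Import paths and client construction
// are assumed; query and record field names are taken from the doc comments
// and types in the deleted source above.
import { S2Stream } from "@s2-dev/streamstore";
import type { Client } from "@s2-dev/streamstore";

declare const client: Client; // a configured generated HTTP client

async function demo(): Promise<void> {
  const stream = new S2Stream("my-stream", client);

  // String append: nothing is a Uint8Array, so records are sent as-is.
  const ack = await stream.append({ body: "hello" });
  console.log("appended", ack);

  // Binary append: a single Uint8Array body or header switches the whole
  // request to base64 and sets the "s2-format: base64" header.
  await stream.append({ body: new TextEncoder().encode("binary payload") });

  // Bounded read from the start of the stream; `as: "bytes"` decodes
  // bodies and headers back into Uint8Array values.
  const batch = await stream.read({ seq_num: 0, as: "bytes" });
  for (const record of batch.records ?? []) {
    console.log(record.body); // Uint8Array | undefined
  }

  // Streaming read over SSE; per its docs the session is async-iterable.
  const session = await stream.readSession({ seq_num: 0 });
  for await (const record of session) {
    console.log(record.body); // string | undefined
    break; // stop after the first record in this sketch
  }
}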
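
The deleted AppendSession and Batcher classes are reachable through that same surface via appendSession() and makeBatcher(). A sketch of ordered, batched appends, again using only the signatures visible in this diff:

// Sketch only, not part of the diff. `stream` is an S2Stream as above.
async function sessionDemo(stream: S2Stream): Promise<void> {
  // Submissions are queued and sent one at a time, preserving order.
  const session = await stream.appendSession();

  // Drain acknowledgements concurrently; AcksStream is async-iterable.
  const ackLog = (async () => {
    for await (const ack of session.acks()) {
      console.log("acked", ack);
    }
  })();

  // Collect single-record submits into batches: flush after 10 ms of linger
  // or at 100 records, whichever comes first. fencing_token and
  // match_seq_num could also be passed here for conditional appends
  // (match_seq_num auto-increments batch over batch).
  const batcher = session.makeBatcher({ lingerDuration: 10, maxBatchSize: 100 });

  // All three records land in one batch and resolve with the same ack.
  const acks = await Promise.all(
    ["a", "b", "c"].map((body) => batcher.submit({ body })),
  );
  console.log(acks.length); // 3

  await batcher.close(); // flushes anything still pending
  await session.close(); // drains the queue and closes the acks stream
  await ackLog;
}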