@livestore/sync-s2 0.0.0-snapshot-39b490a4d9054515b0012244297c45505059cf72 → 0.0.0-snapshot-4b42ef6ef0c52dda5b8633a10addf45134e4cafe

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/limits.ts ADDED
@@ -0,0 +1,135 @@
+ import type { LiveStoreEvent } from '@livestore/common/schema'
+ import { splitChunkBySize } from '@livestore/common/sync'
+ import { Chunk, Effect, Schema } from '@livestore/utils/effect'
+
+ const textEncoder = new TextEncoder()
+
+ /**
+  * Maximum metered size of a single record (docs: https://s2.dev/docs/limits#records).
+  */
+ export const MAX_RECORD_METERED_BYTES = 1_048_576 // 1 MiB
+
+ /**
+  * Maximum combined metered size of a batch append (docs: https://s2.dev/docs/limits#records).
+  */
+ export const MAX_BATCH_METERED_BYTES = 1_048_576 // 1 MiB
+
+ /**
+  * Maximum number of records per append (docs: https://s2.dev/docs/limits#records).
+  */
+ export const MAX_RECORDS_PER_BATCH = 1_000
+
+ const LimitType = Schema.Literal('record-metered-bytes', 'batch-metered-bytes', 'batch-count')
+
+ export class S2LimitExceededError extends Schema.TaggedError<S2LimitExceededError>()('S2LimitExceededError', {
+   limitType: LimitType,
+   max: Schema.Number,
+   actual: Schema.Number,
+   recordIndex: Schema.optional(Schema.Number),
+ }) {}
+
+ export interface AppendRecordBody {
+   readonly body?: string
+   readonly headers?: ReadonlyArray<{ readonly name: string; readonly value: string }>
+ }
+
+ // S2 measures bodies/headers in UTF‑8 bytes; centralising this helper keeps the
+ // formula readable and consistent with the docs.
+ const utf8ByteLength = (value: string): number => textEncoder.encode(value).byteLength
+
+ export const computeRecordMeteredBytes = (record: AppendRecordBody): number => {
+   const headers = record.headers ?? []
+   const headerCount = headers.length
+   const headerBytes = headers.reduce(
+     (acc, header) => acc + utf8ByteLength(header.name) + utf8ByteLength(header.value),
+     0,
+   )
+   const bodyBytes = record.body === undefined ? 0 : utf8ByteLength(record.body)
+   return 8 + 2 * headerCount + headerBytes + bodyBytes
+ }
+
+ export const computeBatchMeteredBytes = (records: ReadonlyArray<AppendRecordBody>): number =>
+   records.reduce((acc, record) => acc + computeRecordMeteredBytes(record), 0)
+
+ interface PreparedEvent {
+   readonly event: LiveStoreEvent.AnyEncodedGlobal
+   readonly record: AppendRecordBody
+   readonly meteredBytes: number
+   readonly index: number
+ }
+
+ export interface S2Chunk {
+   readonly events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>
+   readonly records: ReadonlyArray<AppendRecordBody>
+   readonly meteredBytes: number
+ }
+
+ // Pre-stringify events and pre-compute per-record metered bytes so we only pay
+ // the JSON cost once when chunking large batches.
+ const convertEventsToPrepared = (events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>): PreparedEvent[] =>
+   events.map((event, index) => {
+     const body = JSON.stringify(event)
+     const record: AppendRecordBody = { body }
+     const meteredBytes = computeRecordMeteredBytes(record)
+
+     if (meteredBytes > MAX_RECORD_METERED_BYTES) {
+       throw new S2LimitExceededError({
+         limitType: 'record-metered-bytes',
+         max: MAX_RECORD_METERED_BYTES,
+         actual: meteredBytes,
+         recordIndex: index,
+       })
+     }
+
+     return { event, record, meteredBytes, index }
+   })
+
+ // Summarises a chunk’s metered bytes. Passed to splitChunkBySize so we enforce
+ // S2 limits directly instead of relying on JSON size heuristics.
+ const makeChunkMeasure = (items: ReadonlyArray<PreparedEvent>): number =>
+   items.reduce((acc, item) => acc + item.meteredBytes, 0)
+
+ const mapPreparedChunks = (chunks: Chunk.Chunk<Chunk.Chunk<PreparedEvent>>): ReadonlyArray<S2Chunk> =>
+   Chunk.toReadonlyArray(chunks).map((chunk) => {
+     const chunkItems = Chunk.toReadonlyArray(chunk)
+     const events = chunkItems.map((item) => item.event)
+     const records = chunkItems.map((item) => item.record)
+     return {
+       events,
+       records,
+       meteredBytes: makeChunkMeasure(chunkItems),
+     }
+   })
+
+ export const chunkEventsForS2 = (events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>): ReadonlyArray<S2Chunk> => {
+   if (events.length === 0) {
+     return []
+   }
+
+   const prepared = convertEventsToPrepared(events)
+
+   try {
+     const chunks = Chunk.fromIterable(prepared).pipe(
+       splitChunkBySize({
+         maxItems: MAX_RECORDS_PER_BATCH,
+         maxBytes: MAX_BATCH_METERED_BYTES,
+         encode: (items) => ({ records: items.map((item) => item.record) }),
+         measure: makeChunkMeasure,
+       }),
+       Effect.runSync,
+     )
+
+     return mapPreparedChunks(chunks)
+   } catch (error) {
+     if (error && typeof error === 'object' && (error as any)._tag === 'OversizeChunkItemError') {
+       const oversize = error as { size: number; maxBytes: number; _tag: string }
+       throw new S2LimitExceededError({
+         limitType: 'record-metered-bytes',
+         max: oversize.maxBytes,
+         actual: oversize.size,
+       })
+     }
+
+     throw error
+   }
+ }
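
For reference, the metering formula above (8 bytes of fixed overhead per record, 2 bytes per header, plus the UTF-8 byte lengths of header names, header values, and the body) is easy to check by hand. The sketch below is illustrative only and not part of the published files; it assumes the helpers are consumed through the package entry point, which mod.ts now re-exports from ./limits.ts:

```ts
import { computeBatchMeteredBytes, computeRecordMeteredBytes } from '@livestore/sync-s2'

// Body-only record: 8 + 2 * 0 + 0 + 5 ("hello" is 5 UTF-8 bytes) = 13 metered bytes.
console.log(computeRecordMeteredBytes({ body: 'hello' })) // 13

// One header ("name" = 4 bytes, "value" = 5 bytes): 8 + 2 * 1 + 9 + 0 = 19 metered bytes.
console.log(computeRecordMeteredBytes({ body: '', headers: [{ name: 'name', value: 'value' }] })) // 19

// A batch meters as the sum of its records: 13 + 19 = 32 metered bytes.
console.log(
  computeBatchMeteredBytes([{ body: 'hello' }, { body: '', headers: [{ name: 'name', value: 'value' }] }]),
) // 32
```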
package/src/mod.ts CHANGED
@@ -1,5 +1,6 @@
  export * as ApiSchema from './api-schema.ts'
  export * as HttpClientGenerated from './http-client-generated.ts'
+ export * from './limits.ts'
  export * from './make-s2-url.ts'
  export * from './s2-proxy-helpers.ts'
  export { makeSyncBackend, type SyncS2Options } from './sync-provider.ts'
@@ -5,6 +5,7 @@

  import type { LiveStoreEvent } from '@livestore/livestore'
  import type { PullArgs } from './api-schema.ts'
+ import { chunkEventsForS2 } from './limits.ts'
  import { makeS2StreamName } from './make-s2-url.ts'

  /** Configuration for S2 connections */
@@ -125,7 +126,20 @@ export const buildPullRequest = ({
    }
  }

- export const buildPushRequest = ({
+ export interface S2PushRequest {
+   readonly url: string
+   readonly method: 'POST'
+   readonly headers: Record<string, string>
+   readonly body: string
+ }
+
+ /**
+  * Builds one or more append requests against S2. The helper applies the
+  * documented 1 MiB / 1000-record limits via `chunkEventsForS2`, so callers
+  * receive a request per compliant chunk instead of hitting 413 responses at
+  * runtime.
+  */
+ export const buildPushRequests = ({
    config,
    storeId,
    batch,
@@ -133,21 +147,17 @@ export const buildPushRequest = ({
    config: S2Config
    storeId: string
    batch: readonly LiveStoreEvent.AnyEncodedGlobal[]
- }): {
-   url: string
-   method: 'POST'
-   headers: Record<string, string>
-   /** JSON-encoded batch */
-   body: string
- } => {
+ }): ReadonlyArray<S2PushRequest> => {
    const streamName = makeS2StreamName(storeId)
    const url = getBasinUrl(config, `/streams/${encodeURIComponent(streamName)}/records`)
-   return {
+   const chunks = chunkEventsForS2(batch)
+
+   return chunks.map((chunk) => ({
      url,
-     method: 'POST',
+     method: 'POST' as const,
      headers: getPushHeaders(config.token),
-     body: JSON.stringify(formatBatchForS2(batch)),
-   }
+     body: JSON.stringify({ records: chunk.records }),
+   }))
  }

  // Response helpers
@@ -177,21 +187,3 @@ export const errorResponse = (message: string, status = 500): Response => {
      headers: { 'content-type': 'application/json' },
    })
  }
-
- // Batch formatting helper
- export const formatBatchForS2 = (
-   batch: readonly LiveStoreEvent.AnyEncodedGlobal[],
- ): { records: { body: string }[] } => {
-   return {
-     records: batch.map((ev) => ({ body: JSON.stringify(ev) })),
-   }
- }
-
- export const asCurl = (request: { url: string; method: string; headers: Record<string, string>; body?: string }) => {
-   const url = request.url
-   const method = request.method
-   const headers = Object.entries(request.headers).map(([key, value]) => `-H "${key}: ${value}"`)
-   const body = request.body
-   const headersStr = headers.join(' ')
-   return `curl -X ${method} ${url} ${headersStr} ${body ? `-d '${body}'` : ''}`
- }
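
Because `buildPushRequests` now returns one request per compliant chunk, a caller loops over the array instead of issuing a single append. The sketch below is illustrative only; it assumes `S2Config` and `buildPushRequests` are reachable from the package entry point and that a plain `fetch` is acceptable in the host environment:

```ts
import type { LiveStoreEvent } from '@livestore/livestore'
import { buildPushRequests, type S2Config } from '@livestore/sync-s2'

// Appends a batch to S2, issuing one HTTP request per chunk produced by chunkEventsForS2.
const appendBatch = async (
  config: S2Config,
  storeId: string,
  batch: readonly LiveStoreEvent.AnyEncodedGlobal[],
): Promise<void> => {
  for (const request of buildPushRequests({ config, storeId, batch })) {
    const response = await fetch(request.url, {
      method: request.method,
      headers: request.headers,
      body: request.body,
    })
    if (!response.ok) {
      throw new Error(`S2 append failed with status ${response.status}`)
    }
  }
}
```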
@@ -50,6 +50,7 @@ import {
  import * as ApiSchema from './api-schema.ts'
  import { decodeReadBatch } from './decode.ts'
  import * as HttpClientGenerated from './http-client-generated.ts'
+ import { chunkEventsForS2, S2LimitExceededError } from './limits.ts'
  import type { SyncMetadata } from './types.ts'

  export interface SyncS2Options {
@@ -247,15 +248,53 @@ export const makeSyncBackend =
        }
      },
      push: (batch) =>
-       HttpClientRequest.schemaBodyJson(ApiSchema.PushPayload)(HttpClientRequest.post(pushEndpoint), {
-         storeId,
-         batch,
-       }).pipe(
-         Effect.andThen(httpClient.pipe(HttpClient.filterStatusOk).execute),
-         Effect.andThen(HttpClientResponse.schemaBodyJson(ApiSchema.PushResponse)),
-         Effect.mapError((cause) => InvalidPushError.make({ cause: UnexpectedError.make({ cause }) })),
-         Effect.retry(retry?.push ?? defaultRetry),
-       ),
+       Effect.gen(function* () {
+         const makeInvalidPushError = (cause: unknown): InvalidPushError => {
+           if (cause instanceof InvalidPushError) {
+             return cause
+           }
+
+           if (cause instanceof UnexpectedError) {
+             return new InvalidPushError({ cause })
+           }
+
+           if (cause instanceof S2LimitExceededError) {
+             const note =
+               cause.limitType === 'record-metered-bytes'
+                 ? `S2 record exceeded ${cause.max} metered bytes (actual: ${cause.actual})`
+                 : `S2 batch exceeded ${cause.max} (type: ${cause.limitType}, actual: ${cause.actual})`
+
+             return new InvalidPushError({
+               cause: new UnexpectedError({
+                 cause,
+                 note,
+                 payload: {
+                   limitType: cause.limitType,
+                   max: cause.max,
+                   actual: cause.actual,
+                   recordIndex: cause.recordIndex,
+                 },
+               }),
+             })
+           }
+
+           return new InvalidPushError({ cause: new UnexpectedError({ cause }) })
+         }
+
+         const chunks = yield* Effect.sync(() => chunkEventsForS2(batch)).pipe(Effect.mapError(makeInvalidPushError))
+
+         for (const chunk of chunks) {
+           yield* HttpClientRequest.schemaBodyJson(ApiSchema.PushPayload)(HttpClientRequest.post(pushEndpoint), {
+             storeId,
+             batch: chunk.events,
+           }).pipe(
+             Effect.andThen(httpClient.pipe(HttpClient.filterStatusOk).execute),
+             Effect.andThen(HttpClientResponse.schemaBodyJson(ApiSchema.PushResponse)),
+             Effect.mapError(makeInvalidPushError),
+             Effect.retry(retry?.push ?? defaultRetry),
+           )
+         }
+       }),
      ping,
      isConnected,
      metadata: {
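
Chunking happens before any HTTP request is issued, so an event that can never fit into a single S2 record fails fast with `S2LimitExceededError`, which the push handler above wraps into `InvalidPushError`. The sketch below is illustrative only; the event value is a hypothetical stand-in cast past the `LiveStoreEvent` type for brevity:

```ts
import { chunkEventsForS2, MAX_RECORD_METERED_BYTES, S2LimitExceededError } from '@livestore/sync-s2'

// Hypothetical event whose JSON form (~2 MiB) exceeds the 1 MiB record limit.
const oversizeEvent = { name: 'todoCreated', args: { text: 'x'.repeat(2 * MAX_RECORD_METERED_BYTES) } } as any

try {
  chunkEventsForS2([oversizeEvent])
} catch (error) {
  if (error instanceof S2LimitExceededError) {
    // limitType 'record-metered-bytes', recordIndex 0, actual > MAX_RECORD_METERED_BYTES
    console.error(`record ${error.recordIndex} meters ${error.actual} bytes, limit is ${error.max}`)
  }
}
```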