@livestore/sync-s2 0.0.0-snapshot-4b42ef6ef0c52dda5b8633a10addf45134e4cafe → 0.0.0-snapshot-4d1cde4c26a9d8ff6ba4b77935eaa816334db6d4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/limits.test.js.map DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"limits.test.js","sourceRoot":"","sources":["../src/limits.test.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,QAAQ,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,QAAQ,CAAA;AAC7C,OAAO,EACL,gBAAgB,EAChB,yBAAyB,EACzB,uBAAuB,EACvB,wBAAwB,EACxB,oBAAoB,GACrB,MAAM,aAAa,CAAA;AAEpB,MAAM,OAAO,GAAG,IAAI,WAAW,EAAE,CAAA;AAEjC,MAAM,SAAS,GAAG,CAAC,aAAqB,EAAE,KAAK,GAAG,CAAC,EAAmC,EAAE,CAAC,CAAC;IACxF,IAAI,EAAE,SAAS,KAAK,EAAE;IACtB,IAAI,EAAE,EAAE,OAAO,EAAE,GAAG,CAAC,MAAM,CAAC,aAAa,CAAC,EAAE;IAC5C,MAAM,EAAE,KAAsD;IAC9D,YAAY,EAAE,KAAsD;IACpE,QAAQ,EAAE,QAAQ;IAClB,SAAS,EAAE,SAAS;CACrB,CAAC,CAAA;AAEF,QAAQ,CAAC,mBAAmB,EAAE,GAAG,EAAE;IACjC,EAAE,CAAC,0CAA0C,EAAE,GAAG,EAAE;QAClD,MAAM,MAAM,GAAG,EAAE,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC,EAAE,CAAA;QAC3D,MAAM,QAAQ,GAAG,CAAC,GAAG,OAAO,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,IAAI,EAAE,CAAC,CAAC,UAAU,CAAA;QACjE,MAAM,CAAC,yBAAyB,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAA;IAC1D,CAAC,CAAC,CAAA;IAEF,EAAE,CAAC,2DAA2D,EAAE,GAAG,EAAE;QACnE,MAAM,MAAM,GAAG,CAAC,SAAS,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,SAAS,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,SAAS,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,CAAA;QACpF,MAAM,MAAM,GAAG,gBAAgB,CAAC,MAAM,CAAC,CAAA;QAEvC,MAAM,CAAC,MAAM,CAAC,CAAC,YAAY,CAAC,CAAC,CAAC,CAAA;QAC9B,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,aAAa,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAA;QACxE,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK,CAAC,YAAY,IAAI,uBAAuB,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAA;IAC3F,CAAC,CAAC,CAAA;IAEF,EAAE,CAAC,0DAA0D,EAAE,GAAG,EAAE;QAClE,MAAM,QAAQ,GAAG,SAAS,CAAC,wBAAwB,EAAE,CAAC,CAAC,CAAA;QACvD,MAAM,CAAC,GAAG,EAAE,CAAC,gBAAgB,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,oBAAoB,CAAC,CAAA;IAC1E,CAAC,CAAC,CAAA;AACJ,CAAC,CAAC,CAAA"}
package/src/limits.test.ts DELETED
@@ -1,42 +0,0 @@
-import type { EventSequenceNumber, LiveStoreEvent } from '@livestore/common/schema'
-import { describe, expect, it } from 'vitest'
-import {
-  chunkEventsForS2,
-  computeRecordMeteredBytes,
-  MAX_BATCH_METERED_BYTES,
-  MAX_RECORD_METERED_BYTES,
-  S2LimitExceededError,
-} from './limits.ts'
-
-const encoder = new TextEncoder()
-
-const makeEvent = (payloadLength: number, index = 0): LiveStoreEvent.AnyEncodedGlobal => ({
-  name: `event-${index}`,
-  args: { payload: 'x'.repeat(payloadLength) },
-  seqNum: index as EventSequenceNumber.GlobalEventSequenceNumber,
-  parentSeqNum: index as EventSequenceNumber.GlobalEventSequenceNumber,
-  clientId: 'client',
-  sessionId: 'session',
-})
-
-describe('S2 limits helpers', () => {
-  it('computes metered bytes for record bodies', () => {
-    const record = { body: JSON.stringify({ hello: 'world' }) }
-    const expected = 8 + encoder.encode(record.body ?? '').byteLength
-    expect(computeRecordMeteredBytes(record)).toBe(expected)
-  })
-
-  it('splits large batches while respecting metered byte limits', () => {
-    const events = [makeEvent(400_000, 1), makeEvent(400_000, 2), makeEvent(400_000, 3)]
-    const chunks = chunkEventsForS2(events)
-
-    expect(chunks).toHaveLength(2)
-    expect(chunks.map((chunk) => chunk.events.length)).toStrictEqual([2, 1])
-    expect(chunks.every((chunk) => chunk.meteredBytes <= MAX_BATCH_METERED_BYTES)).toBe(true)
-  })
-
-  it('throws when a single record exceeds the metered byte cap', () => {
-    const oversize = makeEvent(MAX_RECORD_METERED_BYTES, 1)
-    expect(() => chunkEventsForS2([oversize])).toThrow(S2LimitExceededError)
-  })
-})
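
The first deleted test pins down the metered-size formula implemented in limits.ts (next hunk): 8 bytes of fixed overhead per record, plus 2 bytes and the UTF-8 name/value bytes per header, plus the UTF-8 byte length of the body. A standalone worked instance of what that test asserts (the names here are illustrative, not part of the package):

```ts
// Metered size per the deleted limits.ts:
//   8 + 2 * headerCount + headerNameValueBytes + bodyBytes
// The first test uses a headerless record whose body is
// '{"hello":"world"}' (17 UTF-8 bytes), so: 8 + 0 + 0 + 17 = 25.
const body = JSON.stringify({ hello: 'world' }) // '{"hello":"world"}'
const meteredBytes = 8 + new TextEncoder().encode(body).byteLength
console.log(meteredBytes) // 25
```

The second test's numbers follow the same arithmetic: three serialized events of roughly 400,000 metered bytes each, so only two fit under the 1 MiB (1,048,576-byte) batch cap, giving chunk sizes of [2, 1].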
package/src/limits.ts DELETED
@@ -1,135 +0,0 @@
-import type { LiveStoreEvent } from '@livestore/common/schema'
-import { splitChunkBySize } from '@livestore/common/sync'
-import { Chunk, Effect, Schema } from '@livestore/utils/effect'
-
-const textEncoder = new TextEncoder()
-
-/**
- * Maximum metered size of a single record (docs: https://s2.dev/docs/limits#records).
- */
-export const MAX_RECORD_METERED_BYTES = 1_048_576 // 1 MiB
-
-/**
- * Maximum combined metered size of a batch append (docs: https://s2.dev/docs/limits#records).
- */
-export const MAX_BATCH_METERED_BYTES = 1_048_576 // 1 MiB
-
-/**
- * Maximum number of records per append (docs: https://s2.dev/docs/limits#records).
- */
-export const MAX_RECORDS_PER_BATCH = 1_000
-
-const LimitType = Schema.Literal('record-metered-bytes', 'batch-metered-bytes', 'batch-count')
-
-export class S2LimitExceededError extends Schema.TaggedError<S2LimitExceededError>()('S2LimitExceededError', {
-  limitType: LimitType,
-  max: Schema.Number,
-  actual: Schema.Number,
-  recordIndex: Schema.optional(Schema.Number),
-}) {}
-
-export interface AppendRecordBody {
-  readonly body?: string
-  readonly headers?: ReadonlyArray<{ readonly name: string; readonly value: string }>
-}
-
-// S2 measures bodies/headers in UTF‑8 bytes; centralising this helper keeps the
-// formula readable and consistent with the docs.
-const utf8ByteLength = (value: string): number => textEncoder.encode(value).byteLength
-
-export const computeRecordMeteredBytes = (record: AppendRecordBody): number => {
-  const headers = record.headers ?? []
-  const headerCount = headers.length
-  const headerBytes = headers.reduce(
-    (acc, header) => acc + utf8ByteLength(header.name) + utf8ByteLength(header.value),
-    0,
-  )
-  const bodyBytes = record.body === undefined ? 0 : utf8ByteLength(record.body)
-  return 8 + 2 * headerCount + headerBytes + bodyBytes
-}
-
-export const computeBatchMeteredBytes = (records: ReadonlyArray<AppendRecordBody>): number =>
-  records.reduce((acc, record) => acc + computeRecordMeteredBytes(record), 0)
-
-interface PreparedEvent {
-  readonly event: LiveStoreEvent.AnyEncodedGlobal
-  readonly record: AppendRecordBody
-  readonly meteredBytes: number
-  readonly index: number
-}
-
-export interface S2Chunk {
-  readonly events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>
-  readonly records: ReadonlyArray<AppendRecordBody>
-  readonly meteredBytes: number
-}
-
-// Pre-stringify events and pre-compute per-record metered bytes so we only pay
-// the JSON cost once when chunking large batches.
-const convertEventsToPrepared = (events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>): PreparedEvent[] =>
-  events.map((event, index) => {
-    const body = JSON.stringify(event)
-    const record: AppendRecordBody = { body }
-    const meteredBytes = computeRecordMeteredBytes(record)
-
-    if (meteredBytes > MAX_RECORD_METERED_BYTES) {
-      throw new S2LimitExceededError({
-        limitType: 'record-metered-bytes',
-        max: MAX_RECORD_METERED_BYTES,
-        actual: meteredBytes,
-        recordIndex: index,
-      })
-    }
-
-    return { event, record, meteredBytes, index }
-  })
-
-// Summarises a chunk’s metered bytes. Passed to splitChunkBySize so we enforce
-// S2 limits directly instead of relying on JSON size heuristics.
-const makeChunkMeasure = (items: ReadonlyArray<PreparedEvent>): number =>
-  items.reduce((acc, item) => acc + item.meteredBytes, 0)
-
-const mapPreparedChunks = (chunks: Chunk.Chunk<Chunk.Chunk<PreparedEvent>>): ReadonlyArray<S2Chunk> =>
-  Chunk.toReadonlyArray(chunks).map((chunk) => {
-    const chunkItems = Chunk.toReadonlyArray(chunk)
-    const events = chunkItems.map((item) => item.event)
-    const records = chunkItems.map((item) => item.record)
-    return {
-      events,
-      records,
-      meteredBytes: makeChunkMeasure(chunkItems),
-    }
-  })
-
-export const chunkEventsForS2 = (events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>): ReadonlyArray<S2Chunk> => {
-  if (events.length === 0) {
-    return []
-  }
-
-  const prepared = convertEventsToPrepared(events)
-
-  try {
-    const chunks = Chunk.fromIterable(prepared).pipe(
-      splitChunkBySize({
-        maxItems: MAX_RECORDS_PER_BATCH,
-        maxBytes: MAX_BATCH_METERED_BYTES,
-        encode: (items) => ({ records: items.map((item) => item.record) }),
-        measure: makeChunkMeasure,
-      }),
-      Effect.runSync,
-    )
-
-    return mapPreparedChunks(chunks)
-  } catch (error) {
-    if (error && typeof error === 'object' && (error as any)._tag === 'OversizeChunkItemError') {
-      const oversize = error as { size: number; maxBytes: number; _tag: string }
-      throw new S2LimitExceededError({
-        limitType: 'record-metered-bytes',
-        max: oversize.maxBytes,
-        actual: oversize.size,
-      })
-    }
-
-    throw error
-  }
-}
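
Taken together, this removal drops the package's batching layer: chunkEventsForS2 was the main entry point, and the OversizeChunkItemError mapping in its catch block meant callers only ever saw S2LimitExceededError, whether the record cap was hit before or during splitting. A minimal sketch of how that removed API composed, with appendToS2 as a hypothetical stand-in for the actual S2 append call (which this diff does not show):

```ts
import type { LiveStoreEvent } from '@livestore/common/schema'

import { chunkEventsForS2, S2LimitExceededError } from './limits.ts'

// Hypothetical transport: the real S2 client call is not part of this diff.
declare const appendToS2: (records: ReadonlyArray<{ readonly body?: string }>) => Promise<void>

const pushEvents = async (events: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>): Promise<void> => {
  try {
    // Each chunk already satisfies MAX_RECORDS_PER_BATCH (1,000 records)
    // and MAX_BATCH_METERED_BYTES (1 MiB), so it can be appended as-is.
    for (const chunk of chunkEventsForS2(events)) {
      await appendToS2(chunk.records)
    }
  } catch (error) {
    // Thrown synchronously when a single serialized event exceeds
    // MAX_RECORD_METERED_BYTES (1 MiB): batches are split, records are not.
    if (error instanceof S2LimitExceededError) {
      console.error(`S2 ${error.limitType} limit exceeded: ${error.actual} > ${error.max}`)
      return
    }
    throw error
  }
}
```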