@livestore/sync-cf 0.4.0-dev.7 → 0.4.0-dev.9

This diff shows the contents of two publicly released versions of the package, exactly as published to their public registry, and is provided for informational purposes only.
Files changed (62)
  1. package/dist/.tsbuildinfo +1 -1
  2. package/dist/cf-worker/do/durable-object.d.ts.map +1 -1
  3. package/dist/cf-worker/do/durable-object.js +5 -9
  4. package/dist/cf-worker/do/durable-object.js.map +1 -1
  5. package/dist/cf-worker/do/layer.d.ts +1 -1
  6. package/dist/cf-worker/do/pull.d.ts +1 -1
  7. package/dist/cf-worker/do/pull.d.ts.map +1 -1
  8. package/dist/cf-worker/do/pull.js +21 -13
  9. package/dist/cf-worker/do/pull.js.map +1 -1
  10. package/dist/cf-worker/do/push.d.ts.map +1 -1
  11. package/dist/cf-worker/do/push.js +66 -35
  12. package/dist/cf-worker/do/push.js.map +1 -1
  13. package/dist/cf-worker/do/sync-storage.d.ts +8 -5
  14. package/dist/cf-worker/do/sync-storage.d.ts.map +1 -1
  15. package/dist/cf-worker/do/sync-storage.js +64 -19
  16. package/dist/cf-worker/do/sync-storage.js.map +1 -1
  17. package/dist/cf-worker/do/transport/do-rpc-server.d.ts +2 -1
  18. package/dist/cf-worker/do/transport/do-rpc-server.d.ts.map +1 -1
  19. package/dist/cf-worker/do/transport/do-rpc-server.js.map +1 -1
  20. package/dist/cf-worker/do/transport/http-rpc-server.d.ts +1 -1
  21. package/dist/cf-worker/do/ws-chunking.d.ts +22 -0
  22. package/dist/cf-worker/do/ws-chunking.d.ts.map +1 -0
  23. package/dist/cf-worker/do/ws-chunking.js +49 -0
  24. package/dist/cf-worker/do/ws-chunking.js.map +1 -0
  25. package/dist/cf-worker/shared.d.ts +19 -13
  26. package/dist/cf-worker/shared.d.ts.map +1 -1
  27. package/dist/cf-worker/shared.js +15 -4
  28. package/dist/cf-worker/shared.js.map +1 -1
  29. package/dist/cf-worker/worker.d.ts +30 -45
  30. package/dist/cf-worker/worker.d.ts.map +1 -1
  31. package/dist/cf-worker/worker.js +30 -25
  32. package/dist/cf-worker/worker.js.map +1 -1
  33. package/dist/common/sync-message-types.d.ts +5 -5
  34. package/package.json +5 -5
  35. package/src/cf-worker/do/durable-object.ts +6 -10
  36. package/src/cf-worker/do/pull.ts +30 -17
  37. package/src/cf-worker/do/push.ts +85 -38
  38. package/src/cf-worker/do/sync-storage.ts +106 -27
  39. package/src/cf-worker/do/transport/do-rpc-server.ts +4 -2
  40. package/src/cf-worker/do/ws-chunking.ts +76 -0
  41. package/src/cf-worker/shared.ts +19 -6
  42. package/src/cf-worker/worker.ts +46 -69
  43. package/dist/cf-worker/cf-types.d.ts +0 -2
  44. package/dist/cf-worker/cf-types.d.ts.map +0 -1
  45. package/dist/cf-worker/cf-types.js +0 -2
  46. package/dist/cf-worker/cf-types.js.map +0 -1
  47. package/dist/cf-worker/durable-object.d.ts +0 -189
  48. package/dist/cf-worker/durable-object.d.ts.map +0 -1
  49. package/dist/cf-worker/durable-object.js +0 -317
  50. package/dist/cf-worker/durable-object.js.map +0 -1
  51. package/dist/common/ws-message-types.d.ts +0 -270
  52. package/dist/common/ws-message-types.d.ts.map +0 -1
  53. package/dist/common/ws-message-types.js +0 -57
  54. package/dist/common/ws-message-types.js.map +0 -1
  55. package/dist/sync-impl/mod.d.ts +0 -2
  56. package/dist/sync-impl/mod.d.ts.map +0 -1
  57. package/dist/sync-impl/mod.js +0 -2
  58. package/dist/sync-impl/mod.js.map +0 -1
  59. package/dist/sync-impl/ws-impl.d.ts +0 -7
  60. package/dist/sync-impl/ws-impl.d.ts.map +0 -1
  61. package/dist/sync-impl/ws-impl.js +0 -175
  62. package/dist/sync-impl/ws-impl.js.map +0 -1
package/src/cf-worker/do/push.ts

@@ -6,10 +6,21 @@ import {
   UnexpectedError,
 } from '@livestore/common'
 import { type CfTypes, emitStreamResponse } from '@livestore/common-cf'
-import { Effect, Option, type RpcMessage, Schema } from '@livestore/utils/effect'
+import { Chunk, Effect, Option, type RpcMessage, Schema } from '@livestore/utils/effect'
 import { SyncMessage } from '../../common/mod.ts'
-import { type Env, type MakeDurableObjectClassOptions, type StoreId, WebSocketAttachmentSchema } from '../shared.ts'
+import {
+  type Env,
+  MAX_PULL_EVENTS_PER_MESSAGE,
+  MAX_WS_MESSAGE_BYTES,
+  type MakeDurableObjectClassOptions,
+  type StoreId,
+  WebSocketAttachmentSchema,
+} from '../shared.ts'
 import { DoCtx } from './layer.ts'
+import { splitChunkBySize } from './ws-chunking.ts'
+
+const encodePullResponse = Schema.encodeSync(SyncMessage.PullResponse)
+type PullBatchItem = SyncMessage.PullResponse['batch'][number]

 export const makePush =
   ({
@@ -51,6 +62,13 @@ export const makePush =
       // Validate the batch
       const firstEventParent = pushRequest.batch[0]!.parentSeqNum
       if (firstEventParent !== currentHead) {
+        // yield* Effect.logDebug('ServerAheadError: backend head mismatch', {
+        //   expectedHead: currentHead,
+        //   providedHead: firstEventParent,
+        //   batchSize: pushRequest.batch.length,
+        //   backendId,
+        // })
+
         return yield* new ServerAheadError({ minimumExpectedNum: currentHead, providedNum: firstEventParent })
       }

@@ -68,40 +86,69 @@ export const makePush =
       yield* Effect.gen(function* () {
         const connectedClients = ctx.getWebSockets()

-        // Dual broadcasting: WebSocket + RPC clients
-        const pullRes = SyncMessage.PullResponse.make({
-          batch: pushRequest.batch.map((eventEncoded) => ({
-            eventEncoded,
-            metadata: Option.some(SyncMessage.SyncMetadata.make({ createdAt })),
-          })),
-          pageInfo: SyncBackend.pageInfoNoMore,
-          backendId,
-        })
+        // Preparing chunks of responses to make sure we don't exceed the WS message size limit.
+        const responses = Chunk.fromIterable(pushRequest.batch).pipe(
+          splitChunkBySize({
+            maxItems: MAX_PULL_EVENTS_PER_MESSAGE,
+            maxBytes: MAX_WS_MESSAGE_BYTES,
+            encode: (items) =>
+              encodePullResponse(
+                SyncMessage.PullResponse.make({
+                  batch: items.map(
+                    (eventEncoded): PullBatchItem => ({
+                      eventEncoded,
+                      metadata: Option.some(SyncMessage.SyncMetadata.make({ createdAt })),
+                    }),
+                  ),
+                  pageInfo: SyncBackend.pageInfoNoMore,
+                  backendId,
+                }),
+              ),
+          }),
+          Chunk.map((eventsChunk) => {
+            const batchWithMetadata = Chunk.toReadonlyArray(eventsChunk).map((eventEncoded) => ({
+              eventEncoded,
+              metadata: Option.some(SyncMessage.SyncMetadata.make({ createdAt })),
+            }))
+
+            const response = SyncMessage.PullResponse.make({
+              batch: batchWithMetadata,
+              pageInfo: SyncBackend.pageInfoNoMore,
+              backendId,
+            })
+
+            return {
+              response,
+              encoded: Schema.encodeSync(SyncMessage.PullResponse)(response),
+            }
+          }),
+        )

-        const pullResEnc = Schema.encodeSync(SyncMessage.PullResponse)(pullRes)
+        // Dual broadcasting: WebSocket + RPC clients

         // Broadcast to WebSocket clients
         if (connectedClients.length > 0) {
-          // Only calling once for now.
-          if (options?.onPullRes) {
-            yield* Effect.tryAll(() => options.onPullRes!(pullRes)).pipe(UnexpectedError.mapToUnexpectedError)
-          }
+          for (const { response, encoded } of responses) {
+            // Only calling once for now.
+            if (options?.onPullRes) {
+              yield* Effect.tryAll(() => options.onPullRes!(response)).pipe(UnexpectedError.mapToUnexpectedError)
+            }

-          // NOTE we're also sending the pullRes to the pushing ws client as a confirmation
-          for (const conn of connectedClients) {
-            // conn.send(pullResEnc)
-            const attachment = Schema.decodeSync(WebSocketAttachmentSchema)(conn.deserializeAttachment())
-
-            // We're doing something a bit "advanced" here as we're directly emitting Effect RPC-compatible
-            // response messsages on the Effect RPC-managed websocket connection to the WS client.
-            // For this we need to get the RPC `requestId` from the WebSocket attachment.
-            for (const requestId of attachment.pullRequestIds) {
-              const res: RpcMessage.ResponseChunkEncoded = {
-                _tag: 'Chunk',
-                requestId,
-                values: [pullResEnc],
+            // NOTE we're also sending the pullRes chunk to the pushing ws client as confirmation
+            for (const conn of connectedClients) {
+              const attachment = Schema.decodeSync(WebSocketAttachmentSchema)(conn.deserializeAttachment())
+
+              // We're doing something a bit "advanced" here as we're directly emitting Effect RPC-compatible
+              // response messsages on the Effect RPC-managed websocket connection to the WS client.
+              // For this we need to get the RPC `requestId` from the WebSocket attachment.
+              for (const requestId of attachment.pullRequestIds) {
+                const res: RpcMessage.ResponseChunkEncoded = {
+                  _tag: 'Chunk',
+                  requestId,
+                  values: [encoded],
+                }
+                conn.send(JSON.stringify(res))
               }
-              conn.send(JSON.stringify(res))
             }
           }
         }

@@ -110,17 +157,16 @@ export const makePush =

         // RPC broadcasting would require reconstructing client stubs from clientIds
         if (rpcSubscriptions.size > 0) {
-          yield* Effect.forEach(
-            rpcSubscriptions.values(),
-            (subscription) =>
-              emitStreamResponse({
+          for (const subscription of rpcSubscriptions.values()) {
+            for (const { encoded } of responses) {
+              yield* emitStreamResponse({
                 callerContext: subscription.callerContext,
                 env,
                 requestId: subscription.requestId,
-                values: [pullResEnc],
-              }).pipe(Effect.tapCauseLogPretty, Effect.exit),
-            { concurrency: 'unbounded' },
-          )
+                values: [encoded],
+              }).pipe(Effect.tapCauseLogPretty, Effect.exit)
+            }
+          }

           yield* Effect.logDebug(`Broadcasted to ${rpcSubscriptions.size} RPC clients`)
         }
@@ -144,6 +190,7 @@ export const makePush =
       }),
     ),
     Effect.mapError((cause) => InvalidPushError.make({ cause })),
+    Effect.withSpan('sync-cf:do:push', { attributes: { storeId, batchSize: pushRequest.batch.length } }),
   )

 /**
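Taken together, the push.ts changes mean one accepted push now fans out as one or more self-contained PullResponse frames, each bounded by the limits imported from shared.ts, and the same frames feed both the WebSocket and the RPC broadcast paths. A rough sketch of the resulting frame count for uniformly small events (illustrative only; splitChunkBySize also cuts a frame early whenever the byte cap is hit first):

  // With MAX_PULL_EVENTS_PER_MESSAGE = 100, a push of 250 small events is
  // broadcast as ceil(250 / 100) = 3 RPC 'Chunk' frames; a push of a few very
  // large events would instead be split by MAX_WS_MESSAGE_BYTES.
  const frameCountForSmallEvents = (batchSize: number, maxPerFrame = 100): number =>
    Math.ceil(batchSize / maxPerFrame)

  console.log(frameCountForSmallEvents(250)) // 3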
package/src/cf-worker/do/sync-storage.ts

@@ -1,18 +1,21 @@
 import { UnexpectedError } from '@livestore/common'
 import type { LiveStoreEvent } from '@livestore/common/schema'
 import type { CfTypes } from '@livestore/common-cf'
-import { Effect, Option, Schema } from '@livestore/utils/effect'
+import { Chunk, Effect, Option, Schema, Stream } from '@livestore/utils/effect'
 import { SyncMetadata } from '../../common/sync-message-types.ts'
 import { type Env, PERSISTENCE_FORMAT_VERSION, type StoreId } from '../shared.ts'
 import { eventlogTable } from './sqlite.ts'

 export type SyncStorage = {
   dbName: string
-  // getHead: Effect.Effect<EventSequenceNumber.GlobalEventSequenceNumber, UnexpectedError>
-  getEvents: (
-    cursor: number | undefined,
-  ) => Effect.Effect<
-    ReadonlyArray<{ eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> }>,
+  getEvents: (cursor: number | undefined) => Effect.Effect<
+    {
+      total: number
+      stream: Stream.Stream<
+        { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> },
+        UnexpectedError
+      >
+    },
     UnexpectedError
   >
   appendEvents: (
@@ -34,35 +37,111 @@ export const makeStorage = (ctx: CfTypes.DurableObjectState, env: Env, storeId:
     Effect.withSpan('@livestore/sync-cf:durable-object:execDb'),
   )

-  // const getHead: Effect.Effect<EventSequenceNumber.GlobalEventSequenceNumber, UnexpectedError> = Effect.gen(
-  //   function* () {
-  //     const result = yield* execDb<{ seqNum: EventSequenceNumber.GlobalEventSequenceNumber }>((db) =>
-  //       db.prepare(`SELECT seqNum FROM ${dbName} ORDER BY seqNum DESC LIMIT 1`).all(),
-  //     )
+  // Cloudflare's D1 HTTP endpoint rejects JSON responses once they exceed ~1MB.
+  // Keep individual SELECT batches comfortably below that threshold so we can
+  // serve large histories without tripping the limit.
+  const D1_MAX_JSON_RESPONSE_BYTES = 1_000_000
+  const D1_RESPONSE_SAFETY_MARGIN_BYTES = 64 * 1024
+  const D1_TARGET_RESPONSE_BYTES = D1_MAX_JSON_RESPONSE_BYTES - D1_RESPONSE_SAFETY_MARGIN_BYTES
+  const D1_INITIAL_PAGE_SIZE = 256
+  const D1_MIN_PAGE_SIZE = 1
+
+  const decodeEventlogRows = Schema.decodeUnknownSync(Schema.Array(eventlogTable.rowSchema))
+  const textEncoder = new TextEncoder()
+
+  const decreaseLimit = (limit: number) => Math.max(D1_MIN_PAGE_SIZE, Math.floor(limit / 2))
+  const increaseLimit = (limit: number) => Math.min(D1_INITIAL_PAGE_SIZE, limit * 2)

-  //     return result[0]?.seqNum ?? EventSequenceNumber.ROOT.global
-  //   },
-  // ).pipe(UnexpectedError.mapToUnexpectedError)
+  const computeNextLimit = (limit: number, encodedSize: number) => {
+    if (encodedSize > D1_TARGET_RESPONSE_BYTES && limit > D1_MIN_PAGE_SIZE) {
+      const next = decreaseLimit(limit)
+      return next === limit ? limit : next
+    }
+
+    if (encodedSize < D1_TARGET_RESPONSE_BYTES / 2 && limit < D1_INITIAL_PAGE_SIZE) {
+      const next = increaseLimit(limit)
+      return next === limit ? limit : next
+    }
+
+    return limit
+  }

-  // TODO support streaming
   const getEvents = (
     cursor: number | undefined,
   ): Effect.Effect<
-    ReadonlyArray<{ eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> }>,
+    {
+      total: number
+      stream: Stream.Stream<
+        { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> },
+        UnexpectedError
+      >
+    },
     UnexpectedError
   > =>
     Effect.gen(function* () {
-      const whereClause = cursor === undefined ? '' : `WHERE seqNum > ${cursor}`
-      const sql = `SELECT * FROM ${dbName} ${whereClause} ORDER BY seqNum ASC`
-      // TODO handle case where `cursor` was not found
-      const rawEvents = yield* execDb((db) => db.prepare(sql).all())
-      const events = Schema.decodeUnknownSync(Schema.Array(eventlogTable.rowSchema))(rawEvents).map(
-        ({ createdAt, ...eventEncoded }) => ({
-          eventEncoded,
-          metadata: Option.some(SyncMetadata.make({ createdAt })),
-        }),
-      )
-      return events
+      const countStatement =
+        cursor === undefined
+          ? `SELECT COUNT(*) as total FROM ${dbName}`
+          : `SELECT COUNT(*) as total FROM ${dbName} WHERE seqNum > ?`
+
+      const countRows = yield* execDb<{ total: number }>((db) => {
+        const prepared = db.prepare(countStatement)
+        return cursor === undefined ? prepared.all() : prepared.bind(cursor).all()
+      })
+
+      const total = Number(countRows[0]?.total ?? 0)
+
+      type State = { cursor: number | undefined; limit: number }
+      type EmittedEvent = { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> }
+
+      const initialState: State = { cursor, limit: D1_INITIAL_PAGE_SIZE }
+
+      const fetchPage = (
+        state: State,
+      ): Effect.Effect<Option.Option<readonly [Chunk.Chunk<EmittedEvent>, State]>, UnexpectedError> =>
+        Effect.gen(function* () {
+          const statement =
+            state.cursor === undefined
+              ? `SELECT * FROM ${dbName} ORDER BY seqNum ASC LIMIT ?`
+              : `SELECT * FROM ${dbName} WHERE seqNum > ? ORDER BY seqNum ASC LIMIT ?`
+
+          const rawEvents = yield* execDb((db) => {
+            const prepared = db.prepare(statement)
+            return state.cursor === undefined
+              ? prepared.bind(state.limit).all()
+              : prepared.bind(state.cursor, state.limit).all()
+          })
+
+          if (rawEvents.length === 0) {
+            return Option.none()
+          }
+
+          const encodedSize = textEncoder.encode(JSON.stringify(rawEvents)).byteLength
+
+          if (encodedSize > D1_TARGET_RESPONSE_BYTES && state.limit > D1_MIN_PAGE_SIZE) {
+            const nextLimit = decreaseLimit(state.limit)
+
+            if (nextLimit !== state.limit) {
+              return yield* fetchPage({ cursor: state.cursor, limit: nextLimit })
+            }
+          }
+
+          const decodedRows = Chunk.fromIterable(decodeEventlogRows(rawEvents))
+
+          const eventsChunk = Chunk.map(decodedRows, ({ createdAt, ...eventEncoded }) => ({
+            eventEncoded,
+            metadata: Option.some(SyncMetadata.make({ createdAt })),
+          }))
+
+          const lastSeqNum = Chunk.unsafeLast(decodedRows).seqNum
+          const nextState: State = { cursor: lastSeqNum, limit: computeNextLimit(state.limit, encodedSize) }
+
+          return Option.some([eventsChunk, nextState] as const)
+        })
+
+      const stream = Stream.unfoldChunkEffect(initialState, fetchPage)
+
+      return { total, stream }
     }).pipe(
       UnexpectedError.mapToUnexpectedError,
      Effect.withSpan('@livestore/sync-cf:durable-object:getEvents', { attributes: { dbName, cursor } }),
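getEvents thus changes shape from a fully materialized array to { total, stream }, with the page size adapting between D1_MIN_PAGE_SIZE and D1_INITIAL_PAGE_SIZE according to measured response bytes. A minimal consumption sketch, assuming a SyncStorage instance is in scope (Stream.runForEach is the standard Effect stream consumer):

  import { Effect, Stream } from '@livestore/utils/effect'

  declare const storage: SyncStorage // assumed in scope, typed as above

  const logAllEvents = Effect.gen(function* () {
    // No cursor: stream the full history. `total` is known up front via the COUNT query.
    const { total, stream } = yield* storage.getEvents(undefined)
    yield* Effect.logDebug(`streaming ${total} events`)
    // Pages are fetched lazily by the unfold; only one page is buffered at a time.
    yield* Stream.runForEach(stream, ({ eventEncoded }) => Effect.logDebug(`event ${eventEncoded.seqNum}`))
  })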
package/src/cf-worker/do/transport/do-rpc-server.ts

@@ -1,5 +1,5 @@
 import { InvalidPullError, InvalidPushError } from '@livestore/common'
-import { toDurableObjectHandler } from '@livestore/common-cf'
+import { type CfTypes, toDurableObjectHandler } from '@livestore/common-cf'
 import {
   Effect,
   Headers,
@@ -22,7 +22,9 @@ export interface DoRpcHandlerOptions {
   input: Omit<DoCtxInput, 'from'>
 }

-export const createDoRpcHandler = (options: DoRpcHandlerOptions) =>
+export const createDoRpcHandler = (
+  options: DoRpcHandlerOptions,
+): Effect.Effect<Uint8Array<ArrayBuffer> | CfTypes.ReadableStream> =>
   Effect.gen(this, function* () {
     const { payload, input } = options
     // const { rpcSubscriptions, backendId, doOptions, ctx, env } = yield* DoCtx
package/src/cf-worker/do/ws-chunking.ts (new file)

@@ -0,0 +1,76 @@
+import { Chunk } from '@livestore/utils/effect'
+
+const textEncoder = new TextEncoder()
+
+/**
+ * Configuration describing how to break a chunk into smaller payload-safe chunks.
+ */
+export interface ChunkingOptions<A> {
+  /** Maximum number of items that may appear in any emitted chunk. */
+  readonly maxItems: number
+  /** Maximum encoded byte size allowed for any emitted chunk. */
+  readonly maxBytes: number
+  /**
+   * Callback that produces a JSON-serialisable structure whose byte size should
+   * fit within {@link maxBytes}. This lets callers control framing overhead.
+   */
+  readonly encode: (items: ReadonlyArray<A>) => unknown
+}
+
+/**
+ * Derives a function that splits an input chunk into sub-chunks confined by
+ * both item count and encoded byte size limits. Designed for transports with
+ * strict frame caps (e.g. Cloudflare hibernated WebSockets).
+ */
+export const splitChunkBySize =
+  <A>(options: ChunkingOptions<A>) =>
+  (chunk: Chunk.Chunk<A>): Chunk.Chunk<Chunk.Chunk<A>> => {
+    const maxItems = Math.max(1, options.maxItems)
+    const maxBytes = Math.max(1, options.maxBytes)
+    const encode = options.encode
+
+    const measure = (items: ReadonlyArray<A>) => {
+      const encoded = encode(items)
+      return textEncoder.encode(JSON.stringify(encoded)).byteLength
+    }
+
+    const items = Chunk.toReadonlyArray(chunk)
+    if (items.length === 0) {
+      return Chunk.fromIterable<Chunk.Chunk<A>>([])
+    }
+
+    const result: Array<Chunk.Chunk<A>> = []
+    let current: Array<A> = []
+
+    const flushCurrent = () => {
+      if (current.length > 0) {
+        result.push(Chunk.fromIterable(current))
+        current = []
+      }
+    }
+
+    for (const item of items) {
+      current.push(item)
+      const exceedsLimit = current.length > maxItems || measure(current) > maxBytes
+
+      if (exceedsLimit) {
+        // remove the item we just added and emit the previous chunk if it exists
+        const last = current.pop()!
+        flushCurrent()
+
+        if (last !== undefined) {
+          current = [last]
+          const singleItemTooLarge = measure(current) > maxBytes
+          if (singleItemTooLarge || current.length > maxItems) {
+            // Emit the oversized item on its own; downstream can decide how to handle it.
+            result.push(Chunk.of(last))
+            current = []
+          }
+        }
+      }
+    }
+
+    flushCurrent()
+
+    return Chunk.fromIterable(result)
+  }
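A usage sketch of splitChunkBySize with toy limits (the real caller in push.ts passes MAX_PULL_EVENTS_PER_MESSAGE, MAX_WS_MESSAGE_BYTES, and the PullResponse encoder):

  import { Chunk } from '@livestore/utils/effect'
  import { splitChunkBySize } from './ws-chunking.ts'

  // Strings stand in for encoded events. The encode callback produces the frame
  // that actually goes over the wire, so its framing overhead is measured too.
  const events = Chunk.fromIterable(['a'.repeat(400), 'b'.repeat(400), 'c'.repeat(400)])

  const frames = splitChunkBySize({
    maxItems: 100,
    maxBytes: 1_000, // toy cap: each JSON-encoded frame must stay under this
    encode: (items) => ({ batch: items }),
  })(events)

  // Two frames: ['aaa...', 'bbb...'] and ['ccc...']; adding the third string to
  // the first frame would push its encoded size past 1_000 bytes.
  console.log(Chunk.toReadonlyArray(frames).map(Chunk.toReadonlyArray))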
package/src/cf-worker/shared.ts

@@ -1,6 +1,7 @@
 import type { InvalidPullError, InvalidPushError } from '@livestore/common'
 import type { CfTypes } from '@livestore/common-cf'
-import { Effect, type Option, Schema, UrlParams } from '@livestore/utils/effect'
+import { Effect, Schema, UrlParams } from '@livestore/utils/effect'
+import type { SearchParams } from '../common/mod.ts'
 import { SearchParamsSchema, SyncMessage } from '../common/mod.ts'

 export interface Env {
@@ -48,20 +49,32 @@ export type DurableObjectId = string
  */
 export const PERSISTENCE_FORMAT_VERSION = 7

-export const DEFAULT_SYNC_DURABLE_OBJECT_NAME = 'SYNC_BACKEND_DO'
-
 export const encodeOutgoingMessage = Schema.encodeSync(Schema.parseJson(SyncMessage.BackendToClientMessage))
 export const encodeIncomingMessage = Schema.encodeSync(Schema.parseJson(SyncMessage.ClientToBackendMessage))

-export const getSyncRequestSearchParams = (request: CfTypes.Request): Option.Option<typeof SearchParamsSchema.Type> => {
+/**
+ * Extracts the LiveStore sync search parameters from a request. Returns
+ * `undefined` when the request does not carry valid sync metadata so callers
+ * can fall back to custom routing.
+ */
+export const matchSyncRequest = (request: CfTypes.Request): SearchParams | undefined => {
   const url = new URL(request.url)
   const urlParams = UrlParams.fromInput(url.searchParams)
   const paramsResult = UrlParams.schemaStruct(SearchParamsSchema)(urlParams).pipe(Effect.option, Effect.runSync)

-  return paramsResult
+  if (paramsResult._tag === 'None') {
+    return undefined
+  }
+
+  return paramsResult.value
 }

-export const PULL_CHUNK_SIZE = 100
+export const MAX_PULL_EVENTS_PER_MESSAGE = 100
+
+// Cloudflare hibernated WebSocket frames begin failing just below 1MB. Keep our
+// payloads comfortably beneath that ceiling so we don't rely on implementation
+// quirks of local dev servers.
+export const MAX_WS_MESSAGE_BYTES = 900_000

 // RPC subscription storage (TODO refactor)
 export type RpcSubscription = {
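A routing sketch for the renamed matchSyncRequest helper (illustrative; the handler names are hypothetical stand-ins for the routing that lives in package/src/cf-worker/worker.ts, whose hunks are not included in this excerpt):

  import type { CfTypes } from '@livestore/common-cf'
  import type { SearchParams } from '../common/mod.ts'
  import { matchSyncRequest } from './shared.ts'

  // Hypothetical fetch handler: serve LiveStore sync traffic when the URL
  // carries valid sync search params, otherwise fall through to app routing.
  const handleFetch = (
    request: CfTypes.Request,
    handleSync: (params: SearchParams) => Promise<Response>,
    fallback: (request: CfTypes.Request) => Promise<Response>,
  ): Promise<Response> => {
    const syncParams = matchSyncRequest(request)
    return syncParams === undefined ? fallback(request) : handleSync(syncParams)
  }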