@livestore/sync-cf 0.4.0-dev.1 → 0.4.0-dev.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +60 -0
- package/dist/.tsbuildinfo +1 -1
- package/dist/cf-worker/do/durable-object.d.ts +45 -0
- package/dist/cf-worker/do/durable-object.d.ts.map +1 -0
- package/dist/cf-worker/do/durable-object.js +150 -0
- package/dist/cf-worker/do/durable-object.js.map +1 -0
- package/dist/cf-worker/do/layer.d.ts +34 -0
- package/dist/cf-worker/do/layer.d.ts.map +1 -0
- package/dist/cf-worker/do/layer.js +91 -0
- package/dist/cf-worker/do/layer.js.map +1 -0
- package/dist/cf-worker/do/pull.d.ts +6 -0
- package/dist/cf-worker/do/pull.d.ts.map +1 -0
- package/dist/cf-worker/do/pull.js +47 -0
- package/dist/cf-worker/do/pull.js.map +1 -0
- package/dist/cf-worker/do/push.d.ts +14 -0
- package/dist/cf-worker/do/push.d.ts.map +1 -0
- package/dist/cf-worker/do/push.js +131 -0
- package/dist/cf-worker/do/push.js.map +1 -0
- package/dist/cf-worker/{durable-object.d.ts → do/sqlite.d.ts} +77 -70
- package/dist/cf-worker/do/sqlite.d.ts.map +1 -0
- package/dist/cf-worker/do/sqlite.js +27 -0
- package/dist/cf-worker/do/sqlite.js.map +1 -0
- package/dist/cf-worker/do/sync-storage.d.ts +25 -0
- package/dist/cf-worker/do/sync-storage.d.ts.map +1 -0
- package/dist/cf-worker/do/sync-storage.js +190 -0
- package/dist/cf-worker/do/sync-storage.js.map +1 -0
- package/dist/cf-worker/do/transport/do-rpc-server.d.ts +9 -0
- package/dist/cf-worker/do/transport/do-rpc-server.d.ts.map +1 -0
- package/dist/cf-worker/do/transport/do-rpc-server.js +45 -0
- package/dist/cf-worker/do/transport/do-rpc-server.js.map +1 -0
- package/dist/cf-worker/do/transport/http-rpc-server.d.ts +7 -0
- package/dist/cf-worker/do/transport/http-rpc-server.d.ts.map +1 -0
- package/dist/cf-worker/do/transport/http-rpc-server.js +24 -0
- package/dist/cf-worker/do/transport/http-rpc-server.js.map +1 -0
- package/dist/cf-worker/do/transport/ws-rpc-server.d.ts +4 -0
- package/dist/cf-worker/do/transport/ws-rpc-server.d.ts.map +1 -0
- package/dist/cf-worker/do/transport/ws-rpc-server.js +21 -0
- package/dist/cf-worker/do/transport/ws-rpc-server.js.map +1 -0
- package/dist/cf-worker/mod.d.ts +4 -2
- package/dist/cf-worker/mod.d.ts.map +1 -1
- package/dist/cf-worker/mod.js +3 -2
- package/dist/cf-worker/mod.js.map +1 -1
- package/dist/cf-worker/shared.d.ts +147 -0
- package/dist/cf-worker/shared.d.ts.map +1 -0
- package/dist/cf-worker/shared.js +32 -0
- package/dist/cf-worker/shared.js.map +1 -0
- package/dist/cf-worker/worker.d.ts +45 -45
- package/dist/cf-worker/worker.d.ts.map +1 -1
- package/dist/cf-worker/worker.js +53 -39
- package/dist/cf-worker/worker.js.map +1 -1
- package/dist/client/mod.d.ts +4 -0
- package/dist/client/mod.d.ts.map +1 -0
- package/dist/client/mod.js +4 -0
- package/dist/client/mod.js.map +1 -0
- package/dist/client/transport/do-rpc-client.d.ts +40 -0
- package/dist/client/transport/do-rpc-client.d.ts.map +1 -0
- package/dist/client/transport/do-rpc-client.js +117 -0
- package/dist/client/transport/do-rpc-client.js.map +1 -0
- package/dist/client/transport/http-rpc-client.d.ts +43 -0
- package/dist/client/transport/http-rpc-client.d.ts.map +1 -0
- package/dist/client/transport/http-rpc-client.js +103 -0
- package/dist/client/transport/http-rpc-client.js.map +1 -0
- package/dist/client/transport/ws-rpc-client.d.ts +45 -0
- package/dist/client/transport/ws-rpc-client.d.ts.map +1 -0
- package/dist/client/transport/ws-rpc-client.js +108 -0
- package/dist/client/transport/ws-rpc-client.js.map +1 -0
- package/dist/common/constants.d.ts +7 -0
- package/dist/common/constants.d.ts.map +1 -0
- package/dist/common/constants.js +17 -0
- package/dist/common/constants.js.map +1 -0
- package/dist/common/do-rpc-schema.d.ts +76 -0
- package/dist/common/do-rpc-schema.d.ts.map +1 -0
- package/dist/common/do-rpc-schema.js +48 -0
- package/dist/common/do-rpc-schema.js.map +1 -0
- package/dist/common/http-rpc-schema.d.ts +58 -0
- package/dist/common/http-rpc-schema.d.ts.map +1 -0
- package/dist/common/http-rpc-schema.js +37 -0
- package/dist/common/http-rpc-schema.js.map +1 -0
- package/dist/common/mod.d.ts +8 -1
- package/dist/common/mod.d.ts.map +1 -1
- package/dist/common/mod.js +7 -1
- package/dist/common/mod.js.map +1 -1
- package/dist/common/{ws-message-types.d.ts → sync-message-types.d.ts} +119 -153
- package/dist/common/sync-message-types.d.ts.map +1 -0
- package/dist/common/sync-message-types.js +60 -0
- package/dist/common/sync-message-types.js.map +1 -0
- package/dist/common/ws-rpc-schema.d.ts +55 -0
- package/dist/common/ws-rpc-schema.d.ts.map +1 -0
- package/dist/common/ws-rpc-schema.js +32 -0
- package/dist/common/ws-rpc-schema.js.map +1 -0
- package/package.json +7 -8
- package/src/cf-worker/do/durable-object.ts +237 -0
- package/src/cf-worker/do/layer.ts +128 -0
- package/src/cf-worker/do/pull.ts +77 -0
- package/src/cf-worker/do/push.ts +205 -0
- package/src/cf-worker/do/sqlite.ts +28 -0
- package/src/cf-worker/do/sync-storage.ts +321 -0
- package/src/cf-worker/do/transport/do-rpc-server.ts +84 -0
- package/src/cf-worker/do/transport/http-rpc-server.ts +37 -0
- package/src/cf-worker/do/transport/ws-rpc-server.ts +34 -0
- package/src/cf-worker/mod.ts +4 -2
- package/src/cf-worker/shared.ts +112 -0
- package/src/cf-worker/worker.ts +94 -105
- package/src/client/mod.ts +3 -0
- package/src/client/transport/do-rpc-client.ts +191 -0
- package/src/client/transport/http-rpc-client.ts +225 -0
- package/src/client/transport/ws-rpc-client.ts +202 -0
- package/src/common/constants.ts +18 -0
- package/src/common/do-rpc-schema.ts +54 -0
- package/src/common/http-rpc-schema.ts +40 -0
- package/src/common/mod.ts +10 -1
- package/src/common/sync-message-types.ts +117 -0
- package/src/common/ws-rpc-schema.ts +36 -0
- package/dist/cf-worker/cf-types.d.ts +0 -2
- package/dist/cf-worker/cf-types.d.ts.map +0 -1
- package/dist/cf-worker/cf-types.js +0 -2
- package/dist/cf-worker/cf-types.js.map +0 -1
- package/dist/cf-worker/durable-object.d.ts.map +0 -1
- package/dist/cf-worker/durable-object.js +0 -317
- package/dist/cf-worker/durable-object.js.map +0 -1
- package/dist/common/ws-message-types.d.ts.map +0 -1
- package/dist/common/ws-message-types.js +0 -57
- package/dist/common/ws-message-types.js.map +0 -1
- package/dist/sync-impl/mod.d.ts +0 -2
- package/dist/sync-impl/mod.d.ts.map +0 -1
- package/dist/sync-impl/mod.js +0 -2
- package/dist/sync-impl/mod.js.map +0 -1
- package/dist/sync-impl/ws-impl.d.ts +0 -7
- package/dist/sync-impl/ws-impl.d.ts.map +0 -1
- package/dist/sync-impl/ws-impl.js +0 -175
- package/dist/sync-impl/ws-impl.js.map +0 -1
- package/src/cf-worker/cf-types.ts +0 -12
- package/src/cf-worker/durable-object.ts +0 -478
- package/src/common/ws-message-types.ts +0 -114
- package/src/sync-impl/mod.ts +0 -1
- package/src/sync-impl/ws-impl.ts +0 -274
package/src/cf-worker/do/push.ts
@@ -0,0 +1,205 @@
+import {
+  BackendIdMismatchError,
+  InvalidPushError,
+  ServerAheadError,
+  SyncBackend,
+  UnexpectedError,
+} from '@livestore/common'
+import { splitChunkBySize } from '@livestore/common/sync'
+import { type CfTypes, emitStreamResponse } from '@livestore/common-cf'
+import { Chunk, Effect, Option, type RpcMessage, Schema } from '@livestore/utils/effect'
+import { MAX_PUSH_EVENTS_PER_REQUEST, MAX_WS_MESSAGE_BYTES } from '../../common/constants.ts'
+import { SyncMessage } from '../../common/mod.ts'
+import { type Env, type MakeDurableObjectClassOptions, type StoreId, WebSocketAttachmentSchema } from '../shared.ts'
+import { DoCtx } from './layer.ts'
+
+const encodePullResponse = Schema.encodeSync(SyncMessage.PullResponse)
+type PullBatchItem = SyncMessage.PullResponse['batch'][number]
+
+export const makePush =
+  ({
+    payload,
+    options,
+    storeId,
+    ctx,
+    env,
+  }: {
+    payload: Schema.JsonValue | undefined
+    options: MakeDurableObjectClassOptions | undefined
+    storeId: StoreId
+    ctx: CfTypes.DurableObjectState
+    env: Env
+  }) =>
+  (pushRequest: Omit<SyncMessage.PushRequest, '_tag'>) =>
+    Effect.gen(function* () {
+      // yield* Effect.log(`Pushing ${decodedMessage.batch.length} events`, decodedMessage.batch)
+      const { backendId, storage, currentHeadRef, updateCurrentHead, rpcSubscriptions } = yield* DoCtx
+
+      if (pushRequest.batch.length === 0) {
+        return SyncMessage.PushAck.make({})
+      }
+
+      if (options?.onPush) {
+        yield* Effect.tryAll(() => options.onPush!(pushRequest, { storeId, payload })).pipe(
+          UnexpectedError.mapToUnexpectedError,
+        )
+      }
+
+      if (pushRequest.backendId._tag === 'Some' && pushRequest.backendId.value !== backendId) {
+        return yield* new BackendIdMismatchError({ expected: backendId, received: pushRequest.backendId.value })
+      }
+
+      // This part of the code needs to run sequentially to avoid race conditions
+      const { createdAt } = yield* Effect.gen(function* () {
+        const currentHead = currentHeadRef.current
+        // TODO handle clientId unique conflict
+        // Validate the batch
+        const firstEventParent = pushRequest.batch[0]!.parentSeqNum
+        if (firstEventParent !== currentHead) {
+          // yield* Effect.logDebug('ServerAheadError: backend head mismatch', {
+          //   expectedHead: currentHead,
+          //   providedHead: firstEventParent,
+          //   batchSize: pushRequest.batch.length,
+          //   backendId,
+          // })
+
+          return yield* new ServerAheadError({ minimumExpectedNum: currentHead, providedNum: firstEventParent })
+        }
+
+        const createdAt = new Date().toISOString()
+
+        // TODO possibly model this as a queue in order to speed up subsequent pushes
+        yield* storage.appendEvents(pushRequest.batch, createdAt)
+
+        updateCurrentHead(pushRequest.batch.at(-1)!.seqNum)
+
+        return { createdAt }
+      }).pipe(blockConcurrencyWhile(ctx))
+
+      // Run in background but already return the push ack to the client
+      yield* Effect.gen(function* () {
+        const connectedClients = ctx.getWebSockets()
+
+        // Preparing chunks of responses to make sure we don't exceed the WS message size limit.
+        const responses = yield* Chunk.fromIterable(pushRequest.batch).pipe(
+          splitChunkBySize({
+            maxItems: MAX_PUSH_EVENTS_PER_REQUEST,
+            maxBytes: MAX_WS_MESSAGE_BYTES,
+            encode: (items) =>
+              encodePullResponse(
+                SyncMessage.PullResponse.make({
+                  batch: items.map(
+                    (eventEncoded): PullBatchItem => ({
+                      eventEncoded,
+                      metadata: Option.some(SyncMessage.SyncMetadata.make({ createdAt })),
+                    }),
+                  ),
+                  pageInfo: SyncBackend.pageInfoNoMore,
+                  backendId,
+                }),
+              ),
+          }),
+          Effect.map(
+            Chunk.map((eventsChunk) => {
+              const batchWithMetadata = Chunk.toReadonlyArray(eventsChunk).map((eventEncoded) => ({
+                eventEncoded,
+                metadata: Option.some(SyncMessage.SyncMetadata.make({ createdAt })),
+              }))
+
+              const response = SyncMessage.PullResponse.make({
+                batch: batchWithMetadata,
+                pageInfo: SyncBackend.pageInfoNoMore,
+                backendId,
+              })
+
+              return {
+                response,
+                encoded: Schema.encodeSync(SyncMessage.PullResponse)(response),
+              }
+            }),
+          ),
+        )
+
+        // Dual broadcasting: WebSocket + RPC clients
+
+        // Broadcast to WebSocket clients
+        if (connectedClients.length > 0) {
+          for (const { response, encoded } of responses) {
+            // Only calling once for now.
+            if (options?.onPullRes) {
+              yield* Effect.tryAll(() => options.onPullRes!(response)).pipe(UnexpectedError.mapToUnexpectedError)
+            }
+
+            // NOTE we're also sending the pullRes chunk to the pushing ws client as confirmation
+            for (const conn of connectedClients) {
+              const attachment = Schema.decodeSync(WebSocketAttachmentSchema)(conn.deserializeAttachment())
+
+              // We're doing something a bit "advanced" here as we're directly emitting Effect RPC-compatible
+              // response messages on the Effect RPC-managed websocket connection to the WS client.
+              // For this we need to get the RPC `requestId` from the WebSocket attachment.
+              for (const requestId of attachment.pullRequestIds) {
+                const res: RpcMessage.ResponseChunkEncoded = {
+                  _tag: 'Chunk',
+                  requestId,
+                  values: [encoded],
+                }
+                conn.send(JSON.stringify(res))
+              }
+            }
+          }
+
+          yield* Effect.logDebug(`Broadcasted to ${connectedClients.length} WebSocket clients`)
+        }
+
+        // RPC broadcasting would require reconstructing client stubs from clientIds
+        if (rpcSubscriptions.size > 0) {
+          for (const subscription of rpcSubscriptions.values()) {
+            for (const { encoded } of responses) {
+              yield* emitStreamResponse({
+                callerContext: subscription.callerContext,
+                env,
+                requestId: subscription.requestId,
+                values: [encoded],
+              }).pipe(Effect.tapCauseLogPretty, Effect.exit)
+            }
+          }
+
+          yield* Effect.logDebug(`Broadcasted to ${rpcSubscriptions.size} RPC clients`)
+        }
+      }).pipe(
+        Effect.tapCauseLogPretty,
+        Effect.withSpan('push-rpc-broadcast'),
+        Effect.uninterruptible, // We need to make sure Effect RPC doesn't interrupt this fiber
+        Effect.fork,
+      )
+
+      // We need to yield here to make sure the fork above is kicked off before we let Effect RPC finish the request
+      yield* Effect.yieldNow()
+
+      return SyncMessage.PushAck.make({})
+    }).pipe(
+      Effect.tap(
+        Effect.fn(function* (message) {
+          if (options?.onPushRes) {
+            yield* Effect.tryAll(() => options.onPushRes!(message)).pipe(UnexpectedError.mapToUnexpectedError)
+          }
+        }),
+      ),
+      Effect.mapError((cause) => InvalidPushError.make({ cause })),
+      Effect.withSpan('sync-cf:do:push', { attributes: { storeId, batchSize: pushRequest.batch.length } }),
+    )
+
+/**
+ * @see https://developers.cloudflare.com/durable-objects/api/state/#blockconcurrencywhile
+ */
+const blockConcurrencyWhile =
+  (ctx: CfTypes.DurableObjectState) =>
+  <A, E, R>(eff: Effect.Effect<A, E, R>) =>
+    Effect.gen(function* () {
+      const runtime = yield* Effect.runtime<R>()
+      const exit = yield* Effect.promise(() =>
+        ctx.blockConcurrencyWhile(() => eff.pipe(Effect.provide(runtime), Effect.runPromiseExit)),
+      )

+      return yield* exit
+    })
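Worth calling out from this hunk: the `blockConcurrencyWhile` helper at the bottom shows how to run an Effect inside a Promise-based Durable Object critical section without losing its environment or error channel. A minimal standalone sketch of the same pattern, assuming only the public `effect` package (the diff itself uses `@livestore/utils/effect`); `gate` is a hypothetical stand-in for `ctx.blockConcurrencyWhile`:

```ts
import { Effect } from 'effect'

// Hypothetical stand-in for `ctx.blockConcurrencyWhile`: any Promise-based critical section.
declare const gate: <T>(fn: () => Promise<T>) => Promise<T>

const inGate = <A, E, R>(eff: Effect.Effect<A, E, R>): Effect.Effect<A, E, R> =>
  Effect.gen(function* () {
    // Capture the caller's runtime so the Effect keeps its environment inside the callback.
    const runtime = yield* Effect.runtime<R>()
    // Run to an Exit so failures survive the Promise boundary instead of rejecting it.
    const exit = yield* Effect.promise(() =>
      gate(() => Effect.runPromiseExit(Effect.provide(eff, runtime))),
    )
    // Rehydrate the Exit back into the Effect world (success, failure, or defect).
    return yield* exit
  })
```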
package/src/cf-worker/do/sqlite.ts
@@ -0,0 +1,28 @@
+import { EventSequenceNumber, State } from '@livestore/common/schema'
+import { Schema } from '@livestore/utils/effect'
+import { PERSISTENCE_FORMAT_VERSION } from '../shared.ts'
+
+export const eventlogTable = State.SQLite.table({
+  // NOTE actual table name is determined at runtime to use proper storeId
+  name: `eventlog_${PERSISTENCE_FORMAT_VERSION}_$storeId`,
+  columns: {
+    seqNum: State.SQLite.integer({ primaryKey: true, schema: EventSequenceNumber.GlobalEventSequenceNumber }),
+    parentSeqNum: State.SQLite.integer({ schema: EventSequenceNumber.GlobalEventSequenceNumber }),
+    name: State.SQLite.text({}),
+    args: State.SQLite.text({ schema: Schema.parseJson(Schema.Any), nullable: true }),
+    /** ISO date format. Currently only used for debugging purposes. */
+    createdAt: State.SQLite.text({}),
+    clientId: State.SQLite.text({}),
+    sessionId: State.SQLite.text({}),
+  },
+})
+
+/** Will only ever have one row per durable object. */
+export const contextTable = State.SQLite.table({
+  name: `context_${PERSISTENCE_FORMAT_VERSION}`,
+  columns: {
+    storeId: State.SQLite.text({ primaryKey: true }),
+    currentHead: State.SQLite.integer({ schema: EventSequenceNumber.GlobalEventSequenceNumber }),
+    backendId: State.SQLite.text({}),
+  },
+})
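As the NOTE in the hunk says, the `$storeId` placeholder in `eventlogTable.name` is resolved at runtime. A hypothetical illustration of that substitution, reusing the `toValidTableName` helper that appears in `sync-storage.ts` later in this diff (the concrete `PERSISTENCE_FORMAT_VERSION` value here is an assumption):

```ts
const PERSISTENCE_FORMAT_VERSION = 2 // assumed value; the real one lives in shared.ts

// Mirrors `toValidTableName` from sync-storage.ts below.
const toValidTableName = (str: string) => str.replaceAll(/[^a-zA-Z0-9]/g, '_')

const eventlogTableName = (storeId: string) =>
  `eventlog_${PERSISTENCE_FORMAT_VERSION}_${toValidTableName(storeId)}`

// eventlogTableName('my-store') === 'eventlog_2_my_store'
```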
package/src/cf-worker/do/sync-storage.ts
@@ -0,0 +1,321 @@
+import { UnexpectedError } from '@livestore/common'
+import type { LiveStoreEvent } from '@livestore/common/schema'
+import type { CfTypes } from '@livestore/common-cf'
+import { Chunk, Effect, Option, Schema, Stream } from '@livestore/utils/effect'
+import { SyncMetadata } from '../../common/sync-message-types.ts'
+import { PERSISTENCE_FORMAT_VERSION, type StoreId } from '../shared.ts'
+import { eventlogTable } from './sqlite.ts'
+
+export type SyncStorage = {
+  dbName: string
+  getEvents: (cursor: number | undefined) => Effect.Effect<
+    {
+      total: number
+      stream: Stream.Stream<
+        { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> },
+        UnexpectedError
+      >
+    },
+    UnexpectedError
+  >
+  appendEvents: (
+    batch: ReadonlyArray<LiveStoreEvent.AnyEncodedGlobal>,
+    createdAt: string,
+  ) => Effect.Effect<void, UnexpectedError>
+  resetStore: Effect.Effect<void, UnexpectedError>
+}
+
+export const makeStorage = (
+  ctx: CfTypes.DurableObjectState,
+  storeId: StoreId,
+  engine: { _tag: 'd1'; db: CfTypes.D1Database } | { _tag: 'do-sqlite' },
+): SyncStorage => {
+  const dbName = `eventlog_${PERSISTENCE_FORMAT_VERSION}_${toValidTableName(storeId)}`
+
+  const execDb = <T>(cb: (db: CfTypes.D1Database) => Promise<CfTypes.D1Result<T>>) =>
+    Effect.tryPromise({
+      try: () => cb(engine._tag === 'd1' ? engine.db : (undefined as never)),
+      catch: (error) => new UnexpectedError({ cause: error, payload: { dbName } }),
+    }).pipe(
+      Effect.map((_) => _.results),
+      Effect.withSpan('@livestore/sync-cf:durable-object:execDb'),
+    )
+
+  // Cloudflare's D1 HTTP endpoint rejects JSON responses once they exceed ~1MB.
+  // Keep individual SELECT batches comfortably below that threshold so we can
+  // serve large histories without tripping the limit.
+  const D1_MAX_JSON_RESPONSE_BYTES = 1_000_000
+  const D1_RESPONSE_SAFETY_MARGIN_BYTES = 64 * 1024
+  const D1_TARGET_RESPONSE_BYTES = D1_MAX_JSON_RESPONSE_BYTES - D1_RESPONSE_SAFETY_MARGIN_BYTES
+  const D1_INITIAL_PAGE_SIZE = 256
+  const D1_MIN_PAGE_SIZE = 1
+
+  const decodeEventlogRows = Schema.decodeUnknownSync(Schema.Array(eventlogTable.rowSchema))
+  const textEncoder = new TextEncoder()
+
+  const decreaseLimit = (limit: number) => Math.max(D1_MIN_PAGE_SIZE, Math.floor(limit / 2))
+  const increaseLimit = (limit: number) => Math.min(D1_INITIAL_PAGE_SIZE, limit * 2)
+
+  const computeNextLimit = (limit: number, encodedSize: number) => {
+    if (encodedSize > D1_TARGET_RESPONSE_BYTES && limit > D1_MIN_PAGE_SIZE) {
+      const next = decreaseLimit(limit)
+      return next === limit ? limit : next
+    }
+
+    if (encodedSize < D1_TARGET_RESPONSE_BYTES / 2 && limit < D1_INITIAL_PAGE_SIZE) {
+      const next = increaseLimit(limit)
+      return next === limit ? limit : next
+    }
+
+    return limit
+  }
+
+  const getEventsD1 = (
+    cursor: number | undefined,
+  ): Effect.Effect<
+    {
+      total: number
+      stream: Stream.Stream<
+        { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> },
+        UnexpectedError
+      >
+    },
+    UnexpectedError
+  > =>
+    Effect.gen(function* () {
+      const countStatement =
+        cursor === undefined
+          ? `SELECT COUNT(*) as total FROM ${dbName}`
+          : `SELECT COUNT(*) as total FROM ${dbName} WHERE seqNum > ?`
+
+      const countRows = yield* execDb<{ total: number }>((db) => {
+        const prepared = db.prepare(countStatement)
+        return cursor === undefined ? prepared.all() : prepared.bind(cursor).all()
+      })
+
+      const total = Number(countRows[0]?.total ?? 0)
+
+      type State = { cursor: number | undefined; limit: number }
+      type EmittedEvent = { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> }
+
+      const initialState: State = { cursor, limit: D1_INITIAL_PAGE_SIZE }
+
+      const fetchPage = (
+        state: State,
+      ): Effect.Effect<Option.Option<readonly [Chunk.Chunk<EmittedEvent>, State]>, UnexpectedError> =>
+        Effect.gen(function* () {
+          const statement =
+            state.cursor === undefined
+              ? `SELECT * FROM ${dbName} ORDER BY seqNum ASC LIMIT ?`
+              : `SELECT * FROM ${dbName} WHERE seqNum > ? ORDER BY seqNum ASC LIMIT ?`
+
+          const rawEvents = yield* execDb((db) => {
+            const prepared = db.prepare(statement)
+            return state.cursor === undefined
+              ? prepared.bind(state.limit).all()
+              : prepared.bind(state.cursor, state.limit).all()
+          })
+
+          if (rawEvents.length === 0) {
+            return Option.none()
+          }
+
+          const encodedSize = textEncoder.encode(JSON.stringify(rawEvents)).byteLength
+
+          if (encodedSize > D1_TARGET_RESPONSE_BYTES && state.limit > D1_MIN_PAGE_SIZE) {
+            const nextLimit = decreaseLimit(state.limit)
+
+            if (nextLimit !== state.limit) {
+              return yield* fetchPage({ cursor: state.cursor, limit: nextLimit })
+            }
+          }
+
+          const decodedRows = Chunk.fromIterable(decodeEventlogRows(rawEvents))
+
+          const eventsChunk = Chunk.map(decodedRows, ({ createdAt, ...eventEncoded }) => ({
+            eventEncoded,
+            metadata: Option.some(SyncMetadata.make({ createdAt })),
+          }))
+
+          const lastSeqNum = Chunk.unsafeLast(decodedRows).seqNum
+          const nextState: State = { cursor: lastSeqNum, limit: computeNextLimit(state.limit, encodedSize) }
+
+          return Option.some([eventsChunk, nextState] as const)
+        })
+
+      const stream = Stream.unfoldChunkEffect(initialState, fetchPage)
+
+      return { total, stream }
+    }).pipe(
+      UnexpectedError.mapToUnexpectedError,
+      Effect.withSpan('@livestore/sync-cf:durable-object:getEvents', {
+        attributes: { dbName, cursor, engine: engine._tag },
+      }),
+    )
+
+  const appendEventsD1: SyncStorage['appendEvents'] = (batch, createdAt) =>
+    Effect.gen(function* () {
+      // If there are no events, do nothing.
+      if (batch.length === 0) return
+
+      // CF D1 limits:
+      // Maximum bound parameters per query 100, Maximum arguments per SQL function 32
+      // Thus we need to split the batch into chunks of max (100/7=)14 events each.
+      const CHUNK_SIZE = 14
+
+      for (let i = 0; i < batch.length; i += CHUNK_SIZE) {
+        const chunk = batch.slice(i, i + CHUNK_SIZE)
+
+        // Create a list of placeholders ("(?, ?, ?, ?, ?, ?, ?)"), corresponding to each event.
+        const valuesPlaceholders = chunk.map(() => '(?, ?, ?, ?, ?, ?, ?)').join(', ')
+        const sql = `INSERT INTO ${dbName} (seqNum, parentSeqNum, args, name, createdAt, clientId, sessionId) VALUES ${valuesPlaceholders}`
+        // Flatten the event properties into a parameters array.
+        const params = chunk.flatMap((event) => [
+          event.seqNum,
+          event.parentSeqNum,
+          event.args === undefined ? null : JSON.stringify(event.args),
+          event.name,
+          createdAt,
+          event.clientId,
+          event.sessionId,
+        ])
+
+        yield* execDb((db) =>
+          db
+            .prepare(sql)
+            .bind(...params)
+            .run(),
+        )
+      }
+    }).pipe(
+      UnexpectedError.mapToUnexpectedError,
+      Effect.withSpan('@livestore/sync-cf:durable-object:appendEvents', {
+        attributes: { dbName, batchLength: batch.length, engine: engine._tag },
+      }),
+    )
+
+  const resetStore = Effect.promise(() => ctx.storage.deleteAll()).pipe(
+    UnexpectedError.mapToUnexpectedError,
+    Effect.withSpan('@livestore/sync-cf:durable-object:resetStore'),
+  )
+
+  // DO SQLite engine implementation
+  const getEventsDoSqlite = (
+    cursor: number | undefined,
+  ): Effect.Effect<
+    {
+      total: number
+      stream: Stream.Stream<
+        { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> },
+        UnexpectedError
+      >
+    },
+    UnexpectedError
+  > =>
+    Effect.gen(function* () {
+      const selectCountSql =
+        cursor === undefined
+          ? `SELECT COUNT(*) as total FROM "${dbName}"`
+          : `SELECT COUNT(*) as total FROM "${dbName}" WHERE seqNum > ?`
+
+      let total = 0
+      try {
+        const cursorIter =
+          cursor === undefined ? ctx.storage.sql.exec(selectCountSql) : ctx.storage.sql.exec(selectCountSql, cursor)
+        for (const row of cursorIter) {
+          total = Number((row as any).total ?? 0)
+        }
+      } catch (error) {
+        return yield* Effect.fail(new UnexpectedError({ cause: error, payload: { dbName, stage: 'count' } }))
+      }
+
+      type State = { cursor: number | undefined }
+      type EmittedEvent = { eventEncoded: LiveStoreEvent.AnyEncodedGlobal; metadata: Option.Option<SyncMetadata> }
+
+      const DO_PAGE_SIZE = 256
+      const initialState: State = { cursor }
+
+      const fetchPage = (
+        state: State,
+      ): Effect.Effect<Option.Option<readonly [Chunk.Chunk<EmittedEvent>, State]>, UnexpectedError> =>
+        Effect.try({
+          try: () => {
+            const sql =
+              state.cursor === undefined
+                ? `SELECT * FROM "${dbName}" ORDER BY seqNum ASC LIMIT ?`
+                : `SELECT * FROM "${dbName}" WHERE seqNum > ? ORDER BY seqNum ASC LIMIT ?`
+
+            const iter =
+              state.cursor === undefined
+                ? ctx.storage.sql.exec(sql, DO_PAGE_SIZE)
+                : ctx.storage.sql.exec(sql, state.cursor, DO_PAGE_SIZE)
+
+            const rows: any[] = []
+            for (const row of iter) rows.push(row)
+
+            if (rows.length === 0) {
+              return Option.none()
+            }
+
+            const decodedRows = Chunk.fromIterable(decodeEventlogRows(rows))
+            const eventsChunk = Chunk.map(decodedRows, ({ createdAt, ...eventEncoded }) => ({
+              eventEncoded,
+              metadata: Option.some(SyncMetadata.make({ createdAt })),
+            }))
+
+            const lastSeqNum = Chunk.unsafeLast(decodedRows).seqNum
+            const nextState: State = { cursor: lastSeqNum }
+
+            return Option.some([eventsChunk, nextState] as const)
+          },
+          catch: (error) => new UnexpectedError({ cause: error, payload: { dbName, stage: 'select' } }),
+        })
+
+      const stream = Stream.unfoldChunkEffect(initialState, fetchPage)
+
+      return { total, stream }
+    }).pipe(
+      UnexpectedError.mapToUnexpectedError,
+      Effect.withSpan('@livestore/sync-cf:durable-object:getEvents', {
+        attributes: { dbName, cursor, engine: engine._tag },
+      }),
+    )
+
+  const appendEventsDoSqlite: SyncStorage['appendEvents'] = (batch, createdAt) =>
+    Effect.try({
+      try: () => {
+        if (batch.length === 0) return
+        // Keep params per statement within conservative limits (align with D1 bound params ~100)
+        const CHUNK_SIZE = 14
+        for (let i = 0; i < batch.length; i += CHUNK_SIZE) {
+          const chunk = batch.slice(i, i + CHUNK_SIZE)
+          const placeholders = chunk.map(() => '(?, ?, ?, ?, ?, ?, ?)').join(', ')
+          const sql = `INSERT INTO "${dbName}" (seqNum, parentSeqNum, args, name, createdAt, clientId, sessionId) VALUES ${placeholders}`
+          const params = chunk.flatMap((event) => [
+            event.seqNum,
+            event.parentSeqNum,
+            event.args === undefined ? null : JSON.stringify(event.args),
+            event.name,
+            createdAt,
+            event.clientId,
+            event.sessionId,
+          ])
+          ctx.storage.sql.exec(sql, ...params)
+        }
+      },
+      catch: (error) => new UnexpectedError({ cause: error, payload: { dbName, stage: 'insert' } }),
+    }).pipe(
+      Effect.withSpan('@livestore/sync-cf:durable-object:appendEvents', {
+        attributes: { dbName, batchLength: batch.length, engine: engine._tag },
+      }),
+      UnexpectedError.mapToUnexpectedError,
+    )
+
+  if (engine._tag === 'd1') {
+    return { dbName, getEvents: getEventsD1, appendEvents: appendEventsD1, resetStore }
+  }
+
+  return { dbName, getEvents: getEventsDoSqlite, appendEvents: appendEventsDoSqlite, resetStore }
+}
+
+const toValidTableName = (str: string) => str.replaceAll(/[^a-zA-Z0-9]/g, '_')
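For orientation, a hedged sketch of how a caller might drain the paged event stream returned by `makeStorage(...).getEvents`; the `ctx` and `db` bindings are assumptions standing in for the Durable Object state and a D1 binding, not part of this diff:

```ts
import type { CfTypes } from '@livestore/common-cf'
import { Chunk, Effect, Stream } from '@livestore/utils/effect'
import type { StoreId } from '../shared.ts'
import { makeStorage } from './sync-storage.ts'

declare const ctx: CfTypes.DurableObjectState // assumed: the DO's state
declare const db: CfTypes.D1Database // assumed: a D1 binding from `env`

const drainHistory = Effect.gen(function* () {
  const storage = makeStorage(ctx, 'my-store' as StoreId, { _tag: 'd1', db })
  // An `undefined` cursor means "from the start of the history".
  const { total, stream } = yield* storage.getEvents(undefined)
  const events = yield* Stream.runCollect(stream)
  yield* Effect.log(`collected ${Chunk.size(events)} of ${total} events`)
})
```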
package/src/cf-worker/do/transport/do-rpc-server.ts
@@ -0,0 +1,84 @@
+import { InvalidPullError, InvalidPushError } from '@livestore/common'
+import { type CfTypes, toDurableObjectHandler } from '@livestore/common-cf'
+import {
+  Effect,
+  Headers,
+  HttpServer,
+  Layer,
+  Logger,
+  LogLevel,
+  Option,
+  RpcSerialization,
+  Stream,
+} from '@livestore/utils/effect'
+import { SyncDoRpc } from '../../../common/do-rpc-schema.ts'
+import { SyncMessage } from '../../../common/mod.ts'
+import { DoCtx, type DoCtxInput } from '../layer.ts'
+import { makeEndingPullStream } from '../pull.ts'
+import { makePush } from '../push.ts'
+
+export interface DoRpcHandlerOptions {
+  payload: Uint8Array<ArrayBuffer>
+  input: Omit<DoCtxInput, 'from'>
+}
+
+export const createDoRpcHandler = (
+  options: DoRpcHandlerOptions,
+): Effect.Effect<Uint8Array<ArrayBuffer> | CfTypes.ReadableStream> =>
+  Effect.gen(this, function* () {
+    const { payload, input } = options
+    // const { rpcSubscriptions, backendId, doOptions, ctx, env } = yield* DoCtx
+
+    // TODO add admin RPCs
+    const RpcLive = SyncDoRpc.toLayer({
+      'SyncDoRpc.Ping': (_req) => {
+        return Effect.succeed(SyncMessage.Pong.make({}))
+      },
+      'SyncDoRpc.Pull': (req, { headers }) =>
+        Effect.gen(this, function* () {
+          const { rpcSubscriptions } = yield* DoCtx
+
+          // TODO rename `req.rpcContext` to something more appropriate
+          if (req.rpcContext) {
+            rpcSubscriptions.set(req.storeId, {
+              storeId: req.storeId,
+              payload: req.payload,
+              subscribedAt: Date.now(),
+              requestId: Headers.get(headers, 'x-rpc-request-id').pipe(Option.getOrThrow),
+              callerContext: req.rpcContext.callerContext,
+            })
+          }
+
+          return makeEndingPullStream(req, req.payload)
+        }).pipe(
+          Stream.unwrap,
+          Stream.map((res) => ({
+            ...res,
+            rpcRequestId: Headers.get(headers, 'x-rpc-request-id').pipe(Option.getOrThrow),
+          })),
+          Stream.provideLayer(DoCtx.Default({ ...input, from: { storeId: req.storeId } })),
+          Stream.mapError((cause) => (cause._tag === 'InvalidPullError' ? cause : InvalidPullError.make({ cause }))),
+          Stream.tapErrorCause(Effect.log),
+        ),
+      'SyncDoRpc.Push': (req) =>
+        Effect.gen(this, function* () {
+          const { doOptions, ctx, env, storeId } = yield* DoCtx
+          const push = makePush({ storeId, payload: req.payload, options: doOptions, ctx, env })
+
+          return yield* push(req)
+        }).pipe(
+          Effect.provide(DoCtx.Default({ ...input, from: { storeId: req.storeId } })),
+          Effect.mapError((cause) => (cause._tag === 'InvalidPushError' ? cause : InvalidPushError.make({ cause }))),
+          Effect.tapCauseLogPretty,
+        ),
+    })
+
+    const handler = toDurableObjectHandler(SyncDoRpc, {
+      layer: Layer.mergeAll(RpcLive, RpcSerialization.layerJson, HttpServer.layerContext).pipe(
+        Layer.provide(Logger.consoleWithThread('SyncDo')),
+        Layer.provide(Logger.minimumLogLevel(LogLevel.Debug)),
+      ),
+    })
+
+    return yield* handler(payload)
+  }).pipe(Effect.withSpan('createDoRpcHandler'))
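A speculative sketch of how `createDoRpcHandler` might be wired into a Durable Object class; the class and method names here are hypothetical, and the actual wiring lives in `durable-object.ts` (also added in this release):

```ts
import { Effect } from '@livestore/utils/effect'
import { createDoRpcHandler, type DoRpcHandlerOptions } from './do-rpc-server.ts'

// Hypothetical DO class; see `durable-object.ts` in this diff for the real one.
export class SyncBackendDO {
  // `input` carries everything `DoCtx.Default` needs except `from` (see `DoCtxInput`).
  constructor(private readonly input: DoRpcHandlerOptions['input']) {}

  // Receives an encoded RPC payload from a client stub and returns either a
  // one-shot encoded response or a readable stream of chunks.
  handleRpc(payload: Uint8Array<ArrayBuffer>) {
    return Effect.runPromise(createDoRpcHandler({ payload, input: this.input }))
  }
}
```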
package/src/cf-worker/do/transport/http-rpc-server.ts
@@ -0,0 +1,37 @@
+import type { CfTypes } from '@livestore/common-cf'
+import { Effect, HttpApp, Layer, RpcSerialization, RpcServer } from '@livestore/utils/effect'
+import { SyncHttpRpc } from '../../../common/http-rpc-schema.ts'
+import * as SyncMessage from '../../../common/sync-message-types.ts'
+import { DoCtx } from '../layer.ts'
+import { makeEndingPullStream } from '../pull.ts'
+import { makePush } from '../push.ts'
+
+export const createHttpRpcHandler = ({ request }: { request: CfTypes.Request }) =>
+  Effect.gen(function* () {
+    const handlerLayer = createHttpRpcLayer
+    const httpApp = RpcServer.toHttpApp(SyncHttpRpc).pipe(Effect.provide(handlerLayer))
+    const webHandler = yield* httpApp.pipe(Effect.map(HttpApp.toWebHandler))
+
+    return yield* Effect.promise(
+      () => webHandler(request as TODO as Request) as TODO as Promise<CfTypes.Response>,
+    ).pipe(Effect.timeout(10000))
+  }).pipe(Effect.withSpan('createHttpRpcHandler'))
+
+const createHttpRpcLayer =
+  // TODO implement admin requests
+  SyncHttpRpc.toLayer({
+    'SyncHttpRpc.Pull': (req) => makeEndingPullStream(req, req.payload),
+
+    'SyncHttpRpc.Push': (req) =>
+      Effect.gen(function* () {
+        const { ctx, env, doOptions, storeId } = yield* DoCtx
+        const push = makePush({ payload: undefined, options: doOptions, storeId, ctx, env })
+
+        return yield* push(req)
+      }),
+
+    'SyncHttpRpc.Ping': () => Effect.succeed(SyncMessage.Pong.make({})),
+  }).pipe(
+    Layer.provideMerge(RpcServer.layerProtocolHttp({ path: '/http-rpc' })),
+    Layer.provideMerge(RpcSerialization.layerJson),
+  )
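Finally, a hedged sketch of dispatching a request hitting the `/http-rpc` path (the path the layer above registers) to `createHttpRpcHandler`; providing a `DoCtx` layer is assumed here, built via `DoCtx.Default(...)` as shown in the DO RPC hunk above:

```ts
import type { CfTypes } from '@livestore/common-cf'
import { Effect, type Layer } from '@livestore/utils/effect'
import type { DoCtx } from '../layer.ts'
import { createHttpRpcHandler } from './http-rpc-server.ts'

// Assumed: a DoCtx layer built elsewhere via `DoCtx.Default(...)`.
declare const doCtxLayer: Layer.Layer<DoCtx>

// Route an incoming request to the HTTP RPC handler, satisfying its DoCtx requirement.
const handleHttpRpc = (request: CfTypes.Request): Promise<CfTypes.Response> =>
  createHttpRpcHandler({ request }).pipe(Effect.provide(doCtxLayer), Effect.runPromise)
```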