@livestore/adapter-web 0.0.0-snapshot-a953343ad2d7468c6573bcb5e26f0eab4302078f
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.cjs +6 -0
- package/README.md +12 -0
- package/dist/.tsbuildinfo +1 -0
- package/dist/common/connection.d.ts +7 -0
- package/dist/common/connection.d.ts.map +1 -0
- package/dist/common/connection.js +25 -0
- package/dist/common/connection.js.map +1 -0
- package/dist/devtools-bridge/background-browser-channel.d.ts +9 -0
- package/dist/devtools-bridge/background-browser-channel.d.ts.map +1 -0
- package/dist/devtools-bridge/background-browser-channel.js +31 -0
- package/dist/devtools-bridge/background-browser-channel.js.map +1 -0
- package/dist/devtools-bridge/background-message.d.ts +75 -0
- package/dist/devtools-bridge/background-message.d.ts.map +1 -0
- package/dist/devtools-bridge/background-message.js +53 -0
- package/dist/devtools-bridge/background-message.js.map +1 -0
- package/dist/devtools-bridge/bridge-shared.d.ts +14 -0
- package/dist/devtools-bridge/bridge-shared.d.ts.map +1 -0
- package/dist/devtools-bridge/bridge-shared.js +67 -0
- package/dist/devtools-bridge/bridge-shared.js.map +1 -0
- package/dist/devtools-bridge/browser-extension-bridge.d.ts +3 -0
- package/dist/devtools-bridge/browser-extension-bridge.d.ts.map +1 -0
- package/dist/devtools-bridge/browser-extension-bridge.js +59 -0
- package/dist/devtools-bridge/browser-extension-bridge.js.map +1 -0
- package/dist/devtools-bridge/iframe-message.d.ts +16 -0
- package/dist/devtools-bridge/iframe-message.d.ts.map +1 -0
- package/dist/devtools-bridge/iframe-message.js +11 -0
- package/dist/devtools-bridge/iframe-message.js.map +1 -0
- package/dist/devtools-bridge/index.d.ts +6 -0
- package/dist/devtools-bridge/index.d.ts.map +1 -0
- package/dist/devtools-bridge/index.js +5 -0
- package/dist/devtools-bridge/index.js.map +1 -0
- package/dist/devtools-bridge/web-bridge.d.ts +31 -0
- package/dist/devtools-bridge/web-bridge.d.ts.map +1 -0
- package/dist/devtools-bridge/web-bridge.js +131 -0
- package/dist/devtools-bridge/web-bridge.js.map +1 -0
- package/dist/in-memory/index.d.ts +4 -0
- package/dist/in-memory/index.d.ts.map +1 -0
- package/dist/in-memory/index.js +50 -0
- package/dist/in-memory/index.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/opfs-utils.d.ts +5 -0
- package/dist/opfs-utils.d.ts.map +1 -0
- package/dist/opfs-utils.js +43 -0
- package/dist/opfs-utils.js.map +1 -0
- package/dist/web-worker/client-session/client-session-devtools.d.ts +7 -0
- package/dist/web-worker/client-session/client-session-devtools.d.ts.map +1 -0
- package/dist/web-worker/client-session/client-session-devtools.js +107 -0
- package/dist/web-worker/client-session/client-session-devtools.js.map +1 -0
- package/dist/web-worker/client-session/index.d.ts +41 -0
- package/dist/web-worker/client-session/index.d.ts.map +1 -0
- package/dist/web-worker/client-session/index.js +299 -0
- package/dist/web-worker/client-session/index.js.map +1 -0
- package/dist/web-worker/client-session/trim-batch.d.ts +4 -0
- package/dist/web-worker/client-session/trim-batch.d.ts.map +1 -0
- package/dist/web-worker/client-session/trim-batch.js +13 -0
- package/dist/web-worker/client-session/trim-batch.js.map +1 -0
- package/dist/web-worker/client-session/trim-batch.test.d.ts +2 -0
- package/dist/web-worker/client-session/trim-batch.test.d.ts.map +1 -0
- package/dist/web-worker/client-session/trim-batch.test.js +38 -0
- package/dist/web-worker/client-session/trim-batch.test.js.map +1 -0
- package/dist/web-worker/common/persisted-sqlite.d.ts +23 -0
- package/dist/web-worker/common/persisted-sqlite.d.ts.map +1 -0
- package/dist/web-worker/common/persisted-sqlite.js +92 -0
- package/dist/web-worker/common/persisted-sqlite.js.map +1 -0
- package/dist/web-worker/common/shutdown-channel.d.ts +7 -0
- package/dist/web-worker/common/shutdown-channel.d.ts.map +1 -0
- package/dist/web-worker/common/shutdown-channel.js +7 -0
- package/dist/web-worker/common/shutdown-channel.js.map +1 -0
- package/dist/web-worker/common/worker-schema.d.ts +226 -0
- package/dist/web-worker/common/worker-schema.d.ts.map +1 -0
- package/dist/web-worker/common/worker-schema.js +176 -0
- package/dist/web-worker/common/worker-schema.js.map +1 -0
- package/dist/web-worker/leader-worker/make-leader-worker.d.ts +15 -0
- package/dist/web-worker/leader-worker/make-leader-worker.d.ts.map +1 -0
- package/dist/web-worker/leader-worker/make-leader-worker.js +144 -0
- package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -0
- package/dist/web-worker/shared-worker/make-shared-worker.d.ts +2 -0
- package/dist/web-worker/shared-worker/make-shared-worker.d.ts.map +1 -0
- package/dist/web-worker/shared-worker/make-shared-worker.js +160 -0
- package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -0
- package/dist/web-worker/vite-dev-polyfill.d.ts +2 -0
- package/dist/web-worker/vite-dev-polyfill.d.ts.map +1 -0
- package/dist/web-worker/vite-dev-polyfill.js +37 -0
- package/dist/web-worker/vite-dev-polyfill.js.map +1 -0
- package/package.json +78 -0
- package/src/common/connection.ts +32 -0
- package/src/devtools-bridge/background-browser-channel.ts +57 -0
- package/src/devtools-bridge/background-message.ts +42 -0
- package/src/devtools-bridge/bridge-shared.ts +97 -0
- package/src/devtools-bridge/browser-extension-bridge.ts +64 -0
- package/src/devtools-bridge/iframe-message.ts +9 -0
- package/src/devtools-bridge/index.ts +9 -0
- package/src/devtools-bridge/web-bridge.ts +169 -0
- package/src/in-memory/index.ts +66 -0
- package/src/index.ts +3 -0
- package/src/opfs-utils.ts +61 -0
- package/src/web-worker/ambient.d.ts +37 -0
- package/src/web-worker/client-session/client-session-devtools.ts +167 -0
- package/src/web-worker/client-session/index.ts +537 -0
- package/src/web-worker/client-session/trim-batch.test.ts +48 -0
- package/src/web-worker/client-session/trim-batch.ts +15 -0
- package/src/web-worker/common/persisted-sqlite.ts +136 -0
- package/src/web-worker/common/shutdown-channel.ts +8 -0
- package/src/web-worker/common/worker-schema.ts +206 -0
- package/src/web-worker/leader-worker/make-leader-worker.ts +276 -0
- package/src/web-worker/shared-worker/make-shared-worker.ts +300 -0
- package/src/web-worker/vite-dev-polyfill.ts +36 -0
- package/tsconfig.json +17 -0
@@ -0,0 +1,537 @@
+import type { Adapter, ClientSession, LockStatus, NetworkStatus } from '@livestore/common'
+import { Devtools, IntentionalShutdownCause, UnexpectedError } from '@livestore/common'
+// TODO bring back - this currently doesn't work due to https://github.com/vitejs/vite/issues/8427
+// NOTE We're using a non-relative import here for Vite to properly resolve the import during app builds
+// import LiveStoreSharedWorker from '@livestore/adapter-web/internal-shared-worker?sharedworker'
+import { ShutdownChannel } from '@livestore/common/leader-thread'
+import type { MutationEvent } from '@livestore/common/schema'
+import { EventId, SESSION_CHANGESET_META_TABLE } from '@livestore/common/schema'
+import { makeWebDevtoolsChannel } from '@livestore/devtools-web-common/web-channel'
+import { sqliteDbFactory } from '@livestore/sqlite-wasm/browser'
+import { loadSqlite3Wasm } from '@livestore/sqlite-wasm/load-wasm'
+import { isDevEnv, shouldNeverHappen, tryAsFunctionAndNew } from '@livestore/utils'
+import {
+  BrowserWorker,
+  BucketQueue,
+  Cause,
+  Deferred,
+  Effect,
+  Exit,
+  Fiber,
+  ParseResult,
+  Queue,
+  Schema,
+  Stream,
+  SubscriptionRef,
+  WebLock,
+  Worker,
+  WorkerError,
+} from '@livestore/utils/effect'
+import { nanoid } from '@livestore/utils/nanoid'
+
+import * as OpfsUtils from '../../opfs-utils.js'
+import { readPersistedAppDbFromClientSession, resetPersistedDataFromClientSession } from '../common/persisted-sqlite.js'
+import { makeShutdownChannel } from '../common/shutdown-channel.js'
+import * as WorkerSchema from '../common/worker-schema.js'
+import { bootDevtools } from './client-session-devtools.js'
+import { trimPushBatch } from './trim-batch.js'
+
+// NOTE we're starting to initialize the sqlite wasm binary here to speed things up
+const sqlite3Promise = loadSqlite3Wasm()
+
+if (isDevEnv()) {
+  globalThis.__debugLiveStoreUtils = {
+    opfs: OpfsUtils,
+    runSync: (effect: Effect.Effect<any, any, never>) => Effect.runSync(effect),
+    runFork: (effect: Effect.Effect<any, any, never>) => Effect.runFork(effect),
+  }
+}
+
+export type WebAdapterOptions = {
+  worker: ((options: { name: string }) => globalThis.Worker) | (new (options: { name: string }) => globalThis.Worker)
+  /**
+   * This is mostly an implementation detail and needed to be exposed into app code
+   * due to a current Vite limitation (https://github.com/vitejs/vite/issues/8427).
+   *
+   * In most cases this should look like:
+   * ```ts
+   * import LiveStoreSharedWorker from '@livestore/adapter-web/shared-worker?sharedworker'
+   *
+   * const adapter = makeAdapter({
+   *   sharedWorker: LiveStoreSharedWorker,
+   *   // ...
+   * })
+   * ```
+   */
+  sharedWorker:
+    | ((options: { name: string }) => globalThis.SharedWorker)
+    | (new (options: { name: string }) => globalThis.SharedWorker)
+  /**
+   * Specifies where to persist data for this adapter
+   */
+  storage: WorkerSchema.StorageTypeEncoded
+  /**
+   * Warning: This will reset both the app and mutationlog database.
+   * This should only be used during development.
+   *
+   * @default false
+   */
+  resetPersistence?: boolean
+}
+
+export const makeAdapter =
+  (options: WebAdapterOptions): Adapter =>
+  ({ schema, storeId, devtoolsEnabled, debugInstanceId, bootStatusQueue, shutdown, connectDevtoolsToStore }) =>
+    Effect.gen(function* () {
+      yield* ensureBrowserRequirements
+
+      yield* Queue.offer(bootStatusQueue, { stage: 'loading' })
+
+      const sqlite3 = yield* Effect.promise(() => sqlite3Promise)
+
+      const LIVESTORE_TAB_LOCK = `livestore-tab-lock-${storeId}`
+
+      const storageOptions = yield* Schema.decode(WorkerSchema.StorageType)(options.storage)
+
+      if (options.resetPersistence === true) {
+        yield* resetPersistedDataFromClientSession({ storageOptions, storeId })
+      }
+
+      // Note on fast-path booting:
+      // Instead of waiting for the leader worker to boot and then get a database snapshot from it,
+      // we're here trying to get the snapshot directly from storage
+      // which usually speeds up the boot process by a lot.
+      // We need to be extra careful though to not run into any race conditions or inconsistencies.
+      // TODO also verify persisted data
+      const dataFromFile = yield* readPersistedAppDbFromClientSession({ storageOptions, storeId, schema })
+
+      // The same across all client sessions (i.e. tabs, windows)
+      const clientId = getPersistedId(`clientId:${storeId}`, 'local')
+      // Unique per client session (i.e. tab, window)
+      const sessionId = getPersistedId(`sessionId:${storeId}`, 'session')
+
+      const shutdownChannel = yield* makeShutdownChannel(storeId)
+
+      yield* shutdownChannel.listen.pipe(
+        Stream.flatten(),
+        Stream.filter(Schema.is(IntentionalShutdownCause)),
+        Stream.tap((msg) => shutdown(Cause.fail(msg))),
+        Stream.runDrain,
+        Effect.interruptible,
+        Effect.tapCauseLogPretty,
+        Effect.forkScoped,
+      )
+
+      const sharedWebWorker = tryAsFunctionAndNew(options.sharedWorker, { name: `livestore-shared-worker-${storeId}` })
+
+      const sharedWorkerFiber = yield* Worker.makePoolSerialized<typeof WorkerSchema.SharedWorker.Request.Type>({
+        size: 1,
+        concurrency: 100,
+        initialMessage: () =>
+          new WorkerSchema.SharedWorker.InitialMessage({
+            payload: {
+              _tag: 'FromClientSession',
+              initialMessage: new WorkerSchema.LeaderWorkerInner.InitialMessage({
+                storageOptions,
+                storeId,
+                clientId,
+                devtoolsEnabled,
+                debugInstanceId,
+              }),
+            },
+          }),
+      }).pipe(
+        Effect.provide(BrowserWorker.layer(() => sharedWebWorker)),
+        Effect.tapCauseLogPretty,
+        UnexpectedError.mapToUnexpectedError,
+        Effect.tapErrorCause(shutdown),
+        Effect.withSpan('@livestore/adapter-web:client-session:setupSharedWorker'),
+        Effect.forkScoped,
+      )
+
+      const lockDeferred = yield* Deferred.make<void>()
+      // It's important that we resolve the leader election in a blocking way, so there's always a leader.
+      // Otherwise mutations could end up being dropped.
+      //
+      // Sorry for this pun ...
+      let gotLocky = yield* WebLock.tryGetDeferredLock(lockDeferred, LIVESTORE_TAB_LOCK)
+      const lockStatus = yield* SubscriptionRef.make<LockStatus>(gotLocky ? 'has-lock' : 'no-lock')
+
+      // Ideally we can come up with a simpler implementation that doesn't require this
+      const waitForSharedWorkerInitialized = yield* Deferred.make<void>()
+      if (gotLocky === false) {
+        // Don't need to wait if we're not the leader
+        yield* Deferred.succeed(waitForSharedWorkerInitialized, undefined)
+      }
+
+      const runLocked = Effect.gen(function* () {
+        yield* Effect.logDebug(
+          `[@livestore/adapter-web:client-session] ✅ Got lock '${LIVESTORE_TAB_LOCK}' (sessionId: ${sessionId})`,
+        )
+
+        yield* Effect.addFinalizer(() =>
+          Effect.logDebug(`[@livestore/adapter-web:client-session] Releasing lock for '${LIVESTORE_TAB_LOCK}'`),
+        )
+
+        yield* SubscriptionRef.set(lockStatus, 'has-lock')
+
+        const mc = new MessageChannel()
+
+        // NOTE we're adding the `storeId` to the worker name to make it unique
+        // and adding the `sessionId` to make it easier to debug which session a worker belongs to in logs
+        const worker = tryAsFunctionAndNew(options.worker, { name: `livestore-worker-${storeId}-${sessionId}` })
+
+        yield* Worker.makeSerialized<WorkerSchema.LeaderWorkerOuter.Request>({
+          initialMessage: () =>
+            new WorkerSchema.LeaderWorkerOuter.InitialMessage({ port: mc.port1, storeId, clientId }),
+        }).pipe(
+          Effect.provide(BrowserWorker.layer(() => worker)),
+          UnexpectedError.mapToUnexpectedError,
+          Effect.tapErrorCause(shutdown),
+          Effect.withSpan('@livestore/adapter-web:client-session:setupDedicatedWorker'),
+          Effect.tapCauseLogPretty,
+          Effect.forkScoped,
+        )
+
+        yield* shutdownChannel.send(ShutdownChannel.DedicatedWorkerDisconnectBroadcast.make({}))
+
+        const sharedWorker = yield* Fiber.join(sharedWorkerFiber)
+        yield* sharedWorker
+          .executeEffect(new WorkerSchema.SharedWorker.UpdateMessagePort({ port: mc.port2 }))
+          .pipe(UnexpectedError.mapToUnexpectedError, Effect.tapErrorCause(shutdown))
+
+        yield* Deferred.succeed(waitForSharedWorkerInitialized, undefined)
+
+        yield* Effect.addFinalizer(() =>
+          Effect.gen(function* () {
+            // console.log('[@livestore/adapter-web:client-session] Shutting down leader worker')
+            // We first try to gracefully shutdown the leader worker and then forcefully terminate it
+            // yield* Effect.raceFirst(
+            //   sharedWorker
+            //     .executeEffect(new WorkerSchema.LeaderWorkerInner.Shutdown({}))
+            //     .pipe(Effect.andThen(() => worker.terminate())),
+            //   Effect.sync(() => {
+            //     console.warn(
+            //       '[@livestore/adapter-web:client-session] Worker did not gracefully shutdown in time, terminating it',
+            //     )
+            //     worker.terminate()
+            //   }).pipe(
+            //     // Seems like we still need to wait a bit for the worker to terminate
+            //     // TODO improve this implementation (possibly via another weblock?)
+            //     Effect.delay(1000),
+            //   ),
+            // )
+            // yield* Effect.logDebug('[@livestore/adapter-web:client-session] client-session shutdown. worker terminated')
+          }).pipe(Effect.withSpan('@livestore/adapter-web:client-session:lock:shutdown'), Effect.ignoreLogged),
+        )
+
+        yield* Effect.never
+      }).pipe(Effect.withSpan('@livestore/adapter-web:client-session:lock'))
+
+      // TODO take/give up lock when tab becomes active/passive
+      if (gotLocky === false) {
+        yield* Effect.logDebug(
+          `[@livestore/adapter-web:client-session] ⏳ Waiting for lock '${LIVESTORE_TAB_LOCK}' (sessionId: ${sessionId})`,
+        )
+
+        // TODO find a cleaner implementation for the lock handling as we don't make use of the deferred properly right now
+        yield* WebLock.waitForDeferredLock(lockDeferred, LIVESTORE_TAB_LOCK).pipe(
+          Effect.andThen(() => {
+            gotLocky = true
+            return runLocked
+          }),
+          Effect.interruptible,
+          Effect.tapCauseLogPretty,
+          Effect.forkScoped,
+        )
+      } else {
+        yield* runLocked.pipe(Effect.interruptible, Effect.tapCauseLogPretty, Effect.forkScoped)
+      }
+
+      const runInWorker = <TReq extends typeof WorkerSchema.SharedWorker.Request.Type>(
+        req: TReq,
+      ): TReq extends Schema.WithResult<infer A, infer _I, infer E, infer _EI, infer R>
+        ? Effect.Effect<A, UnexpectedError | E, R>
+        : never =>
+        Fiber.join(sharedWorkerFiber).pipe(
+          // NOTE we need to wait for the shared worker to be initialized before we can send requests to it
+          Effect.tap(() => waitForSharedWorkerInitialized),
+          Effect.flatMap((worker) => worker.executeEffect(req) as any),
+          // NOTE we want to treat worker requests as atomic and therefore not allow them to be interrupted
+          // Interruption usually only happens during leader re-election or store shutdown
+          // Effect.uninterruptible,
+          Effect.logWarnIfTakesLongerThan({
+            label: `@livestore/adapter-web:client-session:runInWorker:${req._tag}`,
+            duration: 2000,
+          }),
+          Effect.withSpan(`@livestore/adapter-web:client-session:runInWorker:${req._tag}`),
+          Effect.mapError((cause) =>
+            Schema.is(UnexpectedError)(cause)
+              ? cause
+              : ParseResult.isParseError(cause) || Schema.is(WorkerError.WorkerError)(cause)
+                ? new UnexpectedError({ cause })
+                : cause,
+          ),
+          Effect.catchAllDefect((cause) => new UnexpectedError({ cause })),
+        ) as any
+
+      const runInWorkerStream = <TReq extends typeof WorkerSchema.SharedWorker.Request.Type>(
+        req: TReq,
+      ): TReq extends Schema.WithResult<infer A, infer _I, infer _E, infer _EI, infer R>
+        ? Stream.Stream<A, UnexpectedError, R>
+        : never =>
+        Effect.gen(function* () {
+          const sharedWorker = yield* Fiber.join(sharedWorkerFiber)
+          return sharedWorker.execute(req as any).pipe(
+            Stream.mapError((cause) =>
+              Schema.is(UnexpectedError)(cause)
+                ? cause
+                : ParseResult.isParseError(cause) || Schema.is(WorkerError.WorkerError)(cause)
+                  ? new UnexpectedError({ cause })
+                  : cause,
+            ),
+            Stream.withSpan(`@livestore/adapter-web:client-session:runInWorkerStream:${req._tag}`),
+          )
+        }).pipe(Stream.unwrap) as any
+
+      const networkStatus = yield* SubscriptionRef.make<NetworkStatus>({
+        isConnected: false,
+        timestampMs: Date.now(),
+        latchClosed: false,
+      })
+
+      yield* runInWorkerStream(new WorkerSchema.LeaderWorkerInner.NetworkStatusStream()).pipe(
+        Stream.tap((_) => SubscriptionRef.set(networkStatus, _)),
+        Stream.runDrain,
+        Effect.forever, // NOTE Whenever the leader changes, we need to re-start the stream
+        Effect.tapErrorCause(shutdown),
+        Effect.interruptible,
+        Effect.tapCauseLogPretty,
+        Effect.forkScoped,
+      )
+
+      const bootStatusFiber = yield* runInWorkerStream(new WorkerSchema.LeaderWorkerInner.BootStatusStream()).pipe(
+        Stream.tap((_) => Queue.offer(bootStatusQueue, _)),
+        Stream.runDrain,
+        Effect.tapErrorCause((cause) => (Cause.isInterruptedOnly(cause) ? Effect.void : shutdown(cause))),
+        Effect.interruptible,
+        Effect.tapCauseLogPretty,
+        Effect.forkScoped,
+      )
+
+      yield* Queue.awaitShutdown(bootStatusQueue).pipe(
+        Effect.andThen(Fiber.interrupt(bootStatusFiber)),
+        Effect.tapCauseLogPretty,
+        Effect.forkScoped,
+      )
+
+      // TODO maybe bring back transferring the initially created in-memory db snapshot instead of
+      // re-exporting the db
+      const initialResult =
+        dataFromFile === undefined
+          ? yield* runInWorker(new WorkerSchema.LeaderWorkerInner.GetRecreateSnapshot()).pipe(
+              Effect.map(({ snapshot, migrationsReport }) => ({
+                _tag: 'from-leader-worker' as const,
+                snapshot,
+                migrationsReport,
+              })),
+            )
+          : { _tag: 'fast-path' as const, snapshot: dataFromFile }
+
+      const migrationsReport =
+        initialResult._tag === 'from-leader-worker' ? initialResult.migrationsReport : { migrations: [] }
+
+      const makeSqliteDb = sqliteDbFactory({ sqlite3 })
+      const sqliteDb = yield* makeSqliteDb({ _tag: 'in-memory' })
+
+      sqliteDb.import(initialResult.snapshot)
+
+      const numberOfTables =
+        sqliteDb.select<{ count: number }>(`select count(*) as count from sqlite_master`)[0]?.count ?? 0
+      if (numberOfTables === 0) {
+        yield* UnexpectedError.make({
+          cause: `Encountered empty or corrupted database`,
+          payload: { snapshotByteLength: initialResult.snapshot.byteLength, storageOptions: options.storage },
+        })
+      }
+
+      // We're restoring the leader head from the SESSION_CHANGESET_META_TABLE, not from the mutationlog db/table
+      // in order to avoid exporting/transferring the mutationlog db/table, which is important to speed up the fast path.
+      const initialLeaderHeadRes = sqliteDb.select<{
+        idGlobal: EventId.GlobalEventId
+        idClient: EventId.ClientEventId
+      }>(
+        `select idGlobal, idClient from ${SESSION_CHANGESET_META_TABLE} order by idGlobal desc, idClient desc limit 1`,
+      )[0]
+
+      const initialLeaderHead = initialLeaderHeadRes
+        ? EventId.make({ global: initialLeaderHeadRes.idGlobal, client: initialLeaderHeadRes.idClient })
+        : EventId.ROOT
+
+      // console.debug('[@livestore/adapter-web:client-session] initialLeaderHead', initialLeaderHead)
+
+      yield* Effect.addFinalizer((ex) =>
+        Effect.gen(function* () {
+          if (
+            Exit.isFailure(ex) &&
+            Exit.isInterrupted(ex) === false &&
+            Schema.is(IntentionalShutdownCause)(Cause.squash(ex.cause)) === false
+          ) {
+            yield* Effect.logError('[@livestore/adapter-web:client-session] client-session shutdown', ex.cause)
+          } else {
+            yield* Effect.logDebug('[@livestore/adapter-web:client-session] client-session shutdown', gotLocky, ex)
+          }
+
+          if (gotLocky) {
+            yield* Deferred.succeed(lockDeferred, undefined)
+          }
+        }).pipe(Effect.tapCauseLogPretty, Effect.orDie),
+      )
+
+      const pushQueue = yield* BucketQueue.make<MutationEvent.AnyEncoded>()
+
+      yield* Effect.gen(function* () {
+        const batch = yield* BucketQueue.takeBetween(pushQueue, 1, 100)
+        // We need to trim "old batches" which can happen during client session rebasing
+        const trimmedBatch = trimPushBatch(batch)
+        yield* runInWorker(new WorkerSchema.LeaderWorkerInner.PushToLeader({ batch: trimmedBatch })).pipe(
+          Effect.withSpan('@livestore/adapter-web:client-session:pushToLeader', {
+            attributes: { batchSize: batch.length },
+          }),
+          // We can ignore the error here because the ClientSessionSyncProcessor will retry after rebasing
+          Effect.ignoreLogged,
+        )
+      }).pipe(Effect.forever, Effect.interruptible, Effect.tapCauseLogPretty, Effect.forkScoped)
+
+      const devtools: ClientSession['devtools'] = devtoolsEnabled
+        ? { enabled: true, pullLatch: yield* Effect.makeLatch(true), pushLatch: yield* Effect.makeLatch(true) }
+        : { enabled: false }
+
+      const clientSession = {
+        sqliteDb,
+        devtools,
+        lockStatus,
+        clientId,
+        sessionId,
+
+        leaderThread: {
+          export: runInWorker(new WorkerSchema.LeaderWorkerInner.Export()).pipe(
+            Effect.timeout(10_000),
+            UnexpectedError.mapToUnexpectedError,
+            Effect.withSpan('@livestore/adapter-web:client-session:export'),
+          ),
+
+          mutations: {
+            pull: runInWorkerStream(new WorkerSchema.LeaderWorkerInner.PullStream({ cursor: initialLeaderHead })).pipe(
+              Stream.orDie,
+            ),
+
+            // NOTE instead of sending the worker message right away, we're batching the events in order to
+            // - maintain a consistent order of events
+            // - improve efficiency by reducing the number of messages
+            push: (batch) => BucketQueue.offerAll(pushQueue, batch),
+          },
+
+          initialState: { leaderHead: initialLeaderHead, migrationsReport },
+
+          getMutationLogData: runInWorker(new WorkerSchema.LeaderWorkerInner.ExportMutationlog()).pipe(
+            Effect.timeout(10_000),
+            UnexpectedError.mapToUnexpectedError,
+            Effect.withSpan('@livestore/adapter-web:client-session:getMutationLogData'),
+          ),
+
+          getSyncState: runInWorker(new WorkerSchema.LeaderWorkerInner.GetLeaderSyncState()).pipe(
+            UnexpectedError.mapToUnexpectedError,
+            Effect.withSpan('@livestore/adapter-web:client-session:getLeaderSyncState'),
+          ),
+
+          networkStatus,
+
+          sendDevtoolsMessage: (message) =>
+            runInWorker(new WorkerSchema.LeaderWorkerInner.ExtraDevtoolsMessage({ message })).pipe(
+              UnexpectedError.mapToUnexpectedError,
+              Effect.withSpan('@livestore/adapter-web:client-session:devtoolsMessageForLeader'),
+            ),
+        },
+
+        shutdown,
+      } satisfies ClientSession
+
+      if (devtoolsEnabled) {
+        // yield* bootDevtools({ client-session, waitForDevtoolsWebBridgePort, connectToDevtools, storeId })
+        yield* Effect.gen(function* () {
+          const sharedWorker = yield* Fiber.join(sharedWorkerFiber)
+
+          yield* bootDevtools({ clientSession, storeId })
+
+          // TODO re-enable browser extension as well
+          const storeDevtoolsChannel = yield* makeWebDevtoolsChannel({
+            nodeName: `client-session-${storeId}-${clientId}-${sessionId}`,
+            target: `devtools`,
+            schema: {
+              listen: Devtools.ClientSession.MessageToApp,
+              send: Devtools.ClientSession.MessageFromApp,
+            },
+            worker: sharedWorker,
+            workerTargetName: 'shared-worker',
+          })
+
+          yield* connectDevtoolsToStore(storeDevtoolsChannel)
+        }).pipe(
+          Effect.withSpan('@livestore/adapter-web:client-session:devtools'),
+          Effect.tapCauseLogPretty,
+          Effect.forkScoped,
+        )
+      }
+
+      return clientSession
+    }).pipe(UnexpectedError.mapToUnexpectedError)
+
+// NOTE for `local` storage we could also use the mutationlog db to store the data
+const getPersistedId = (key: string, storageType: 'session' | 'local') => {
+  const makeId = () => nanoid(5)
+
+  const storage =
+    typeof window === 'undefined'
+      ? undefined
+      : storageType === 'session'
+        ? sessionStorage
+        : storageType === 'local'
+          ? localStorage
+          : shouldNeverHappen(`[@livestore/adapter-web] Invalid storage type: ${storageType}`)
+
+  // in case of a worker, we need the id of the parent window, to keep the id consistent
+  // we also need to handle the case where there are multiple workers being spawned by the same window
+  if (storage === undefined) {
+    return makeId()
+  }
+
+  const fullKey = `livestore:${key}`
+  const storedKey = storage.getItem(fullKey)
+
+  if (storedKey) return storedKey
+
+  const newKey = makeId()
+  storage.setItem(fullKey, newKey)
+
+  return newKey
+}
+
+const ensureBrowserRequirements = Effect.gen(function* () {
+  const validate = (condition: boolean, label: string) =>
+    Effect.gen(function* () {
+      if (condition) {
+        yield* UnexpectedError.make({
+          cause: `[@livestore/adapter-web] Browser not supported. The LiveStore web adapter needs '${label}' to work properly`,
+        })
+      }
+    })
+
+  yield* Effect.all([
+    validate(typeof navigator === 'undefined', 'navigator'),
+    validate(navigator.locks === undefined, 'navigator.locks'),
+    validate(navigator.storage === undefined, 'navigator.storage'),
+    validate(typeof window === 'undefined', 'window'),
+    validate(typeof sessionStorage === 'undefined', 'sessionStorage'),
+  ])
+})
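For orientation, the `WebAdapterOptions` JSDoc above implies roughly the following app-side wiring. This is only a minimal sketch: it assumes the package root re-exports `makeAdapter`, that `WorkerSchema.StorageTypeEncoded` accepts an OPFS variant shaped like `{ type: 'opfs' }`, and that the app bundles its own leader-worker entry via Vite's `?worker` suffix; none of these details are confirmed by this diff, only the shared-worker import path is taken from the JSDoc.

```ts
// Hypothetical app code (Vite). Only the shared-worker import path comes from the JSDoc above;
// the worker entry path and the storage option shape are assumptions.
import { makeAdapter } from '@livestore/adapter-web'
import LiveStoreSharedWorker from '@livestore/adapter-web/shared-worker?sharedworker'
import LiveStoreWorker from './livestore.worker?worker' // hypothetical dedicated leader-worker entry

const adapter = makeAdapter({
  worker: LiveStoreWorker,
  sharedWorker: LiveStoreSharedWorker,
  storage: { type: 'opfs' }, // assumed encoding of WorkerSchema.StorageTypeEncoded
  // resetPersistence: true, // development only: wipes both the app and mutationlog databases
})
```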
@@ -0,0 +1,48 @@
+import type { MutationEvent } from '@livestore/common/schema'
+import { EventId } from '@livestore/common/schema'
+import { describe, expect, it } from 'vitest'
+
+import { trimPushBatch } from './trim-batch.js'
+
+describe('trimPushBatch', () => {
+  it('should return same batch', () => {
+    const batch = [
+      { id: EventId.make({ global: 0, client: 1 }), parentId: EventId.make({ global: 0, client: 0 }) },
+      { id: EventId.make({ global: 0, client: 2 }), parentId: EventId.make({ global: 0, client: 1 }) },
+      { id: EventId.make({ global: 1, client: 0 }), parentId: EventId.make({ global: 0, client: 0 }) },
+      { id: EventId.make({ global: 1, client: 1 }), parentId: EventId.make({ global: 1, client: 0 }) },
+    ] as MutationEvent.AnyEncoded[]
+
+    const trimmed = trimPushBatch(batch)
+
+    expect(trimmed).toEqual(batch)
+  })
+
+  it('should trim the batch', () => {
+    const batch = [
+      { id: EventId.make({ global: 0, client: 1 }), parentId: EventId.make({ global: 0, client: 0 }) },
+      { id: EventId.make({ global: 0, client: 2 }), parentId: EventId.make({ global: 0, client: 1 }) },
+      // should trim above
+      { id: EventId.make({ global: 0, client: 1 }), parentId: EventId.make({ global: 0, client: 0 }) },
+      { id: EventId.make({ global: 0, client: 2 }), parentId: EventId.make({ global: 0, client: 1 }) },
+      { id: EventId.make({ global: 1, client: 0 }), parentId: EventId.make({ global: 0, client: 0 }) },
+      { id: EventId.make({ global: 1, client: 1 }), parentId: EventId.make({ global: 1, client: 0 }) },
+    ] as MutationEvent.AnyEncoded[]
+
+    const trimmed = trimPushBatch(batch)
+
+    expect(trimmed).toEqual(batch.slice(2))
+  })
+
+  it('should trim the batch', () => {
+    const batch = [
+      { id: EventId.make({ global: 0, client: 1 }), parentId: EventId.make({ global: 0, client: 0 }) },
+      // should trim above
+      { id: EventId.make({ global: 0, client: 1 }), parentId: EventId.make({ global: 0, client: 0 }) },
+    ] as MutationEvent.AnyEncoded[]
+
+    const trimmed = trimPushBatch(batch)
+
+    expect(trimmed).toEqual(batch.slice(1))
+  })
+})
@@ -0,0 +1,15 @@
+import type { MutationEvent } from '@livestore/common/schema'
+import { EventId } from '@livestore/common/schema'
+
+/** [(0,1), (0,2), (1,0), (0,1), (0,2), (1,0), (1,1)] -> [(0,1), (0,2), (1,0), (1,1)] */
+export const trimPushBatch = (batch: ReadonlyArray<MutationEvent.AnyEncoded>) => {
+  // Iterate over the batch from the end and stop once we encounter an event whose id is not smaller than the id of the event following it
+  // Then return the suffix of the batch starting right after that event
+  for (let i = batch.length - 2; i >= 0; i--) {
+    if (EventId.isGreaterThanOrEqual(batch[i]!.id, batch[i + 1]!.id)) {
+      return batch.slice(i + 1)
+    }
+  }
+
+  return batch
+}
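Read alongside the doc comment, the trimming behaviour can be illustrated with a small sketch. The `event` helper and the `as any` cast are purely illustrative (the test file above uses the same kind of cast), and the "rebase" framing comes from the push-queue comment in the client-session file.

```ts
// Illustrative only: shows which suffix trimPushBatch keeps, e.g. stale pre-rebase events
// followed by their rebased replacements in the client-session push queue.
import { EventId } from '@livestore/common/schema'
import { trimPushBatch } from './trim-batch.js'

// Hypothetical helper to build minimal events for the example
const event = (global: number, client: number) =>
  ({ id: EventId.make({ global, client }), parentId: EventId.ROOT }) as any

// [(0,1), (0,2), (1,0), (0,1), (0,2), (1,0), (1,1)]
const batch = [event(0, 1), event(0, 2), event(1, 0), event(0, 1), event(0, 2), event(1, 0), event(1, 1)]

// Only the newest monotonically increasing suffix survives: [(0,1), (0,2), (1,0), (1,1)]
console.log(trimPushBatch(batch).map((e) => e.id))
```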