@livestore/adapter-web 0.3.1 → 0.3.2-dev.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/in-memory/in-memory-adapter.d.ts +1 -2
- package/dist/in-memory/in-memory-adapter.d.ts.map +1 -1
- package/dist/in-memory/in-memory-adapter.js +6 -5
- package/dist/in-memory/in-memory-adapter.js.map +1 -1
- package/dist/index.d.ts +3 -3
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/dist/opfs-utils.d.ts.map +1 -1
- package/dist/opfs-utils.js.map +1 -1
- package/dist/web-worker/client-session/client-session-devtools.d.ts +1 -1
- package/dist/web-worker/client-session/persisted-adapter.d.ts +2 -2
- package/dist/web-worker/client-session/persisted-adapter.d.ts.map +1 -1
- package/dist/web-worker/client-session/persisted-adapter.js +26 -25
- package/dist/web-worker/client-session/persisted-adapter.js.map +1 -1
- package/dist/web-worker/common/persisted-sqlite.d.ts +1 -1
- package/dist/web-worker/common/persisted-sqlite.js +2 -2
- package/dist/web-worker/common/persisted-sqlite.js.map +1 -1
- package/dist/web-worker/common/shutdown-channel.d.ts +1 -1
- package/dist/web-worker/common/shutdown-channel.d.ts.map +1 -1
- package/dist/web-worker/common/worker-schema.d.ts +183 -191
- package/dist/web-worker/common/worker-schema.d.ts.map +1 -1
- package/dist/web-worker/common/worker-schema.js +121 -148
- package/dist/web-worker/common/worker-schema.js.map +1 -1
- package/dist/web-worker/leader-worker/make-leader-worker.js +6 -6
- package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.js +4 -4
- package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -1
- package/dist/web-worker/vite-dev-polyfill.js +2 -1
- package/dist/web-worker/vite-dev-polyfill.js.map +1 -1
- package/package.json +7 -7
- package/src/in-memory/in-memory-adapter.ts +19 -7
- package/src/index.ts +3 -3
- package/src/opfs-utils.ts +1 -1
- package/src/web-worker/ambient.d.ts +0 -2
- package/src/web-worker/client-session/persisted-adapter.ts +35 -35
- package/src/web-worker/common/persisted-sqlite.ts +3 -3
- package/src/web-worker/common/worker-schema.ts +126 -104
- package/src/web-worker/leader-worker/make-leader-worker.ts +6 -6
- package/src/web-worker/shared-worker/make-shared-worker.ts +10 -10
- package/src/web-worker/vite-dev-polyfill.ts +3 -1

package/src/web-worker/client-session/persisted-adapter.ts

@@ -4,12 +4,13 @@ import {
   liveStoreVersion,
   makeClientSession,
   StoreInterrupted,
+  sessionChangesetMetaTable,
   UnexpectedError,
 } from '@livestore/common'
 // TODO bring back - this currently doesn't work due to https://github.com/vitejs/vite/issues/8427
 // NOTE We're using a non-relative import here for Vite to properly resolve the import during app builds
 // import LiveStoreSharedWorker from '@livestore/adapter-web/internal-shared-worker?sharedworker'
-import { EventSequenceNumber
+import { EventSequenceNumber } from '@livestore/common/schema'
 import { sqliteDbFactory } from '@livestore/sqlite-wasm/browser'
 import { loadSqlite3Wasm } from '@livestore/sqlite-wasm/load-wasm'
 import { isDevEnv, shouldNeverHappen, tryAsFunctionAndNew } from '@livestore/utils'

@@ -31,12 +32,12 @@ import {
 } from '@livestore/utils/effect'
 import { nanoid } from '@livestore/utils/nanoid'

-import * as OpfsUtils from '../../opfs-utils.
-import { readPersistedAppDbFromClientSession, resetPersistedDataFromClientSession } from '../common/persisted-sqlite.
-import { makeShutdownChannel } from '../common/shutdown-channel.
-import { DedicatedWorkerDisconnectBroadcast, makeWorkerDisconnectChannel } from '../common/worker-disconnect-channel.
-import * as WorkerSchema from '../common/worker-schema.
-import { connectWebmeshNodeClientSession } from './client-session-devtools.
+import * as OpfsUtils from '../../opfs-utils.ts'
+import { readPersistedAppDbFromClientSession, resetPersistedDataFromClientSession } from '../common/persisted-sqlite.ts'
+import { makeShutdownChannel } from '../common/shutdown-channel.ts'
+import { DedicatedWorkerDisconnectBroadcast, makeWorkerDisconnectChannel } from '../common/worker-disconnect-channel.ts'
+import * as WorkerSchema from '../common/worker-schema.ts'
+import { connectWebmeshNodeClientSession } from './client-session-devtools.ts'

 // NOTE we're starting to initialize the sqlite wasm binary here to speed things up
 const sqlite3Promise = loadSqlite3Wasm()

@@ -67,7 +68,9 @@ export type WebAdapterOptions = {
    */
  sharedWorker:
    | ((options: { name: string }) => globalThis.SharedWorker)
-    | (new (options: {
+    | (new (options: {
+        name: string
+      }) => globalThis.SharedWorker)
  /**
   * Specifies where to persist data for this adapter
   */

@@ -107,7 +110,7 @@ export type WebAdapterOptions = {
 * @example
 * ```ts
 * import { makePersistedAdapter } from '@livestore/adapter-web'
-* import LiveStoreWorker from './livestore.worker?worker'
+* import LiveStoreWorker from './livestore.worker.ts?worker'
 * import LiveStoreSharedWorker from '@livestore/adapter-web/shared-worker?sharedworker'
 *
 * const adapter = makePersistedAdapter({

@@ -170,15 +173,15 @@ export const makePersistedAdapter =

    const sharedWebWorker = tryAsFunctionAndNew(options.sharedWorker, { name: `livestore-shared-worker-${storeId}` })

-    const sharedWorkerFiber = yield* Worker.makePoolSerialized<typeof WorkerSchema.
+    const sharedWorkerFiber = yield* Worker.makePoolSerialized<typeof WorkerSchema.SharedWorkerRequest.Type>({
      size: 1,
      concurrency: 100,
      initialMessage: () =>
-        new WorkerSchema.
+        new WorkerSchema.SharedWorkerInitialMessage({
          liveStoreVersion,
          payload: {
            _tag: 'FromClientSession',
-            initialMessage: new WorkerSchema.
+            initialMessage: new WorkerSchema.LeaderWorkerInnerInitialMessage({
              storageOptions,
              storeId,
              clientId,

@@ -229,9 +232,8 @@ export const makePersistedAdapter =
    // and adding the `sessionId` to make it easier to debug which session a worker belongs to in logs
    const worker = tryAsFunctionAndNew(options.worker, { name: `livestore-worker-${storeId}-${sessionId}` })

-    yield* Worker.makeSerialized<WorkerSchema.
-      initialMessage: () =>
-        new WorkerSchema.LeaderWorkerOuter.InitialMessage({ port: mc.port1, storeId, clientId }),
+    yield* Worker.makeSerialized<WorkerSchema.LeaderWorkerOuterRequest>({
+      initialMessage: () => new WorkerSchema.LeaderWorkerOuterInitialMessage({ port: mc.port1, storeId, clientId }),
    }).pipe(
      Effect.provide(BrowserWorker.layer(() => worker)),
      UnexpectedError.mapToUnexpectedError,

@@ -245,12 +247,12 @@ export const makePersistedAdapter =

    const sharedWorker = yield* Fiber.join(sharedWorkerFiber)
    yield* sharedWorker
-      .executeEffect(new WorkerSchema.
+      .executeEffect(new WorkerSchema.SharedWorkerUpdateMessagePort({ port: mc.port2 }))
      .pipe(UnexpectedError.mapToUnexpectedError, Effect.tapErrorCause(shutdown))

    yield* Deferred.succeed(waitForSharedWorkerInitialized, undefined)

-    yield* Effect.never
+    return yield* Effect.never
  }).pipe(Effect.withSpan('@livestore/adapter-web:client-session:lock'))

  // TODO take/give up lock when tab becomes active/passive

@@ -273,7 +275,7 @@ export const makePersistedAdapter =
      yield* runLocked.pipe(Effect.interruptible, Effect.tapCauseLogPretty, Effect.forkScoped)
    }

-    const runInWorker = <TReq extends typeof WorkerSchema.
+    const runInWorker = <TReq extends typeof WorkerSchema.SharedWorkerRequest.Type>(
      req: TReq,
    ): TReq extends Schema.WithResult<infer A, infer _I, infer E, infer _EI, infer R>
      ? Effect.Effect<A, UnexpectedError | E, R>

@@ -300,7 +302,7 @@ export const makePersistedAdapter =
      Effect.catchAllDefect((cause) => new UnexpectedError({ cause })),
    ) as any

-    const runInWorkerStream = <TReq extends typeof WorkerSchema.
+    const runInWorkerStream = <TReq extends typeof WorkerSchema.SharedWorkerRequest.Type>(
      req: TReq,
    ): TReq extends Schema.WithResult<infer A, infer _I, infer _E, infer _EI, infer R>
      ? Stream.Stream<A, UnexpectedError, R>

@@ -319,7 +321,7 @@ export const makePersistedAdapter =
      )
    }).pipe(Stream.unwrap) as any

-    const bootStatusFiber = yield* runInWorkerStream(new WorkerSchema.
+    const bootStatusFiber = yield* runInWorkerStream(new WorkerSchema.LeaderWorkerInnerBootStatusStream()).pipe(
      Stream.tap((_) => Queue.offer(bootStatusQueue, _)),
      Stream.runDrain,
      Effect.tapErrorCause((cause) => (Cause.isInterruptedOnly(cause) ? Effect.void : shutdown(cause))),

@@ -338,7 +340,7 @@ export const makePersistedAdapter =
    // re-exporting the db
    const initialResult =
      dataFromFile === undefined
-        ? yield* runInWorker(new WorkerSchema.
+        ? yield* runInWorker(new WorkerSchema.LeaderWorkerInnerGetRecreateSnapshot()).pipe(
            Effect.map(({ snapshot, migrationsReport }) => ({
              _tag: 'from-leader-worker' as const,
              snapshot,

@@ -358,7 +360,7 @@ export const makePersistedAdapter =
    const numberOfTables =
      sqliteDb.select<{ count: number }>(`select count(*) as count from sqlite_master`)[0]?.count ?? 0
    if (numberOfTables === 0) {
-      yield* UnexpectedError.make({
+      return yield* UnexpectedError.make({
        cause: `Encountered empty or corrupted database`,
        payload: { snapshotByteLength: initialResult.snapshot.byteLength, storageOptions: options.storage },
      })

@@ -366,17 +368,15 @@ export const makePersistedAdapter =

    // We're restoring the leader head from the SESSION_CHANGESET_META_TABLE, not from the eventlog db/table
    // in order to avoid exporting/transferring the eventlog db/table, which is important to speed up the fast path.
-    const initialLeaderHeadRes = sqliteDb.select
-      seqNumGlobal
-    }>(
-      `select seqNumGlobal, seqNumClient from ${SystemTables.SESSION_CHANGESET_META_TABLE} order by seqNumGlobal desc, seqNumClient desc limit 1`,
-    )[0]
+    const initialLeaderHeadRes = sqliteDb.select(
+      sessionChangesetMetaTable.select('seqNumClient', 'seqNumGlobal', 'seqNumRebaseGeneration').first(),
+    )

    const initialLeaderHead = initialLeaderHeadRes
      ? EventSequenceNumber.make({
          global: initialLeaderHeadRes.seqNumGlobal,
          client: initialLeaderHeadRes.seqNumClient,
+          rebaseGeneration: initialLeaderHeadRes.seqNumRebaseGeneration,
        })
      : EventSequenceNumber.ROOT

@@ -402,7 +402,7 @@ export const makePersistedAdapter =
    )

    const leaderThread: ClientSession['leaderThread'] = {
-      export: runInWorker(new WorkerSchema.
+      export: runInWorker(new WorkerSchema.LeaderWorkerInnerExport()).pipe(
        Effect.timeout(10_000),
        UnexpectedError.mapToUnexpectedError,
        Effect.withSpan('@livestore/adapter-web:client-session:export'),

@@ -410,9 +410,9 @@ export const makePersistedAdapter =

      events: {
        pull: ({ cursor }) =>
-          runInWorkerStream(new WorkerSchema.
+          runInWorkerStream(new WorkerSchema.LeaderWorkerInnerPullStream({ cursor })).pipe(Stream.orDie),
        push: (batch) =>
-          runInWorker(new WorkerSchema.
+          runInWorker(new WorkerSchema.LeaderWorkerInnerPushToLeader({ batch })).pipe(
            Effect.withSpan('@livestore/adapter-web:client-session:pushToLeader', {
              attributes: { batchSize: batch.length },
            }),

@@ -421,19 +421,19 @@ export const makePersistedAdapter =

      initialState: { leaderHead: initialLeaderHead, migrationsReport },

-      getEventlogData: runInWorker(new WorkerSchema.
+      getEventlogData: runInWorker(new WorkerSchema.LeaderWorkerInnerExportEventlog()).pipe(
        Effect.timeout(10_000),
        UnexpectedError.mapToUnexpectedError,
        Effect.withSpan('@livestore/adapter-web:client-session:getEventlogData'),
      ),

-      getSyncState: runInWorker(new WorkerSchema.
+      getSyncState: runInWorker(new WorkerSchema.LeaderWorkerInnerGetLeaderSyncState()).pipe(
        UnexpectedError.mapToUnexpectedError,
        Effect.withSpan('@livestore/adapter-web:client-session:getLeaderSyncState'),
      ),

      sendDevtoolsMessage: (message) =>
-        runInWorker(new WorkerSchema.
+        runInWorker(new WorkerSchema.LeaderWorkerInnerExtraDevtoolsMessage({ message })).pipe(
          UnexpectedError.mapToUnexpectedError,
          Effect.withSpan('@livestore/adapter-web:client-session:devtoolsMessageForLeader'),
        ),

@@ -500,7 +500,7 @@ const ensureBrowserRequirements = Effect.gen(function* () {
  const validate = (condition: boolean, label: string) =>
    Effect.gen(function* () {
      if (condition) {
-        yield* UnexpectedError.make({
+        return yield* UnexpectedError.make({
          cause: `[@livestore/adapter-web] Browser not supported. The LiveStore web adapter needs '${label}' to work properly`,
        })
      }
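For orientation, the `@example` block in this file now imports the worker with an explicit `.ts` extension. A minimal usage sketch along the lines of that JSDoc example follows; the `storage: { type: 'opfs' }` value is an assumption for illustration, since only the option names (`worker`, `sharedWorker`, `storage`) are visible in this diff.

```ts
// Sketch based on the updated JSDoc example above; the storage shape is an assumption.
import { makePersistedAdapter } from '@livestore/adapter-web'
import LiveStoreSharedWorker from '@livestore/adapter-web/shared-worker?sharedworker'
import LiveStoreWorker from './livestore.worker.ts?worker'

const adapter = makePersistedAdapter({
  worker: LiveStoreWorker,
  sharedWorker: LiveStoreSharedWorker,
  storage: { type: 'opfs' },
})
```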
package/src/web-worker/common/persisted-sqlite.ts

@@ -3,8 +3,8 @@ import type { LiveStoreSchema } from '@livestore/common/schema'
 import { decodeSAHPoolFilename, HEADER_OFFSET_DATA } from '@livestore/sqlite-wasm/browser'
 import { Effect, Schedule, Schema } from '@livestore/utils/effect'

-import * as OpfsUtils from '../../opfs-utils.
-import type * as WorkerSchema from './worker-schema.
+import * as OpfsUtils from '../../opfs-utils.ts'
+import type * as WorkerSchema from './worker-schema.ts'

 export class PersistedSqliteError extends Schema.TaggedError<PersistedSqliteError>()('PersistedSqliteError', {
   cause: Schema.Defect,

@@ -47,7 +47,7 @@ export const readPersistedAppDbFromClientSession = ({

   const fileResults = await Promise.all(files.map(tryGetDbFile))

-  const appDbFileName =
+  const appDbFileName = `/${getStateDbFileName(schema)}`

   const dbFileRes = fileResults.find((_) => _?.fileName === appDbFileName)
   // console.debug('fileResults', fileResults, 'dbFileRes', dbFileRes)
package/src/web-worker/common/worker-schema.ts

@@ -2,7 +2,6 @@ import {
   BootStatus,
   Devtools,
   LeaderAheadError,
-  LeaderPullCursor,
   liveStoreVersion,
   MigrationsReport,
   SyncState,

@@ -44,19 +43,21 @@ export type StorageTypeEncoded = typeof StorageType.Encoded
 export const SyncBackendOptions = Schema.Record({ key: Schema.String, value: Schema.JsonValue })
 export type SyncBackendOptions = Record<string, Schema.JsonValue>

-export
+export class LeaderWorkerOuterInitialMessage extends Schema.TaggedRequest<LeaderWorkerOuterInitialMessage>()(
+  'InitialMessage',
+  {
    payload: { port: Transferable.MessagePort, storeId: Schema.String, clientId: Schema.String },
    success: Schema.Void,
    failure: UnexpectedError,
-  }
+  },
+) {}

-}
+export class LeaderWorkerOuterRequest extends Schema.Union(LeaderWorkerOuterInitialMessage) {}

 // TODO unify this code with schema from node adapter
-export
+export class LeaderWorkerInnerInitialMessage extends Schema.TaggedRequest<LeaderWorkerInnerInitialMessage>()(
+  'InitialMessage',
+  {
    payload: {
      storageOptions: StorageType,
      devtoolsEnabled: Schema.Boolean,

@@ -67,137 +68,158 @@ export namespace LeaderWorkerInner {
    },
    success: Schema.Void,
    failure: UnexpectedError,
-  }
+  },
+) {}

+export class LeaderWorkerInnerBootStatusStream extends Schema.TaggedRequest<LeaderWorkerInnerBootStatusStream>()(
+  'BootStatusStream',
+  {
    payload: {},
    success: BootStatus,
    failure: UnexpectedError,
-  }
+  },
+) {}

+export class LeaderWorkerInnerPushToLeader extends Schema.TaggedRequest<LeaderWorkerInnerPushToLeader>()(
+  'PushToLeader',
+  {
    payload: {
      batch: Schema.Array(LiveStoreEvent.AnyEncoded),
    },
    success: Schema.Void,
    failure: Schema.Union(UnexpectedError, LeaderAheadError),
-  }
+  },
+) {}
+
+export class LeaderWorkerInnerPullStream extends Schema.TaggedRequest<LeaderWorkerInnerPullStream>()('PullStream', {
+  payload: {
+    cursor: EventSequenceNumber.EventSequenceNumber,
+  },
+  success: Schema.Struct({
+    payload: SyncState.PayloadUpstream,
+  }),
+  failure: UnexpectedError,
+}) {}
+
+export class LeaderWorkerInnerExport extends Schema.TaggedRequest<LeaderWorkerInnerExport>()('Export', {
+  payload: {},
+  success: Transferable.Uint8Array,
+  failure: UnexpectedError,
+}) {}
+
+export class LeaderWorkerInnerExportEventlog extends Schema.TaggedRequest<LeaderWorkerInnerExportEventlog>()(
+  'ExportEventlog',
+  {
    payload: {},
    success: Transferable.Uint8Array,
    failure: UnexpectedError,
-  }
+  },
+) {}

+export class LeaderWorkerInnerGetRecreateSnapshot extends Schema.TaggedRequest<LeaderWorkerInnerGetRecreateSnapshot>()(
+  'GetRecreateSnapshot',
+  {
    payload: {},
    success: Schema.Struct({
      snapshot: Transferable.Uint8Array,
      migrationsReport: MigrationsReport,
    }),
    failure: UnexpectedError,
-  }
+  },
+) {}

+export class LeaderWorkerInnerGetLeaderHead extends Schema.TaggedRequest<LeaderWorkerInnerGetLeaderHead>()(
+  'GetLeaderHead',
+  {
    payload: {},
    success: EventSequenceNumber.EventSequenceNumber,
    failure: UnexpectedError,
-  }
+  },
+) {}

+export class LeaderWorkerInnerGetLeaderSyncState extends Schema.TaggedRequest<LeaderWorkerInnerGetLeaderSyncState>()(
+  'GetLeaderSyncState',
+  {
    payload: {},
    success: SyncState.SyncState,
    failure: UnexpectedError,
-  }
+  },
+) {}
+
+export class LeaderWorkerInnerShutdown extends Schema.TaggedRequest<LeaderWorkerInnerShutdown>()('Shutdown', {
+  payload: {},
+  success: Schema.Void,
+  failure: UnexpectedError,
+}) {}
+
+export class LeaderWorkerInnerExtraDevtoolsMessage extends Schema.TaggedRequest<LeaderWorkerInnerExtraDevtoolsMessage>()(
+  'ExtraDevtoolsMessage',
+  {
    payload: {
      message: Devtools.Leader.MessageToApp,
    },
    success: Schema.Void,
    failure: UnexpectedError,
-  }
-export
-    payload: {
+  },
+) {}
+
+export const LeaderWorkerInnerRequest = Schema.Union(
+  LeaderWorkerInnerInitialMessage,
+  LeaderWorkerInnerBootStatusStream,
+  LeaderWorkerInnerPushToLeader,
+  LeaderWorkerInnerPullStream,
+  LeaderWorkerInnerExport,
+  LeaderWorkerInnerExportEventlog,
+  LeaderWorkerInnerGetRecreateSnapshot,
+  LeaderWorkerInnerGetLeaderHead,
+  LeaderWorkerInnerGetLeaderSyncState,
+  LeaderWorkerInnerShutdown,
+  LeaderWorkerInnerExtraDevtoolsMessage,
+  WebmeshWorker.Schema.CreateConnection,
+)
+export type LeaderWorkerInnerRequest = typeof LeaderWorkerInnerRequest.Type
+
+export class SharedWorkerInitialMessagePayloadFromClientSession extends Schema.TaggedStruct('FromClientSession', {
+  initialMessage: LeaderWorkerInnerInitialMessage,
+}) {}
+
+export class SharedWorkerInitialMessage extends Schema.TaggedRequest<SharedWorkerInitialMessage>()('InitialMessage', {
+  payload: {
+    payload: Schema.Union(SharedWorkerInitialMessagePayloadFromClientSession, Schema.TaggedStruct('FromWebBridge', {})),
+    // To guard against scenarios where a client session is already running a newer version of LiveStore
+    // We should probably find a better way to handle those cases once they become more common.
+    liveStoreVersion: Schema.Literal(liveStoreVersion),
+  },
+  success: Schema.Void,
+  failure: UnexpectedError,
+}) {}
+
+export class SharedWorkerUpdateMessagePort extends Schema.TaggedRequest<SharedWorkerUpdateMessagePort>()(
+  'UpdateMessagePort',
+  {
    payload: {
      port: Transferable.MessagePort,
    },
    success: Schema.Void,
    failure: UnexpectedError,
-  }
-}
+  },
+) {}
+
+export class SharedWorkerRequest extends Schema.Union(
+  SharedWorkerInitialMessage,
+  SharedWorkerUpdateMessagePort,
+
+  // Proxied requests
+  LeaderWorkerInnerBootStatusStream,
+  LeaderWorkerInnerPushToLeader,
+  LeaderWorkerInnerPullStream,
+  LeaderWorkerInnerExport,
+  LeaderWorkerInnerGetRecreateSnapshot,
+  LeaderWorkerInnerExportEventlog,
+  LeaderWorkerInnerGetLeaderHead,
+  LeaderWorkerInnerGetLeaderSyncState,
+  LeaderWorkerInnerShutdown,
+  LeaderWorkerInnerExtraDevtoolsMessage,
+
+  WebmeshWorker.Schema.CreateConnection,
+) {}
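The net effect of this worker-schema rewrite is that request classes previously nested in namespaces (the hunk context above shows the removed `export namespace LeaderWorkerInner {`, and an old call site used `WorkerSchema.LeaderWorkerOuter.InitialMessage`) become flat, prefixed classes plus explicit union schemas. A rough before/after sketch at a call site; the exact shape of the old namespaced names beyond `LeaderWorkerOuter.InitialMessage` is inferred, not shown in this diff.

```ts
// Sketch of the call-site change implied by this diff (assumptions noted in comments).
import * as WorkerSchema from '../common/worker-schema.ts'

const { port1 } = new MessageChannel()

// 0.3.1 (namespaced classes — inferred form):
//   new WorkerSchema.LeaderWorkerOuter.InitialMessage({ port: port1, storeId: 'store', clientId: 'client' })

// 0.3.2-dev.1 (flat classes + explicit union schemas, as defined above):
const initialMessage = new WorkerSchema.LeaderWorkerOuterInitialMessage({
  port: port1,
  storeId: 'store',
  clientId: 'client',
})

// The unions are what worker pools and runners are now typed against:
type LeaderRequest = WorkerSchema.LeaderWorkerInnerRequest
type SharedRequest = typeof WorkerSchema.SharedWorkerRequest.Type
```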
package/src/web-worker/leader-worker/make-leader-worker.ts

@@ -25,10 +25,10 @@ import {
 } from '@livestore/utils/effect'
 import type * as otel from '@opentelemetry/api'

-import * as OpfsUtils from '../../opfs-utils.
-import { getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.
-import { makeShutdownChannel } from '../common/shutdown-channel.
-import * as WorkerSchema from '../common/worker-schema.
+import * as OpfsUtils from '../../opfs-utils.ts'
+import { getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
+import { makeShutdownChannel } from '../common/shutdown-channel.ts'
+import * as WorkerSchema from '../common/worker-schema.ts'

 export type WorkerOptions = {
   schema: LiveStoreSchema

@@ -81,7 +81,7 @@ export const makeWorkerEffect = (options: WorkerOptions) => {
 const makeWorkerRunnerOuter = (
   workerOptions: WorkerOptions,
 ): Layer.Layer<never, WorkerError.WorkerError, WorkerRunner.PlatformRunner | HttpClient.HttpClient> =>
-  WorkerRunner.layerSerialized(WorkerSchema.
+  WorkerRunner.layerSerialized(WorkerSchema.LeaderWorkerOuterInitialMessage, {
    // Port coming from client session and forwarded via the shared worker
    InitialMessage: ({ port: incomingRequestsPort, storeId, clientId }) =>
      Effect.gen(function* () {

@@ -102,7 +102,7 @@ const makeWorkerRunnerOuter = (
   })

 const makeWorkerRunnerInner = ({ schema, sync: syncOptions }: WorkerOptions) =>
-  WorkerRunner.layerSerialized(WorkerSchema.
+  WorkerRunner.layerSerialized(WorkerSchema.LeaderWorkerInnerRequest, {
    InitialMessage: ({ storageOptions, storeId, clientId, devtoolsEnabled, debugInstanceId, syncPayload }) =>
      Effect.gen(function* () {
        const sqlite3 = yield* Effect.promise(() => loadSqlite3Wasm())
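As a rough sketch of how the two runner layers in this file relate after the change: the outer runner is typed against the single `LeaderWorkerOuterInitialMessage` request (whose job is to hand over the `MessagePort` forwarded by the shared worker), while the inner runner is typed against the full `LeaderWorkerInnerRequest` union. The `Effect.void` handler below is a placeholder, not the actual implementation, which lives in `makeWorkerRunnerOuter` above.

```ts
// Minimal sketch, assuming only the names visible in this diff; handler body is a stub.
import { Effect, WorkerRunner } from '@livestore/utils/effect'
import * as WorkerSchema from '../common/worker-schema.ts'

// Outer runner: accepts only the port-handover request coming from the shared worker.
const outerRunner = WorkerRunner.layerSerialized(WorkerSchema.LeaderWorkerOuterInitialMessage, {
  // In the real code this forwards `port` to the inner runner; here it is a placeholder.
  InitialMessage: (_req) => Effect.void,
})
```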
package/src/web-worker/shared-worker/make-shared-worker.ts

@@ -25,8 +25,8 @@ import {
   WorkerRunner,
 } from '@livestore/utils/effect'

-import { makeShutdownChannel } from '../common/shutdown-channel.
-import * as WorkerSchema from '../common/worker-schema.
+import { makeShutdownChannel } from '../common/shutdown-channel.ts'
+import * as WorkerSchema from '../common/worker-schema.ts'

 if (isDevEnv()) {
   globalThis.__debugLiveStoreUtils = {

@@ -39,21 +39,21 @@ if (isDevEnv()) {
 const makeWorkerRunner = Effect.gen(function* () {
  const leaderWorkerContextSubRef = yield* SubscriptionRef.make<
    | {
-        worker: Worker.SerializedWorkerPool<WorkerSchema.
+        worker: Worker.SerializedWorkerPool<WorkerSchema.LeaderWorkerInnerRequest>
        scope: Scope.CloseableScope
      }
    | undefined
  >(undefined)

  const initialMessagePayloadDeferredRef = yield* Deferred.make<
-    typeof WorkerSchema.
+    typeof WorkerSchema.SharedWorkerInitialMessagePayloadFromClientSession.Type
  >().pipe(Effect.andThen(Ref.make))

  const waitForWorker = SubscriptionRef.waitUntil(leaderWorkerContextSubRef, isNotUndefined).pipe(
    Effect.map((_) => _.worker),
  )

-  const forwardRequest = <TReq extends WorkerSchema.
+  const forwardRequest = <TReq extends WorkerSchema.LeaderWorkerInnerRequest>(
    req: TReq,
  ): TReq extends Schema.WithResult<infer A, infer _I, infer _E, infer _EI, infer _R>
    ? Effect.Effect<A, UnexpectedError, never>

@@ -79,7 +79,7 @@ const makeWorkerRunner = Effect.gen(function* () {
    Effect.tapCauseLogPretty,
  ) as any

-  const forwardRequestStream = <TReq extends WorkerSchema.
+  const forwardRequestStream = <TReq extends WorkerSchema.LeaderWorkerInnerRequest>(
    req: TReq,
  ): TReq extends Schema.WithResult<infer A, infer _I, infer _E, infer _EI, infer _R>
    ? Stream.Stream<A, UnexpectedError, never>

@@ -133,14 +133,14 @@ const makeWorkerRunner = Effect.gen(function* () {
    yield* Effect.logDebug('reset')

    const initialMessagePayloadDeferred =
-      yield* Deferred.make<typeof WorkerSchema.
+      yield* Deferred.make<typeof WorkerSchema.SharedWorkerInitialMessagePayloadFromClientSession.Type>()
    yield* Ref.set(initialMessagePayloadDeferredRef, initialMessagePayloadDeferred)

    yield* resetCurrentWorkerCtx
    // yield* devtoolsWebBridge.reset
  })

-  return WorkerRunner.layerSerialized(WorkerSchema.
+  return WorkerRunner.layerSerialized(WorkerSchema.SharedWorkerRequest, {
    InitialMessage: (message) =>
      Effect.gen(function* () {
        if (message.payload._tag === 'FromWebBridge') return

@@ -151,7 +151,7 @@ const makeWorkerRunner = Effect.gen(function* () {

        if (deferredAlreadyDone) {
          const previousInitialMessage = yield* Deferred.await(initialMessagePayloadDeferred)
-          const messageSchema = WorkerSchema.
+          const messageSchema = WorkerSchema.LeaderWorkerInnerInitialMessage.pipe(
            Schema.omit('devtoolsEnabled', 'debugInstanceId'),
          )
          const isEqual = Schema.equivalence(messageSchema)

@@ -194,7 +194,7 @@ const makeWorkerRunner = Effect.gen(function* () {

      const workerLayer = yield* Layer.build(BrowserWorker.layer(() => port))

-      const worker = yield* Worker.makePoolSerialized<WorkerSchema.
+      const worker = yield* Worker.makePoolSerialized<WorkerSchema.LeaderWorkerInnerRequest>({
        size: 1,
        concurrency: 100,
        initialMessage: () => initialMessagePayload.initialMessage,
package/src/web-worker/vite-dev-polyfill.ts

@@ -1,9 +1,11 @@
+/// <reference lib="dom" />
+
 // @ts-expect-error TODO remove when Vite does proper treeshaking during dev
 globalThis.$RefreshReg$ = () => {}
 // @ts-expect-error TODO remove when Vite does proper treeshaking during dev
 globalThis.$RefreshSig$ = () => (type: any) => type

-// @ts-
+// @ts-ignore
 globalThis.process = globalThis.process ?? { env: {} }

 globalThis.document = (globalThis as any)?.document ?? {