@livestore/adapter-web 0.4.0-dev.22 → 0.4.0-dev.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -5
- package/dist/.tsbuildinfo +1 -1
- package/dist/in-memory/in-memory-adapter.js +3 -3
- package/dist/in-memory/in-memory-adapter.js.map +1 -1
- package/dist/single-tab/single-tab-adapter.d.ts.map +1 -1
- package/dist/single-tab/single-tab-adapter.js +17 -25
- package/dist/single-tab/single-tab-adapter.js.map +1 -1
- package/dist/web-worker/client-session/client-session-devtools.d.ts +1 -1
- package/dist/web-worker/client-session/client-session-devtools.d.ts.map +1 -1
- package/dist/web-worker/client-session/client-session-devtools.js +7 -7
- package/dist/web-worker/client-session/client-session-devtools.js.map +1 -1
- package/dist/web-worker/client-session/persisted-adapter.d.ts.map +1 -1
- package/dist/web-worker/client-session/persisted-adapter.js +25 -33
- package/dist/web-worker/client-session/persisted-adapter.js.map +1 -1
- package/dist/web-worker/client-session/sqlite-loader.d.ts.map +1 -1
- package/dist/web-worker/client-session/sqlite-loader.js +1 -1
- package/dist/web-worker/client-session/sqlite-loader.js.map +1 -1
- package/dist/web-worker/common/persisted-sqlite.d.ts.map +1 -1
- package/dist/web-worker/common/persisted-sqlite.js +12 -10
- package/dist/web-worker/common/persisted-sqlite.js.map +1 -1
- package/dist/web-worker/common/worker-schema.d.ts +26 -27
- package/dist/web-worker/common/worker-schema.d.ts.map +1 -1
- package/dist/web-worker/common/worker-schema.js +18 -19
- package/dist/web-worker/common/worker-schema.js.map +1 -1
- package/dist/web-worker/leader-worker/make-leader-worker.d.ts +1 -1
- package/dist/web-worker/leader-worker/make-leader-worker.d.ts.map +1 -1
- package/dist/web-worker/leader-worker/make-leader-worker.js +20 -20
- package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.d.ts.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.js +15 -15
- package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -1
- package/package.json +56 -17
- package/src/in-memory/in-memory-adapter.ts +4 -4
- package/src/single-tab/single-tab-adapter.ts +34 -52
- package/src/web-worker/client-session/client-session-devtools.ts +26 -27
- package/src/web-worker/client-session/persisted-adapter.ts +41 -60
- package/src/web-worker/client-session/sqlite-loader.ts +1 -1
- package/src/web-worker/common/persisted-sqlite.ts +12 -9
- package/src/web-worker/common/worker-schema.ts +19 -18
- package/src/web-worker/leader-worker/make-leader-worker.ts +39 -46
- package/src/web-worker/shared-worker/make-shared-worker.ts +26 -48
package/src/web-worker/common/worker-schema.ts

@@ -1,7 +1,7 @@
 import {
   BootStatus,
   Devtools,
-
+  RejectedPushError,
   liveStoreVersion,
   MigrationsReport,
   SyncBackend,
@@ -50,7 +50,7 @@ export class LeaderWorkerOuterInitialMessage extends Schema.TaggedRequest<Leader
   {
     payload: { port: Transferable.MessagePort, storeId: Schema.String, clientId: Schema.String },
     success: Schema.Void,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
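Note: the recurring `failure: Schema.Never` change in this file declares these requests as having no recoverable failure. A minimal sketch of the pattern, using a made-up `Ping` request (not part of the package):

```ts
import { Schema } from 'effect'

// Hypothetical request with the same shape as the classes in this diff.
// `Schema.Never` leaves the typed failure channel empty, so any runtime
// failure surfaces as a defect rather than a typed error.
class Ping extends Schema.TaggedRequest<Ping>()('Ping', {
  payload: {},
  success: Schema.Void,
  failure: Schema.Never,
}) {}
```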
@@ -78,7 +78,7 @@ export class LeaderWorkerInnerBootStatusStream extends Schema.TaggedRequest<Lead
   {
     payload: {},
     success: BootStatus,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -89,7 +89,7 @@ export class LeaderWorkerInnerPushToLeader extends Schema.TaggedRequest<LeaderWo
       batch: Schema.Array(Schema.typeSchema(LiveStoreEvent.Client.Encoded)),
     },
     success: Schema.Void as Schema.Schema<void>,
-    failure:
+    failure: RejectedPushError,
   },
 ) {}
 
@@ -100,7 +100,7 @@ export class LeaderWorkerInnerPullStream extends Schema.TaggedRequest<LeaderWork
   success: Schema.Struct({
     payload: SyncState.PayloadUpstream,
   }),
-  failure:
+  failure: Schema.Never,
 }) {}
 
 export class LeaderWorkerInnerStreamEvents extends Schema.TaggedRequest<LeaderWorkerInnerStreamEvents>()(
@@ -108,14 +108,14 @@ export class LeaderWorkerInnerStreamEvents extends Schema.TaggedRequest<LeaderWo
   {
     payload: StreamEventsOptionsFields,
     success: LiveStoreEvent.Client.Encoded,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
 export class LeaderWorkerInnerExport extends Schema.TaggedRequest<LeaderWorkerInnerExport>()('Export', {
   payload: {},
   success: Transferable.Uint8Array as Schema.Schema<Uint8Array<ArrayBuffer>>,
-  failure:
+  failure: Schema.Never,
 }) {}
 
 export class LeaderWorkerInnerExportEventlog extends Schema.TaggedRequest<LeaderWorkerInnerExportEventlog>()(
@@ -123,7 +123,7 @@ export class LeaderWorkerInnerExportEventlog extends Schema.TaggedRequest<Leader
   {
     payload: {},
     success: Transferable.Uint8Array as Schema.Schema<Uint8Array<ArrayBuffer>>,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -135,7 +135,7 @@ export class LeaderWorkerInnerGetRecreateSnapshot extends Schema.TaggedRequest<L
       snapshot: Transferable.Uint8Array as Schema.Schema<Uint8Array<ArrayBuffer>>,
       migrationsReport: MigrationsReport,
     }),
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -144,7 +144,7 @@ export class LeaderWorkerInnerGetLeaderHead extends Schema.TaggedRequest<LeaderW
   {
     payload: {},
     success: Schema.typeSchema(EventSequenceNumber.Client.Composite),
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -153,7 +153,7 @@ export class LeaderWorkerInnerGetLeaderSyncState extends Schema.TaggedRequest<Le
   {
     payload: {},
     success: SyncState.SyncState,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -162,7 +162,7 @@ export class LeaderWorkerInnerSyncStateStream extends Schema.TaggedRequest<Leade
   {
     payload: {},
     success: SyncState.SyncState,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -171,7 +171,7 @@ export class LeaderWorkerInnerGetNetworkStatus extends Schema.TaggedRequest<Lead
   {
     payload: {},
     success: SyncBackend.NetworkStatus,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -180,14 +180,14 @@ export class LeaderWorkerInnerNetworkStatusStream extends Schema.TaggedRequest<L
   {
     payload: {},
     success: SyncBackend.NetworkStatus,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
 export class LeaderWorkerInnerShutdown extends Schema.TaggedRequest<LeaderWorkerInnerShutdown>()('Shutdown', {
   payload: {},
   success: Schema.Void,
-  failure:
+  failure: Schema.Never,
 }) {}
 
 export class LeaderWorkerInnerExtraDevtoolsMessage extends Schema.TaggedRequest<LeaderWorkerInnerExtraDevtoolsMessage>()(
@@ -197,7 +197,7 @@ export class LeaderWorkerInnerExtraDevtoolsMessage extends Schema.TaggedRequest<
       message: Devtools.Leader.MessageToApp,
     },
     success: Schema.Void,
-    failure:
+    failure: Schema.Never,
   },
 ) {}
 
@@ -241,7 +241,7 @@ export class SharedWorkerUpdateMessagePort extends Schema.TaggedRequest<SharedWo
   },
 ) {}
 
-export
+export const SharedWorkerRequest = Schema.Union(
   SharedWorkerUpdateMessagePort,
 
   // Proxied requests
@@ -261,4 +261,5 @@ export class SharedWorkerRequest extends Schema.Union(
   LeaderWorkerInnerExtraDevtoolsMessage,
 
   WebmeshWorker.Schema.CreateConnection,
-)
+)
+export type SharedWorkerRequest = typeof SharedWorkerRequest.Type
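Note: `SharedWorkerRequest` changes from a class extending `Schema.Union(...)` to a plain `const` schema plus a type alias derived via `.Type`. A minimal sketch of the same pattern with made-up members:

```ts
import { Schema } from 'effect'

// A value and a type can share a name in TypeScript, so the alias mirrors
// the schema: `typeof MyRequest.Type` here resolves to `string | number`.
const MyRequest = Schema.Union(Schema.String, Schema.Number)
type MyRequest = typeof MyRequest.Type
```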
package/src/web-worker/leader-worker/make-leader-worker.ts

@@ -1,3 +1,5 @@
+import type * as otel from '@opentelemetry/api'
+
 import type { BootStatus, BootWarningReason, SqliteDb, SyncOptions } from '@livestore/common'
 import { Devtools, LogConfig, UnknownError } from '@livestore/common'
 import type { DevtoolsOptions, StreamEventsOptions } from '@livestore/common/leader-thread'
@@ -28,7 +30,6 @@ import {
   WorkerRunner,
 } from '@livestore/utils/effect'
 import { BrowserWorkerRunner, Opfs, WebError } from '@livestore/utils/effect/browser'
-import type * as otel from '@opentelemetry/api'
 
 import { cleanupOldStateDbFiles, getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
 import { makeShutdownChannel } from '../common/shutdown-channel.ts'
@@ -43,13 +44,13 @@ export type WorkerOptions = {
   }
 } & LogConfig.WithLoggerOptions
 
-if (isDevEnv()) {
+if (isDevEnv() === true) {
   globalThis.__debugLiveStoreUtils = {
     opfs: Opfs.debugUtils,
     blobUrl: (buffer: Uint8Array<ArrayBuffer>) =>
       URL.createObjectURL(new Blob([buffer], { type: 'application/octet-stream' })),
-    runSync: (effect: Effect.Effect<
-    runFork: (effect: Effect.Effect<
+    runSync: <A, E>(effect: Effect.Effect<A, E>) => Effect.runSync(effect),
+    runFork: <A, E>(effect: Effect.Effect<A, E>) => Effect.runFork(effect),
   }
 }
 
@@ -58,7 +59,7 @@ export const makeWorker = (options: WorkerOptions) => {
 }
 
 export const makeWorkerEffect = (options: WorkerOptions) => {
-  const TracingLive = options.otelOptions?.tracer
+  const TracingLive = options.otelOptions?.tracer !== undefined
     ? Layer.unwrapEffect(Effect.map(OtelTracer.make, Layer.setTracer)).pipe(
         Layer.provideMerge(Layer.succeed(OtelTracer.OtelTracer, options.otelOptions.tracer)),
       )
@@ -73,7 +74,7 @@ export const makeWorkerEffect = (options: WorkerOptions) => {
     Effect.tapCauseLogPretty,
     Effect.annotateLogs({ thread: self.name }),
     Effect.provide(runtimeLayer),
-    LS_DEV ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
+    LS_DEV === true ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
     // We're using this custom scheduler to improve op batching behaviour and reduce the overhead
     // of the Effect fiber runtime given we have different tradeoffs on a worker thread.
     // Despite the "message channel" name, it has nothing to do with the `incomingRequestsPort` above.
@@ -118,7 +119,7 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions, syncPayloadSchema }:
     Effect.gen(function* () {
       const sqlite3 = yield* Effect.promise(() => loadSqlite3Wasm())
       const makeSqliteDb = sqliteDbFactory({ sqlite3 })
-      const runtime = yield* Effect.runtime
+      const runtime = yield* Effect.runtime()
 
       // Check OPFS availability and determine storage mode
       const opfsCheck = yield* checkOpfsAvailability
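Note: the `Effect.runtime` fix reflects that it is a function returning an `Effect`, not an `Effect` value. A sketch of capturing a `Runtime` this way; the assumption that the worker uses it to run effects from non-Effect callbacks is mine:

```ts
import { Effect, Runtime } from 'effect'

const program = Effect.gen(function* () {
  // `Effect.runtime()` must be called; yielding it produces the current Runtime.
  const runtime = yield* Effect.runtime()
  // The captured Runtime can launch fibers outside the current Effect context.
  Runtime.runFork(runtime)(Effect.log('forked via captured runtime'))
})
```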
@@ -126,7 +127,7 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions, syncPayloadSchema }:
 
       // Track boot warning to emit later
       let bootWarning: BootStatus | undefined
-      if (
+      if (useOpfs === false) {
         yield* Effect.logWarning(
           '[@livestore/adapter-web:worker] OPFS unavailable, using in-memory storage',
           opfsCheck,
@@ -134,7 +135,7 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions, syncPayloadSchema }:
         bootWarning = { stage: 'warning', ...opfsCheck }
       }
 
-      const opfsDirectory = useOpfs ? yield* sanitizeOpfsDir(storageOptions.directory, storeId) : undefined
+      const opfsDirectory = useOpfs === true ? yield* sanitizeOpfsDir(storageOptions.directory, storeId) : undefined
 
       const makeOpfsDb = (kind: 'state' | 'eventlog') =>
         makeSqliteDb({
@@ -160,7 +161,7 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions, syncPayloadSchema }:
       }).pipe(Effect.acquireRelease((db) => Effect.try(() => db.close()).pipe(Effect.ignoreLogged)))
 
       // Use OPFS if available, otherwise fall back to in-memory
-      const [dbState, dbEventlog] = useOpfs
+      const [dbState, dbEventlog] = useOpfs === true
         ? yield* Effect.all([makeOpfsDb('state'), makeOpfsDb('eventlog')], { concurrency: 2 })
         : yield* Effect.all([makeInMemoryDb(), makeInMemoryDb()], { concurrency: 2 })
 
@@ -199,19 +200,18 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions, syncPayloadSchema }:
         Effect.annotateSpans({ debugInstanceId }),
         Layer.unwrapScoped,
       ),
-      GetRecreateSnapshot: ()
-
-        const workerCtx = yield* LeaderThreadCtx
+      GetRecreateSnapshot: Effect.fn('@livestore/adapter-web:worker:GetRecreateSnapshot')(function* () {
+        const workerCtx = yield* LeaderThreadCtx
 
-
-
-
+        // NOTE we can only return the cached snapshot once as it's transferred (i.e. disposed), so we need to set it to undefined
+        // const cachedSnapshot =
+        //   result._tag === 'Recreate' ? yield* Ref.getAndSet(result.snapshotRef, undefined) : undefined
 
-
+        // return cachedSnapshot ?? workerCtx.db.export()
 
-
-
-
+        const snapshot = workerCtx.dbState.export()
+        return { snapshot, migrationsReport: workerCtx.initialState.migrationsReport }
+      }),
       PullStream: ({ cursor }) =>
         Effect.gen(function* () {
           const { syncProcessor } = yield* LeaderThreadCtx // <- syncState comes from here
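Note: several handlers in this file migrate from `Effect.gen` piped into `Effect.withSpan(...)` to `Effect.fn(...)`, which wraps a generator and uses its string argument as the span name. A minimal sketch with a made-up span name:

```ts
import { Effect } from 'effect'

// Equivalent in spirit to Effect.gen(...).pipe(Effect.withSpan('example:GetHead')),
// but the function is traced under the given name automatically.
const getHead = Effect.fn('example:GetHead')(function* () {
  yield* Effect.logDebug('computing head')
  return 42
})
```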
@@ -245,52 +245,45 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions, syncPayloadSchema }:
         ),
       Export: () =>
         Effect.andThen(LeaderThreadCtx, (_) => _.dbState.export()).pipe(
-          UnknownError.mapToUnknownError,
           Effect.withSpan('@livestore/adapter-web:worker:Export'),
         ),
       ExportEventlog: () =>
         Effect.andThen(LeaderThreadCtx, (_) => _.dbEventlog.export()).pipe(
-          UnknownError.mapToUnknownError,
           Effect.withSpan('@livestore/adapter-web:worker:ExportEventlog'),
         ),
       BootStatusStream: () =>
         Effect.andThen(LeaderThreadCtx, (_) => Stream.fromQueue(_.bootStatusQueue)).pipe(Stream.unwrap),
-      GetLeaderHead: ()
-
-
-
-
-
-
-
-        return yield* workerCtx.syncProcessor.syncState
-      }).pipe(UnknownError.mapToUnknownError, Effect.withSpan('@livestore/adapter-web:worker:GetLeaderSyncState')),
+      GetLeaderHead: Effect.fn('@livestore/adapter-web:worker:GetLeaderHead')(function* () {
+        const workerCtx = yield* LeaderThreadCtx
+        return Eventlog.getClientHeadFromDb(workerCtx.dbEventlog)
+      }),
+      GetLeaderSyncState: Effect.fn('@livestore/adapter-web:worker:GetLeaderSyncState')(function* () {
+        const workerCtx = yield* LeaderThreadCtx
+        return yield* workerCtx.syncProcessor.syncState
+      }),
       SyncStateStream: () =>
         Effect.gen(function* () {
           const workerCtx = yield* LeaderThreadCtx
           return workerCtx.syncProcessor.syncState.changes
         }).pipe(Stream.unwrapScoped),
-      GetNetworkStatus: ()
-
-
-
-      }).pipe(UnknownError.mapToUnknownError, Effect.withSpan('@livestore/adapter-web:worker:GetNetworkStatus')),
+      GetNetworkStatus: Effect.fn('@livestore/adapter-web:worker:GetNetworkStatus')(function* () {
+        const workerCtx = yield* LeaderThreadCtx
+        return yield* workerCtx.networkStatus
+      }),
       NetworkStatusStream: () =>
         Effect.gen(function* () {
          const workerCtx = yield* LeaderThreadCtx
          return workerCtx.networkStatus.changes
        }).pipe(Stream.unwrapScoped),
-      Shutdown: ()
-      Effect.
-        yield* Effect.logDebug('[@livestore/adapter-web:worker] Shutdown')
+      Shutdown: Effect.fn('@livestore/adapter-web:worker:Shutdown')(function* () {
+        yield* Effect.logDebug('[@livestore/adapter-web:worker] Shutdown')
 
-
-
-
+        // Buy some time for Otel to flush
+        // TODO find a cleaner way to do this
+        yield* Effect.sleep(300)
+      }),
       ExtraDevtoolsMessage: ({ message }) =>
         Effect.andThen(LeaderThreadCtx, (_) => _.extraIncomingMessagesQueue.offer(message)).pipe(
-          UnknownError.mapToUnknownError,
           Effect.withSpan('@livestore/adapter-web:worker:ExtraDevtoolsMessage'),
         ),
       'DevtoolsWebCommon.CreateConnection': WebmeshWorker.CreateConnection,
@@ -339,7 +332,7 @@ const checkOpfsAvailability = Effect.gen(function* () {
     Effect.as(undefined),
     Effect.catchAll((error) => {
       const reason: BootWarningReason =
-        Schema.is(WebError.SecurityError)(error) || Schema.is(WebError.NotAllowedError)(error)
+        Schema.is(WebError.SecurityError)(error) === true || Schema.is(WebError.NotAllowedError)(error) === true
           ? 'private-browsing'
           : 'storage-unavailable'
       const message =
package/src/web-worker/shared-worker/make-shared-worker.ts

@@ -1,4 +1,4 @@
-import { Devtools, LogConfig, liveStoreVersion, UnknownError } from '@livestore/common'
+import { Devtools, isWorkerTransportError, LogConfig, liveStoreVersion, UnknownError } from '@livestore/common'
 import * as DevtoolsWeb from '@livestore/devtools-web-common/web-channel'
 import * as WebmeshWorker from '@livestore/devtools-web-common/worker'
 import { isDevEnv, isNotUndefined, LS_DEV } from '@livestore/utils'
@@ -9,7 +9,7 @@ import {
   FetchHttpClient,
   identity,
   Layer,
-
+  Option,
   Ref,
   Schema,
   Scope,
@@ -17,7 +17,6 @@ import {
   SubscriptionRef,
   TaskTracing,
   Worker,
-  WorkerError,
   WorkerRunner,
 } from '@livestore/utils/effect'
 import { BrowserWorker, BrowserWorkerRunner } from '@livestore/utils/effect/browser'
@@ -39,15 +38,16 @@ navigator.locks.request(
   async () => new Promise(() => {}),
 )
 
-if (isDevEnv()) {
+if (isDevEnv() === true) {
   globalThis.__debugLiveStoreUtils = {
     blobUrl: (buffer: Uint8Array<ArrayBuffer>) =>
       URL.createObjectURL(new Blob([buffer], { type: 'application/octet-stream' })),
-    runSync: (effect: Effect.Effect<
-    runFork: (effect: Effect.Effect<
+    runSync: <A, E>(effect: Effect.Effect<A, E>) => Effect.runSync(effect),
+    runFork: <A, E>(effect: Effect.Effect<A, E>) => Effect.runFork(effect),
   }
 }
 
+// @effect-diagnostics-next-line anyUnknownInErrorContext:off -- `SerializedRunner.Handlers` uses `any` in the R channel, propagating as `unknown` in `HandlersContext`
 const makeWorkerRunner = Effect.gen(function* () {
   const leaderWorkerContextSubRef = yield* SubscriptionRef.make<
     | {
@@ -61,17 +61,14 @@ const makeWorkerRunner = Effect.gen(function* () {
     Effect.map((_) => _.worker),
   )
 
-  const forwardRequest = <
-    req:
-  ): Effect.Effect<
-
-    UnknownError | Schema.WithResult.Failure<TReq>,
-    Schema.WithResult.Context<TReq>
-  > =>
-    // Forward the request to the active worker and normalize platform errors into UnknownError.
+  const forwardRequest = <A, I, E, EI, R>(
+    req: WorkerSchema.LeaderWorkerInnerRequest & Schema.WithResult<A, I, E, EI, R>,
+  ): Effect.Effect<A, E, R> =>
+    // Forward the request to the active worker and convert transport errors to defects.
     waitForWorker.pipe(
       // Effect.logBefore(`forwardRequest: ${req._tag}`),
-      Effect.andThen((worker) => worker.executeEffect(req)
+      Effect.andThen((worker) => worker.executeEffect(req)),
+      Effect.catchIf(isWorkerTransportError, (e) => Effect.die(e)),
      // Effect.tap((_) => Effect.log(`forwardRequest: ${req._tag}`, _)),
      // Effect.tapError((cause) => Effect.logError(`forwardRequest err: ${req._tag}`, cause)),
      Effect.interruptible,
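Note: `forwardRequest` now preserves the request's declared failure type and turns transport-level errors into defects via `Effect.catchIf` + `Effect.die`, instead of folding everything into `UnknownError`. A self-contained sketch of that conversion; the `TransportError` class here is made up:

```ts
import { Effect } from 'effect'

class TransportError {
  readonly _tag = 'TransportError'
}

// Errors matching the refinement are rethrown as defects (unrecoverable),
// removing them from the typed error channel; other errors pass through.
const dieOnTransport = <A, E, R>(self: Effect.Effect<A, E | TransportError, R>) =>
  Effect.catchIf(
    self,
    (e): e is TransportError => e instanceof TransportError,
    (e) => Effect.die(e),
  )
```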
@@ -79,33 +76,18 @@ const makeWorkerRunner = Effect.gen(function* () {
       label: `@livestore/adapter-web:shared-worker:forwardRequest:${req._tag}`,
       duration: 500,
       }),
-      Effect.mapError((cause) =>
-        Schema.is(UnknownError)(cause)
-          ? cause
-          : ParseResult.isParseError(cause) || Schema.is(WorkerError.WorkerError)(cause)
-            ? new UnknownError({ cause })
-            : cause,
-      ),
-      Effect.catchAllDefect((cause) => new UnknownError({ cause })),
       Effect.tapCauseLogPretty,
-    )
-      Schema.WithResult.Success<TReq>,
-      UnknownError | Schema.WithResult.Failure<TReq>,
-      Schema.WithResult.Context<TReq>
-    >
+    )
 
-  const forwardRequestStream = <
-    req:
-  ): Stream.Stream<
-    Schema.WithResult.Success<TReq>,
-    UnknownError | Schema.WithResult.Failure<TReq>,
-    Schema.WithResult.Context<TReq>
-  > =>
+  const forwardRequestStream = <A, I, E, EI, R>(
+    req: WorkerSchema.LeaderWorkerInnerRequest & Schema.WithResult<A, I, E, EI, R>,
+  ): Stream.Stream<A, E, R> =>
     Effect.gen(function* () {
       yield* Effect.logDebug(`forwardRequestStream: ${req._tag}`)
       const { worker, scope } = yield* SubscriptionRef.waitUntil(leaderWorkerContextSubRef, isNotUndefined)
-      const stream = worker.execute(req)
-
+      const stream = worker.execute(req).pipe(
+        Stream.refineOrDie((e) => isWorkerTransportError(e) === true ? Option.none() : Option.some(e)),
+      )
      // It seems the request stream is not automatically interrupted when the scope shuts down
      // so we need to manually interrupt it when the scope shuts down
      const shutdownDeferred = yield* Deferred.make<void>()
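Note: `forwardRequestStream` applies the same policy to streams with `Stream.refineOrDie`: errors mapped to `Option.none()` terminate the stream as defects, while `Option.some(e)` keeps them as typed errors. A standalone sketch (the message check is purely illustrative):

```ts
import { Option, Stream } from 'effect'

const refined = Stream.fail(new Error('transport lost')).pipe(
  // Drop "transport" errors into the defect channel; keep the rest typed.
  Stream.refineOrDie((e) => (e.message.includes('transport') ? Option.none() : Option.some(e))),
)
```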
@@ -120,16 +102,10 @@ const makeWorkerRunner = Effect.gen(function* () {
       return Stream.merge(stream, scopeShutdownStream, { haltStrategy: 'either' })
     }).pipe(
       Effect.interruptible,
-      UnknownError.mapToUnknownError,
       Effect.tapCauseLogPretty,
       Stream.unwrap,
       Stream.ensuring(Effect.logDebug(`shutting down stream for ${req._tag}`)),
-
-    ) as Stream.Stream<
-      Schema.WithResult.Success<TReq>,
-      UnknownError | Schema.WithResult.Failure<TReq>,
-      Schema.WithResult.Context<TReq>
-    >
+    )
 
   const resetCurrentWorkerCtx = Effect.gen(function* () {
     const prevWorker = yield* SubscriptionRef.get(leaderWorkerContextSubRef)
@@ -168,6 +144,7 @@ const makeWorkerRunner = Effect.gen(function* () {
   const invariantsRef = yield* Ref.make<Invariants | undefined>(undefined)
   const sameInvariants = Schema.equivalence(InvariantsSchema)
 
+  // @effect-diagnostics-next-line anyUnknownInErrorContext:off -- `SerializedRunner.Handlers` uses `any` in the R channel
   return WorkerRunner.layerSerialized(WorkerSchema.SharedWorkerRequest, {
     // Whenever the client session leader changes (and thus creates a new leader thread), the new client session leader
     // sends a new MessagePort to the shared worker which proxies messages to the new leader thread.
@@ -183,7 +160,7 @@ const makeWorkerRunner = Effect.gen(function* () {
       }
       const prev = yield* Ref.get(invariantsRef)
       // Early return on mismatch to keep happy path linear
-      if (prev !== undefined &&
+      if (prev !== undefined && sameInvariants(prev, invariants) === false) {
         const diff = Schema.debugDiff(InvariantsSchema)(prev, invariants)
         return yield* new UnknownError({
           cause: 'Store invariants changed across leader transitions',
@@ -235,7 +212,6 @@ const makeWorkerRunner = Effect.gen(function* () {
       }).pipe(Effect.tapCauseLogPretty, Scope.extend(scope), Effect.forkIn(scope))
     }).pipe(
       Effect.withSpan('@livestore/adapter-web:shared-worker:updateMessagePort'),
-      UnknownError.mapToUnknownError,
       Effect.tapCauseLogPretty,
     ),
 
@@ -267,6 +243,7 @@ export const makeWorker = (options?: LogConfig.WithLoggerOptions): void => {
     WebmeshWorker.CacheService.layer({ nodeName: DevtoolsWeb.makeNodeName.sharedWorker({ storeId }) }),
   )
 
+  // @effect-diagnostics-next-line anyUnknownInErrorContext:off -- propagated from `makeWorkerRunner`
   makeWorkerRunner.pipe(
     Layer.provide(BrowserWorkerRunner.layer),
     // WorkerRunner.launch,
@@ -275,9 +252,10 @@ export const makeWorker = (options?: LogConfig.WithLoggerOptions): void => {
     Effect.tapCauseLogPretty,
     Effect.annotateLogs({ thread: self.name }),
     Effect.provide(runtimeLayer),
-    LS_DEV ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
+    LS_DEV === true ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
     // TODO remove type-cast (currently needed to silence a tsc bug)
-
+    // @effect-diagnostics-next-line anyUnknownInErrorContext:off -- TSC bug workaround; the cast uses `any` as an intermediate
+    (_) => _ as any as Effect.Effect<void>,
     LogConfig.withLoggerConfig(options, { threadName: self.name }),
     Effect.runFork,
   )
|