@livestore/adapter-web 0.4.0-dev.1 → 0.4.0-dev.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/in-memory/in-memory-adapter.d.ts.map +1 -1
- package/dist/in-memory/in-memory-adapter.js +5 -6
- package/dist/in-memory/in-memory-adapter.js.map +1 -1
- package/dist/opfs-utils.d.ts +3 -1
- package/dist/opfs-utils.d.ts.map +1 -1
- package/dist/opfs-utils.js +4 -4
- package/dist/opfs-utils.js.map +1 -1
- package/dist/web-worker/client-session/persisted-adapter.d.ts +15 -0
- package/dist/web-worker/client-session/persisted-adapter.d.ts.map +1 -1
- package/dist/web-worker/client-session/persisted-adapter.js +19 -7
- package/dist/web-worker/client-session/persisted-adapter.js.map +1 -1
- package/dist/web-worker/client-session/sqlite-loader.d.ts +2 -0
- package/dist/web-worker/client-session/sqlite-loader.d.ts.map +1 -0
- package/dist/web-worker/client-session/sqlite-loader.js +16 -0
- package/dist/web-worker/client-session/sqlite-loader.js.map +1 -0
- package/dist/web-worker/common/persisted-sqlite.d.ts +34 -0
- package/dist/web-worker/common/persisted-sqlite.d.ts.map +1 -1
- package/dist/web-worker/common/persisted-sqlite.js +152 -0
- package/dist/web-worker/common/persisted-sqlite.js.map +1 -1
- package/dist/web-worker/common/shutdown-channel.d.ts +1 -1
- package/dist/web-worker/common/shutdown-channel.d.ts.map +1 -1
- package/dist/web-worker/common/worker-schema.d.ts +33 -6
- package/dist/web-worker/common/worker-schema.d.ts.map +1 -1
- package/dist/web-worker/common/worker-schema.js +21 -3
- package/dist/web-worker/common/worker-schema.js.map +1 -1
- package/dist/web-worker/leader-worker/make-leader-worker.js +24 -2
- package/dist/web-worker/leader-worker/make-leader-worker.js.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.d.ts.map +1 -1
- package/dist/web-worker/shared-worker/make-shared-worker.js +17 -4
- package/dist/web-worker/shared-worker/make-shared-worker.js.map +1 -1
- package/dist/web-worker/vite-dev-polyfill.js +1 -0
- package/dist/web-worker/vite-dev-polyfill.js.map +1 -1
- package/package.json +7 -7
- package/src/in-memory/in-memory-adapter.ts +6 -7
- package/src/opfs-utils.ts +4 -4
- package/src/web-worker/ambient.d.ts +7 -4
- package/src/web-worker/client-session/persisted-adapter.ts +39 -10
- package/src/web-worker/client-session/sqlite-loader.ts +19 -0
- package/src/web-worker/common/persisted-sqlite.ts +220 -1
- package/src/web-worker/common/worker-schema.ts +34 -0
- package/src/web-worker/leader-worker/make-leader-worker.ts +29 -4
- package/src/web-worker/shared-worker/make-shared-worker.ts +46 -15
- package/src/web-worker/vite-dev-polyfill.ts +1 -0
package/src/web-worker/client-session/sqlite-loader.ts
@@ -0,0 +1,19 @@
+import { loadSqlite3Wasm } from '@livestore/sqlite-wasm/load-wasm'
+
+/**
+ * Browser sessions benefit from downloading and compiling the wasm binary as soon as
+ * possible to hide network and IO latency behind the rest of the boot process. We kick
+ * that work off eagerly on the client while still returning the shared promise.
+ *
+ * The Cloudflare / Workerd runtime has stricter rules: async fetches during module
+ * evaluation are blocked, so we defer loading until the worker asks for it.
+ */
+const isServerRuntime = String(import.meta.env.SSR) === 'true'
+
+let sqlite3Promise: ReturnType<typeof loadSqlite3Wasm> | undefined
+
+if (isServerRuntime === false) {
+  sqlite3Promise = loadSqlite3Wasm()
+}
+
+export const loadSqlite3 = () => (isServerRuntime ? loadSqlite3Wasm() : (sqlite3Promise ?? loadSqlite3Wasm()))
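A hedged usage sketch (hypothetical call site, not part of the diff): callers treat `loadSqlite3` as an async factory. On the client it normally resolves from the module-level promise kicked off above, while on server runtimes (`import.meta.env.SSR === 'true'`) the wasm is only loaded on demand.

```ts
// Hypothetical consumer of the new loader; the function name is illustrative only.
import { loadSqlite3 } from './sqlite-loader.ts'

const bootClientSession = async () => {
  // Usually resolves from the eagerly started shared promise on the client.
  const sqlite3 = await loadSqlite3()
  return sqlite3
}
```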
package/src/web-worker/common/persisted-sqlite.ts
@@ -1,12 +1,14 @@
 import { liveStoreStorageFormatVersion, UnexpectedError } from '@livestore/common'
 import type { LiveStoreSchema } from '@livestore/common/schema'
-import { decodeSAHPoolFilename, HEADER_OFFSET_DATA } from '@livestore/sqlite-wasm/browser'
+import { decodeSAHPoolFilename, HEADER_OFFSET_DATA, type WebDatabaseMetadataOpfs } from '@livestore/sqlite-wasm/browser'
+import { isDevEnv } from '@livestore/utils'
 import { Effect, Schedule, Schema } from '@livestore/utils/effect'
 
 import * as OpfsUtils from '../../opfs-utils.ts'
 import type * as WorkerSchema from './worker-schema.ts'
 
 export class PersistedSqliteError extends Schema.TaggedError<PersistedSqliteError>()('PersistedSqliteError', {
+  message: Schema.String,
   cause: Schema.Defect,
 }) {}
 
@@ -141,3 +143,220 @@ export const getStateDbFileName = (schema: LiveStoreSchema) => {
     schema.state.sqlite.migrations.strategy === 'manual' ? 'fixed' : schema.state.sqlite.hash.toString()
   return `state${schemaHashSuffix}.db`
 }
+
+export const MAX_ARCHIVED_STATE_DBS_IN_DEV = 3
+
+/**
+ * Cleanup old state database files after successful migration.
+ * This prevents OPFS file pool capacity from being exhausted by accumulated schema files.
+ *
+ * @param vfs - The AccessHandlePoolVFS instance for safe file operations
+ * @param currentSchema - Current schema (to avoid deleting the active database)
+ */
+export const cleanupOldStateDbFiles = Effect.fn('@livestore/adapter-web:cleanupOldStateDbFiles')(
+  function* ({
+    vfs,
+    currentSchema,
+    opfsDirectory,
+  }: {
+    vfs: WebDatabaseMetadataOpfs['vfs']
+    currentSchema: LiveStoreSchema
+    opfsDirectory: string
+  }) {
+    // Only cleanup for auto migration strategy because:
+    // - Auto strategy: Creates new database files per schema change (e.g., state123.db, state456.db)
+    //   which accumulate over time and can exhaust OPFS file pool capacity
+    // - Manual strategy: Always reuses the same database file (statefixed.db) across schema changes,
+    //   so there are never multiple old files to clean up
+    if (currentSchema.state.sqlite.migrations.strategy === 'manual') {
+      yield* Effect.logDebug('Skipping state db cleanup - manual migration strategy uses fixed filename')
+      return
+    }
+
+    const isDev = isDevEnv()
+    const currentDbFileName = getStateDbFileName(currentSchema)
+    const currentPath = `/${currentDbFileName}`
+
+    const allPaths = yield* Effect.sync(() => vfs.getTrackedFilePaths())
+    const oldStateDbPaths = allPaths.filter(
+      (path) => path.startsWith('/state') && path.endsWith('.db') && path !== currentPath,
+    )
+
+    if (oldStateDbPaths.length === 0) {
+      yield* Effect.logDebug('State db cleanup completed: no old database files found')
+      return
+    }
+
+    yield* Effect.logDebug(`Found ${oldStateDbPaths.length} old state database file(s) to clean up`)
+
+    let deletedCount = 0
+    const archivedFileNames: string[] = []
+    let archiveDirHandle: FileSystemDirectoryHandle | undefined
+
+    for (const path of oldStateDbPaths) {
+      const fileName = path.startsWith('/') ? path.slice(1) : path
+
+      if (isDev) {
+        archiveDirHandle = yield* Effect.tryPromise({
+          try: () => OpfsUtils.getDirHandle(`${opfsDirectory}/archive`, { create: true }),
+          catch: (cause) => new ArchiveStateDbError({ message: 'Failed to ensure archive directory', cause }),
+        })
+
+        const archivedFileName = yield* archiveStateDbFile({
+          vfs,
+          fileName,
+          archiveDirHandle,
+        })
+
+        archivedFileNames.push(archivedFileName)
+      }
+
+      const vfsResultCode = yield* Effect.try({
+        try: () => vfs.jDelete(fileName, 0),
+        catch: (cause) => new SqliteVfsError({ operation: 'jDelete', fileName, cause }),
+      })
+
+      // 0 indicates a successful result in SQLite.
+      // See https://www.sqlite.org/c3ref/c_abort.html
+      if (vfsResultCode !== 0) {
+        return yield* new SqliteVfsError({
+          operation: 'jDelete',
+          fileName,
+          vfsResultCode,
+        })
+      }
+
+      deletedCount++
+      yield* Effect.logDebug(`Successfully deleted old state database file: ${fileName}`)
+    }
+
+    if (isDev && archiveDirHandle !== undefined) {
+      const pruneResult = yield* pruneArchiveDir({
+        archiveDirHandle,
+        keep: MAX_ARCHIVED_STATE_DBS_IN_DEV,
+      })
+
+      yield* Effect.logDebug(
+        `State db cleanup completed: archived ${archivedFileNames.length} file(s); removed ${deletedCount} old database file(s) from active pool; archive retained ${pruneResult.retained.length} file(s)`,
+      )
+    } else {
+      yield* Effect.logDebug(`State db cleanup completed: removed ${deletedCount} old database file(s)`)
+    }
+  },
+  Effect.mapError(
+    (error) =>
+      new PersistedSqliteError({
+        message: 'Failed to clean up old state database file(s)',
+        cause: error,
+      }),
+  ),
+)
+
+const archiveStateDbFile = Effect.fn('@livestore/adapter-web:archiveStateDbFile')(function* ({
+  vfs,
+  fileName,
+  archiveDirHandle,
+}: {
+  vfs: WebDatabaseMetadataOpfs['vfs']
+  fileName: string
+  archiveDirHandle: FileSystemDirectoryHandle
+}) {
+  const stateDbBuffer = vfs.readFilePayload(fileName)
+
+  const archiveFileName = `${Date.now()}-${fileName}`
+
+  const archiveFileHandle = yield* Effect.tryPromise({
+    try: () => archiveDirHandle.getFileHandle(archiveFileName, { create: true }),
+    catch: (cause) =>
+      new ArchiveStateDbError({
+        message: 'Failed to open archive file handle',
+        fileName: archiveFileName,
+        cause,
+      }),
+  })
+
+  const archiveFileAccessHandle = yield* Effect.acquireRelease(
+    Effect.tryPromise({
+      try: () => archiveFileHandle.createSyncAccessHandle(),
+      catch: (cause) =>
+        new ArchiveStateDbError({
+          message: 'Failed to create sync access handle for archived file',
+          fileName: archiveFileName,
+          cause,
+        }),
+    }),
+    (handle) => Effect.sync(() => handle.close()).pipe(Effect.ignoreLogged),
+  )
+
+  yield* Effect.try({
+    try: () => {
+      archiveFileAccessHandle.write(stateDbBuffer)
+      archiveFileAccessHandle.flush()
+    },
+    catch: (cause) =>
+      new ArchiveStateDbError({
+        message: 'Failed to write archived state database',
+        fileName: archiveFileName,
+        cause,
+      }),
+  })
+
+  return archiveFileName
+}, Effect.scoped)
+
+const pruneArchiveDir = Effect.fn('@livestore/adapter-web:pruneArchiveDir')(function* ({
+  archiveDirHandle,
+  keep,
+}: {
+  archiveDirHandle: FileSystemDirectoryHandle
+  keep: number
+}) {
+  const files = yield* Effect.tryPromise({
+    try: async () => {
+      const result: { name: string; lastModified: number }[] = []
+
+      for await (const entry of archiveDirHandle.values()) {
+        if (entry.kind !== 'file') continue
+        const fileHandle = await archiveDirHandle.getFileHandle(entry.name)
+        const file = await fileHandle.getFile()
+        result.push({ name: entry.name, lastModified: file.lastModified })
+      }
+
+      return result.sort((a, b) => b.lastModified - a.lastModified)
+    },
+    catch: (cause) => new ArchiveStateDbError({ message: 'Failed to enumerate archived state databases', cause }),
+  })
+
+  const retained = files.slice(0, keep)
+  const toDelete = files.slice(keep)
+
+  yield* Effect.forEach(toDelete, ({ name }) =>
+    Effect.tryPromise({
+      try: () => archiveDirHandle.removeEntry(name),
+      catch: (cause) =>
+        new ArchiveStateDbError({
+          message: 'Failed to delete archived state database',
+          fileName: name,
+          cause,
+        }),
+    }),
+  )
+
+  return {
+    retained,
+    deleted: toDelete,
+  }
+})
+
+export class ArchiveStateDbError extends Schema.TaggedError<ArchiveStateDbError>()('ArchiveStateDbError', {
+  message: Schema.String,
+  fileName: Schema.optional(Schema.String),
+  cause: Schema.Defect,
+}) {}
+
+export class SqliteVfsError extends Schema.TaggedError<SqliteVfsError>()('SqliteVfsError', {
+  operation: Schema.String,
+  fileName: Schema.String,
+  vfsResultCode: Schema.optional(Schema.Number),
+  cause: Schema.optional(Schema.Defect),
+}) {}
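For illustration (not part of the diff): the candidate selection used by `cleanupOldStateDbFiles` boils down to a path filter over the VFS's tracked files. The paths below are hypothetical example values.

```ts
// Standalone sketch of the candidate-selection rule: every tracked OPFS path that looks like a
// state database and is not the currently active file is a cleanup candidate.
const trackedPaths = ['/state123.db', '/state456.db', '/eventlog.db'] // hypothetical pool contents
const currentPath = '/state456.db' // hypothetical active state db

const oldStateDbPaths = trackedPaths.filter(
  (path) => path.startsWith('/state') && path.endsWith('.db') && path !== currentPath,
)

console.log(oldStateDbPaths) // ['/state123.db']
```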
package/src/web-worker/common/worker-schema.ts
@@ -4,6 +4,7 @@ import {
   LeaderAheadError,
   liveStoreVersion,
   MigrationsReport,
+  SyncBackend,
   SyncState,
   UnexpectedError,
 } from '@livestore/common'
@@ -146,6 +147,33 @@ export class LeaderWorkerInnerGetLeaderSyncState extends Schema.TaggedRequest<Le
   },
 ) {}
 
+export class LeaderWorkerInnerSyncStateStream extends Schema.TaggedRequest<LeaderWorkerInnerSyncStateStream>()(
+  'SyncStateStream',
+  {
+    payload: {},
+    success: SyncState.SyncState,
+    failure: UnexpectedError,
+  },
+) {}
+
+export class LeaderWorkerInnerGetNetworkStatus extends Schema.TaggedRequest<LeaderWorkerInnerGetNetworkStatus>()(
+  'GetNetworkStatus',
+  {
+    payload: {},
+    success: SyncBackend.NetworkStatus,
+    failure: UnexpectedError,
+  },
+) {}
+
+export class LeaderWorkerInnerNetworkStatusStream extends Schema.TaggedRequest<LeaderWorkerInnerNetworkStatusStream>()(
+  'NetworkStatusStream',
+  {
+    payload: {},
+    success: SyncBackend.NetworkStatus,
+    failure: UnexpectedError,
+  },
+) {}
+
 export class LeaderWorkerInnerShutdown extends Schema.TaggedRequest<LeaderWorkerInnerShutdown>()('Shutdown', {
   payload: {},
   success: Schema.Void,
@@ -173,6 +201,9 @@ export const LeaderWorkerInnerRequest = Schema.Union(
   LeaderWorkerInnerGetRecreateSnapshot,
   LeaderWorkerInnerGetLeaderHead,
   LeaderWorkerInnerGetLeaderSyncState,
+  LeaderWorkerInnerSyncStateStream,
+  LeaderWorkerInnerGetNetworkStatus,
+  LeaderWorkerInnerNetworkStatusStream,
   LeaderWorkerInnerShutdown,
   LeaderWorkerInnerExtraDevtoolsMessage,
   WebmeshWorker.Schema.CreateConnection,
@@ -218,6 +249,9 @@ export class SharedWorkerRequest extends Schema.Union(
   LeaderWorkerInnerExportEventlog,
   LeaderWorkerInnerGetLeaderHead,
   LeaderWorkerInnerGetLeaderSyncState,
+  LeaderWorkerInnerSyncStateStream,
+  LeaderWorkerInnerGetNetworkStatus,
+  LeaderWorkerInnerNetworkStatusStream,
   LeaderWorkerInnerShutdown,
   LeaderWorkerInnerExtraDevtoolsMessage,
 
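The three new requests share one shape: a `Schema.TaggedRequest` with an empty payload and explicit success/failure schemas, which is what keeps the shared-worker forwarding fully typed. A minimal sketch of that shape, using a hypothetical request name:

```ts
// Sketch of the request pattern added above; PingLeader is a hypothetical name, not part of the package.
import { Schema } from '@livestore/utils/effect'

export class PingLeader extends Schema.TaggedRequest<PingLeader>()('PingLeader', {
  payload: {},
  success: Schema.String,
  failure: Schema.Never,
}) {}
```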
package/src/web-worker/leader-worker/make-leader-worker.ts
@@ -26,7 +26,7 @@ import {
 import type * as otel from '@opentelemetry/api'
 
 import * as OpfsUtils from '../../opfs-utils.ts'
-import { getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
+import { cleanupOldStateDbFiles, getStateDbFileName, sanitizeOpfsDir } from '../common/persisted-sqlite.ts'
 import { makeShutdownChannel } from '../common/shutdown-channel.ts'
 import * as WorkerSchema from '../common/worker-schema.ts'
 
@@ -59,16 +59,16 @@ export const makeWorkerEffect = (options: WorkerOptions) => {
       )
     : undefined
 
+  const layer = Layer.mergeAll(Logger.prettyWithThread(self.name), FetchHttpClient.layer, TracingLive ?? Layer.empty)
+
   return makeWorkerRunnerOuter(options).pipe(
     Layer.provide(BrowserWorkerRunner.layer),
     WorkerRunner.launch,
     Effect.scoped,
     Effect.tapCauseLogPretty,
     Effect.annotateLogs({ thread: self.name }),
-    Effect.provide(
-    Effect.provide(FetchHttpClient.layer),
+    Effect.provide(layer),
     LS_DEV ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
-    TracingLive ? Effect.provide(TracingLive) : identity,
     // We're using this custom scheduler to improve op batching behaviour and reduce the overhead
     // of the Effect fiber runtime given we have different tradeoffs on a worker thread.
     // Despite the "message channel" name, is has nothing to do with the `incomingRequestsPort` above.
@@ -131,6 +131,16 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions }: WorkerOptions) =>
     concurrency: 2,
   })
 
+  // Clean up old state database files after successful database creation
+  // This prevents OPFS file pool capacity exhaustion from accumulated state db files after schema changes/migrations
+  if (dbState.metadata._tag === 'opfs') {
+    yield* cleanupOldStateDbFiles({
+      vfs: dbState.metadata.vfs,
+      currentSchema: schema,
+      opfsDirectory: dbState.metadata.persistenceInfo.opfsDirectory,
+    })
+  }
+
   const devtoolsOptions = yield* makeDevtoolsOptions({ devtoolsEnabled, dbState, dbEventlog })
   const shutdownChannel = yield* makeShutdownChannel(storeId)
 
@@ -212,6 +222,21 @@ const makeWorkerRunnerInner = ({ schema, sync: syncOptions }: WorkerOptions) =>
         UnexpectedError.mapToUnexpectedError,
         Effect.withSpan('@livestore/adapter-web:worker:GetLeaderSyncState'),
       ),
+    SyncStateStream: () =>
+      Effect.gen(function* () {
+        const workerCtx = yield* LeaderThreadCtx
+        return workerCtx.syncProcessor.syncState.changes
+      }).pipe(Stream.unwrapScoped),
+    GetNetworkStatus: () =>
+      Effect.gen(function* () {
+        const workerCtx = yield* LeaderThreadCtx
+        return yield* workerCtx.networkStatus
+      }).pipe(UnexpectedError.mapToUnexpectedError, Effect.withSpan('@livestore/adapter-web:worker:GetNetworkStatus')),
+    NetworkStatusStream: () =>
+      Effect.gen(function* () {
+        const workerCtx = yield* LeaderThreadCtx
+        return workerCtx.networkStatus.changes
+      }).pipe(Stream.unwrapScoped),
     Shutdown: () =>
       Effect.gen(function* () {
         yield* Effect.logDebug('[@livestore/adapter-web:worker] Shutdown')
package/src/web-worker/shared-worker/make-shared-worker.ts
@@ -28,6 +28,20 @@ import {
 import { makeShutdownChannel } from '../common/shutdown-channel.ts'
 import * as WorkerSchema from '../common/worker-schema.ts'
 
+// Extract from `livestore-shared-worker-${storeId}`
+const storeId = self.name.replace('livestore-shared-worker-', '')
+
+// We acquire a lock that is held as long as this shared worker is alive.
+// This way, when the shared worker is terminated (e.g. by the browser when the page is closed),
+// the lock is released and any thread waiting for the lock can be notified.
+const LIVESTORE_SHARED_WORKER_TERMINATION_LOCK = `livestore-shared-worker-termination-lock-${storeId}`
+navigator.locks.request(
+  LIVESTORE_SHARED_WORKER_TERMINATION_LOCK,
+  { steal: true },
+  // We use a never-resolving promise to hold the lock
+  async () => new Promise(() => {}),
+)
+
 if (isDevEnv()) {
   globalThis.__debugLiveStoreUtils = {
     blobUrl: (buffer: Uint8Array<ArrayBuffer>) =>
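The never-resolving lock is effectively a liveness signal: because the shared worker holds `livestore-shared-worker-termination-lock-${storeId}` for its entire lifetime, another thread can detect its termination by queueing for the same lock. A hedged sketch of such a consumer (the helper name is hypothetical, not part of the package):

```ts
// Hypothetical consumer: resolves once the shared worker holding the termination lock is gone,
// since the queued request is only granted after the current holder releases (i.e. is terminated).
const waitForSharedWorkerTermination = (storeId: string): Promise<void> =>
  new Promise<void>((resolve) => {
    void navigator.locks.request(`livestore-shared-worker-termination-lock-${storeId}`, async () => {
      resolve()
    })
  })
```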
@@ -56,12 +70,15 @@ const makeWorkerRunner = Effect.gen(function* () {
 
   const forwardRequest = <TReq extends WorkerSchema.LeaderWorkerInnerRequest>(
     req: TReq,
-  ):
-
-
+  ): Effect.Effect<
+    Schema.WithResult.Success<TReq>,
+    UnexpectedError | Schema.WithResult.Failure<TReq>,
+    Schema.WithResult.Context<TReq>
+  > =>
+    // Forward the request to the active worker and normalize platform errors into UnexpectedError.
     waitForWorker.pipe(
       // Effect.logBefore(`forwardRequest: ${req._tag}`),
-      Effect.andThen((worker) => worker.executeEffect(req) as Effect.Effect<unknown, unknown,
+      Effect.andThen((worker) => worker.executeEffect(req) as Effect.Effect<unknown, unknown, unknown>),
       // Effect.tap((_) => Effect.log(`forwardRequest: ${req._tag}`, _)),
       // Effect.tapError((cause) => Effect.logError(`forwardRequest err: ${req._tag}`, cause)),
       Effect.interruptible,
@@ -78,17 +95,23 @@ const makeWorkerRunner = Effect.gen(function* () {
       ),
       Effect.catchAllDefect((cause) => new UnexpectedError({ cause })),
       Effect.tapCauseLogPretty,
-    ) as
+    ) as Effect.Effect<
+      Schema.WithResult.Success<TReq>,
+      UnexpectedError | Schema.WithResult.Failure<TReq>,
+      Schema.WithResult.Context<TReq>
+    >
 
   const forwardRequestStream = <TReq extends WorkerSchema.LeaderWorkerInnerRequest>(
     req: TReq,
-  ):
-
-
+  ): Stream.Stream<
+    Schema.WithResult.Success<TReq>,
+    UnexpectedError | Schema.WithResult.Failure<TReq>,
+    Schema.WithResult.Context<TReq>
+  > =>
     Effect.gen(function* () {
       yield* Effect.logDebug(`forwardRequestStream: ${req._tag}`)
       const { worker, scope } = yield* SubscriptionRef.waitUntil(leaderWorkerContextSubRef, isNotUndefined)
-      const stream = worker.execute(req) as Stream.Stream<unknown, unknown,
+      const stream = worker.execute(req) as Stream.Stream<unknown, unknown, unknown>
 
       // It seems the request stream is not automatically interrupted when the scope shuts down
       // so we need to manually interrupt it when the scope shuts down
@@ -109,7 +132,11 @@ const makeWorkerRunner = Effect.gen(function* () {
       Stream.unwrap,
       Stream.ensuring(Effect.logDebug(`shutting down stream for ${req._tag}`)),
       UnexpectedError.mapToUnexpectedErrorStream,
-    ) as
+    ) as Stream.Stream<
+      Schema.WithResult.Success<TReq>,
+      UnexpectedError | Schema.WithResult.Failure<TReq>,
+      Schema.WithResult.Context<TReq>
+    >
 
   const resetCurrentWorkerCtx = Effect.gen(function* () {
     const prevWorker = yield* SubscriptionRef.get(leaderWorkerContextSubRef)
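The casts above lean on `Schema.WithResult` to recover each request's declared result types, which keeps `forwardRequest` and `forwardRequestStream` generic over every `LeaderWorkerInnerRequest`. A type-level sketch (illustrative only, assuming the worker-schema import path used elsewhere in this file):

```ts
// Type-level sketch: Schema.WithResult projects the success/failure/context types a
// TaggedRequest declares, e.g. for the GetNetworkStatus request added in this release.
import { Schema } from '@livestore/utils/effect'
import type * as WorkerSchema from '../common/worker-schema.ts'

type NetworkStatusSuccess = Schema.WithResult.Success<WorkerSchema.LeaderWorkerInnerGetNetworkStatus>
type NetworkStatusFailure = Schema.WithResult.Failure<WorkerSchema.LeaderWorkerInnerGetNetworkStatus>
type NetworkStatusContext = Schema.WithResult.Context<WorkerSchema.LeaderWorkerInnerGetNetworkStatus>
```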
@@ -231,7 +258,10 @@ const makeWorkerRunner = Effect.gen(function* () {
       ExportEventlog: forwardRequest,
       Setup: forwardRequest,
       GetLeaderSyncState: forwardRequest,
+      SyncStateStream: forwardRequestStream,
       GetLeaderHead: forwardRequest,
+      GetNetworkStatus: forwardRequest,
+      NetworkStatusStream: forwardRequestStream,
       Shutdown: forwardRequest,
       ExtraDevtoolsMessage: forwardRequest,
 
@@ -241,8 +271,11 @@ const makeWorkerRunner = Effect.gen(function* () {
 }).pipe(Layer.unwrapScoped)
 
 export const makeWorker = () => {
-
-
+  const layer = Layer.mergeAll(
+    Logger.prettyWithThread(self.name),
+    FetchHttpClient.layer,
+    WebmeshWorker.CacheService.layer({ nodeName: DevtoolsWeb.makeNodeName.sharedWorker({ storeId }) }),
+  )
 
   makeWorkerRunner.pipe(
     Layer.provide(BrowserWorkerRunner.layer),
@@ -251,9 +284,7 @@ export const makeWorker = () => {
     Effect.scoped,
     Effect.tapCauseLogPretty,
     Effect.annotateLogs({ thread: self.name }),
-    Effect.provide(
-    Effect.provide(FetchHttpClient.layer),
-    Effect.provide(WebmeshWorker.CacheService.layer({ nodeName: DevtoolsWeb.makeNodeName.sharedWorker({ storeId }) })),
+    Effect.provide(layer),
     LS_DEV ? TaskTracing.withAsyncTaggingTracing((name) => (console as any).createTask(name)) : identity,
     // TODO remove type-cast (currently needed to silence a tsc bug)
     (_) => _ as any as Effect.Effect<void, any>,
package/src/web-worker/vite-dev-polyfill.ts
@@ -5,6 +5,7 @@ globalThis.$RefreshReg$ = () => {}
 // @ts-expect-error TODO remove when Vite does proper treeshaking during dev
 globalThis.$RefreshSig$ = () => (type: any) => type
 
+// biome-ignore lint/suspicious/noTsIgnore: sometimes @types/node is there, sometimes not.
 // @ts-ignore
 globalThis.process = globalThis.process ?? { env: {} }
 