pushwork 1.1.8 → 1.2.3

Files changed (56)
  1. package/ARCHITECTURE-ACCORDING-TO-CLAUDE.md +17 -11
  2. package/CLAUDE.md +46 -1
  3. package/README.md +18 -4
  4. package/dist/cli.js +45 -4
  5. package/dist/cli.js.map +1 -1
  6. package/dist/commands.d.ts +1 -0
  7. package/dist/commands.d.ts.map +1 -1
  8. package/dist/commands.js +151 -38
  9. package/dist/commands.js.map +1 -1
  10. package/dist/core/change-detection.js +2 -2
  11. package/dist/core/change-detection.js.map +1 -1
  12. package/dist/core/config.d.ts.map +1 -1
  13. package/dist/core/config.js +3 -0
  14. package/dist/core/config.js.map +1 -1
  15. package/dist/core/move-detection.d.ts.map +1 -1
  16. package/dist/core/move-detection.js +4 -1
  17. package/dist/core/move-detection.js.map +1 -1
  18. package/dist/core/sync-engine.d.ts +7 -3
  19. package/dist/core/sync-engine.d.ts.map +1 -1
  20. package/dist/core/sync-engine.js +40 -14
  21. package/dist/core/sync-engine.js.map +1 -1
  22. package/dist/types/config.d.ts +4 -0
  23. package/dist/types/config.d.ts.map +1 -1
  24. package/dist/types/config.js +2 -1
  25. package/dist/types/config.js.map +1 -1
  26. package/dist/utils/content.js +1 -1
  27. package/dist/utils/content.js.map +1 -1
  28. package/dist/utils/network-sync.d.ts +1 -2
  29. package/dist/utils/network-sync.d.ts.map +1 -1
  30. package/dist/utils/network-sync.js +76 -7
  31. package/dist/utils/network-sync.js.map +1 -1
  32. package/dist/utils/output.js +7 -7
  33. package/dist/utils/output.js.map +1 -1
  34. package/dist/utils/repo-factory.d.ts +11 -3
  35. package/dist/utils/repo-factory.d.ts.map +1 -1
  36. package/dist/utils/repo-factory.js +113 -8
  37. package/dist/utils/repo-factory.js.map +1 -1
  38. package/flake.lock +128 -0
  39. package/flake.nix +66 -0
  40. package/package.json +6 -6
  41. package/scripts/roundtrip-test.sh +35 -0
  42. package/src/cli.ts +53 -6
  43. package/src/commands.ts +150 -26
  44. package/src/core/change-detection.ts +2 -2
  45. package/src/core/config.ts +4 -0
  46. package/src/core/move-detection.ts +3 -1
  47. package/src/core/sync-engine.ts +40 -15
  48. package/src/types/config.ts +4 -0
  49. package/src/utils/content.ts +1 -1
  50. package/src/utils/network-sync.ts +92 -8
  51. package/src/utils/output.ts +7 -7
  52. package/src/utils/repo-factory.ts +140 -19
  53. package/test/integration/sub-flag.test.ts +187 -0
  54. package/test/unit/network-sync-sub.test.ts +144 -0
  55. package/test/unit/repo-factory.test.ts +111 -0
  56. package/test/unit/subduction-config.test.ts +69 -0
package/src/core/sync-engine.ts
@@ -197,16 +197,25 @@ export class SyncEngine {
 
   /**
    * Get the appropriate URL for a subdirectory's directory entry.
-   * Always uses plain URLs; versioned URLs on directories can cause
-   * issues where consumers see a version without the docs array.
+   * Artifact directories get versioned URLs (with heads) so consumers can
+   * fetch the exact snapshotted version, matching how artifact files work.
+   * Non-artifact directories get plain URLs for collaborative editing.
   */
-  private getDirEntryUrl(handle: DocHandle<unknown>): AutomergeUrl {
+  private getDirEntryUrl(handle: DocHandle<unknown>, dirPath: string): AutomergeUrl {
+    if (this.isArtifactPath(dirPath)) {
+      return this.getVersionedUrl(handle)
+    }
     return getPlainUrl(handle.url)
   }
 
  /**
   * Set the root directory URL in the snapshot
   */
+  async getRootDirectoryUrl(): Promise<AutomergeUrl | undefined> {
+    const snapshot = await this.snapshotManager.load()
+    return snapshot?.rootDirectoryUrl
+  }
+
  async setRootDirectoryUrl(url: AutomergeUrl): Promise<void> {
    let snapshot = await this.snapshotManager.load()
    if (!snapshot) {
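For illustration, a minimal caller sketch of the new accessor (the engine variable and surrounding setup are assumed, not part of this diff):

    const rootUrl = await engine.getRootDirectoryUrl()
    if (rootUrl) {
      // Shareable AutomergeUrl for cloning the directory elsewhere
      console.log(`Root directory: ${rootUrl}`)
    }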
@@ -423,7 +432,7 @@ export class SyncEngine {
 
   /**
   * Run full bidirectional sync
   */
-  async sync(): Promise<SyncResult> {
+  async sync(options?: {sub?: boolean}): Promise<SyncResult> {
    const result: SyncResult = {
      success: false,
      filesChanged: 0,
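A hedged call-site sketch for the new optional parameter (engine is assumed):

    // Default WebSocket mode, unchanged behavior
    await engine.sync()

    // Subduction mode: downstream, sync() drops the StorageId so
    // waitForSync falls back to head-stability polling (see below)
    await engine.sync({sub: true})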
@@ -482,7 +491,6 @@ export class SyncEngine {
       await waitForBidirectionalSync(
         this.repo,
         snapshot.rootDirectoryUrl,
-        this.config.sync_server_storage_id,
         {
           timeoutMs: 5000, // Increased timeout for initial sync
           pollIntervalMs: 100,
@@ -526,6 +534,12 @@ export class SyncEngine {
 
     // Wait for network sync (important for clone scenarios)
     if (this.config.sync_enabled) {
+      const sub = options?.sub ?? false
+      // In Subduction mode, pass no StorageId so waitForSync
+      // falls back to head-stability polling. In WebSocket mode,
+      // pass the StorageId for precise getSyncInfo-based verification.
+      const storageId = sub ? undefined : this.config.sync_server_storage_id
+
      try {
        // Ensure root directory handle is tracked for sync
        if (snapshot.rootDirectoryUrl) {
@@ -546,11 +560,13 @@ export class SyncEngine {
        out.update(`Uploading ${allHandles.length} documents to sync server`)
        const {failed} = await waitForSync(
          allHandles,
-          this.config.sync_server_storage_id
+          storageId
        )
 
-        // Recreate failed documents and retry once
-        if (failed.length > 0) {
+        // Recreate failed documents and retry once.
+        // Skip in Subduction mode — SubductionSource has its
+        // own heal-sync retry logic.
+        if (failed.length > 0 && !sub) {
          debug(`sync: ${failed.length} documents failed, recreating`)
          out.update(`Recreating ${failed.length} failed documents`)
          const retryHandles = await this.recreateFailedDocuments(failed, snapshot)
@@ -559,7 +575,7 @@ export class SyncEngine {
            out.update(`Retrying ${retryHandles.length} recreated documents`)
            const retry = await waitForSync(
              retryHandles,
-              this.config.sync_server_storage_id
+              storageId
            )
            if (retry.failed.length > 0) {
              const msg = `${retry.failed.length} documents failed to sync to server after recreation`
@@ -572,6 +588,11 @@ export class SyncEngine {
            })
          }
        }
+      } else if (failed.length > 0 && sub) {
+        const msg = `${failed.length} document${failed.length === 1 ? '' : 's'} did not converge during sync (Subduction will retry in the background; re-run sync to confirm)`
+        debug(`sync: ${msg}`)
+        out.taskLine(msg, true)
+        result.warnings.push(msg)
      }
 
      debug("sync: all handles synced to server")
@@ -585,7 +606,6 @@ export class SyncEngine {
        await waitForBidirectionalSync(
          this.repo,
          snapshot.rootDirectoryUrl,
-          this.config.sync_server_storage_id,
          {
            timeoutMs: BIDIRECTIONAL_SYNC_TIMEOUT_MS,
            pollIntervalMs: 100,
@@ -608,10 +628,15 @@ export class SyncEngine {
          )
          debug("sync: syncing root directory touch to server")
          out.update("Syncing root directory update")
-          await waitForSync(
+          const rootSync = await waitForSync(
            [rootHandle],
-            this.config.sync_server_storage_id
+            storageId
          )
+          if (rootSync.failed.length > 0) {
+            const msg = "Root directory update did not converge to server; consumers may not see recent changes until next sync"
+            debug(`sync: ${msg}`)
+            result.warnings.push(msg)
+          }
        }
      } catch (error) {
        debug(`sync: network sync error: ${error}`)
@@ -938,7 +963,7 @@ export class SyncEngine {
          )
          subdirUpdates.push({
            name: childName,
-            url: this.getDirEntryUrl(childHandle),
+            url: this.getDirEntryUrl(childHandle, modifiedDir),
          })
        }
      }
@@ -1438,7 +1463,7 @@ export class SyncEngine {
    this.handlesByPath.set(directoryPath, childDirHandle)
 
    // Get appropriate URL for directory entry
-    const entryUrl = this.getDirEntryUrl(childDirHandle)
+    const entryUrl = this.getDirEntryUrl(childDirHandle, directoryPath)
 
    // Update snapshot with discovered directory
    this.snapshotManager.updateDirectoryEntry(snapshot, directoryPath, {
@@ -1469,7 +1494,7 @@ export class SyncEngine {
    const dirHandle = this.repo.create(dirDoc)
 
    // Get appropriate URL for directory entry
-    const dirEntryUrl = this.getDirEntryUrl(dirHandle)
+    const dirEntryUrl = this.getDirEntryUrl(dirHandle, directoryPath)
 
    // Add this directory to its parent
    // Use plain URL for mutable handle
package/src/types/config.ts
@@ -6,6 +6,7 @@ import { StorageId } from "@automerge/automerge-repo";
 export const DEFAULT_SYNC_SERVER = "wss://sync3.automerge.org";
 export const DEFAULT_SYNC_SERVER_STORAGE_ID =
   "3760df37-a4c6-4f66-9ecd-732039a9385d" as StorageId;
+export const DEFAULT_SUBDUCTION_SERVER = "wss://subduction.sync.inkandswitch.com";
 
 /**
  * Global configuration options
@@ -25,6 +26,7 @@ export interface GlobalConfig {
  */
 export interface DirectoryConfig extends GlobalConfig {
   root_directory_url?: string;
+  subduction?: boolean;
   sync_enabled: boolean;
 }
 
@@ -42,6 +44,7 @@ export interface CloneOptions extends CommandOptions {
   force?: boolean; // Overwrite existing directory
   syncServer?: string; // Custom sync server URL
   syncServerStorageId?: StorageId; // Custom sync server storage ID
+  sub?: boolean;
 }
 
 /**
@@ -83,6 +86,7 @@ export interface CheckoutOptions extends CommandOptions {
 export interface InitOptions extends CommandOptions {
   syncServer?: string;
   syncServerStorageId?: StorageId;
+  sub?: boolean;
 }
 
 /**
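Putting the type changes together, a hedged sketch of a Subduction-mode config (assuming GlobalConfig carries sync_server, which the repo factory below reads; the subduction flag is presumably persisted so later commands pick the right backend):

    import {DirectoryConfig, DEFAULT_SUBDUCTION_SERVER} from "../types/config"

    const config: DirectoryConfig = {
      sync_enabled: true,
      sync_server: DEFAULT_SUBDUCTION_SERVER, // instead of DEFAULT_SYNC_SERVER
      subduction: true,
    }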
package/src/utils/content.ts
@@ -16,7 +16,7 @@ export function isContentEqual(
   content2: string | Uint8Array | null
 ): boolean {
   if (content1 === content2) return true;
-  if (!content1 || !content2) return false;
+  if (content1 == null || content2 == null) return false;
 
   if (typeof content1 !== typeof content2) return false;
 
package/src/utils/network-sync.ts
@@ -21,13 +21,11 @@ function debug(...args: any[]) {
  *
  * @param repo - The Automerge repository
  * @param rootDirectoryUrl - The root directory URL to start traversal from
- * @param syncServerStorageId - The sync server storage ID
  * @param options - Configuration options
  */
 export async function waitForBidirectionalSync(
   repo: Repo,
   rootDirectoryUrl: AutomergeUrl | undefined,
-  syncServerStorageId: StorageId | undefined,
   options: {
     timeoutMs?: number;
     pollIntervalMs?: number;
@@ -42,7 +40,7 @@ export async function waitForBidirectionalSync(
     handles,
   } = options;
 
-  if (!syncServerStorageId || !rootDirectoryUrl) {
+  if (!rootDirectoryUrl) {
     return;
   }
 
@@ -295,16 +293,20 @@ export async function waitForSync(
 ): Promise<SyncWaitResult> {
   const startTime = Date.now();
 
-  if (!syncServerStorageId) {
-    debug("waitForSync: no sync server storage ID, skipping");
-    return { failed: [] };
-  }
-
   if (handlesToWaitOn.length === 0) {
     debug("waitForSync: no documents to sync");
     return { failed: [] };
   }
 
+  // When no StorageId is available (Subduction mode), use head-stability
+  // polling. The SubductionSource handles sync internally — we just wait
+  // for each handle's heads to stop changing.
+  if (!syncServerStorageId) {
+    debug(`waitForSync: no storage ID, using head-stability polling for ${handlesToWaitOn.length} documents`);
+    out.taskLine(`Waiting for ${handlesToWaitOn.length} documents to sync`);
+    return waitForSyncViaHeadStability(handlesToWaitOn, timeoutMs, startTime);
+  }
+
   debug(`waitForSync: waiting for ${handlesToWaitOn.length} documents (timeout=${timeoutMs}ms, batchSize=${SYNC_BATCH_SIZE})`);
 
   // Separate already-synced from needs-sync
@@ -370,3 +372,85 @@ export async function waitForSync(
 
   return { failed };
 }
+
+/**
+ * Wait for sync by polling head stability (Subduction mode).
+ * Each handle's heads are polled until they remain unchanged for
+ * several consecutive checks, indicating the SubductionSource has
+ * finished syncing.
+ */
+async function waitForSyncViaHeadStability(
+  handles: DocHandle<unknown>[],
+  timeoutMs: number,
+  startTime: number,
+): Promise<SyncWaitResult> {
+  const failed: DocHandle<unknown>[] = [];
+  let synced = 0;
+
+  // Process in batches
+  for (let i = 0; i < handles.length; i += SYNC_BATCH_SIZE) {
+    const batch = handles.slice(i, i + SYNC_BATCH_SIZE);
+
+    const results = await Promise.allSettled(
+      batch.map(handle => waitForHandleHeadStability(handle, timeoutMs, startTime))
+    );
+
+    for (const result of results) {
+      if (result.status === "rejected") {
+        failed.push(result.reason as DocHandle<unknown>);
+      } else {
+        synced++;
+      }
+    }
+  }
+
+  const elapsed = Date.now() - startTime;
+  if (failed.length > 0) {
+    debug(`waitForSync(heads): ${failed.length} documents failed after ${elapsed}ms`);
+    out.taskLine(`Sync: ${synced} synced, ${failed.length} timed out after ${(elapsed / 1000).toFixed(1)}s`, true);
+  } else {
+    debug(`waitForSync(heads): all ${handles.length} documents synced in ${elapsed}ms`);
+    out.taskLine(`All ${handles.length} documents synced (${(elapsed / 1000).toFixed(1)}s)`);
+  }
+
+  return { failed };
+}
+
+/**
+ * Wait for a single handle's heads to stabilize.
+ * Polls heads at 100ms intervals; resolves after 3 consecutive stable
+ * checks, rejects on timeout.
+ */
+function waitForHandleHeadStability(
+  handle: DocHandle<unknown>,
+  timeoutMs: number,
+  startTime: number,
+): Promise<DocHandle<unknown>> {
+  return new Promise<DocHandle<unknown>>((resolve, reject) => {
+    let lastHeads = JSON.stringify(handle.heads());
+    let stableCount = 0;
+    const stableRequired = 3;
+
+    const pollInterval = setInterval(() => {
+      const currentHeads = JSON.stringify(handle.heads());
+      if (currentHeads === lastHeads) {
+        stableCount++;
+        if (stableCount >= stableRequired) {
+          clearInterval(pollInterval);
+          clearTimeout(timeout);
+          debug(`waitForSync(heads): ${handle.url}... converged in ${Date.now() - startTime}ms`);
+          resolve(handle);
+        }
+      } else {
+        stableCount = 0;
+        lastHeads = currentHeads;
+      }
+    }, 100);
+
+    const timeout = setTimeout(() => {
+      clearInterval(pollInterval);
+      debug(`waitForSync(heads): ${handle.url}... timed out after ${timeoutMs}ms`);
+      reject(handle);
+    }, timeoutMs);
+  });
+}
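A hedged usage sketch of the new fallback: passing undefined as the StorageId now selects head-stability polling rather than skipping the wait, as the old code did (handles is assumed):

    // Subduction mode: each handle's heads must hold still for three
    // consecutive 100 ms checks before it counts as synced.
    const {failed} = await waitForSync(handles, undefined)
    if (failed.length > 0) {
      // Heads kept moving until the timeout; SubductionSource retries
      // in the background, so warn instead of recreating documents.
    }

Note the heuristic nature of this check: stable local heads show that no further changes are arriving, not that the server has confirmed persistence, which is why the sync engine above only warns and suggests re-running sync when this path times out.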
package/src/utils/output.ts
@@ -345,7 +345,7 @@ export class Output {
       this.taskOriginalMessage = null;
       this.taskCurrentMessage = null;
     }
-    console.log(
+    console.error(
       chalk.red(
         message instanceof Error
           ? message.message
@@ -367,7 +367,7 @@ export class Output {
       this.taskOriginalMessage = null;
       this.taskCurrentMessage = null;
     }
-    console.log(
+    console.error(
      `\n${chalk.bgRed.white(` ${label} `)}${message && ` ${message}`}`
    );
  }
@@ -400,19 +400,19 @@ export class Output {
 
     if (error instanceof Error) {
       // Error type and message
-      console.log(chalk.red(`${error.name}: ${error.message}`));
+      console.error(chalk.red(`${error.name}: ${error.message}`));
 
       // Stack trace
       if (error.stack) {
-        console.log("");
-        console.log(chalk.dim("Stack trace:"));
+        console.error("");
+        console.error(chalk.dim("Stack trace:"));
         const stackLines = error.stack.split("\n").slice(1); // Skip first line (error message)
         stackLines.forEach((line) =>
-          console.log(chalk.dim(` ${line.trim()}`))
+          console.error(chalk.dim(` ${line.trim()}`))
         );
       }
     } else {
-      console.log(chalk.red(String(error)));
+      console.error(chalk.red(String(error)));
     }
 
     process.exit(exitCode);
package/src/utils/repo-factory.ts
@@ -1,28 +1,149 @@
-import { Repo } from "@automerge/automerge-repo";
-import { NodeFSStorageAdapter } from "@automerge/automerge-repo-storage-nodefs";
-import { BrowserWebSocketClientAdapter } from "@automerge/automerge-repo-network-websocket";
-import * as path from "path";
-import { DirectoryConfig } from "../types";
+import {
+  type Repo,
+  type RepoConfig,
+  type NetworkAdapterInterface,
+} from "@automerge/automerge-repo"
+import {NodeFSStorageAdapter} from "@automerge/automerge-repo-storage-nodefs"
+import * as fs from "fs/promises"
+import * as path from "path"
+import {DirectoryConfig} from "../types"
 
 /**
- * Create an Automerge repository with configuration-based setup
+ * Perform a real ESM dynamic import that tsc won't rewrite to require().
+ *
+ * TypeScript with `"module": "commonjs"` compiles `await import("x")` to
+ * `require("x")`, which resolves CJS entries instead of ESM entries. The
+ * Wasm module instance is different between the CJS and ESM module graphs,
+ * so initializing via CJS require() doesn't help the ESM /slim imports
+ * inside automerge-repo.
+ *
+ * This helper uses `new Function` to create a real `import()` expression
+ * that Node.js evaluates as ESM, sharing the same module graph as the
+ * Repo's internal imports.
+ */
+const dynamicImport = new Function("specifier", "return import(specifier)") as (
+  specifier: string
+) => Promise<any>
+
+/**
+ * Initialize the Subduction Wasm module and return the Repo constructor.
+ *
+ * The Repo constructor calls set_subduction_logger() and new MemorySigner()
+ * from @automerge/automerge-subduction/slim, which require the Wasm module
+ * to be initialized first. automerge-repo exports initSubduction() to
+ * handle this — it dynamically imports the non-/slim entry (which
+ * auto-initializes the Wasm as a side effect).
+ *
+ * Both the Repo and initSubduction must be loaded via ESM dynamic import()
+ * so they share the same module graph as the Repo's internal /slim imports.
+ */
+let cachedRepoClass: typeof Repo | undefined
+
+async function getRepoClass(): Promise<typeof Repo> {
+  if (cachedRepoClass) return cachedRepoClass
+
+  // Import Repo and initialize Subduction Wasm via automerge-repo's
+  // initSubduction() helper. This must happen before new Repo() because
+  // the constructor calls set_subduction_logger() and new MemorySigner()
+  // which require the Wasm module to be ready.
+  //
+  // Both imports use the ESM dynamic import wrapper so they share the
+  // same module graph as the Repo's internal /slim imports.
+  const repoMod = await dynamicImport("@automerge/automerge-repo")
+  await repoMod.initSubduction()
+  cachedRepoClass = repoMod.Repo as typeof Repo
+  return cachedRepoClass
+}
+
+/**
+ * Scan a directory tree for 0-byte files, which indicate incomplete writes
+ * from a previous run (process exited before storage flushed). Returns true
+ * if any are found.
+ */
+async function hasCorruptStorage(dir: string): Promise<boolean> {
+  try {
+    await fs.access(dir)
+  } catch {
+    return false
+  }
+
+  const entries = await fs.readdir(dir, {withFileTypes: true})
+  for (const entry of entries) {
+    const fullPath = path.join(dir, entry.name)
+    if (entry.isDirectory()) {
+      if (await hasCorruptStorage(fullPath)) return true
+    } else if (entry.isFile()) {
+      const stat = await fs.stat(fullPath)
+      if (stat.size === 0) return true
+    }
+  }
+  return false
+}
+
+/**
+ * Create an Automerge repository with configuration-based setup.
+ *
+ * When `sub` is true, uses the Subduction sync backend built into
+ * automerge-repo. The Repo manages its own SubductionSource internally —
+ * we just pass `subductionWebsocketEndpoints` and the Repo handles
+ * connection management, sync, and retries.
+ *
+ * When `sub` is false (default), uses the traditional WebSocket network
+ * adapter for sync via the automerge sync server.
 */
 export async function createRepo(
-  workingDir: string,
-  config: DirectoryConfig
+  workingDir: string,
+  config: DirectoryConfig,
+  sub: boolean = false
 ): Promise<Repo> {
-  const syncToolDir = path.join(workingDir, ".pushwork");
-  const storage = new NodeFSStorageAdapter(path.join(syncToolDir, "automerge"));
+  const RepoClass = await getRepoClass()
+
+  const syncToolDir = path.join(workingDir, ".pushwork")
+  const automergeDir = path.join(syncToolDir, "automerge")
+
+  // Detect and recover from corrupt local storage (0-byte files left by
+  // incomplete writes from a previous run). Wipe the cache so the Repo
+  // hydrates cleanly from the sync server.
+  if (await hasCorruptStorage(automergeDir)) {
+    console.warn("[pushwork] Corrupt local storage detected, clearing cache...")
+    await fs.rm(automergeDir, {recursive: true, force: true})
+    await fs.mkdir(automergeDir, {recursive: true})
+  }
+
+  const storage = new NodeFSStorageAdapter(automergeDir)
+
+  if (sub) {
+    const endpoints: string[] = []
+    if (config.sync_enabled && config.sync_server) {
+      endpoints.push(config.sync_server)
+    }
+
+    return new RepoClass({
+      storage,
+      // @ts-expect-error i don't know why
+      subductionWebsocketEndpoints: endpoints,
+    })
+  }
 
-  const repoConfig: any = { storage };
+  // Default: WebSocket sync adapter
+  const repoConfig: RepoConfig = {storage}
 
-  // Add network adapter only if sync is enabled and server is configured
-  if (config.sync_enabled && config.sync_server) {
-    const networkAdapter = new BrowserWebSocketClientAdapter(
-      config.sync_server
-    );
-    repoConfig.network = [networkAdapter];
-  }
+  if (config.sync_enabled && config.sync_server) {
+    // Load the WebSocket adapter via ESM dynamic import to stay in the
+    // same module graph as the Repo.
+    const wsMod = await dynamicImport(
+      "@automerge/automerge-repo-network-websocket"
+    )
+    // The websocket adapter package (subduction.8) hasn't updated its
+    // NetworkAdapter base-class types to match the repo's new
+    // NetworkAdapterInterface (which added state() and stricter
+    // EventEmitter generics). At runtime the adapter has all required
+    // methods; this is purely a declaration mismatch.
+    const networkAdapter = new wsMod.BrowserWebSocketClientAdapter(
      config.sync_server
    ) as unknown as NetworkAdapterInterface
+    repoConfig.network = [networkAdapter]
+  }
 
-  return new Repo(repoConfig);
+  return new RepoClass(repoConfig)
 }
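Finally, a hedged end-to-end sketch of the reworked factory (workingDir and config are assumed, values illustrative):

    import {createRepo} from "./utils/repo-factory"

    // Subduction backend: the Repo manages its own SubductionSource
    const repo = await createRepo(workingDir, config, /* sub */ true)

    // WebSocket backend, as in 1.1.8 (sub defaults to false)
    const legacyRepo = await createRepo(workingDir, config)

Either way, the factory now loads the Repo class through the ESM dynamic-import shim, so the Wasm initialization in getRepoClass() runs once per process and is reused on subsequent calls.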