pushwork 1.1.6 → 1.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. package/ARCHITECTURE-ACCORDING-TO-CLAUDE.md +17 -11
  2. package/CLAUDE.md +46 -1
  3. package/README.md +18 -4
  4. package/dist/cli.js +45 -4
  5. package/dist/cli.js.map +1 -1
  6. package/dist/commands.d.ts +1 -0
  7. package/dist/commands.d.ts.map +1 -1
  8. package/dist/commands.js +151 -38
  9. package/dist/commands.js.map +1 -1
  10. package/dist/core/change-detection.js +2 -2
  11. package/dist/core/change-detection.js.map +1 -1
  12. package/dist/core/config.d.ts.map +1 -1
  13. package/dist/core/config.js +3 -0
  14. package/dist/core/config.js.map +1 -1
  15. package/dist/core/move-detection.d.ts.map +1 -1
  16. package/dist/core/move-detection.js +4 -1
  17. package/dist/core/move-detection.js.map +1 -1
  18. package/dist/core/sync-engine.d.ts +24 -4
  19. package/dist/core/sync-engine.d.ts.map +1 -1
  20. package/dist/core/sync-engine.js +85 -50
  21. package/dist/core/sync-engine.js.map +1 -1
  22. package/dist/types/config.d.ts +4 -0
  23. package/dist/types/config.d.ts.map +1 -1
  24. package/dist/types/config.js +2 -1
  25. package/dist/types/config.js.map +1 -1
  26. package/dist/utils/content.js +1 -1
  27. package/dist/utils/content.js.map +1 -1
  28. package/dist/utils/network-sync.d.ts +1 -2
  29. package/dist/utils/network-sync.d.ts.map +1 -1
  30. package/dist/utils/network-sync.js +76 -7
  31. package/dist/utils/network-sync.js.map +1 -1
  32. package/dist/utils/output.js +7 -7
  33. package/dist/utils/output.js.map +1 -1
  34. package/dist/utils/repo-factory.d.ts +11 -3
  35. package/dist/utils/repo-factory.d.ts.map +1 -1
  36. package/dist/utils/repo-factory.js +112 -8
  37. package/dist/utils/repo-factory.js.map +1 -1
  38. package/flake.lock +128 -0
  39. package/flake.nix +66 -0
  40. package/package.json +98 -97
  41. package/scripts/roundtrip-test.sh +35 -0
  42. package/src/cli.ts +53 -6
  43. package/src/commands.ts +150 -26
  44. package/src/core/change-detection.ts +2 -2
  45. package/src/core/config.ts +4 -0
  46. package/src/core/move-detection.ts +3 -1
  47. package/src/core/sync-engine.ts +99 -59
  48. package/src/types/config.ts +4 -0
  49. package/src/utils/content.ts +1 -1
  50. package/src/utils/network-sync.ts +92 -8
  51. package/src/utils/output.ts +7 -7
  52. package/src/utils/repo-factory.ts +124 -10
  53. package/test/integration/clone-test.sh +0 -0
  54. package/test/integration/conflict-resolution-test.sh +0 -0
  55. package/test/integration/deletion-behavior-test.sh +0 -0
  56. package/test/integration/deletion-sync-test-simple.sh +0 -0
  57. package/test/integration/deletion-sync-test.sh +0 -0
  58. package/test/integration/full-integration-test.sh +0 -0
  59. package/test/integration/manual-sync-test.sh +0 -0
  60. package/test/integration/sub-flag.test.ts +187 -0
  61. package/test/run-tests.sh +0 -0
  62. package/test/unit/artifact-nuke-reinsert.test.ts +80 -0
  63. package/test/unit/network-sync-sub.test.ts +144 -0
  64. package/test/unit/repo-factory.test.ts +111 -0
  65. package/test/unit/subduction-config.test.ts +69 -0
  66. package/dist/cli/commands.d.ts +0 -71
  67. package/dist/cli/commands.d.ts.map +0 -1
  68. package/dist/cli/commands.js +0 -794
  69. package/dist/cli/commands.js.map +0 -1
  70. package/dist/cli/index.d.ts +0 -2
  71. package/dist/cli/index.d.ts.map +0 -1
  72. package/dist/cli/index.js +0 -19
  73. package/dist/cli/index.js.map +0 -1
  74. package/dist/config/index.d.ts +0 -71
  75. package/dist/config/index.d.ts.map +0 -1
  76. package/dist/config/index.js +0 -314
  77. package/dist/config/index.js.map +0 -1
  78. package/dist/utils/content-similarity.d.ts +0 -53
  79. package/dist/utils/content-similarity.d.ts.map +0 -1
  80. package/dist/utils/content-similarity.js +0 -155
  81. package/dist/utils/content-similarity.js.map +0 -1
  82. package/dist/utils/node-polyfills.d.ts +0 -9
  83. package/dist/utils/node-polyfills.d.ts.map +0 -1
  84. package/dist/utils/node-polyfills.js +0 -9
  85. package/dist/utils/node-polyfills.js.map +0 -1
package/src/core/sync-engine.ts
@@ -60,6 +60,64 @@ function changeWithOptionalHeads<T>(
   }
 }
 
+/**
+ * Nuke an artifact directory's docs array and rebuild it from scratch.
+ * Entries must be spread into plain objects — pushing Automerge proxy objects
+ * back after splicing them out throws "Cannot create a reference to an
+ * existing document object".
+ */
+export function nukeAndRebuildDocs(
+  doc: DirectoryDocument,
+  dirPath: string,
+  newEntries: {name: string; url: AutomergeUrl}[],
+  updatedEntries: {name: string; url: AutomergeUrl}[],
+  deletedNames: string[],
+  subdirUpdates: {name: string; url: AutomergeUrl}[],
+): void {
+  const deletedSet = new Set(deletedNames)
+  const updatedMap = new Map(updatedEntries.map(e => [e.name, e.url]))
+  const newMap = new Map(newEntries.map(e => [e.name, e.url]))
+  const subdirMap = new Map(subdirUpdates.map(e => [e.name, e.url]))
+
+  const kept: DirectoryEntry[] = []
+  for (const entry of doc.docs) {
+    if (entry.type === "file" && deletedSet.has(entry.name)) {
+      out.taskLine(
+        `Removed ${entry.name} from ${
+          formatRelativePath(dirPath) || "root"
+        }`
+      )
+      continue
+    }
+    if (entry.type === "file" && updatedMap.has(entry.name)) {
+      kept.push({...entry, url: updatedMap.get(entry.name)!})
+      continue
+    }
+    if (entry.type === "file" && newMap.has(entry.name)) {
+      // Existing entry being re-added (e.g. from immutable string replacement)
+      kept.push({...entry, url: newMap.get(entry.name)!})
+      newMap.delete(entry.name)
+      continue
+    }
+    if (entry.type === "folder" && subdirMap.has(entry.name)) {
+      kept.push({...entry, url: subdirMap.get(entry.name)!})
+      continue
+    }
+    kept.push({...entry})
+  }
+
+  // Add genuinely new file entries
+  for (const [name, url] of newMap) {
+    kept.push({name, type: "file", url})
+  }
+
+  // Nuke and rebuild
+  doc.docs.splice(0, doc.docs.length)
+  for (const entry of kept) {
+    doc.docs.push(entry)
+  }
+}
+
 /**
  * Sync configuration constants
  */
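The constraint in the helper's doc comment is easy to hit in any Automerge code that reshuffles arrays. A minimal sketch of the failure mode, assuming only the public automerge-repo API; the document shape here is hypothetical, not pushwork's DirectoryDocument:

import { Repo } from "@automerge/automerge-repo"

const repo = new Repo({ network: [] })
const handle = repo.create<{ docs: {name: string}[] }>({
  docs: [{name: "a.md"}, {name: "b.md"}],
})

handle.change((doc) => {
  // Splice every entry out, then try to put them back.
  const removed = doc.docs.splice(0, doc.docs.length)
  for (const entry of removed) {
    // doc.docs.push(entry) would throw:
    //   "Cannot create a reference to an existing document object"
    doc.docs.push({...entry}) // spreading into a plain object is safe
  }
})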
@@ -139,16 +197,25 @@ export class SyncEngine {
 
   /**
    * Get the appropriate URL for a subdirectory's directory entry.
-   * Always uses plain URLs — versioned URLs on directories can cause
-   * issues where consumers see a version without the docs array.
+   * Artifact directories get versioned URLs (with heads) so consumers can
+   * fetch the exact snapshotted version, matching how artifact files work.
+   * Non-artifact directories get plain URLs for collaborative editing.
    */
-  private getDirEntryUrl(handle: DocHandle<unknown>): AutomergeUrl {
+  private getDirEntryUrl(handle: DocHandle<unknown>, dirPath: string): AutomergeUrl {
+    if (this.isArtifactPath(dirPath)) {
+      return this.getVersionedUrl(handle)
+    }
     return getPlainUrl(handle.url)
   }
 
   /**
    * Set the root directory URL in the snapshot
    */
+  async getRootDirectoryUrl(): Promise<AutomergeUrl | undefined> {
+    const snapshot = await this.snapshotManager.load()
+    return snapshot?.rootDirectoryUrl
+  }
+
   async setRootDirectoryUrl(url: AutomergeUrl): Promise<void> {
     let snapshot = await this.snapshotManager.load()
     if (!snapshot) {
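The comment rewrite above flips the earlier policy: a versioned URL pins the directory document at specific heads, so an artifact consumer resolves the exact snapshot that was published, while a plain URL keeps resolving to the latest state for ordinary collaborative directories. The new getRootDirectoryUrl getter lets callers (presumably the reworked commands.ts in the file list) read the root URL back without reaching into the snapshot manager.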
@@ -365,7 +432,7 @@ export class SyncEngine {
   /**
    * Run full bidirectional sync
    */
-  async sync(): Promise<SyncResult> {
+  async sync(options?: {sub?: boolean}): Promise<SyncResult> {
     const result: SyncResult = {
       success: false,
       filesChanged: 0,
@@ -424,7 +491,6 @@ export class SyncEngine {
       await waitForBidirectionalSync(
         this.repo,
         snapshot.rootDirectoryUrl,
-        this.config.sync_server_storage_id,
         {
           timeoutMs: 5000, // Increased timeout for initial sync
           pollIntervalMs: 100,
@@ -468,6 +534,12 @@ export class SyncEngine {
 
     // Wait for network sync (important for clone scenarios)
     if (this.config.sync_enabled) {
+      const sub = options?.sub ?? false
+      // In Subduction mode, pass no StorageId so waitForSync
+      // falls back to head-stability polling. In WebSocket mode,
+      // pass the StorageId for precise getSyncInfo-based verification.
+      const storageId = sub ? undefined : this.config.sync_server_storage_id
+
       try {
         // Ensure root directory handle is tracked for sync
         if (snapshot.rootDirectoryUrl) {
@@ -488,11 +560,13 @@ export class SyncEngine {
         out.update(`Uploading ${allHandles.length} documents to sync server`)
         const {failed} = await waitForSync(
           allHandles,
-          this.config.sync_server_storage_id
+          storageId
         )
 
-        // Recreate failed documents and retry once
-        if (failed.length > 0) {
+        // Recreate failed documents and retry once.
+        // Skip in Subduction mode — SubductionSource has its
+        // own heal-sync retry logic.
+        if (failed.length > 0 && !sub) {
           debug(`sync: ${failed.length} documents failed, recreating`)
           out.update(`Recreating ${failed.length} failed documents`)
           const retryHandles = await this.recreateFailedDocuments(failed, snapshot)
@@ -501,7 +575,7 @@ export class SyncEngine {
           out.update(`Retrying ${retryHandles.length} recreated documents`)
           const retry = await waitForSync(
             retryHandles,
-            this.config.sync_server_storage_id
+            storageId
           )
           if (retry.failed.length > 0) {
             const msg = `${retry.failed.length} documents failed to sync to server after recreation`
@@ -514,6 +588,11 @@ export class SyncEngine {
             })
           }
         }
+      } else if (failed.length > 0 && sub) {
+        const msg = `${failed.length} document${failed.length === 1 ? '' : 's'} did not converge during sync (Subduction will retry in the background; re-run sync to confirm)`
+        debug(`sync: ${msg}`)
+        out.taskLine(msg, true)
+        result.warnings.push(msg)
       }
 
       debug("sync: all handles synced to server")
@@ -527,7 +606,6 @@ export class SyncEngine {
       await waitForBidirectionalSync(
         this.repo,
         snapshot.rootDirectoryUrl,
-        this.config.sync_server_storage_id,
        {
          timeoutMs: BIDIRECTIONAL_SYNC_TIMEOUT_MS,
          pollIntervalMs: 100,
@@ -550,10 +628,15 @@ export class SyncEngine {
         )
         debug("sync: syncing root directory touch to server")
         out.update("Syncing root directory update")
-        await waitForSync(
+        const rootSync = await waitForSync(
           [rootHandle],
-          this.config.sync_server_storage_id
+          storageId
         )
+        if (rootSync.failed.length > 0) {
+          const msg = "Root directory update did not converge to server; consumers may not see recent changes until next sync"
+          debug(`sync: ${msg}`)
+          result.warnings.push(msg)
+        }
       }
     } catch (error) {
       debug(`sync: network sync error: ${error}`)
@@ -880,7 +963,7 @@ export class SyncEngine {
         )
         subdirUpdates.push({
           name: childName,
-          url: this.getDirEntryUrl(childHandle),
+          url: this.getDirEntryUrl(childHandle, modifiedDir),
         })
       }
     }
@@ -1380,7 +1463,7 @@ export class SyncEngine {
     this.handlesByPath.set(directoryPath, childDirHandle)
 
     // Get appropriate URL for directory entry
-    const entryUrl = this.getDirEntryUrl(childDirHandle)
+    const entryUrl = this.getDirEntryUrl(childDirHandle, directoryPath)
 
     // Update snapshot with discovered directory
     this.snapshotManager.updateDirectoryEntry(snapshot, directoryPath, {
@@ -1411,7 +1494,7 @@ export class SyncEngine {
     const dirHandle = this.repo.create(dirDoc)
 
     // Get appropriate URL for directory entry
-    const dirEntryUrl = this.getDirEntryUrl(dirHandle)
+    const dirEntryUrl = this.getDirEntryUrl(dirHandle, directoryPath)
 
     // Add this directory to its parent
     // Use plain URL for mutable handle
@@ -1555,53 +1638,10 @@ export class SyncEngine {
     if (this.isArtifactPath(dirPath)) {
       // Artifact directories are always nuked: rebuild docs array from scratch
       // using a plain change() to avoid changeAt forking from stale heads.
-      const deletedSet = new Set(deletedNames)
-      const updatedMap = new Map(updatedEntries.map(e => [e.name, e.url]))
-      const newMap = new Map(newEntries.map(e => [e.name, e.url]))
-      const subdirMap = new Map(subdirUpdates.map(e => [e.name, e.url]))
-
       dirHandle.change((doc: DirectoryDocument) => {
         if (!doc.name) doc.name = dirName
         if (!doc.title) doc.title = dirName
-
-        // Collect desired entries from current state + changes
-        const kept: DirectoryEntry[] = []
-        for (const entry of doc.docs) {
-          if (entry.type === "file" && deletedSet.has(entry.name)) {
-            out.taskLine(
-              `Removed ${entry.name} from ${
-                formatRelativePath(dirPath) || "root"
-              }`
-            )
-            continue
-          }
-          if (entry.type === "file" && updatedMap.has(entry.name)) {
-            kept.push({...entry, url: updatedMap.get(entry.name)!})
-            continue
-          }
-          if (entry.type === "file" && newMap.has(entry.name)) {
-            // Existing entry being re-added (e.g. from immutable string replacement)
-            kept.push({...entry, url: newMap.get(entry.name)!})
-            newMap.delete(entry.name)
-            continue
-          }
-          if (entry.type === "folder" && subdirMap.has(entry.name)) {
-            kept.push({...entry, url: subdirMap.get(entry.name)!})
-            continue
-          }
-          kept.push(entry)
-        }
-
-        // Add genuinely new file entries
-        for (const [name, url] of newMap) {
-          kept.push({name, type: "file", url})
-        }
-
-        // Nuke and rebuild
-        doc.docs.splice(0, doc.docs.length)
-        for (const entry of kept) {
-          doc.docs.push(entry)
-        }
+        nukeAndRebuildDocs(doc, dirPath, newEntries, updatedEntries, deletedNames, subdirUpdates)
       })
     } else {
       changeWithOptionalHeads(dirHandle, heads, (doc: DirectoryDocument) => {
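Worth noting: the extraction above is not a pure code move. The removed inline version pushed the proxy directly in its fallthrough case (kept.push(entry)), while the new nukeAndRebuildDocs helper spreads it (kept.push({...entry})), closing the exact "Cannot create a reference to an existing document object" hole its doc comment describes. The added test/unit/artifact-nuke-reinsert.test.ts in the file list presumably exercises this path.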
package/src/types/config.ts
@@ -6,6 +6,7 @@ import { StorageId } from "@automerge/automerge-repo";
 export const DEFAULT_SYNC_SERVER = "wss://sync3.automerge.org";
 export const DEFAULT_SYNC_SERVER_STORAGE_ID =
   "3760df37-a4c6-4f66-9ecd-732039a9385d" as StorageId;
+export const DEFAULT_SUBDUCTION_SERVER = "wss://subduction.sync.inkandswitch.com";
 
 /**
  * Global configuration options
@@ -25,6 +26,7 @@ export interface GlobalConfig {
  */
 export interface DirectoryConfig extends GlobalConfig {
   root_directory_url?: string;
+  subduction?: boolean;
   sync_enabled: boolean;
 }
 
@@ -42,6 +44,7 @@ export interface CloneOptions extends CommandOptions {
   force?: boolean; // Overwrite existing directory
   syncServer?: string; // Custom sync server URL
   syncServerStorageId?: StorageId; // Custom sync server storage ID
+  sub?: boolean;
 }
 
 /**
@@ -83,6 +86,7 @@ export interface CheckoutOptions extends CommandOptions {
 export interface InitOptions extends CommandOptions {
   syncServer?: string;
   syncServerStorageId?: StorageId;
+  sub?: boolean;
 }
 
 /**
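Taken together, these four additions sketch the new opt-in surface: a default Subduction endpoint, a per-directory subduction flag, and a sub flag on the clone and init option types. A hedged illustration of a DirectoryConfig value using them (the import path and the presence of sync_server on GlobalConfig are assumptions based on how the rest of this diff uses config):

import {DirectoryConfig, DEFAULT_SUBDUCTION_SERVER} from "../types"

// Hypothetical: a directory opted into the Subduction backend.
const config: DirectoryConfig = {
  sync_enabled: true,
  sync_server: DEFAULT_SUBDUCTION_SERVER,
  subduction: true,
}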
package/src/utils/content.ts
@@ -16,7 +16,7 @@ export function isContentEqual(
   content2: string | Uint8Array | null
 ): boolean {
   if (content1 === content2) return true;
-  if (!content1 || !content2) return false;
+  if (content1 == null || content2 == null) return false;
 
   if (typeof content1 !== typeof content2) return false;
 
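The guard change above is about intent as much as behavior; a few hypothetical calls:

isContentEqual("", "")    // true: identical values, caught by === before the guard
isContentEqual(null, "")  // false: content genuinely missing on one side
isContentEqual("", "x")   // false: but now decided by the comparison logic, not the guard

The old !content1 || !content2 guard conflated empty-but-present content with missing content; == null, which also matches undefined, only bails out when content is actually absent.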
package/src/utils/network-sync.ts
@@ -21,13 +21,11 @@ function debug(...args: any[]) {
  *
  * @param repo - The Automerge repository
  * @param rootDirectoryUrl - The root directory URL to start traversal from
- * @param syncServerStorageId - The sync server storage ID
  * @param options - Configuration options
  */
 export async function waitForBidirectionalSync(
   repo: Repo,
   rootDirectoryUrl: AutomergeUrl | undefined,
-  syncServerStorageId: StorageId | undefined,
   options: {
     timeoutMs?: number;
     pollIntervalMs?: number;
@@ -42,7 +40,7 @@ export async function waitForBidirectionalSync(
     handles,
   } = options;
 
-  if (!syncServerStorageId || !rootDirectoryUrl) {
+  if (!rootDirectoryUrl) {
     return;
   }
 
@@ -295,16 +293,20 @@ export async function waitForSync(
 ): Promise<SyncWaitResult> {
   const startTime = Date.now();
 
-  if (!syncServerStorageId) {
-    debug("waitForSync: no sync server storage ID, skipping");
-    return { failed: [] };
-  }
-
   if (handlesToWaitOn.length === 0) {
     debug("waitForSync: no documents to sync");
     return { failed: [] };
   }
 
+  // When no StorageId is available (Subduction mode), use head-stability
+  // polling. The SubductionSource handles sync internally — we just wait
+  // for each handle's heads to stop changing.
+  if (!syncServerStorageId) {
+    debug(`waitForSync: no storage ID, using head-stability polling for ${handlesToWaitOn.length} documents`);
+    out.taskLine(`Waiting for ${handlesToWaitOn.length} documents to sync`);
+    return waitForSyncViaHeadStability(handlesToWaitOn, timeoutMs, startTime);
+  }
+
   debug(`waitForSync: waiting for ${handlesToWaitOn.length} documents (timeout=${timeoutMs}ms, batchSize=${SYNC_BATCH_SIZE})`);
 
   // Separate already-synced from needs-sync
@@ -370,3 +372,85 @@ export async function waitForSync(
 
   return { failed };
 }
+
+/**
+ * Wait for sync by polling head stability (Subduction mode).
+ * Each handle's heads are polled until they remain unchanged for
+ * several consecutive checks, indicating the SubductionSource has
+ * finished syncing.
+ */
+async function waitForSyncViaHeadStability(
+  handles: DocHandle<unknown>[],
+  timeoutMs: number,
+  startTime: number,
+): Promise<SyncWaitResult> {
+  const failed: DocHandle<unknown>[] = [];
+  let synced = 0;
+
+  // Process in batches
+  for (let i = 0; i < handles.length; i += SYNC_BATCH_SIZE) {
+    const batch = handles.slice(i, i + SYNC_BATCH_SIZE);
+
+    const results = await Promise.allSettled(
+      batch.map(handle => waitForHandleHeadStability(handle, timeoutMs, startTime))
+    );
+
+    for (const result of results) {
+      if (result.status === "rejected") {
+        failed.push(result.reason as DocHandle<unknown>);
+      } else {
+        synced++;
+      }
+    }
+  }
+
+  const elapsed = Date.now() - startTime;
+  if (failed.length > 0) {
+    debug(`waitForSync(heads): ${failed.length} documents failed after ${elapsed}ms`);
+    out.taskLine(`Sync: ${synced} synced, ${failed.length} timed out after ${(elapsed / 1000).toFixed(1)}s`, true);
+  } else {
+    debug(`waitForSync(heads): all ${handles.length} documents synced in ${elapsed}ms`);
+    out.taskLine(`All ${handles.length} documents synced (${(elapsed / 1000).toFixed(1)}s)`);
+  }
+
+  return { failed };
+}
+
+/**
+ * Wait for a single handle's heads to stabilize.
+ * Polls heads at 100ms intervals; resolves after 3 consecutive stable
+ * checks, rejects on timeout.
+ */
+function waitForHandleHeadStability(
+  handle: DocHandle<unknown>,
+  timeoutMs: number,
+  startTime: number,
+): Promise<DocHandle<unknown>> {
+  return new Promise<DocHandle<unknown>>((resolve, reject) => {
+    let lastHeads = JSON.stringify(handle.heads());
+    let stableCount = 0;
+    const stableRequired = 3;
+
+    const pollInterval = setInterval(() => {
+      const currentHeads = JSON.stringify(handle.heads());
+      if (currentHeads === lastHeads) {
+        stableCount++;
+        if (stableCount >= stableRequired) {
+          clearInterval(pollInterval);
+          clearTimeout(timeout);
+          debug(`waitForSync(heads): ${handle.url}... converged in ${Date.now() - startTime}ms`);
+          resolve(handle);
+        }
+      } else {
+        stableCount = 0;
+        lastHeads = currentHeads;
+      }
+    }, 100);
+
+    const timeout = setTimeout(() => {
+      clearInterval(pollInterval);
+      debug(`waitForSync(heads): ${handle.url}... timed out after ${timeoutMs}ms`);
+      reject(handle);
+    }, timeoutMs);
+  });
+}
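Two details of the polling code above are easy to miss: rejection carries the DocHandle itself rather than an Error, which is what lets the batch loop recover failed handles from result.reason; and three consecutive stable checks at a 100ms interval mean a handle must sit quiet for roughly 300ms before it counts as synced. Head stability is a convergence heuristic, not proof the server has persisted the changes, which is why the sync() caller surfaces Subduction-mode failures as warnings rather than errors.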
package/src/utils/output.ts
@@ -345,7 +345,7 @@ export class Output {
       this.taskOriginalMessage = null;
       this.taskCurrentMessage = null;
     }
-    console.log(
+    console.error(
       chalk.red(
         message instanceof Error
           ? message.message
@@ -367,7 +367,7 @@ export class Output {
       this.taskOriginalMessage = null;
       this.taskCurrentMessage = null;
     }
-    console.log(
+    console.error(
       `\n${chalk.bgRed.white(` ${label} `)}${message && ` ${message}`}`
     );
   }
@@ -400,19 +400,19 @@ export class Output {
 
     if (error instanceof Error) {
       // Error type and message
-      console.log(chalk.red(`${error.name}: ${error.message}`));
+      console.error(chalk.red(`${error.name}: ${error.message}`));
 
       // Stack trace
       if (error.stack) {
-        console.log("");
-        console.log(chalk.dim("Stack trace:"));
+        console.error("");
+        console.error(chalk.dim("Stack trace:"));
         const stackLines = error.stack.split("\n").slice(1); // Skip first line (error message)
         stackLines.forEach((line) =>
-          console.log(chalk.dim(`  ${line.trim()}`))
+          console.error(chalk.dim(`  ${line.trim()}`))
         );
       }
     } else {
-      console.log(chalk.red(String(error)));
+      console.error(chalk.red(String(error)));
     }
 
     process.exit(exitCode);
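Switching these from console.log to console.error routes pushwork's error output to stderr, so shell redirections that capture stdout (for example piping a command's normal output to a file) no longer swallow the red diagnostics and stack traces.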
package/src/utils/repo-factory.ts
@@ -1,28 +1,142 @@
-import { Repo } from "@automerge/automerge-repo";
+import { type Repo, type RepoConfig, type NetworkAdapterInterface } from "@automerge/automerge-repo";
 import { NodeFSStorageAdapter } from "@automerge/automerge-repo-storage-nodefs";
-import { BrowserWebSocketClientAdapter } from "@automerge/automerge-repo-network-websocket";
+import * as fs from "fs/promises";
 import * as path from "path";
 import { DirectoryConfig } from "../types";
 
 /**
- * Create an Automerge repository with configuration-based setup
+ * Perform a real ESM dynamic import that tsc won't rewrite to require().
+ *
+ * TypeScript with `"module": "commonjs"` compiles `await import("x")` to
+ * `require("x")`, which resolves CJS entries instead of ESM entries. The
+ * Wasm module instance is different between the CJS and ESM module graphs,
+ * so initializing via CJS require() doesn't help the ESM /slim imports
+ * inside automerge-repo.
+ *
+ * This helper uses `new Function` to create a real `import()` expression
+ * that Node.js evaluates as ESM, sharing the same module graph as the
+ * Repo's internal imports.
+ */
+const dynamicImport = new Function("specifier", "return import(specifier)") as (
+  specifier: string,
+) => Promise<any>;
+
+/**
+ * Initialize the Subduction Wasm module and return the Repo constructor.
+ *
+ * The Repo constructor calls set_subduction_logger() and new MemorySigner()
+ * from @automerge/automerge-subduction/slim, which require the Wasm module
+ * to be initialized first. automerge-repo exports initSubduction() to
+ * handle this — it dynamically imports the non-/slim entry (which
+ * auto-initializes the Wasm as a side effect).
+ *
+ * Both the Repo and initSubduction must be loaded via ESM dynamic import()
+ * so they share the same module graph as the Repo's internal /slim imports.
+ */
+let cachedRepoClass: typeof Repo | undefined;
+
+async function getRepoClass(): Promise<typeof Repo> {
+  if (cachedRepoClass) return cachedRepoClass;
+
+  // Import Repo and initialize Subduction Wasm via automerge-repo's
+  // initSubduction() helper. This must happen before new Repo() because
+  // the constructor calls set_subduction_logger() and new MemorySigner()
+  // which require the Wasm module to be ready.
+  //
+  // Both imports use the ESM dynamic import wrapper so they share the
+  // same module graph as the Repo's internal /slim imports.
+  const repoMod = await dynamicImport("@automerge/automerge-repo");
+  await repoMod.initSubduction();
+  cachedRepoClass = repoMod.Repo as typeof Repo;
+  return cachedRepoClass;
+}
+
+/**
+ * Scan a directory tree for 0-byte files, which indicate incomplete writes
+ * from a previous run (process exited before storage flushed). Returns true
+ * if any are found.
+ */
+async function hasCorruptStorage(dir: string): Promise<boolean> {
+  try {
+    await fs.access(dir);
+  } catch {
+    return false;
+  }
+
+  const entries = await fs.readdir(dir, { withFileTypes: true });
+  for (const entry of entries) {
+    const fullPath = path.join(dir, entry.name);
+    if (entry.isDirectory()) {
+      if (await hasCorruptStorage(fullPath)) return true;
+    } else if (entry.isFile()) {
+      const stat = await fs.stat(fullPath);
+      if (stat.size === 0) return true;
+    }
+  }
+  return false;
+}
+
+/**
+ * Create an Automerge repository with configuration-based setup.
+ *
+ * When `sub` is true, uses the Subduction sync backend built into
+ * automerge-repo. The Repo manages its own SubductionSource internally —
+ * we just pass `subductionWebsocketEndpoints` and the Repo handles
+ * connection management, sync, and retries.
+ *
+ * When `sub` is false (default), uses the traditional WebSocket network
+ * adapter for sync via the automerge sync server.
  */
 export async function createRepo(
   workingDir: string,
-  config: DirectoryConfig
+  config: DirectoryConfig,
+  sub: boolean = false
 ): Promise<Repo> {
+  const RepoClass = await getRepoClass();
+
   const syncToolDir = path.join(workingDir, ".pushwork");
-  const storage = new NodeFSStorageAdapter(path.join(syncToolDir, "automerge"));
+  const automergeDir = path.join(syncToolDir, "automerge");
+
+  // Detect and recover from corrupt local storage (0-byte files left by
+  // incomplete writes from a previous run). Wipe the cache so the Repo
+  // hydrates cleanly from the sync server.
+  if (await hasCorruptStorage(automergeDir)) {
+    console.warn("[pushwork] Corrupt local storage detected, clearing cache...");
+    await fs.rm(automergeDir, { recursive: true, force: true });
+    await fs.mkdir(automergeDir, { recursive: true });
+  }
+
+  const storage = new NodeFSStorageAdapter(automergeDir);
+
+  if (sub) {
+    const endpoints: string[] = [];
+    if (config.sync_enabled && config.sync_server) {
+      endpoints.push(config.sync_server);
+    }
+
+    return new RepoClass({
+      storage,
+      subductionWebsocketEndpoints: endpoints,
+    });
+  }
 
-  const repoConfig: any = { storage };
+  // Default: WebSocket sync adapter
+  const repoConfig: RepoConfig = { storage };
 
-  // Add network adapter only if sync is enabled and server is configured
   if (config.sync_enabled && config.sync_server) {
-    const networkAdapter = new BrowserWebSocketClientAdapter(
+    // Load the WebSocket adapter via ESM dynamic import to stay in the
+    // same module graph as the Repo.
+    const wsMod = await dynamicImport("@automerge/automerge-repo-network-websocket");
+    // The websocket adapter package (subduction.8) hasn't updated its
+    // NetworkAdapter base-class types to match the repo's new
+    // NetworkAdapterInterface (which added state() and stricter
+    // EventEmitter generics). At runtime the adapter has all required
+    // methods; this is purely a declaration mismatch.
+    const networkAdapter = new wsMod.BrowserWebSocketClientAdapter(
       config.sync_server
-    );
+    ) as unknown as NetworkAdapterInterface;
    repoConfig.network = [networkAdapter];
  }
 
-  return new Repo(repoConfig);
+  return new RepoClass(repoConfig);
 }
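A hedged usage sketch of the new factory signature, to be run inside an async context; the config literals are hypothetical, and sync_server is assumed to live on GlobalConfig as the rest of this diff implies:

import {createRepo} from "./utils/repo-factory"
import {DEFAULT_SYNC_SERVER, DEFAULT_SUBDUCTION_SERVER} from "./types"

// Subduction mode: the Repo wires up its own SubductionSource from
// subductionWebsocketEndpoints and handles retries internally.
const subRepo = await createRepo(
  process.cwd(),
  {sync_enabled: true, sync_server: DEFAULT_SUBDUCTION_SERVER, subduction: true},
  true, // sub
)

// Default WebSocket mode: BrowserWebSocketClientAdapter against the
// classic sync server.
const wsRepo = await createRepo(process.cwd(), {
  sync_enabled: true,
  sync_server: DEFAULT_SYNC_SERVER,
})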