pushwork 1.0.21 → 1.0.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/CLAUDE.md +24 -2
  2. package/dist/cli.js +21 -0
  3. package/dist/cli.js.map +1 -1
  4. package/dist/commands.d.ts +11 -1
  5. package/dist/commands.d.ts.map +1 -1
  6. package/dist/commands.js +81 -7
  7. package/dist/commands.js.map +1 -1
  8. package/dist/core/change-detection.d.ts +8 -1
  9. package/dist/core/change-detection.d.ts.map +1 -1
  10. package/dist/core/change-detection.js +69 -1
  11. package/dist/core/change-detection.js.map +1 -1
  12. package/dist/core/sync-engine.d.ts +11 -0
  13. package/dist/core/sync-engine.d.ts.map +1 -1
  14. package/dist/core/sync-engine.js +257 -42
  15. package/dist/core/sync-engine.js.map +1 -1
  16. package/dist/types/config.d.ts +5 -0
  17. package/dist/types/config.d.ts.map +1 -1
  18. package/dist/types/snapshot.d.ts +1 -0
  19. package/dist/types/snapshot.d.ts.map +1 -1
  20. package/dist/utils/content.d.ts +5 -0
  21. package/dist/utils/content.d.ts.map +1 -1
  22. package/dist/utils/content.js +9 -0
  23. package/dist/utils/content.js.map +1 -1
  24. package/dist/utils/network-sync.d.ts +12 -2
  25. package/dist/utils/network-sync.d.ts.map +1 -1
  26. package/dist/utils/network-sync.js +127 -77
  27. package/dist/utils/network-sync.js.map +1 -1
  28. package/dist/utils/repo-factory.d.ts.map +1 -1
  29. package/dist/utils/repo-factory.js +0 -1
  30. package/dist/utils/repo-factory.js.map +1 -1
  31. package/package.json +1 -1
  32. package/src/cli.ts +36 -0
  33. package/src/commands.ts +106 -6
  34. package/src/core/change-detection.ts +81 -2
  35. package/src/core/sync-engine.ts +319 -48
  36. package/src/types/config.ts +5 -0
  37. package/src/types/snapshot.ts +1 -0
  38. package/src/utils/content.ts +10 -0
  39. package/src/utils/network-sync.ts +162 -94
  40. package/src/utils/repo-factory.ts +0 -1
@@ -32,12 +32,14 @@ export async function waitForBidirectionalSync(
32
32
  timeoutMs?: number;
33
33
  pollIntervalMs?: number;
34
34
  stableChecksRequired?: number;
35
+ handles?: DocHandle<unknown>[];
35
36
  } = {},
36
37
  ): Promise<void> {
37
38
  const {
38
39
  timeoutMs = 10000,
39
40
  pollIntervalMs = 100,
40
41
  stableChecksRequired = 3,
42
+ handles,
41
43
  } = options;
42
44
 
43
45
  if (!syncServerStorageId || !rootDirectoryUrl) {
@@ -48,13 +50,25 @@ export async function waitForBidirectionalSync(
48
50
  let lastSeenHeads = new Map<string, string>();
49
51
  let stableCount = 0;
50
52
  let pollCount = 0;
53
+ let dynamicTimeoutMs = timeoutMs;
51
54
 
52
- debug(`waitForBidirectionalSync: starting (timeout=${timeoutMs}ms, stableChecks=${stableChecksRequired})`);
55
+ debug(`waitForBidirectionalSync: starting (timeout=${timeoutMs}ms, stableChecks=${stableChecksRequired}${handles ? `, tracking ${handles.length} handles` : ', full tree scan'})`);
53
56
 
54
- while (Date.now() - startTime < timeoutMs) {
57
+ while (Date.now() - startTime < dynamicTimeoutMs) {
55
58
  pollCount++;
56
- // Get current heads for all documents in the directory hierarchy
57
- const currentHeads = await getAllDocumentHeads(repo, rootDirectoryUrl);
59
+ // Get current heads: use provided handles if available, otherwise full tree scan
60
+ const currentHeads = handles
61
+ ? getHandleHeads(handles)
62
+ : await getAllDocumentHeads(repo, rootDirectoryUrl);
63
+
64
+ // After first scan: scale timeout to tree size and reset the clock.
65
+ // The first scan is just establishing a baseline — its duration
66
+ // shouldn't count against the stability-wait timeout.
67
+ if (pollCount === 1) {
68
+ const scanDuration = Date.now() - startTime;
69
+ dynamicTimeoutMs = Math.max(timeoutMs, 5000 + currentHeads.size * 50) + scanDuration;
70
+ debug(`waitForBidirectionalSync: first scan took ${scanDuration}ms, timeout now ${dynamicTimeoutMs}ms for ${currentHeads.size} docs`);
71
+ }
58
72
 
59
73
  // Check if heads are stable (no changes since last check)
60
74
  const isStable = headsMapEqual(lastSeenHeads, currentHeads);
@@ -103,6 +117,20 @@ export async function waitForBidirectionalSync(
103
117
  out.taskLine(`Bidirectional sync timed out after ${(elapsed / 1000).toFixed(1)}s - document heads were still changing after ${pollCount} checks across ${lastSeenHeads.size} docs (reached ${stableCount}/${stableChecksRequired} stability checks). This may mean another peer is actively editing, or the sync server is slow to relay changes. The sync will continue but some remote changes may not be reflected yet.`, true);
104
118
  }
105
119
 
120
+ /**
121
+ * Get heads from a pre-collected set of handles (cheap, synchronous reads).
122
+ * Used for post-push stabilization where we already know which documents changed.
123
+ */
124
+ function getHandleHeads(
125
+ handles: DocHandle<unknown>[],
126
+ ): Map<string, string> {
127
+ const heads = new Map<string, string>();
128
+ for (const handle of handles) {
129
+ heads.set(getPlainUrl(handle.url), JSON.stringify(handle.heads()));
130
+ }
131
+ return heads;
132
+ }
133
+
106
134
  /**
107
135
  * Get all document heads in the directory hierarchy.
108
136
  * Returns a map of document URL -> serialized heads.
@@ -139,8 +167,8 @@ async function collectHeadsRecursive(
139
167
  return;
140
168
  }
141
169
 
142
- // Process all entries in the directory
143
- for (const entry of doc.docs) {
170
+ // Process all entries in the directory concurrently
171
+ await Promise.all(doc.docs.map(async (entry: { type: string; url: AutomergeUrl; name: string }) => {
144
172
  if (entry.type === "folder") {
145
173
  // Recurse into subdirectory (entry.url may have stale heads)
146
174
  await collectHeadsRecursive(repo, entry.url, heads);
@@ -154,7 +182,7 @@ async function collectHeadsRecursive(
154
182
  // File document may not exist yet
155
183
  }
156
184
  }
157
- }
185
+ }));
158
186
  } catch {
159
187
  // Directory may not exist yet
160
188
  }
@@ -179,126 +207,166 @@ function headsMapEqual(
179
207
  }
180
208
 
181
209
  /**
182
- * Wait for documents to sync to the remote server
210
+ * Result of waitForSync lists which handles failed to sync.
211
+ */
212
+ export interface SyncWaitResult {
213
+ failed: DocHandle<unknown>[];
214
+ }
215
+
216
+ /** Maximum documents to sync concurrently to avoid flooding the server */
217
+ const SYNC_BATCH_SIZE = 10;
218
+
219
+ /**
220
+ * Wait for a single document handle to sync to the server.
221
+ * Resolves with the handle on success, rejects with the handle on timeout.
222
+ */
223
+ function waitForHandleSync(
224
+ handle: DocHandle<unknown>,
225
+ syncServerStorageId: StorageId,
226
+ timeoutMs: number,
227
+ startTime: number,
228
+ ): Promise<DocHandle<unknown>> {
229
+ return new Promise<DocHandle<unknown>>((resolve, reject) => {
230
+ let pollInterval: NodeJS.Timeout;
231
+
232
+ const cleanup = () => {
233
+ clearTimeout(timeout);
234
+ clearInterval(pollInterval);
235
+ handle.off("remote-heads", onRemoteHeads);
236
+ };
237
+
238
+ const onConverged = () => {
239
+ debug(`waitForSync: ${handle.url.slice(0, 20)}... converged in ${Date.now() - startTime}ms`);
240
+ cleanup();
241
+ resolve(handle);
242
+ };
243
+
244
+ const timeout = setTimeout(() => {
245
+ debug(`waitForSync: ${handle.url.slice(0, 20)}... timed out after ${timeoutMs}ms`);
246
+ cleanup();
247
+ reject(handle);
248
+ }, timeoutMs);
249
+
250
+ const isConverged = () => {
251
+ const localHeads = handle.heads();
252
+ const info = handle.getSyncInfo(syncServerStorageId);
253
+ return A.equals(localHeads, info?.lastHeads);
254
+ };
255
+
256
+ const onRemoteHeads = ({
257
+ storageId,
258
+ }: {
259
+ storageId: StorageId;
260
+ heads: any;
261
+ }) => {
262
+ if (storageId === syncServerStorageId && isConverged()) {
263
+ onConverged();
264
+ }
265
+ };
266
+
267
+ // Initial check
268
+ if (isConverged()) {
269
+ cleanup();
270
+ resolve(handle);
271
+ return;
272
+ }
273
+
274
+ // Start polling and event listening
275
+ pollInterval = setInterval(() => {
276
+ if (isConverged()) {
277
+ onConverged();
278
+ }
279
+ }, 100);
280
+
281
+ handle.on("remote-heads", onRemoteHeads);
282
+ });
283
+ }
284
+
285
+ /**
286
+ * Wait for documents to sync to the remote server.
287
+ * Processes handles in batches to avoid flooding the server.
288
+ * Returns a result with any failed handles instead of throwing,
289
+ * so callers can attempt recovery (e.g. recreating documents).
183
290
  */
184
291
  export async function waitForSync(
185
292
  handlesToWaitOn: DocHandle<unknown>[],
186
293
  syncServerStorageId?: StorageId,
187
294
  timeoutMs: number = 60000,
188
- ): Promise<void> {
295
+ ): Promise<SyncWaitResult> {
189
296
  const startTime = Date.now();
190
297
 
191
298
  if (!syncServerStorageId) {
192
299
  debug("waitForSync: no sync server storage ID, skipping");
193
- return;
300
+ return { failed: [] };
194
301
  }
195
302
 
196
303
  if (handlesToWaitOn.length === 0) {
197
304
  debug("waitForSync: no documents to sync");
198
- return;
305
+ return { failed: [] };
199
306
  }
200
307
 
201
- debug(`waitForSync: waiting for ${handlesToWaitOn.length} documents (timeout=${timeoutMs}ms)`);
308
+ debug(`waitForSync: waiting for ${handlesToWaitOn.length} documents (timeout=${timeoutMs}ms, batchSize=${SYNC_BATCH_SIZE})`);
202
309
 
310
+ // Separate already-synced from needs-sync
311
+ const needsSync: DocHandle<unknown>[] = [];
203
312
  let alreadySynced = 0;
204
313
 
205
- const promises = handlesToWaitOn.map((handle) => {
206
- // Check if already synced
314
+ for (const handle of handlesToWaitOn) {
207
315
  const heads = handle.heads();
208
316
  const syncInfo = handle.getSyncInfo(syncServerStorageId);
209
317
  const remoteHeads = syncInfo?.lastHeads;
210
- const wasAlreadySynced = A.equals(heads, remoteHeads);
211
-
212
- if (wasAlreadySynced) {
318
+ if (A.equals(heads, remoteHeads)) {
213
319
  alreadySynced++;
214
320
  debug(`waitForSync: ${handle.url.slice(0, 20)}... already synced`);
215
- return Promise.resolve();
321
+ } else {
322
+ debug(`waitForSync: ${handle.url.slice(0, 20)}... needs sync (remoteHeads=${remoteHeads ? 'present' : 'missing'})`);
323
+ needsSync.push(handle);
216
324
  }
325
+ }
217
326
 
218
- debug(`waitForSync: ${handle.url.slice(0, 20)}... waiting for convergence (remoteHeads=${remoteHeads ? 'present' : 'missing'})`);
219
-
220
- // Wait for convergence
221
- return new Promise<void>((resolve, reject) => {
222
- let pollInterval: NodeJS.Timeout;
223
-
224
- const cleanup = () => {
225
- clearTimeout(timeout);
226
- clearInterval(pollInterval);
227
- handle.off("remote-heads", onRemoteHeads);
228
- };
229
-
230
- const onConverged = () => {
231
- debug(`waitForSync: ${handle.url.slice(0, 20)}... converged in ${Date.now() - startTime}ms`);
232
- cleanup();
233
- resolve();
234
- };
235
-
236
- const timeout = setTimeout(() => {
237
- debug(`waitForSync: ${handle.url.slice(0, 20)}... timed out after ${timeoutMs}ms`);
238
- cleanup();
239
- reject(
240
- new Error(
241
- `Sync timeout after ${timeoutMs}ms for document ${handle.url}`,
242
- ),
243
- );
244
- }, timeoutMs);
245
-
246
- const isConverged = () => {
247
- const localHeads = handle.heads();
248
- const info = handle.getSyncInfo(syncServerStorageId);
249
- return A.equals(localHeads, info?.lastHeads);
250
- };
251
-
252
- const onRemoteHeads = ({
253
- storageId,
254
- }: {
255
- storageId: StorageId;
256
- heads: any;
257
- }) => {
258
- if (storageId === syncServerStorageId && isConverged()) {
259
- onConverged();
260
- }
261
- };
327
+ if (needsSync.length > 0) {
328
+ debug(`waitForSync: ${alreadySynced} already synced, ${needsSync.length} need sync`);
329
+ out.taskLine(`Uploading: ${alreadySynced}/${handlesToWaitOn.length} already synced, waiting for ${needsSync.length} more`);
330
+ } else {
331
+ debug(`waitForSync: all ${handlesToWaitOn.length} already synced`);
332
+ return { failed: [] };
333
+ }
262
334
 
263
- const poll = () => {
264
- if (isConverged()) {
265
- onConverged();
266
- return true;
267
- }
268
- return false;
269
- };
335
+ // Process in batches to avoid flooding the server
336
+ const failed: DocHandle<unknown>[] = [];
337
+ let synced = alreadySynced;
270
338
 
271
- // Initial check
272
- if (poll()) {
273
- return;
274
- }
339
+ for (let i = 0; i < needsSync.length; i += SYNC_BATCH_SIZE) {
340
+ const batch = needsSync.slice(i, i + SYNC_BATCH_SIZE);
341
+ const batchNum = Math.floor(i / SYNC_BATCH_SIZE) + 1;
342
+ const totalBatches = Math.ceil(needsSync.length / SYNC_BATCH_SIZE);
275
343
 
276
- // Start polling and event listening
277
- pollInterval = setInterval(() => {
278
- poll();
279
- }, 100);
344
+ if (totalBatches > 1) {
345
+ debug(`waitForSync: batch ${batchNum}/${totalBatches} (${batch.length} docs)`);
346
+ out.update(`Uploading batch ${batchNum}/${totalBatches} (${synced}/${handlesToWaitOn.length} done)`);
347
+ }
280
348
 
281
- handle.on("remote-heads", onRemoteHeads);
282
- });
283
- });
349
+ const results = await Promise.allSettled(
350
+ batch.map(handle => waitForHandleSync(handle, syncServerStorageId, timeoutMs, startTime))
351
+ );
284
352
 
285
- const needSync = handlesToWaitOn.length - alreadySynced;
286
- if (needSync > 0) {
287
- debug(`waitForSync: ${alreadySynced} already synced, waiting for ${needSync} remaining`);
288
- out.taskLine(`Uploading: ${alreadySynced}/${handlesToWaitOn.length} already synced, waiting for ${needSync} more`);
289
- } else {
290
- debug(`waitForSync: all ${handlesToWaitOn.length} already synced`);
353
+ for (const result of results) {
354
+ if (result.status === "rejected") {
355
+ failed.push(result.reason as DocHandle<unknown>);
356
+ } else {
357
+ synced++;
358
+ }
359
+ }
291
360
  }
292
361
 
293
- try {
294
- await Promise.all(promises);
295
- const elapsed = Date.now() - startTime;
362
+ const elapsed = Date.now() - startTime;
363
+ if (failed.length > 0) {
364
+ debug(`waitForSync: ${failed.length} documents failed after ${elapsed}ms`);
365
+ out.taskLine(`Upload: ${synced} synced, ${failed.length} failed after ${(elapsed / 1000).toFixed(1)}s`, true);
366
+ } else {
296
367
  debug(`waitForSync: all ${handlesToWaitOn.length} documents synced in ${elapsed}ms (${alreadySynced} were already synced)`);
297
368
  out.taskLine(`All ${handlesToWaitOn.length} documents uploaded to server (${(elapsed / 1000).toFixed(1)}s)`);
298
- } catch (error) {
299
- const elapsed = Date.now() - startTime;
300
- debug(`waitForSync: failed after ${elapsed}ms: ${error}`);
301
- out.taskLine(`Upload to server failed after ${(elapsed / 1000).toFixed(1)}s: ${error}`, true);
302
- throw error;
303
369
  }
370
+
371
+ return { failed };
304
372
  }
@@ -22,7 +22,6 @@ export async function createRepo(
22
22
  config.sync_server
23
23
  );
24
24
  repoConfig.network = [networkAdapter];
25
- repoConfig.enableRemoteHeadsGossiping = true;
26
25
  }
27
26
 
28
27
  const repo = new Repo(repoConfig);