pushwork 1.0.22 → 1.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/CLAUDE.md +22 -2
  2. package/dist/cli.js +10 -11
  3. package/dist/cli.js.map +1 -1
  4. package/dist/commands.d.ts +5 -3
  5. package/dist/commands.d.ts.map +1 -1
  6. package/dist/commands.js +33 -40
  7. package/dist/commands.js.map +1 -1
  8. package/dist/core/change-detection.d.ts +8 -1
  9. package/dist/core/change-detection.d.ts.map +1 -1
  10. package/dist/core/change-detection.js +69 -1
  11. package/dist/core/change-detection.js.map +1 -1
  12. package/dist/core/sync-engine.d.ts +4 -3
  13. package/dist/core/sync-engine.d.ts.map +1 -1
  14. package/dist/core/sync-engine.js +156 -108
  15. package/dist/core/sync-engine.js.map +1 -1
  16. package/dist/types/config.d.ts +0 -5
  17. package/dist/types/config.d.ts.map +1 -1
  18. package/dist/types/snapshot.d.ts +1 -0
  19. package/dist/types/snapshot.d.ts.map +1 -1
  20. package/dist/utils/content.d.ts +5 -0
  21. package/dist/utils/content.d.ts.map +1 -1
  22. package/dist/utils/content.js +9 -0
  23. package/dist/utils/content.js.map +1 -1
  24. package/dist/utils/network-sync.d.ts +11 -2
  25. package/dist/utils/network-sync.d.ts.map +1 -1
  26. package/dist/utils/network-sync.js +103 -74
  27. package/dist/utils/network-sync.js.map +1 -1
  28. package/dist/utils/repo-factory.d.ts.map +1 -1
  29. package/dist/utils/repo-factory.js +0 -1
  30. package/dist/utils/repo-factory.js.map +1 -1
  31. package/package.json +1 -1
  32. package/src/cli.ts +19 -17
  33. package/src/commands.ts +39 -47
  34. package/src/core/change-detection.ts +81 -2
  35. package/src/core/sync-engine.ts +171 -132
  36. package/src/types/config.ts +0 -5
  37. package/src/types/snapshot.ts +1 -0
  38. package/src/utils/content.ts +10 -0
  39. package/src/utils/network-sync.ts +133 -92
  40. package/src/utils/repo-factory.ts +0 -1
@@ -61,12 +61,13 @@ export async function waitForBidirectionalSync(
61
61
  ? getHandleHeads(handles)
62
62
  : await getAllDocumentHeads(repo, rootDirectoryUrl);
63
63
 
64
- // Scale timeout proportionally to tree size after first scan
64
+ // After first scan: scale timeout to tree size and reset the clock.
65
+ // The first scan is just establishing a baseline — its duration
66
+ // shouldn't count against the stability-wait timeout.
65
67
  if (pollCount === 1) {
66
- dynamicTimeoutMs = Math.max(timeoutMs, 5000 + currentHeads.size * 50);
67
- if (dynamicTimeoutMs !== timeoutMs) {
68
- debug(`waitForBidirectionalSync: scaled timeout to ${dynamicTimeoutMs}ms for ${currentHeads.size} docs`);
69
- }
68
+ const scanDuration = Date.now() - startTime;
69
+ dynamicTimeoutMs = Math.max(timeoutMs, 5000 + currentHeads.size * 50) + scanDuration;
70
+ debug(`waitForBidirectionalSync: first scan took ${scanDuration}ms, timeout now ${dynamicTimeoutMs}ms for ${currentHeads.size} docs`);
70
71
  }
71
72
 
72
73
  // Check if heads are stable (no changes since last check)
@@ -206,126 +207,166 @@ function headsMapEqual(
206
207
  }
207
208
 
208
209
  /**
209
- * Wait for documents to sync to the remote server
210
+ * Result of waitForSync lists which handles failed to sync.
211
+ */
212
+ export interface SyncWaitResult {
213
+ failed: DocHandle<unknown>[];
214
+ }
215
+
216
+ /** Maximum documents to sync concurrently to avoid flooding the server */
217
+ const SYNC_BATCH_SIZE = 10;
218
+
219
+ /**
220
+ * Wait for a single document handle to sync to the server.
221
+ * Resolves with the handle on success, rejects with the handle on timeout.
222
+ */
223
+ function waitForHandleSync(
224
+ handle: DocHandle<unknown>,
225
+ syncServerStorageId: StorageId,
226
+ timeoutMs: number,
227
+ startTime: number,
228
+ ): Promise<DocHandle<unknown>> {
229
+ return new Promise<DocHandle<unknown>>((resolve, reject) => {
230
+ let pollInterval: NodeJS.Timeout;
231
+
232
+ const cleanup = () => {
233
+ clearTimeout(timeout);
234
+ clearInterval(pollInterval);
235
+ handle.off("remote-heads", onRemoteHeads);
236
+ };
237
+
238
+ const onConverged = () => {
239
+ debug(`waitForSync: ${handle.url.slice(0, 20)}... converged in ${Date.now() - startTime}ms`);
240
+ cleanup();
241
+ resolve(handle);
242
+ };
243
+
244
+ const timeout = setTimeout(() => {
245
+ debug(`waitForSync: ${handle.url.slice(0, 20)}... timed out after ${timeoutMs}ms`);
246
+ cleanup();
247
+ reject(handle);
248
+ }, timeoutMs);
249
+
250
+ const isConverged = () => {
251
+ const localHeads = handle.heads();
252
+ const info = handle.getSyncInfo(syncServerStorageId);
253
+ return A.equals(localHeads, info?.lastHeads);
254
+ };
255
+
256
+ const onRemoteHeads = ({
257
+ storageId,
258
+ }: {
259
+ storageId: StorageId;
260
+ heads: any;
261
+ }) => {
262
+ if (storageId === syncServerStorageId && isConverged()) {
263
+ onConverged();
264
+ }
265
+ };
266
+
267
+ // Initial check
268
+ if (isConverged()) {
269
+ cleanup();
270
+ resolve(handle);
271
+ return;
272
+ }
273
+
274
+ // Start polling and event listening
275
+ pollInterval = setInterval(() => {
276
+ if (isConverged()) {
277
+ onConverged();
278
+ }
279
+ }, 100);
280
+
281
+ handle.on("remote-heads", onRemoteHeads);
282
+ });
283
+ }
284
+
285
+ /**
286
+ * Wait for documents to sync to the remote server.
287
+ * Processes handles in batches to avoid flooding the server.
288
+ * Returns a result with any failed handles instead of throwing,
289
+ * so callers can attempt recovery (e.g. recreating documents).
210
290
  */
211
291
  export async function waitForSync(
212
292
  handlesToWaitOn: DocHandle<unknown>[],
213
293
  syncServerStorageId?: StorageId,
214
294
  timeoutMs: number = 60000,
215
- ): Promise<void> {
295
+ ): Promise<SyncWaitResult> {
216
296
  const startTime = Date.now();
217
297
 
218
298
  if (!syncServerStorageId) {
219
299
  debug("waitForSync: no sync server storage ID, skipping");
220
- return;
300
+ return { failed: [] };
221
301
  }
222
302
 
223
303
  if (handlesToWaitOn.length === 0) {
224
304
  debug("waitForSync: no documents to sync");
225
- return;
305
+ return { failed: [] };
226
306
  }
227
307
 
228
- debug(`waitForSync: waiting for ${handlesToWaitOn.length} documents (timeout=${timeoutMs}ms)`);
308
+ debug(`waitForSync: waiting for ${handlesToWaitOn.length} documents (timeout=${timeoutMs}ms, batchSize=${SYNC_BATCH_SIZE})`);
229
309
 
310
+ // Separate already-synced from needs-sync
311
+ const needsSync: DocHandle<unknown>[] = [];
230
312
  let alreadySynced = 0;
231
313
 
232
- const promises = handlesToWaitOn.map((handle) => {
233
- // Check if already synced
314
+ for (const handle of handlesToWaitOn) {
234
315
  const heads = handle.heads();
235
316
  const syncInfo = handle.getSyncInfo(syncServerStorageId);
236
317
  const remoteHeads = syncInfo?.lastHeads;
237
- const wasAlreadySynced = A.equals(heads, remoteHeads);
238
-
239
- if (wasAlreadySynced) {
318
+ if (A.equals(heads, remoteHeads)) {
240
319
  alreadySynced++;
241
320
  debug(`waitForSync: ${handle.url.slice(0, 20)}... already synced`);
242
- return Promise.resolve();
321
+ } else {
322
+ debug(`waitForSync: ${handle.url.slice(0, 20)}... needs sync (remoteHeads=${remoteHeads ? 'present' : 'missing'})`);
323
+ needsSync.push(handle);
243
324
  }
325
+ }
244
326
 
245
- debug(`waitForSync: ${handle.url.slice(0, 20)}... waiting for convergence (remoteHeads=${remoteHeads ? 'present' : 'missing'})`);
246
-
247
- // Wait for convergence
248
- return new Promise<void>((resolve, reject) => {
249
- let pollInterval: NodeJS.Timeout;
250
-
251
- const cleanup = () => {
252
- clearTimeout(timeout);
253
- clearInterval(pollInterval);
254
- handle.off("remote-heads", onRemoteHeads);
255
- };
256
-
257
- const onConverged = () => {
258
- debug(`waitForSync: ${handle.url.slice(0, 20)}... converged in ${Date.now() - startTime}ms`);
259
- cleanup();
260
- resolve();
261
- };
262
-
263
- const timeout = setTimeout(() => {
264
- debug(`waitForSync: ${handle.url.slice(0, 20)}... timed out after ${timeoutMs}ms`);
265
- cleanup();
266
- reject(
267
- new Error(
268
- `Sync timeout after ${timeoutMs}ms for document ${handle.url}`,
269
- ),
270
- );
271
- }, timeoutMs);
272
-
273
- const isConverged = () => {
274
- const localHeads = handle.heads();
275
- const info = handle.getSyncInfo(syncServerStorageId);
276
- return A.equals(localHeads, info?.lastHeads);
277
- };
278
-
279
- const onRemoteHeads = ({
280
- storageId,
281
- }: {
282
- storageId: StorageId;
283
- heads: any;
284
- }) => {
285
- if (storageId === syncServerStorageId && isConverged()) {
286
- onConverged();
287
- }
288
- };
327
+ if (needsSync.length > 0) {
328
+ debug(`waitForSync: ${alreadySynced} already synced, ${needsSync.length} need sync`);
329
+ out.taskLine(`Uploading: ${alreadySynced}/${handlesToWaitOn.length} already synced, waiting for ${needsSync.length} more`);
330
+ } else {
331
+ debug(`waitForSync: all ${handlesToWaitOn.length} already synced`);
332
+ return { failed: [] };
333
+ }
289
334
 
290
- const poll = () => {
291
- if (isConverged()) {
292
- onConverged();
293
- return true;
294
- }
295
- return false;
296
- };
335
+ // Process in batches to avoid flooding the server
336
+ const failed: DocHandle<unknown>[] = [];
337
+ let synced = alreadySynced;
297
338
 
298
- // Initial check
299
- if (poll()) {
300
- return;
301
- }
339
+ for (let i = 0; i < needsSync.length; i += SYNC_BATCH_SIZE) {
340
+ const batch = needsSync.slice(i, i + SYNC_BATCH_SIZE);
341
+ const batchNum = Math.floor(i / SYNC_BATCH_SIZE) + 1;
342
+ const totalBatches = Math.ceil(needsSync.length / SYNC_BATCH_SIZE);
302
343
 
303
- // Start polling and event listening
304
- pollInterval = setInterval(() => {
305
- poll();
306
- }, 100);
344
+ if (totalBatches > 1) {
345
+ debug(`waitForSync: batch ${batchNum}/${totalBatches} (${batch.length} docs)`);
346
+ out.update(`Uploading batch ${batchNum}/${totalBatches} (${synced}/${handlesToWaitOn.length} done)`);
347
+ }
307
348
 
308
- handle.on("remote-heads", onRemoteHeads);
309
- });
310
- });
349
+ const results = await Promise.allSettled(
350
+ batch.map(handle => waitForHandleSync(handle, syncServerStorageId, timeoutMs, startTime))
351
+ );
311
352
 
312
- const needSync = handlesToWaitOn.length - alreadySynced;
313
- if (needSync > 0) {
314
- debug(`waitForSync: ${alreadySynced} already synced, waiting for ${needSync} remaining`);
315
- out.taskLine(`Uploading: ${alreadySynced}/${handlesToWaitOn.length} already synced, waiting for ${needSync} more`);
316
- } else {
317
- debug(`waitForSync: all ${handlesToWaitOn.length} already synced`);
353
+ for (const result of results) {
354
+ if (result.status === "rejected") {
355
+ failed.push(result.reason as DocHandle<unknown>);
356
+ } else {
357
+ synced++;
358
+ }
359
+ }
318
360
  }
319
361
 
320
- try {
321
- await Promise.all(promises);
322
- const elapsed = Date.now() - startTime;
362
+ const elapsed = Date.now() - startTime;
363
+ if (failed.length > 0) {
364
+ debug(`waitForSync: ${failed.length} documents failed after ${elapsed}ms`);
365
+ out.taskLine(`Upload: ${synced} synced, ${failed.length} failed after ${(elapsed / 1000).toFixed(1)}s`, true);
366
+ } else {
323
367
  debug(`waitForSync: all ${handlesToWaitOn.length} documents synced in ${elapsed}ms (${alreadySynced} were already synced)`);
324
368
  out.taskLine(`All ${handlesToWaitOn.length} documents uploaded to server (${(elapsed / 1000).toFixed(1)}s)`);
325
- } catch (error) {
326
- const elapsed = Date.now() - startTime;
327
- debug(`waitForSync: failed after ${elapsed}ms: ${error}`);
328
- out.taskLine(`Upload to server failed after ${(elapsed / 1000).toFixed(1)}s: ${error}`, true);
329
- throw error;
330
369
  }
370
+
371
+ return { failed };
331
372
  }
@@ -22,7 +22,6 @@ export async function createRepo(
22
22
  config.sync_server
23
23
  );
24
24
  repoConfig.network = [networkAdapter];
25
- repoConfig.enableRemoteHeadsGossiping = true;
26
25
  }
27
26
 
28
27
  const repo = new Repo(repoConfig);