@prabhask5/stellar-engine 1.1.7 → 1.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -1
- package/dist/actions/remoteChange.d.ts +143 -18
- package/dist/actions/remoteChange.d.ts.map +1 -1
- package/dist/actions/remoteChange.js +182 -58
- package/dist/actions/remoteChange.js.map +1 -1
- package/dist/actions/truncateTooltip.d.ts +26 -12
- package/dist/actions/truncateTooltip.d.ts.map +1 -1
- package/dist/actions/truncateTooltip.js +89 -34
- package/dist/actions/truncateTooltip.js.map +1 -1
- package/dist/auth/admin.d.ts +40 -3
- package/dist/auth/admin.d.ts.map +1 -1
- package/dist/auth/admin.js +45 -5
- package/dist/auth/admin.js.map +1 -1
- package/dist/auth/crypto.d.ts +55 -5
- package/dist/auth/crypto.d.ts.map +1 -1
- package/dist/auth/crypto.js +58 -5
- package/dist/auth/crypto.js.map +1 -1
- package/dist/auth/deviceVerification.d.ts +236 -20
- package/dist/auth/deviceVerification.d.ts.map +1 -1
- package/dist/auth/deviceVerification.js +293 -40
- package/dist/auth/deviceVerification.js.map +1 -1
- package/dist/auth/displayUtils.d.ts +98 -0
- package/dist/auth/displayUtils.d.ts.map +1 -0
- package/dist/auth/displayUtils.js +133 -0
- package/dist/auth/displayUtils.js.map +1 -0
- package/dist/auth/loginGuard.d.ts +108 -14
- package/dist/auth/loginGuard.d.ts.map +1 -1
- package/dist/auth/loginGuard.js +153 -31
- package/dist/auth/loginGuard.js.map +1 -1
- package/dist/auth/offlineCredentials.d.ts +132 -15
- package/dist/auth/offlineCredentials.d.ts.map +1 -1
- package/dist/auth/offlineCredentials.js +167 -23
- package/dist/auth/offlineCredentials.js.map +1 -1
- package/dist/auth/offlineLogin.d.ts +96 -10
- package/dist/auth/offlineLogin.d.ts.map +1 -1
- package/dist/auth/offlineLogin.js +82 -15
- package/dist/auth/offlineLogin.js.map +1 -1
- package/dist/auth/offlineSession.d.ts +83 -9
- package/dist/auth/offlineSession.d.ts.map +1 -1
- package/dist/auth/offlineSession.js +104 -13
- package/dist/auth/offlineSession.js.map +1 -1
- package/dist/auth/resolveAuthState.d.ts +70 -8
- package/dist/auth/resolveAuthState.d.ts.map +1 -1
- package/dist/auth/resolveAuthState.js +142 -46
- package/dist/auth/resolveAuthState.js.map +1 -1
- package/dist/auth/singleUser.d.ts +390 -37
- package/dist/auth/singleUser.d.ts.map +1 -1
- package/dist/auth/singleUser.js +500 -99
- package/dist/auth/singleUser.js.map +1 -1
- package/dist/bin/install-pwa.d.ts +18 -2
- package/dist/bin/install-pwa.d.ts.map +1 -1
- package/dist/bin/install-pwa.js +801 -25
- package/dist/bin/install-pwa.js.map +1 -1
- package/dist/config.d.ts +132 -12
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +87 -9
- package/dist/config.js.map +1 -1
- package/dist/conflicts.d.ts +246 -23
- package/dist/conflicts.d.ts.map +1 -1
- package/dist/conflicts.js +495 -46
- package/dist/conflicts.js.map +1 -1
- package/dist/data.d.ts +338 -18
- package/dist/data.d.ts.map +1 -1
- package/dist/data.js +385 -34
- package/dist/data.js.map +1 -1
- package/dist/database.d.ts +72 -14
- package/dist/database.d.ts.map +1 -1
- package/dist/database.js +120 -29
- package/dist/database.js.map +1 -1
- package/dist/debug.d.ts +77 -1
- package/dist/debug.d.ts.map +1 -1
- package/dist/debug.js +88 -1
- package/dist/debug.js.map +1 -1
- package/dist/deviceId.d.ts +38 -7
- package/dist/deviceId.d.ts.map +1 -1
- package/dist/deviceId.js +68 -10
- package/dist/deviceId.js.map +1 -1
- package/dist/engine.d.ts +175 -3
- package/dist/engine.d.ts.map +1 -1
- package/dist/engine.js +756 -109
- package/dist/engine.js.map +1 -1
- package/dist/entries/actions.d.ts +13 -0
- package/dist/entries/actions.d.ts.map +1 -1
- package/dist/entries/actions.js +26 -1
- package/dist/entries/actions.js.map +1 -1
- package/dist/entries/auth.d.ts +16 -0
- package/dist/entries/auth.d.ts.map +1 -1
- package/dist/entries/auth.js +73 -1
- package/dist/entries/auth.js.map +1 -1
- package/dist/entries/config.d.ts +12 -0
- package/dist/entries/config.d.ts.map +1 -1
- package/dist/entries/config.js +18 -1
- package/dist/entries/config.js.map +1 -1
- package/dist/entries/kit.d.ts +11 -0
- package/dist/entries/kit.d.ts.map +1 -1
- package/dist/entries/kit.js +52 -2
- package/dist/entries/kit.js.map +1 -1
- package/dist/entries/stores.d.ts +11 -0
- package/dist/entries/stores.d.ts.map +1 -1
- package/dist/entries/stores.js +43 -2
- package/dist/entries/stores.js.map +1 -1
- package/dist/entries/types.d.ts +10 -0
- package/dist/entries/types.d.ts.map +1 -1
- package/dist/entries/types.js +10 -0
- package/dist/entries/types.js.map +1 -1
- package/dist/entries/utils.d.ts +6 -0
- package/dist/entries/utils.d.ts.map +1 -1
- package/dist/entries/utils.js +22 -1
- package/dist/entries/utils.js.map +1 -1
- package/dist/entries/vite.d.ts +17 -0
- package/dist/entries/vite.d.ts.map +1 -1
- package/dist/entries/vite.js +24 -1
- package/dist/entries/vite.js.map +1 -1
- package/dist/index.d.ts +31 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +175 -20
- package/dist/index.js.map +1 -1
- package/dist/kit/auth.d.ts +60 -5
- package/dist/kit/auth.d.ts.map +1 -1
- package/dist/kit/auth.js +45 -4
- package/dist/kit/auth.js.map +1 -1
- package/dist/kit/confirm.d.ts +93 -12
- package/dist/kit/confirm.d.ts.map +1 -1
- package/dist/kit/confirm.js +103 -16
- package/dist/kit/confirm.js.map +1 -1
- package/dist/kit/loads.d.ts +150 -23
- package/dist/kit/loads.d.ts.map +1 -1
- package/dist/kit/loads.js +140 -24
- package/dist/kit/loads.js.map +1 -1
- package/dist/kit/server.d.ts +142 -10
- package/dist/kit/server.d.ts.map +1 -1
- package/dist/kit/server.js +158 -15
- package/dist/kit/server.js.map +1 -1
- package/dist/kit/sw.d.ts +152 -23
- package/dist/kit/sw.d.ts.map +1 -1
- package/dist/kit/sw.js +182 -26
- package/dist/kit/sw.js.map +1 -1
- package/dist/queue.d.ts +274 -0
- package/dist/queue.d.ts.map +1 -1
- package/dist/queue.js +556 -38
- package/dist/queue.js.map +1 -1
- package/dist/realtime.d.ts +241 -27
- package/dist/realtime.d.ts.map +1 -1
- package/dist/realtime.js +633 -109
- package/dist/realtime.js.map +1 -1
- package/dist/runtime/runtimeConfig.d.ts +91 -8
- package/dist/runtime/runtimeConfig.d.ts.map +1 -1
- package/dist/runtime/runtimeConfig.js +146 -19
- package/dist/runtime/runtimeConfig.js.map +1 -1
- package/dist/stores/authState.d.ts +150 -11
- package/dist/stores/authState.d.ts.map +1 -1
- package/dist/stores/authState.js +169 -17
- package/dist/stores/authState.js.map +1 -1
- package/dist/stores/network.d.ts +39 -0
- package/dist/stores/network.d.ts.map +1 -1
- package/dist/stores/network.js +169 -16
- package/dist/stores/network.js.map +1 -1
- package/dist/stores/remoteChanges.d.ts +327 -52
- package/dist/stores/remoteChanges.d.ts.map +1 -1
- package/dist/stores/remoteChanges.js +337 -75
- package/dist/stores/remoteChanges.js.map +1 -1
- package/dist/stores/sync.d.ts +130 -0
- package/dist/stores/sync.d.ts.map +1 -1
- package/dist/stores/sync.js +167 -7
- package/dist/stores/sync.js.map +1 -1
- package/dist/supabase/auth.d.ts +325 -18
- package/dist/supabase/auth.d.ts.map +1 -1
- package/dist/supabase/auth.js +374 -26
- package/dist/supabase/auth.js.map +1 -1
- package/dist/supabase/client.d.ts +79 -6
- package/dist/supabase/client.d.ts.map +1 -1
- package/dist/supabase/client.js +158 -15
- package/dist/supabase/client.js.map +1 -1
- package/dist/supabase/validate.d.ts +101 -7
- package/dist/supabase/validate.d.ts.map +1 -1
- package/dist/supabase/validate.js +117 -8
- package/dist/supabase/validate.js.map +1 -1
- package/dist/sw/build/vite-plugin.d.ts +55 -10
- package/dist/sw/build/vite-plugin.d.ts.map +1 -1
- package/dist/sw/build/vite-plugin.js +77 -18
- package/dist/sw/build/vite-plugin.js.map +1 -1
- package/dist/sw/sw.js +99 -44
- package/dist/types.d.ts +150 -26
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +12 -10
- package/dist/types.js.map +1 -1
- package/dist/utils.d.ts +55 -13
- package/dist/utils.d.ts.map +1 -1
- package/dist/utils.js +83 -22
- package/dist/utils.js.map +1 -1
- package/package.json +1 -1
package/dist/queue.js
CHANGED
|
@@ -1,49 +1,220 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Sync Queue & Operation Coalescing Engine
|
|
3
|
+
*
|
|
4
|
+
* This module manages the offline-first sync queue for stellar-engine. All local
|
|
5
|
+
* mutations (creates, sets, increments, deletes) are enqueued as individual
|
|
6
|
+
* intent-based operations in an IndexedDB-backed queue (`syncQueue` table via Dexie).
|
|
7
|
+
* Before pushing to the remote server, the coalescing algorithm reduces redundant
|
|
8
|
+
* operations to minimize network requests and payload size.
|
|
9
|
+
*
|
|
10
|
+
* ## Design Philosophy
|
|
11
|
+
*
|
|
12
|
+
* The queue stores **intent-based operations** (create, set, increment, delete)
|
|
13
|
+
* rather than **state snapshots**. This is critical for two reasons:
|
|
14
|
+
*
|
|
15
|
+
* - **Coalescing:** Intent-based ops can be algebraically reduced. For example,
|
|
16
|
+
* two increments on the same field can be summed, and a create followed by
|
|
17
|
+
* a delete cancels out entirely. State snapshots cannot be reduced this way.
|
|
18
|
+
* - **Conflict resolution:** When a conflict arises during sync, the conflict
|
|
19
|
+
* resolver can inspect the *intent* (e.g., "user incremented score by 3")
|
|
20
|
+
* rather than just the final value. This enables smarter merge strategies.
|
|
21
|
+
*
|
|
22
|
+
* ## Coalescing Algorithm (6-Step Pipeline)
|
|
23
|
+
*
|
|
24
|
+
* The {@link coalescePendingOps} function implements a multi-pass coalescing pipeline:
|
|
25
|
+
*
|
|
26
|
+
* 1. **Group by entity** -- Operations are bucketed by `table:entityId` composite key.
|
|
27
|
+
* 2. **Entity-level reduction** -- Four mutually exclusive cases per entity group:
|
|
28
|
+
* - CREATE + DELETE = cancel everything (entity was born and died offline).
|
|
29
|
+
* - DELETE only = drop preceding sets/increments (they are moot).
|
|
30
|
+
* - CREATE only = fold subsequent sets/increments into the create payload.
|
|
31
|
+
* - Updates only = delegate to field-level coalescing ({@link processFieldOperations}).
|
|
32
|
+
* 3. **Increment coalescing** -- Surviving increment ops on the same field are summed.
|
|
33
|
+
* 4. **Set coalescing** -- Surviving set ops on the same entity are merged.
|
|
34
|
+
* 5. **No-op pruning** -- Zero-delta increments, empty sets, and `updated_at`-only
|
|
35
|
+
* sets are removed.
|
|
36
|
+
* 6. **Batch persist** -- All deletions and updates are flushed to IndexedDB in
|
|
37
|
+
* a single batch/transaction.
|
|
38
|
+
*
|
|
39
|
+
* ## Performance Characteristics
|
|
40
|
+
*
|
|
41
|
+
* - **O(n)** memory where n = queue length (single fetch, in-memory processing).
|
|
42
|
+
* - **O(1)** IndexedDB reads regardless of queue size (one `toArray()` call).
|
|
43
|
+
* - **O(k)** IndexedDB writes where k = number of changed rows (bulk delete + transaction).
|
|
44
|
+
* - No re-fetching between phases; all intermediate state lives in `idsToDelete` / `itemUpdates`.
|
|
45
|
+
*
|
|
46
|
+
* ## Retry & Backoff
|
|
47
|
+
*
|
|
48
|
+
* Failed items are retried with exponential backoff (2^(retries-1) seconds) up to
|
|
49
|
+
* {@link MAX_SYNC_RETRIES} attempts, after which {@link cleanupFailedItems} permanently
|
|
50
|
+
* removes them and reports the affected tables for user notification.
|
|
51
|
+
*
|
|
52
|
+
* ## Data Integrity
|
|
53
|
+
*
|
|
54
|
+
* - Operations are never modified in-place during coalescing; all mutations are
|
|
55
|
+
* accumulated in `idsToDelete` and `itemUpdates` and flushed atomically at
|
|
56
|
+
* the end. If the process crashes mid-pipeline, the queue is untouched.
|
|
57
|
+
* - The `timestamp` field on each operation is **immutable after creation**.
|
|
58
|
+
* It preserves enqueue order for deterministic sync and is not updated on
|
|
59
|
+
* retry (only `lastRetryAt` is updated). This ensures that coalescing and
|
|
60
|
+
* sync always process operations in the order the user intended.
|
|
61
|
+
*
|
|
62
|
+
* @see {@link SyncOperationItem} for the queue row schema.
|
|
63
|
+
* @see {@link processFieldOperations} for field-level increment/set interaction logic.
|
|
64
|
+
*/
|
|
1
65
|
import { debugWarn } from './debug';
|
|
2
66
|
import { getEngineConfig } from './config';
|
|
3
|
-
//
|
|
67
|
+
// =============================================================================
|
|
68
|
+
// Constants
|
|
69
|
+
// =============================================================================
|
|
70
|
+
/**
|
|
71
|
+
* Maximum number of retry attempts before a sync item is considered permanently failed.
|
|
72
|
+
*
|
|
73
|
+
* **Why 5?** With exponential backoff (1s, 2s, 4s, 8s), 5 retries span ~15 seconds
|
|
74
|
+
* of cumulative wait time. This covers transient network errors and brief server
|
|
75
|
+
* outages without keeping doomed operations in the queue indefinitely.
|
|
76
|
+
*
|
|
77
|
+
* After exceeding this threshold, items are removed by {@link cleanupFailedItems}
|
|
78
|
+
* and the affected tables are reported back to the caller for user notification.
|
|
79
|
+
*/
|
|
4
80
|
const MAX_SYNC_RETRIES = 5;
|
|
81
|
+
// =============================================================================
|
|
82
|
+
// Internal Helpers
|
|
83
|
+
// =============================================================================
|
|
84
|
+
/**
|
|
85
|
+
* Retrieve the Dexie database instance from the global engine configuration.
|
|
86
|
+
*
|
|
87
|
+
* @returns The configured Dexie database. Assumes `getEngineConfig().db` is non-null
|
|
88
|
+
* (the engine must be initialized before any queue operations).
|
|
89
|
+
*
|
|
90
|
+
* @throws Will throw a TypeError if the engine has not been initialized
|
|
91
|
+
* (`db` is null/undefined). This is intentional -- queue operations
|
|
92
|
+
* before engine init indicate a programming error.
|
|
93
|
+
*
|
|
94
|
+
* @see {@link getEngineConfig} for the configuration provider.
|
|
95
|
+
*/
|
|
5
96
|
function getDb() {
|
|
6
97
|
return getEngineConfig().db;
|
|
7
98
|
}
|
|
99
|
+
// =============================================================================
|
|
100
|
+
// Coalescing Pipeline (Public Entry Point)
|
|
101
|
+
// =============================================================================
|
|
8
102
|
/**
|
|
9
103
|
* Coalesce multiple operations to the same entity into fewer operations.
|
|
10
104
|
* This dramatically reduces the number of server requests and data transfer.
|
|
11
105
|
*
|
|
106
|
+
* The algorithm runs a 6-step pipeline entirely in memory after a single IndexedDB
|
|
107
|
+
* read, then flushes all mutations (deletes + updates) back to the database in batch.
|
|
108
|
+
*
|
|
109
|
+
* **When to call:** Before each sync push cycle. The sync engine typically calls
|
|
110
|
+
* this once, then calls {@link getPendingSync} to retrieve the reduced queue.
|
|
111
|
+
*
|
|
112
|
+
* **Idempotency:** Calling this multiple times is safe but wasteful -- after the
|
|
113
|
+
* first call, subsequent calls will find nothing to coalesce and return 0.
|
|
114
|
+
*
|
|
115
|
+
* **Atomicity:** The pipeline accumulates all mutations in memory and flushes them
|
|
116
|
+
* at the end. If the browser crashes mid-pipeline, no data is lost -- the queue
|
|
117
|
+
* remains in its pre-coalescing state and will be coalesced on the next cycle.
|
|
118
|
+
*
|
|
12
119
|
* PERFORMANCE OPTIMIZED:
|
|
13
120
|
* - Single DB fetch at start (no re-fetching between phases)
|
|
14
121
|
* - All processing done in memory
|
|
15
122
|
* - Batch deletes and updates at the end
|
|
123
|
+
*
|
|
124
|
+
* @returns The number of redundant operations that were removed from the queue.
|
|
125
|
+
*
|
|
126
|
+
* @example
|
|
127
|
+
* ```ts
|
|
128
|
+
* const removed = await coalescePendingOps();
|
|
129
|
+
* console.log(`Coalesced away ${removed} redundant operations`);
|
|
130
|
+
* ```
|
|
131
|
+
*
|
|
132
|
+
* @see {@link processFieldOperations} for the field-level reduction used in Step 2 Case 4.
|
|
133
|
+
* @see {@link getPendingSync} which typically calls this before fetching items to push.
|
|
16
134
|
*/
|
|
17
135
|
export async function coalescePendingOps() {
|
|
18
136
|
const db = getDb();
|
|
19
137
|
const allItems = (await db.table('syncQueue').toArray());
|
|
138
|
+
/* Early exit: 0 or 1 items can never be coalesced. This avoids the overhead
|
|
139
|
+
of creating the tracking structures for the common case of a small queue. */
|
|
20
140
|
if (allItems.length <= 1)
|
|
21
141
|
return 0;
|
|
22
|
-
//
|
|
142
|
+
// ---------------------------------------------------------------------------
|
|
143
|
+
// In-memory tracking structures
|
|
144
|
+
// ---------------------------------------------------------------------------
|
|
145
|
+
// We accumulate all intended mutations here so we can flush them in a single
|
|
146
|
+
// batch at the end. This avoids interleaving IndexedDB I/O between phases,
|
|
147
|
+
// which would be both slower and harder to reason about. It also provides
|
|
148
|
+
// crash safety: if the process dies mid-pipeline, the queue is untouched.
|
|
149
|
+
/** IDs of queue rows that should be deleted (redundant / cancelled). */
|
|
23
150
|
const idsToDelete = new Set();
|
|
151
|
+
/** Pending partial updates to queue rows, keyed by row ID. */
|
|
24
152
|
const itemUpdates = new Map();
|
|
25
|
-
|
|
153
|
+
/**
|
|
154
|
+
* Check whether an item is still "alive" -- i.e., has a defined ID and has
|
|
155
|
+
* not been marked for deletion by an earlier phase of the pipeline.
|
|
156
|
+
*
|
|
157
|
+
* This is used by later phases (Steps 3-5) to avoid operating on items
|
|
158
|
+
* that were already eliminated by earlier phases (Steps 1-2).
|
|
159
|
+
*
|
|
160
|
+
* @param item - The sync operation item to check.
|
|
161
|
+
* @returns `true` if the item should still be considered during subsequent phases.
|
|
162
|
+
*/
|
|
26
163
|
const isAlive = (item) => item.id !== undefined && !idsToDelete.has(item.id);
|
|
27
|
-
|
|
164
|
+
/**
|
|
165
|
+
* Mark an item for deletion at the end of the pipeline.
|
|
166
|
+
*
|
|
167
|
+
* Items are not immediately removed from the allItems array -- they are
|
|
168
|
+
* simply flagged via `idsToDelete`. This avoids costly array mutations
|
|
169
|
+
* and allows later phases to iterate the original array with `isAlive` checks.
|
|
170
|
+
*
|
|
171
|
+
* @param item - The sync operation item to remove.
|
|
172
|
+
*/
|
|
28
173
|
const markDeleted = (item) => {
|
|
29
174
|
if (item.id !== undefined)
|
|
30
175
|
idsToDelete.add(item.id);
|
|
31
176
|
};
|
|
32
|
-
|
|
177
|
+
/**
|
|
178
|
+
* Mark an item for update at the end of the pipeline. Multiple calls for the
|
|
179
|
+
* same item are merged (later updates win on a per-key basis).
|
|
180
|
+
*
|
|
181
|
+
* **Merge semantics:** Uses shallow spread, so nested objects are replaced
|
|
182
|
+
* wholesale (not deep-merged). This is correct for our use case because
|
|
183
|
+
* the `value` field is always replaced entirely, never partially updated.
|
|
184
|
+
*
|
|
185
|
+
* @param item - The sync operation item to update.
|
|
186
|
+
* @param updates - The partial fields to apply.
|
|
187
|
+
*/
|
|
33
188
|
const markUpdated = (item, updates) => {
|
|
34
189
|
if (item.id !== undefined) {
|
|
35
190
|
const existing = itemUpdates.get(item.id) || {};
|
|
36
191
|
itemUpdates.set(item.id, { ...existing, ...updates });
|
|
37
192
|
}
|
|
38
193
|
};
|
|
39
|
-
|
|
194
|
+
/**
|
|
195
|
+
* Return the "effective" value of an item, accounting for any pending in-memory
|
|
196
|
+
* updates that earlier phases may have applied. This is essential for Steps 3-5,
|
|
197
|
+
* which operate on the results of Step 2.
|
|
198
|
+
*
|
|
199
|
+
* **Why not mutate items in place?** Because the items array is a snapshot from
|
|
200
|
+
* IndexedDB. Mutating it would make the code harder to reason about (which
|
|
201
|
+
* fields are "real" vs. "modified?") and would prevent crash-safe behavior.
|
|
202
|
+
*
|
|
203
|
+
* @param item - The sync operation item whose effective value is needed.
|
|
204
|
+
* @returns The pending updated value if one exists, otherwise the original value.
|
|
205
|
+
*/
|
|
40
206
|
const getEffectiveValue = (item) => {
|
|
41
207
|
if (item.id !== undefined && itemUpdates.has(item.id)) {
|
|
42
208
|
return itemUpdates.get(item.id).value ?? item.value;
|
|
43
209
|
}
|
|
44
210
|
return item.value;
|
|
45
211
|
};
|
|
46
|
-
//
|
|
212
|
+
// ===========================================================================
|
|
213
|
+
// STEP 1: Group all operations by entity
|
|
214
|
+
// ===========================================================================
|
|
215
|
+
/* Composite key `table:entityId` ensures operations on different tables with
|
|
216
|
+
the same entity UUID are never incorrectly merged. This matters when
|
|
217
|
+
multiple tables reference the same ID scheme (e.g., UUIDs). */
|
|
47
218
|
const entityGroups = new Map();
|
|
48
219
|
for (const item of allItems) {
|
|
49
220
|
const key = `${item.table}:${item.entityId}`;
|
|
@@ -51,20 +222,30 @@ export async function coalescePendingOps() {
|
|
|
51
222
|
entityGroups.set(key, []);
|
|
52
223
|
entityGroups.get(key).push(item);
|
|
53
224
|
}
|
|
54
|
-
//
|
|
225
|
+
// ===========================================================================
|
|
226
|
+
// STEP 2: Process each entity group (entity-level reduction)
|
|
227
|
+
// ===========================================================================
|
|
55
228
|
for (const [, items] of entityGroups) {
|
|
56
|
-
|
|
229
|
+
/* Chronological sort is critical: it lets us reason about "before" and
|
|
230
|
+
"after" relationships between creates, updates, and deletes. The
|
|
231
|
+
timestamp is the original enqueue time, which never changes. */
|
|
57
232
|
items.sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime());
|
|
58
233
|
const hasCreate = items.some((i) => i.operationType === 'create');
|
|
59
234
|
const hasDelete = items.some((i) => i.operationType === 'delete');
|
|
60
|
-
// Case 1: CREATE
|
|
235
|
+
// ---- Case 1: CREATE + DELETE -> everything cancels out ----
|
|
236
|
+
/* The entity was created and deleted within the same offline session.
|
|
237
|
+
The server never knew about it, so we can discard every operation.
|
|
238
|
+
This is the most aggressive optimization: N operations become 0. */
|
|
61
239
|
if (hasCreate && hasDelete) {
|
|
62
240
|
for (const item of items) {
|
|
63
241
|
markDeleted(item);
|
|
64
242
|
}
|
|
65
243
|
continue;
|
|
66
244
|
}
|
|
67
|
-
// Case 2:
|
|
245
|
+
// ---- Case 2: DELETE without CREATE -> only the delete survives ----
|
|
246
|
+
/* The entity existed on the server before going offline. Intermediate
|
|
247
|
+
sets/increments are pointless because the delete will wipe the row.
|
|
248
|
+
We keep only the delete operation itself. */
|
|
68
249
|
if (!hasCreate && hasDelete) {
|
|
69
250
|
for (const item of items) {
|
|
70
251
|
if (item.operationType !== 'delete') {
|
|
@@ -73,7 +254,14 @@ export async function coalescePendingOps() {
|
|
|
73
254
|
}
|
|
74
255
|
continue;
|
|
75
256
|
}
|
|
76
|
-
// Case 3:
|
|
257
|
+
// ---- Case 3: CREATE without DELETE -> fold updates into create payload ----
|
|
258
|
+
/* Since the server hasn't seen the entity yet, we can build the final
|
|
259
|
+
create payload by replaying all subsequent sets and increments into
|
|
260
|
+
the original create value. This turns N operations into one.
|
|
261
|
+
|
|
262
|
+
**Why replay in order?** Because a later set on the same field should
|
|
263
|
+
overwrite an earlier one. Chronological ordering ensures the final
|
|
264
|
+
payload reflects the user's last action. */
|
|
77
265
|
if (hasCreate && !hasDelete) {
|
|
78
266
|
const createItem = items.find((i) => i.operationType === 'create');
|
|
79
267
|
const otherItems = items.filter((i) => i.operationType !== 'create');
|
|
@@ -82,13 +270,20 @@ export async function coalescePendingOps() {
|
|
|
82
270
|
for (const item of otherItems) {
|
|
83
271
|
if (item.operationType === 'set') {
|
|
84
272
|
if (item.field) {
|
|
273
|
+
/* Field-targeted set: overwrite a single key in the payload. */
|
|
85
274
|
mergedPayload[item.field] = item.value;
|
|
86
275
|
}
|
|
87
276
|
else if (typeof item.value === 'object' && item.value !== null) {
|
|
277
|
+
/* Whole-object set: shallow-merge into the payload. Later
|
|
278
|
+
fields overwrite earlier ones due to spread semantics. */
|
|
88
279
|
mergedPayload = { ...mergedPayload, ...item.value };
|
|
89
280
|
}
|
|
90
281
|
}
|
|
91
282
|
else if (item.operationType === 'increment' && item.field) {
|
|
283
|
+
/* Increments are folded arithmetically into the current field value.
|
|
284
|
+
If the field doesn't exist yet (or isn't a number), we treat it as 0.
|
|
285
|
+
This is safe because the create payload is the entity's initial state;
|
|
286
|
+
a missing numeric field logically starts at zero. */
|
|
92
287
|
const currentVal = typeof mergedPayload[item.field] === 'number'
|
|
93
288
|
? mergedPayload[item.field]
|
|
94
289
|
: 0;
|
|
@@ -103,10 +298,22 @@ export async function coalescePendingOps() {
|
|
|
103
298
|
}
|
|
104
299
|
continue;
|
|
105
300
|
}
|
|
106
|
-
// Case 4: No create, no delete
|
|
301
|
+
// ---- Case 4: No create, no delete -> field-level coalescing ----
|
|
302
|
+
/* This is the most nuanced case: the entity exists on the server and we
|
|
303
|
+
have a mix of sets and increments targeting various fields. Delegate to
|
|
304
|
+
the specialized field-operations processor which handles interactions
|
|
305
|
+
between sets and increments on the same field. */
|
|
107
306
|
processFieldOperations(items, markDeleted, markUpdated);
|
|
108
307
|
}
|
|
109
|
-
//
|
|
308
|
+
// ===========================================================================
|
|
309
|
+
// STEP 3: Coalesce remaining INCREMENT operations (not yet deleted)
|
|
310
|
+
// ===========================================================================
|
|
311
|
+
/* After entity-level reduction, there may still be multiple surviving
|
|
312
|
+
increment operations targeting the same field. We sum their deltas into
|
|
313
|
+
the oldest operation and discard the rest. The oldest is kept because it
|
|
314
|
+
preserves the original enqueue order (important for deterministic sync).
|
|
315
|
+
|
|
316
|
+
**Example:** INC score+3, INC score+5 -> INC score+8 (on the oldest item) */
|
|
110
317
|
const incrementGroups = new Map();
|
|
111
318
|
for (const item of allItems) {
|
|
112
319
|
if (item.operationType === 'increment' && item.field && isAlive(item)) {
|
|
@@ -127,13 +334,25 @@ export async function coalescePendingOps() {
|
|
|
127
334
|
const delta = typeof effectiveValue === 'number' ? effectiveValue : 0;
|
|
128
335
|
totalDelta += delta;
|
|
129
336
|
}
|
|
337
|
+
/* Keep the oldest item with the summed delta; delete the rest.
|
|
338
|
+
Keeping the oldest preserves enqueue ordering for sync. */
|
|
130
339
|
const oldestItem = aliveItems[0];
|
|
131
340
|
markUpdated(oldestItem, { value: totalDelta });
|
|
132
341
|
for (let i = 1; i < aliveItems.length; i++) {
|
|
133
342
|
markDeleted(aliveItems[i]);
|
|
134
343
|
}
|
|
135
344
|
}
|
|
136
|
-
//
|
|
345
|
+
// ===========================================================================
|
|
346
|
+
// STEP 4: Coalesce remaining SET operations (not yet deleted)
|
|
347
|
+
// ===========================================================================
|
|
348
|
+
/* Multiple surviving set operations on the same entity are merged into a
|
|
349
|
+
single whole-object set. Field-targeted sets contribute their field; whole-
|
|
350
|
+
object sets are shallow-merged. The oldest item is kept as the carrier.
|
|
351
|
+
|
|
352
|
+
**Why merge sets?** Consider a user who changes the title, then the
|
|
353
|
+
description, then the title again -- all while offline. Without merging,
|
|
354
|
+
the server would receive 3 separate set operations. With merging, it
|
|
355
|
+
receives one set with both the final title and description. */
|
|
137
356
|
const setGroups = new Map();
|
|
138
357
|
for (const item of allItems) {
|
|
139
358
|
if (item.operationType === 'set' && isAlive(item)) {
|
|
@@ -152,41 +371,67 @@ export async function coalescePendingOps() {
|
|
|
152
371
|
for (const item of aliveItems) {
|
|
153
372
|
const effectiveValue = getEffectiveValue(item);
|
|
154
373
|
if (item.field) {
|
|
374
|
+
/* Field-targeted set: slot into the merged object under its field name. */
|
|
155
375
|
mergedValue[item.field] = effectiveValue;
|
|
156
376
|
}
|
|
157
377
|
else if (typeof effectiveValue === 'object' && effectiveValue !== null) {
|
|
378
|
+
/* Whole-object set: shallow-merge (later values overwrite earlier ones).
|
|
379
|
+
This ensures the user's most recent value wins when fields overlap. */
|
|
158
380
|
mergedValue = { ...mergedValue, ...effectiveValue };
|
|
159
381
|
}
|
|
160
382
|
}
|
|
383
|
+
/* Clear `field` on the carrier so it becomes a whole-object set containing
|
|
384
|
+
all the merged fields. This transformation is necessary because the
|
|
385
|
+
carrier might originally have been a field-targeted set (e.g., field='title'),
|
|
386
|
+
but it now carries multiple fields. */
|
|
161
387
|
const oldestItem = aliveItems[0];
|
|
162
388
|
markUpdated(oldestItem, { value: mergedValue, field: undefined });
|
|
163
389
|
for (let i = 1; i < aliveItems.length; i++) {
|
|
164
390
|
markDeleted(aliveItems[i]);
|
|
165
391
|
}
|
|
166
392
|
}
|
|
167
|
-
//
|
|
393
|
+
// ===========================================================================
|
|
394
|
+
// STEP 5: Remove no-op operations
|
|
395
|
+
// ===========================================================================
|
|
396
|
+
/* Final cleanup pass: any operation that would have no server-side effect is
|
|
397
|
+
pruned. This catches edge cases produced by the earlier merging phases
|
|
398
|
+
(e.g., increments that sum to zero, or sets that only touch `updated_at`).
|
|
399
|
+
|
|
400
|
+
**Why is this a separate pass?** Steps 2-4 can produce no-ops as a side
|
|
401
|
+
effect of merging (e.g., INC +3 and INC -3 sum to 0). Detecting these
|
|
402
|
+
inline would complicate those steps. A dedicated cleanup pass is cleaner. */
|
|
168
403
|
for (const item of allItems) {
|
|
169
404
|
if (!isAlive(item))
|
|
170
405
|
continue;
|
|
171
406
|
let shouldDelete = false;
|
|
172
407
|
const effectiveValue = getEffectiveValue(item);
|
|
173
|
-
|
|
408
|
+
/* Zero-delta increments are no-ops -- incrementing by 0 is meaningless.
|
|
409
|
+
These can arise when opposite increments cancel out in Step 3. */
|
|
174
410
|
if (item.operationType === 'increment') {
|
|
175
411
|
const delta = typeof effectiveValue === 'number' ? effectiveValue : 0;
|
|
176
412
|
if (delta === 0) {
|
|
177
413
|
shouldDelete = true;
|
|
178
414
|
}
|
|
179
415
|
}
|
|
180
|
-
|
|
416
|
+
/* Sets that carry no meaningful data are no-ops. We check three sub-cases:
|
|
417
|
+
(a) field-targeted set where the field is just `updated_at`,
|
|
418
|
+
(b) whole-object set where all keys are `updated_at`,
|
|
419
|
+
(c) set with null/undefined value. */
|
|
181
420
|
if (item.operationType === 'set') {
|
|
182
421
|
const pendingUpdate = item.id !== undefined ? itemUpdates.get(item.id) : undefined;
|
|
183
422
|
const effectiveField = pendingUpdate?.field !== undefined ? pendingUpdate.field : item.field;
|
|
184
423
|
if (effectiveField) {
|
|
424
|
+
/* (a) A single-field set targeting only `updated_at` -- the server
|
|
425
|
+
manages this timestamp itself via triggers or the sync push
|
|
426
|
+
handler, so pushing it from the client is wasteful. */
|
|
185
427
|
if (effectiveField === 'updated_at') {
|
|
186
428
|
shouldDelete = true;
|
|
187
429
|
}
|
|
188
430
|
}
|
|
189
431
|
else if (typeof effectiveValue === 'object' && effectiveValue !== null) {
|
|
432
|
+
/* (b) A whole-object set where the only remaining key is `updated_at`.
|
|
433
|
+
This can happen when Step 4 merges multiple field-targeted sets
|
|
434
|
+
and all meaningful fields were eliminated by other passes. */
|
|
190
435
|
const payload = effectiveValue;
|
|
191
436
|
const keys = Object.keys(payload).filter((k) => k !== 'updated_at');
|
|
192
437
|
if (keys.length === 0) {
|
|
@@ -194,6 +439,9 @@ export async function coalescePendingOps() {
|
|
|
194
439
|
}
|
|
195
440
|
}
|
|
196
441
|
else if (effectiveValue === undefined || effectiveValue === null) {
|
|
442
|
+
/* (c) A set with no value at all -- nothing to send. This is a
|
|
443
|
+
degenerate case that shouldn't normally occur, but we handle
|
|
444
|
+
it defensively. */
|
|
197
445
|
shouldDelete = true;
|
|
198
446
|
}
|
|
199
447
|
}
|
|
@@ -201,9 +449,12 @@ export async function coalescePendingOps() {
|
|
|
201
449
|
markDeleted(item);
|
|
202
450
|
}
|
|
203
451
|
}
|
|
204
|
-
//
|
|
452
|
+
// ===========================================================================
|
|
453
|
+
// STEP 6: Apply all changes in batch
|
|
454
|
+
// ===========================================================================
|
|
205
455
|
const deleteIds = Array.from(idsToDelete);
|
|
206
|
-
|
|
456
|
+
/* Discard updates targeting rows we are about to delete -- applying them
|
|
457
|
+
would be wasteful and could cause Dexie errors on missing keys. */
|
|
207
458
|
const finalUpdates = [];
|
|
208
459
|
for (const [id, changes] of itemUpdates) {
|
|
209
460
|
if (!idsToDelete.has(id)) {
|
|
@@ -211,11 +462,16 @@ export async function coalescePendingOps() {
|
|
|
211
462
|
}
|
|
212
463
|
}
|
|
213
464
|
const syncQueue = db.table('syncQueue');
|
|
214
|
-
|
|
465
|
+
/* Batch delete in one IndexedDB call. `bulkDelete` is significantly faster
|
|
466
|
+
than individual `delete` calls because it batches into a single IDB
|
|
467
|
+
transaction internally. */
|
|
215
468
|
if (deleteIds.length > 0) {
|
|
216
469
|
await syncQueue.bulkDelete(deleteIds);
|
|
217
470
|
}
|
|
218
|
-
|
|
471
|
+
/* Batch update via a Dexie transaction. Dexie doesn't have a `bulkUpdate`
|
|
472
|
+
method, so we wrap individual updates in a single read-write transaction
|
|
473
|
+
to avoid N separate implicit transactions. This reduces IDB overhead from
|
|
474
|
+
O(N) transaction commits to O(1). */
|
|
219
475
|
if (finalUpdates.length > 0) {
|
|
220
476
|
await db.transaction('rw', syncQueue, async () => {
|
|
221
477
|
for (const { id, changes } of finalUpdates) {
|
|
@@ -225,11 +481,39 @@ export async function coalescePendingOps() {
|
|
|
225
481
|
}
|
|
226
482
|
return deleteIds.length;
|
|
227
483
|
}
|
|
484
|
+
// =============================================================================
|
|
485
|
+
// Field-Level Operation Processor (Internal)
|
|
486
|
+
// =============================================================================
|
|
228
487
|
/**
|
|
229
488
|
* Process increment/set interactions for the same field within an entity (in-memory).
|
|
489
|
+
*
|
|
490
|
+
* This is the workhorse for "Case 4" of the entity-level reduction: the entity has
|
|
491
|
+
* no pending create or delete, so we must carefully reason about per-field interactions
|
|
492
|
+
* between set and increment operations.
|
|
493
|
+
*
|
|
494
|
+
* The key insight is that a `set` on a field establishes a new absolute value, which
|
|
495
|
+
* renders all *preceding* operations on that field irrelevant. If `increment` operations
|
|
496
|
+
* follow the last `set`, their deltas can be folded into the set's value, turning
|
|
497
|
+
* N operations into one.
|
|
498
|
+
*
|
|
499
|
+
* **Correctness invariant:** The resulting operations, when replayed in order against
|
|
500
|
+
* the server's current state, must produce the same entity as replaying the original
|
|
501
|
+
* operations. This is preserved because:
|
|
502
|
+
* - We only eliminate operations that are provably superseded (before the last set)
|
|
503
|
+
* - We only fold increments into a set when the set's base value is known
|
|
504
|
+
*
|
|
505
|
+
* @param items - All sync operations for a single entity (already filtered to one
|
|
506
|
+
* `table:entityId` group). May include operations without a `field`
|
|
507
|
+
* (whole-object sets); those are ignored here and handled by Step 4.
|
|
508
|
+
* @param markDeleted - Callback to schedule an item for deletion.
|
|
509
|
+
* @param markUpdated - Callback to schedule a partial update on an item.
|
|
510
|
+
*
|
|
511
|
+
* @see {@link coalescePendingOps} Step 2, Case 4 -- the only call site.
|
|
230
512
|
*/
|
|
231
513
|
function processFieldOperations(items, markDeleted, markUpdated) {
|
|
232
|
-
|
|
514
|
+
/* Group by field name. Only field-targeted increments and sets participate;
|
|
515
|
+
whole-object sets (field === undefined) are left for Step 4 because their
|
|
516
|
+
effect spans multiple fields and cannot be reduced at the single-field level. */
|
|
233
517
|
const fieldGroups = new Map();
|
|
234
518
|
for (const item of items) {
|
|
235
519
|
if (item.field && (item.operationType === 'increment' || item.operationType === 'set')) {
|
|
@@ -240,22 +524,32 @@ function processFieldOperations(items, markDeleted, markUpdated) {
|
|
|
240
524
|
}
|
|
241
525
|
}
|
|
242
526
|
for (const [, fieldItems] of fieldGroups) {
|
|
527
|
+
/* Single operation on a field cannot be reduced further. */
|
|
243
528
|
if (fieldItems.length <= 1)
|
|
244
529
|
continue;
|
|
245
|
-
|
|
530
|
+
/* Chronological sort to determine which operations come before/after others.
|
|
531
|
+
This ordering is the foundation of the "last set wins" logic below. */
|
|
246
532
|
fieldItems.sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime());
|
|
247
533
|
const hasIncrement = fieldItems.some((i) => i.operationType === 'increment');
|
|
248
534
|
const hasSet = fieldItems.some((i) => i.operationType === 'set');
|
|
249
535
|
if (hasIncrement && hasSet) {
|
|
250
|
-
|
|
536
|
+
/* Mixed increment + set on the same field. The last `set` establishes a
|
|
537
|
+
known absolute value, so everything before it is superseded.
|
|
538
|
+
|
|
539
|
+
**Example:** INC score+3, SET score=10, INC score+5
|
|
540
|
+
-> The INC+3 is moot (SET overwrites it).
|
|
541
|
+
-> The INC+5 is folded into the SET: SET score=15.
|
|
542
|
+
-> Final result: one SET score=15. */
|
|
251
543
|
const lastSetIndex = fieldItems.map((i) => i.operationType).lastIndexOf('set');
|
|
252
544
|
const lastSet = fieldItems[lastSetIndex];
|
|
253
|
-
|
|
545
|
+
/* Increments AFTER the last set can be folded into the set's value
|
|
546
|
+
because we know the base value the set establishes. */
|
|
254
547
|
const incrementsAfterSet = fieldItems
|
|
255
548
|
.slice(lastSetIndex + 1)
|
|
256
549
|
.filter((i) => i.operationType === 'increment');
|
|
257
550
|
if (incrementsAfterSet.length > 0) {
|
|
258
|
-
|
|
551
|
+
/* Sum all post-set increment deltas and bake them into the set value.
|
|
552
|
+
E.g., SET score=10, INC score+3, INC score+5 -> SET score=18. */
|
|
259
553
|
let totalDelta = 0;
|
|
260
554
|
for (const inc of incrementsAfterSet) {
|
|
261
555
|
totalDelta += typeof inc.value === 'number' ? inc.value : 0;
|
|
@@ -263,43 +557,125 @@ function processFieldOperations(items, markDeleted, markUpdated) {
|
|
|
263
557
|
const baseValue = typeof lastSet.value === 'number' ? lastSet.value : 0;
|
|
264
558
|
const finalValue = baseValue + totalDelta;
|
|
265
559
|
markUpdated(lastSet, { value: finalValue });
|
|
266
|
-
// Delete all increments after the set
|
|
267
560
|
for (const inc of incrementsAfterSet) {
|
|
268
561
|
markDeleted(inc);
|
|
269
562
|
}
|
|
270
563
|
}
|
|
271
|
-
|
|
564
|
+
/* Everything before the last set is moot -- the set overwrites whatever
|
|
565
|
+
those operations would have produced. This includes both earlier sets
|
|
566
|
+
and earlier increments on this field. */
|
|
272
567
|
const itemsBeforeLastSet = fieldItems.slice(0, lastSetIndex);
|
|
273
568
|
for (const item of itemsBeforeLastSet) {
|
|
274
569
|
markDeleted(item);
|
|
275
570
|
}
|
|
276
571
|
}
|
|
572
|
+
/* Note: Groups with only increments (no sets) or only sets (no increments)
|
|
573
|
+
are handled by Steps 3 and 4 respectively. They are intentionally
|
|
574
|
+
NOT processed here to keep this function focused on mixed interactions. */
|
|
277
575
|
}
|
|
278
576
|
}
|
|
279
|
-
//
|
|
280
|
-
//
|
|
577
|
+
// =============================================================================
|
|
578
|
+
// Retry & Backoff Logic
|
|
579
|
+
// =============================================================================
|
|
580
|
+
/**
|
|
581
|
+
* Determine whether a failed sync item is eligible for retry based on
|
|
582
|
+
* exponential backoff timing.
|
|
583
|
+
*
|
|
584
|
+
* The backoff schedule is: 1s, 2s, 4s, 8s for retries 1-4. The first attempt
|
|
585
|
+
* (retries === 0) is always immediate. Items that have reached
|
|
586
|
+
* {@link MAX_SYNC_RETRIES} are never retried.
|
|
587
|
+
*
|
|
588
|
+
* **Why exponential backoff?** It prevents hammering a server that may be
|
|
589
|
+
* temporarily overloaded or unreachable, while still retrying quickly for
|
|
590
|
+
* transient errors (first retry after just 1 second).
|
|
591
|
+
*
|
|
592
|
+
* @param item - The sync operation item to evaluate.
|
|
593
|
+
* @returns `true` if the item should be included in the next sync push.
|
|
594
|
+
*
|
|
595
|
+
* @see {@link getPendingSync} which uses this to filter the queue.
|
|
596
|
+
* @see {@link incrementRetry} which advances the retry counter after a failure.
|
|
597
|
+
* @see {@link cleanupFailedItems} which removes items past the max retry threshold.
|
|
598
|
+
*/
|
|
281
599
|
/**
 * Decide whether a failed sync item has cleared its exponential backoff
 * window and may be retried on the next push cycle.
 *
 * Schedule: attempt 0 is immediate; retry n waits 2^(n-1) seconds
 * (1s, 2s, 4s, 8s, ...). Items at or past MAX_SYNC_RETRIES are never retried.
 *
 * @param {object} item - Sync queue row with `retries`, `timestamp`, and
 *   optionally `lastRetryAt` (ISO 8601 strings).
 * @returns {boolean} `true` if the item is eligible for the next sync push.
 */
function shouldRetryItem(item) {
    // Permanently failed items are excluded entirely.
    if (item.retries >= MAX_SYNC_RETRIES) {
        return false;
    }
    // First attempt: no backoff applies.
    if (item.retries === 0) {
        return true;
    }
    // Wait 2^(retries-1) seconds, measured from the most recent attempt.
    // Older rows may predate `lastRetryAt`, so fall back to the enqueue
    // `timestamp` for backward compatibility.
    const waitMs = 2 ** (item.retries - 1) * 1000;
    const referenceTime = item.lastRetryAt || item.timestamp;
    const elapsedMs = Date.now() - new Date(referenceTime).getTime();
    return elapsedMs >= waitMs;
}
|
|
614
|
+
// =============================================================================
|
|
615
|
+
// Queue Query Functions
|
|
616
|
+
// =============================================================================
|
|
617
|
+
/**
|
|
618
|
+
* Retrieve all pending sync operations that are currently eligible for processing.
|
|
619
|
+
*
|
|
620
|
+
* Items are returned in enqueue order (`timestamp` ascending). Items that have
|
|
621
|
+
* exceeded {@link MAX_SYNC_RETRIES} or are still within their backoff window
|
|
622
|
+
* are excluded.
|
|
623
|
+
*
|
|
624
|
+
* **Ordering guarantee:** Results are sorted by `timestamp` (the original
|
|
625
|
+
* enqueue time). This ensures operations are pushed to the server in the order
|
|
626
|
+
* the user performed them, which is important for correctness (e.g., a create
|
|
627
|
+
* must be pushed before subsequent updates to the same entity).
|
|
628
|
+
*
|
|
629
|
+
* @returns An array of sync operation items ready to be pushed to the server.
|
|
630
|
+
*
|
|
631
|
+
* @example
|
|
632
|
+
* ```ts
|
|
633
|
+
* const pending = await getPendingSync();
|
|
634
|
+
* for (const op of pending) {
|
|
635
|
+
* await pushToServer(op);
|
|
636
|
+
* }
|
|
637
|
+
* ```
|
|
638
|
+
*
|
|
639
|
+
* @see {@link shouldRetryItem} for the retry eligibility logic.
|
|
640
|
+
* @see {@link coalescePendingOps} which should be called before this to reduce the queue.
|
|
641
|
+
*/
|
|
293
642
|
/**
 * Fetch every queued sync operation that is currently eligible to push.
 *
 * Rows come back in enqueue order (`timestamp` ascending), so operations
 * reach the server in the order the user performed them. Rows that have
 * exhausted their retries or are still inside their backoff window are
 * filtered out and left in the queue for a later cycle.
 *
 * @returns {Promise<Array<object>>} Sync queue rows ready to be pushed.
 */
export async function getPendingSync() {
    const queue = getDb().table('syncQueue');
    const ordered = await queue.orderBy('timestamp').toArray();
    // Keep only rows whose backoff window has elapsed (see shouldRetryItem).
    return ordered.filter(shouldRetryItem);
}
|
|
302
|
-
|
|
653
|
+
/**
|
|
654
|
+
* Remove sync items that have permanently failed (exceeded {@link MAX_SYNC_RETRIES})
|
|
655
|
+
* and return a summary for user notification.
|
|
656
|
+
*
|
|
657
|
+
* This is a garbage-collection function typically called periodically or after
|
|
658
|
+
* a sync cycle completes. It logs a warning for each removed item via
|
|
659
|
+
* {@link debugWarn}.
|
|
660
|
+
*
|
|
661
|
+
* **Why return affected tables?** The caller (usually the sync engine) can use
|
|
662
|
+
* the table names to show targeted error messages to the user, e.g.,
|
|
663
|
+
* "Some changes to your goals could not be synced."
|
|
664
|
+
*
|
|
665
|
+
* @returns An object containing the count of removed items and the list of
|
|
666
|
+
* affected table names (useful for showing targeted error messages).
|
|
667
|
+
*
|
|
668
|
+
* @example
|
|
669
|
+
* ```ts
|
|
670
|
+
* const { count, tables } = await cleanupFailedItems();
|
|
671
|
+
* if (count > 0) {
|
|
672
|
+
* showToast(`${count} sync operations failed for: ${tables.join(', ')}`);
|
|
673
|
+
* }
|
|
674
|
+
* ```
|
|
675
|
+
*
|
|
676
|
+
* @see {@link MAX_SYNC_RETRIES} for the retry threshold.
|
|
677
|
+
* @see {@link shouldRetryItem} for the backoff logic that precedes permanent failure.
|
|
678
|
+
*/
|
|
303
679
|
export async function cleanupFailedItems() {
|
|
304
680
|
const db = getDb();
|
|
305
681
|
const allItems = (await db.table('syncQueue').toArray());
|
|
@@ -321,30 +697,124 @@ export async function cleanupFailedItems() {
|
|
|
321
697
|
tables: Array.from(affectedTables)
|
|
322
698
|
};
|
|
323
699
|
}
|
|
700
|
+
// =============================================================================
|
|
701
|
+
// Queue Mutation Functions
|
|
702
|
+
// =============================================================================
|
|
703
|
+
/**
|
|
704
|
+
* Remove a single sync operation from the queue by its primary key.
|
|
705
|
+
*
|
|
706
|
+
* Typically called after a successful server push to acknowledge the operation.
|
|
707
|
+
* This is the "happy path" cleanup -- the operation was pushed successfully
|
|
708
|
+
* and no longer needs to be tracked.
|
|
709
|
+
*
|
|
710
|
+
* @param id - The auto-increment primary key of the sync queue row to remove.
|
|
711
|
+
*
|
|
712
|
+
* @example
|
|
713
|
+
* ```ts
|
|
714
|
+
* await pushToServer(op);
|
|
715
|
+
* await removeSyncItem(op.id!);
|
|
716
|
+
* ```
|
|
717
|
+
*/
|
|
324
718
|
/**
 * Delete one sync operation from the queue by its primary key.
 *
 * Called after a successful server push to acknowledge the operation.
 *
 * @param {number} id - Auto-increment primary key of the sync queue row.
 * @returns {Promise<void>}
 */
export async function removeSyncItem(id) {
    await getDb().table('syncQueue').delete(id);
}
|
|
722
|
+
/**
|
|
723
|
+
* Increment the retry counter and record the current time as the last retry
|
|
724
|
+
* attempt for a sync operation that failed to push.
|
|
725
|
+
*
|
|
726
|
+
* The `timestamp` field is intentionally *not* modified -- it must be preserved
|
|
727
|
+
* to maintain correct operation ordering during coalescing and sync. Only
|
|
728
|
+
* `lastRetryAt` is updated, which is used exclusively by the backoff logic
|
|
729
|
+
* in {@link shouldRetryItem}.
|
|
730
|
+
*
|
|
731
|
+
* @param id - The auto-increment primary key of the sync queue row.
|
|
732
|
+
*
|
|
733
|
+
* @example
|
|
734
|
+
* ```ts
|
|
735
|
+
* try {
|
|
736
|
+
* await pushToServer(op);
|
|
737
|
+
* } catch {
|
|
738
|
+
* await incrementRetry(op.id!);
|
|
739
|
+
* }
|
|
740
|
+
* ```
|
|
741
|
+
*
|
|
742
|
+
* @see {@link shouldRetryItem} which reads `retries` and `lastRetryAt` for backoff.
|
|
743
|
+
*/
|
|
328
744
|
/**
 * Record a failed push attempt for a sync operation: bump its retry counter
 * and stamp `lastRetryAt` with the current time for backoff calculations.
 *
 * The original `timestamp` is deliberately left untouched so operation
 * ordering during coalescing and sync is preserved; only `lastRetryAt`
 * feeds the exponential backoff in `shouldRetryItem`.
 *
 * Missing rows are ignored (the item may have been removed concurrently).
 *
 * @param {number} id - Auto-increment primary key of the sync queue row.
 * @returns {Promise<void>}
 */
export async function incrementRetry(id) {
    const queue = getDb().table('syncQueue');
    const existing = await queue.get(id);
    if (!existing) {
        return;
    }
    await queue.update(id, {
        retries: existing.retries + 1,
        // Backoff is measured from the most recent failed attempt.
        lastRetryAt: new Date().toISOString()
    });
}
|
|
340
|
-
|
|
758
|
+
/**
|
|
759
|
+
* Retrieve the set of all entity IDs that have at least one pending sync operation.
|
|
760
|
+
*
|
|
761
|
+
* This is useful for:
|
|
762
|
+
* - **UI indicators:** Showing a "syncing" badge on entities that haven't been
|
|
763
|
+
* pushed to the server yet.
|
|
764
|
+
* - **Conflict detection:** The realtime handler uses this to decide whether an
|
|
765
|
+
* incoming remote change needs conflict resolution (Branch 3) or can be
|
|
766
|
+
* accepted directly (Branch 2).
|
|
767
|
+
*
|
|
768
|
+
* @returns A `Set` of entity UUID strings with pending operations.
|
|
769
|
+
*
|
|
770
|
+
* @example
|
|
771
|
+
* ```ts
|
|
772
|
+
* const pendingIds = await getPendingEntityIds();
|
|
773
|
+
* const isSyncing = pendingIds.has(goal.id);
|
|
774
|
+
* ```
|
|
775
|
+
*
|
|
776
|
+
* @see {@link ./realtime.ts} which calls this during change processing
|
|
777
|
+
*/
|
|
341
778
|
/**
 * Collect the IDs of every entity that still has at least one queued sync
 * operation.
 *
 * Useful for "syncing" UI badges and for the realtime handler to decide
 * whether an incoming remote change needs conflict resolution.
 *
 * @returns {Promise<Set<string>>} Entity UUIDs with pending operations.
 */
export async function getPendingEntityIds() {
    const rows = await getDb().table('syncQueue').toArray();
    const ids = new Set();
    for (const row of rows) {
        ids.add(row.entityId);
    }
    return ids;
}
|
|
783
|
+
// =============================================================================
|
|
784
|
+
// Queue Enqueue Functions
|
|
785
|
+
// =============================================================================
|
|
346
786
|
/**
|
|
347
787
|
* Queue a sync operation using the intent-based format.
|
|
788
|
+
*
|
|
789
|
+
* This is the low-level enqueue function. It stamps the operation with the
|
|
790
|
+
* current ISO 8601 timestamp and initializes the retry counter to 0, then
|
|
791
|
+
* inserts it into the `syncQueue` IndexedDB table.
|
|
792
|
+
*
|
|
793
|
+
* **Auto-generated fields:** The `id` (auto-increment primary key),
|
|
794
|
+
* `timestamp` (current time), and `retries` (0) are automatically set.
|
|
795
|
+
* Callers must not provide these.
|
|
796
|
+
*
|
|
797
|
+
* **Durability:** The operation is persisted to IndexedDB immediately. Even
|
|
798
|
+
* if the browser crashes or is closed before the next sync cycle, the
|
|
799
|
+
* operation will be picked up when the app restarts.
|
|
800
|
+
*
|
|
801
|
+
* @param item - The operation to enqueue, excluding auto-generated fields
|
|
802
|
+
* (`id`, `timestamp`, `retries`).
|
|
803
|
+
*
|
|
804
|
+
* @example
|
|
805
|
+
* ```ts
|
|
806
|
+
* await queueSyncOperation({
|
|
807
|
+
* table: 'goals',
|
|
808
|
+
* entityId: 'abc-123',
|
|
809
|
+
* operationType: 'set',
|
|
810
|
+
* field: 'title',
|
|
811
|
+
* value: 'New Title'
|
|
812
|
+
* });
|
|
813
|
+
* ```
|
|
814
|
+
*
|
|
815
|
+
* @see {@link queueCreateOperation} for a convenience wrapper around create ops.
|
|
816
|
+
* @see {@link queueDeleteOperation} for a convenience wrapper around delete ops.
|
|
817
|
+
* @see {@link coalescePendingOps} which later reduces redundant queued operations.
|
|
348
818
|
*/
|
|
349
819
|
export async function queueSyncOperation(item) {
|
|
350
820
|
const db = getDb();
|
|
@@ -357,6 +827,32 @@ export async function queueSyncOperation(item) {
|
|
|
357
827
|
}
|
|
358
828
|
/**
|
|
359
829
|
* Helper to queue a create operation.
|
|
830
|
+
*
|
|
831
|
+
* Convenience wrapper around {@link queueSyncOperation} for the common case of
|
|
832
|
+
* creating a new entity. The entire entity payload is stored as the operation
|
|
833
|
+
* value so it can be sent as-is (or merged with subsequent updates by the
|
|
834
|
+
* coalescing pipeline).
|
|
835
|
+
*
|
|
836
|
+
* **Coalescing behavior:** If the user subsequently modifies fields of this
|
|
837
|
+
* entity before sync, those set/increment operations will be folded into this
|
|
838
|
+
* create payload by Step 2, Case 3 of the coalescing pipeline. If the user
|
|
839
|
+
* subsequently deletes this entity, all operations (including this create)
|
|
840
|
+
* will be cancelled entirely by Step 2, Case 1.
|
|
841
|
+
*
|
|
842
|
+
* @param table - The target Supabase table name (e.g., `"goals"`).
|
|
843
|
+
* @param entityId - The UUID of the new entity.
|
|
844
|
+
* @param payload - The full entity object to create on the server.
|
|
845
|
+
*
|
|
846
|
+
* @example
|
|
847
|
+
* ```ts
|
|
848
|
+
* await queueCreateOperation('goals', newGoal.id, {
|
|
849
|
+
* id: newGoal.id,
|
|
850
|
+
* title: 'Learn TypeScript',
|
|
851
|
+
* created_at: new Date().toISOString()
|
|
852
|
+
* });
|
|
853
|
+
* ```
|
|
854
|
+
*
|
|
855
|
+
* @see {@link queueSyncOperation} for the underlying enqueue mechanism.
|
|
360
856
|
*/
|
|
361
857
|
export async function queueCreateOperation(table, entityId, payload) {
|
|
362
858
|
await queueSyncOperation({
|
|
@@ -368,6 +864,28 @@ export async function queueCreateOperation(table, entityId, payload) {
|
|
|
368
864
|
}
|
|
369
865
|
/**
|
|
370
866
|
* Helper to queue a delete operation.
|
|
867
|
+
*
|
|
868
|
+
* Convenience wrapper around {@link queueSyncOperation} for deleting an entity.
|
|
869
|
+
* No value is needed -- the operation type and entity ID are sufficient for the
|
|
870
|
+
* server to process the deletion.
|
|
871
|
+
*
|
|
872
|
+
* **Coalescing behavior:**
|
|
873
|
+
* - If this entity was created offline (has a pending create), both the create
|
|
874
|
+
* and this delete are cancelled entirely (Step 2, Case 1).
|
|
875
|
+
* - If this entity existed on the server, all preceding set/increment operations
|
|
876
|
+
* are dropped and only this delete survives (Step 2, Case 2).
|
|
877
|
+
*
|
|
878
|
+
* @param table - The target Supabase table name (e.g., `"goals"`).
|
|
879
|
+
* @param entityId - The UUID of the entity to delete.
|
|
880
|
+
*
|
|
881
|
+
* @example
|
|
882
|
+
* ```ts
|
|
883
|
+
* await queueDeleteOperation('goals', goalToRemove.id);
|
|
884
|
+
* ```
|
|
885
|
+
*
|
|
886
|
+
* @see {@link queueSyncOperation} for the underlying enqueue mechanism.
|
|
887
|
+
* @see {@link coalescePendingOps} Step 2, Cases 1-2 for how deletes interact with
|
|
888
|
+
* other operations during coalescing.
|
|
371
889
|
*/
|
|
372
890
|
export async function queueDeleteOperation(table, entityId) {
|
|
373
891
|
await queueSyncOperation({
|