@prabhask5/stellar-engine 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. package/README.md +295 -0
  2. package/dist/actions/remoteChange.d.ts +79 -0
  3. package/dist/actions/remoteChange.d.ts.map +1 -0
  4. package/dist/actions/remoteChange.js +300 -0
  5. package/dist/actions/remoteChange.js.map +1 -0
  6. package/dist/auth/admin.d.ts +12 -0
  7. package/dist/auth/admin.d.ts.map +1 -0
  8. package/dist/auth/admin.js +23 -0
  9. package/dist/auth/admin.js.map +1 -0
  10. package/dist/auth/offlineCredentials.d.ts +41 -0
  11. package/dist/auth/offlineCredentials.d.ts.map +1 -0
  12. package/dist/auth/offlineCredentials.js +121 -0
  13. package/dist/auth/offlineCredentials.js.map +1 -0
  14. package/dist/auth/offlineLogin.d.ts +34 -0
  15. package/dist/auth/offlineLogin.d.ts.map +1 -0
  16. package/dist/auth/offlineLogin.js +75 -0
  17. package/dist/auth/offlineLogin.js.map +1 -0
  18. package/dist/auth/offlineSession.d.ts +22 -0
  19. package/dist/auth/offlineSession.d.ts.map +1 -0
  20. package/dist/auth/offlineSession.js +54 -0
  21. package/dist/auth/offlineSession.js.map +1 -0
  22. package/dist/auth/resolveAuthState.d.ts +24 -0
  23. package/dist/auth/resolveAuthState.d.ts.map +1 -0
  24. package/dist/auth/resolveAuthState.js +69 -0
  25. package/dist/auth/resolveAuthState.js.map +1 -0
  26. package/dist/config.d.ts +53 -0
  27. package/dist/config.d.ts.map +1 -0
  28. package/dist/config.js +55 -0
  29. package/dist/config.js.map +1 -0
  30. package/dist/conflicts.d.ts +70 -0
  31. package/dist/conflicts.d.ts.map +1 -0
  32. package/dist/conflicts.js +321 -0
  33. package/dist/conflicts.js.map +1 -0
  34. package/dist/data.d.ts +77 -0
  35. package/dist/data.d.ts.map +1 -0
  36. package/dist/data.js +360 -0
  37. package/dist/data.js.map +1 -0
  38. package/dist/database.d.ts +31 -0
  39. package/dist/database.d.ts.map +1 -0
  40. package/dist/database.js +51 -0
  41. package/dist/database.js.map +1 -0
  42. package/dist/debug.d.ts +11 -0
  43. package/dist/debug.d.ts.map +1 -0
  44. package/dist/debug.js +48 -0
  45. package/dist/debug.js.map +1 -0
  46. package/dist/deviceId.d.ts +16 -0
  47. package/dist/deviceId.d.ts.map +1 -0
  48. package/dist/deviceId.js +48 -0
  49. package/dist/deviceId.js.map +1 -0
  50. package/dist/engine.d.ts +14 -0
  51. package/dist/engine.d.ts.map +1 -0
  52. package/dist/engine.js +1903 -0
  53. package/dist/engine.js.map +1 -0
  54. package/dist/entries/actions.d.ts +2 -0
  55. package/dist/entries/actions.d.ts.map +1 -0
  56. package/dist/entries/actions.js +3 -0
  57. package/dist/entries/actions.js.map +1 -0
  58. package/dist/entries/auth.d.ts +7 -0
  59. package/dist/entries/auth.d.ts.map +1 -0
  60. package/dist/entries/auth.js +6 -0
  61. package/dist/entries/auth.js.map +1 -0
  62. package/dist/entries/config.d.ts +3 -0
  63. package/dist/entries/config.d.ts.map +1 -0
  64. package/dist/entries/config.js +3 -0
  65. package/dist/entries/config.js.map +1 -0
  66. package/dist/entries/stores.d.ts +9 -0
  67. package/dist/entries/stores.d.ts.map +1 -0
  68. package/dist/entries/stores.js +9 -0
  69. package/dist/entries/stores.js.map +1 -0
  70. package/dist/entries/types.d.ts +11 -0
  71. package/dist/entries/types.d.ts.map +1 -0
  72. package/dist/entries/types.js +2 -0
  73. package/dist/entries/types.js.map +1 -0
  74. package/dist/entries/utils.d.ts +3 -0
  75. package/dist/entries/utils.d.ts.map +1 -0
  76. package/dist/entries/utils.js +4 -0
  77. package/dist/entries/utils.js.map +1 -0
  78. package/dist/index.d.ts +32 -0
  79. package/dist/index.d.ts.map +1 -0
  80. package/dist/index.js +39 -0
  81. package/dist/index.js.map +1 -0
  82. package/dist/operations.d.ts +73 -0
  83. package/dist/operations.d.ts.map +1 -0
  84. package/dist/operations.js +227 -0
  85. package/dist/operations.js.map +1 -0
  86. package/dist/queue.d.ts +32 -0
  87. package/dist/queue.d.ts.map +1 -0
  88. package/dist/queue.js +377 -0
  89. package/dist/queue.js.map +1 -0
  90. package/dist/realtime.d.ts +57 -0
  91. package/dist/realtime.d.ts.map +1 -0
  92. package/dist/realtime.js +491 -0
  93. package/dist/realtime.js.map +1 -0
  94. package/dist/reconnectHandler.d.ts +16 -0
  95. package/dist/reconnectHandler.d.ts.map +1 -0
  96. package/dist/reconnectHandler.js +21 -0
  97. package/dist/reconnectHandler.js.map +1 -0
  98. package/dist/runtime/runtimeConfig.d.ts +27 -0
  99. package/dist/runtime/runtimeConfig.d.ts.map +1 -0
  100. package/dist/runtime/runtimeConfig.js +133 -0
  101. package/dist/runtime/runtimeConfig.js.map +1 -0
  102. package/dist/stores/authState.d.ts +57 -0
  103. package/dist/stores/authState.d.ts.map +1 -0
  104. package/dist/stores/authState.js +154 -0
  105. package/dist/stores/authState.js.map +1 -0
  106. package/dist/stores/network.d.ts +9 -0
  107. package/dist/stores/network.d.ts.map +1 -0
  108. package/dist/stores/network.js +97 -0
  109. package/dist/stores/network.js.map +1 -0
  110. package/dist/stores/remoteChanges.d.ts +142 -0
  111. package/dist/stores/remoteChanges.d.ts.map +1 -0
  112. package/dist/stores/remoteChanges.js +353 -0
  113. package/dist/stores/remoteChanges.js.map +1 -0
  114. package/dist/stores/sync.d.ts +35 -0
  115. package/dist/stores/sync.d.ts.map +1 -0
  116. package/dist/stores/sync.js +115 -0
  117. package/dist/stores/sync.js.map +1 -0
  118. package/dist/supabase/auth.d.ts +60 -0
  119. package/dist/supabase/auth.d.ts.map +1 -0
  120. package/dist/supabase/auth.js +298 -0
  121. package/dist/supabase/auth.js.map +1 -0
  122. package/dist/supabase/client.d.ts +15 -0
  123. package/dist/supabase/client.d.ts.map +1 -0
  124. package/dist/supabase/client.js +149 -0
  125. package/dist/supabase/client.js.map +1 -0
  126. package/dist/supabase/validate.d.ts +11 -0
  127. package/dist/supabase/validate.d.ts.map +1 -0
  128. package/dist/supabase/validate.js +38 -0
  129. package/dist/supabase/validate.js.map +1 -0
  130. package/dist/types.d.ts +78 -0
  131. package/dist/types.d.ts.map +1 -0
  132. package/dist/types.js +16 -0
  133. package/dist/types.js.map +1 -0
  134. package/dist/utils.d.ts +24 -0
  135. package/dist/utils.d.ts.map +1 -0
  136. package/dist/utils.js +56 -0
  137. package/dist/utils.js.map +1 -0
  138. package/package.json +84 -0
package/dist/engine.js ADDED
@@ -0,0 +1,1903 @@
1
+ import { getEngineConfig } from './config';
2
+ import { debugLog, debugWarn, debugError, isDebugMode } from './debug';
3
+ import { getPendingSync, removeSyncItem, incrementRetry, getPendingEntityIds, cleanupFailedItems, coalescePendingOps, queueSyncOperation } from './queue';
4
+ import { getDeviceId } from './deviceId';
5
+ import { syncStatusStore } from './stores/sync';
6
+ import { resolveConflicts, storeConflictHistory, cleanupConflictHistory, getPendingOpsForEntity } from './conflicts';
7
+ import { startRealtimeSubscriptions, stopRealtimeSubscriptions, onRealtimeDataUpdate, onConnectionStateChange, cleanupRealtimeTracking, isRealtimeHealthy, getConnectionState, pauseRealtime, wasRecentlyProcessedByRealtime } from './realtime';
8
+ import { isOnline } from './stores/network';
9
+ import { getSession } from './supabase/auth';
10
+ import { supabase as supabaseProxy } from './supabase/client';
11
+ import { getOfflineCredentials } from './auth/offlineCredentials';
12
+ import { getValidOfflineSession, createOfflineSession } from './auth/offlineSession';
13
+ // ============================================================
14
+ // LOCAL-FIRST SYNC ENGINE
15
+ //
16
+ // Rules:
17
+ // 1. All reads come from local DB (IndexedDB)
18
+ // 2. All writes go to local DB first, immediately
19
+ // 3. Every write creates a pending operation in the outbox
20
+ // 4. Sync loop ships outbox to server in background
21
+ // 5. On refresh, load local state instantly, then run background sync
22
+ // ============================================================
23
+ // Helper functions for config-driven access
24
// Resolve the configured Dexie database instance; fail loudly when the
// engine was initialized without one.
function getDb() {
    const { db } = getEngineConfig();
    if (db) {
        return db;
    }
    throw new Error('Database not initialized. Provide db or database config to initEngine().');
}
30
// Resolve the Supabase client: an explicitly configured instance wins,
// otherwise fall back to the lazily-initialized proxy client.
function getSupabase() {
    const { supabase } = getEngineConfig();
    return supabase ? supabase : supabaseProxy;
}
37
// Map a Supabase table name to its local Dexie table name.
// Falls back to the Supabase name when no mapping is configured.
function getDexieTableName(supabaseName) {
    const entry = getEngineConfig().tables.find((t) => t.supabaseName === supabaseName);
    return entry?.dexieTable || supabaseName;
}
41
// Column selection string for a table's pull queries; '*' when unspecified.
function getColumns(supabaseName) {
    const entry = getEngineConfig().tables.find((t) => t.supabaseName === supabaseName);
    return entry?.columns || '*';
}
45
// Whether the given table is configured as a singleton (one row per user).
function isSingletonTable(supabaseName) {
    const entry = getEngineConfig().tables.find((t) => t.supabaseName === supabaseName);
    return entry?.isSingleton || false;
}
49
+ // Getter functions for config values (can't read config at module level)
50
// Debounce applied to write-triggered syncs (default 2s).
function getSyncDebounceMs() {
    const { syncDebounceMs } = getEngineConfig();
    return syncDebounceMs ?? 2000;
}
53
// Period of the background sync loop (default 15 minutes).
function getSyncIntervalMs() {
    const { syncIntervalMs } = getEngineConfig();
    return syncIntervalMs ?? 900000;
}
56
// Retention window for delete tombstones, in days (default 1).
function getTombstoneMaxAgeDays() {
    const { tombstoneMaxAgeDays } = getEngineConfig();
    return tombstoneMaxAgeDays ?? 1;
}
59
// Minimum time the tab must have been hidden before a visibility-change
// triggers a sync (default 5 minutes).
function getVisibilitySyncMinAwayMs() {
    const { visibilitySyncMinAwayMs } = getEngineConfig();
    return visibilitySyncMinAwayMs ?? 300000;
}
62
// Cooldown after a successful sync before an online-reconnect event may
// trigger another one (default 2 minutes).
function getOnlineReconnectCooldownMs() {
    const { onlineReconnectCooldownMs } = getEngineConfig();
    return onlineReconnectCooldownMs ?? 120000;
}
65
// Namespace prefix used for window debug utilities (default 'engine').
function getPrefix() {
    const { prefix } = getEngineConfig();
    return prefix ? prefix : 'engine';
}
68
// Track if we were recently offline (for auth validation on reconnect).
// Both flags are module-level session state consumed by needsAuthValidation().
let wasOffline = false;
let authValidatedAfterReconnect = true; // Start as true (no validation needed initially)
71
/**
 * Clear all pending sync operations (used when auth is invalid).
 * SECURITY: called when offline credentials are found to be invalid so that
 * unauthorized local writes are never shipped to the server.
 * @returns {Promise<number>} number of queue entries removed; 0 on failure.
 */
export async function clearPendingSyncQueue() {
    try {
        const queue = getDb().table('syncQueue');
        const count = await queue.count();
        await queue.clear();
        debugLog(`[SYNC] Cleared ${count} pending sync operations (auth invalid)`);
        return count;
    }
    catch (e) {
        // Best-effort: report the failure and signal "nothing cleared".
        debugError('[SYNC] Failed to clear sync queue:', e);
        return 0;
    }
}
89
/**
 * Flag that offline credentials must be re-validated before the next sync.
 * Called when connectivity is lost.
 */
function markOffline() {
    authValidatedAfterReconnect = false;
    wasOffline = true;
}
97
/**
 * Mark credentials as validated after reconnect — syncing is safe again.
 */
function markAuthValidated() {
    wasOffline = false;
    authValidatedAfterReconnect = true;
}
105
/**
 * True when we went offline and have not yet re-validated credentials.
 */
function needsAuthValidation() {
    if (!wasOffline)
        return false;
    return !authValidatedAfterReconnect;
}
111
const syncStats = []; // rolling log of recent sync cycles (capped at 100 in logSyncCycle)
let totalSyncCycles = 0; // lifetime cycle counter for this page session
// Aggregate egress (bytes/records pulled from the server) for this session.
const egressStats = {
    totalBytes: 0,
    totalRecords: 0,
    byTable: {},
    sessionStart: new Date().toISOString()
};
119
/**
 * Estimate the serialized JSON size of a value, in bytes.
 *
 * FIX: the previous fallback re-ran JSON.stringify inside the catch block,
 * so a value that cannot be stringified (circular references, BigInt) threw
 * again instead of degrading. Stringify exactly once; a stringify failure
 * now yields 0, and a Blob failure falls back to the string length.
 * @param {*} data - value to measure.
 * @returns {number} estimated byte size (0 when unserializable).
 */
function estimateJsonSize(data) {
    let json;
    try {
        json = JSON.stringify(data);
    }
    catch {
        return 0; // unserializable: circular structure, BigInt, ...
    }
    if (json === undefined)
        return 0; // e.g. data is undefined or a bare function
    try {
        // Blob measures true UTF-8 byte length (multi-byte chars counted correctly).
        return new Blob([json]).size;
    }
    catch {
        // Environments without Blob: rough estimate via UTF-16 code-unit count.
        return json.length;
    }
}
129
// Fold one table's pull payload into the session egress accounting.
// Returns the bytes/records attributed to this payload.
function trackEgress(tableName, data) {
    if (!data || data.length === 0) {
        return { bytes: 0, records: 0 };
    }
    const records = data.length;
    const bytes = estimateJsonSize(data);
    // Session-wide totals.
    egressStats.totalBytes += bytes;
    egressStats.totalRecords += records;
    // Per-table breakdown (created lazily on first sight of the table).
    let perTable = egressStats.byTable[tableName];
    if (!perTable) {
        perTable = { bytes: 0, records: 0 };
        egressStats.byTable[tableName] = perTable;
    }
    perTable.bytes += bytes;
    perTable.records += records;
    return { bytes, records };
}
147
// Human-readable byte count: plain B below 1 KiB, KB below 1 MiB, else MB.
function formatBytes(bytes) {
    const KB = 1024;
    const MB = KB * 1024;
    if (bytes >= MB)
        return `${(bytes / (1024 * 1024)).toFixed(2)} MB`;
    if (bytes >= KB)
        return `${(bytes / 1024).toFixed(2)} KB`;
    return `${bytes} B`;
}
155
// Record one completed sync cycle in the rolling stats log and emit a
// one-line debug summary. History is capped at the 100 most recent cycles.
function logSyncCycle(stats) {
    syncStats.push({ ...stats, timestamp: new Date().toISOString() });
    totalSyncCycles += 1;
    while (syncStats.length > 100) {
        syncStats.shift();
    }
    debugLog(`[SYNC] Cycle #${totalSyncCycles}: ` +
        `trigger=${stats.trigger}, pushed=${stats.pushedItems}, ` +
        `pulled=${stats.pulledRecords} records (${formatBytes(stats.egressBytes)}), ${stats.durationMs}ms`);
}
170
// Export for debugging in browser console
// Uses configurable prefix: window.__<prefix>SyncStats?.()
// Also: window.__<prefix>Tombstones?.() or window.__<prefix>Tombstones?.({ cleanup: true, force: true })
// Also: window.__<prefix>Egress?.()
// Also: window.__<prefix>Sync.forceFullSync(), .resetSyncCursor(), .sync(), .getStatus(), .checkConnection(), .realtimeStatus()
function initDebugWindowUtilities() {
    // Only attach console helpers in a browser, and only when debug mode is on.
    if (typeof window === 'undefined' || !isDebugMode())
        return;
    const prefix = getPrefix();
    // window.__<prefix>SyncStats(): dump cycle counts and the 10 most recent cycles.
    window[`__${prefix}SyncStats`] = () => {
        // Cycles whose timestamp falls within the last 60 seconds.
        const recentMinute = syncStats.filter((s) => new Date(s.timestamp).getTime() > Date.now() - 60000);
        debugLog(`=== ${prefix.toUpperCase()} SYNC STATS ===`);
        debugLog(`Total cycles: ${totalSyncCycles}`);
        debugLog(`Last minute: ${recentMinute.length} cycles`);
        debugLog(`Recent cycles:`, syncStats.slice(-10));
        return { totalSyncCycles, recentMinute: recentMinute.length, recent: syncStats.slice(-10) };
    };
    // window.__<prefix>Egress(): dump session egress totals, per-table breakdown,
    // and the last few sync cycles; also returns the data for programmatic use.
    window[`__${prefix}Egress`] = () => {
        debugLog(`=== ${prefix.toUpperCase()} EGRESS STATS ===`);
        debugLog(`Session started: ${egressStats.sessionStart}`);
        debugLog(`Total egress: ${formatBytes(egressStats.totalBytes)} (${egressStats.totalRecords} records)`);
        debugLog('');
        debugLog('--- BY TABLE ---');
        // Sort tables by bytes descending
        const sortedTables = Object.entries(egressStats.byTable).sort(([, a], [, b]) => b.bytes - a.bytes);
        for (const [table, stats] of sortedTables) {
            // Percentage of total egress this table accounts for ('0' when no egress yet).
            const pct = egressStats.totalBytes > 0
                ? ((stats.bytes / egressStats.totalBytes) * 100).toFixed(1)
                : '0';
            debugLog(` ${table}: ${formatBytes(stats.bytes)} (${stats.records} records, ${pct}%)`);
        }
        debugLog('');
        debugLog('--- RECENT SYNC CYCLES ---');
        const recent = syncStats.slice(-5);
        for (const cycle of recent) {
            debugLog(` ${cycle.timestamp}: ${formatBytes(cycle.egressBytes)} (${cycle.pulledRecords} records)`);
        }
        return {
            sessionStart: egressStats.sessionStart,
            totalBytes: egressStats.totalBytes,
            totalFormatted: formatBytes(egressStats.totalBytes),
            totalRecords: egressStats.totalRecords,
            byTable: egressStats.byTable,
            recentCycles: syncStats.slice(-10)
        };
    };
    // Tombstone debug - will be initialized after debugTombstones function is defined
    // See below where it's assigned after the function definition
}
219
let syncTimeout = null; // debounce timer handle for write-triggered syncs
let syncInterval = null; // periodic background sync timer handle
let _hasHydrated = false; // Track if initial hydration has been attempted
// EGRESS OPTIMIZATION: Cache getUser() validation to avoid network call every sync cycle
let lastUserValidation = 0;
let lastValidatedUserId = null;
const USER_VALIDATION_INTERVAL_MS = 60 * 60 * 1000; // 1 hour
// EGRESS OPTIMIZATION: Track last successful sync for online-reconnect cooldown
let lastSuccessfulSyncTimestamp = 0;
let isTabVisible = true; // Track tab visibility
let visibilityDebounceTimeout = null; // debounce timer for visibility-change syncs
let tabHiddenAt = null; // Track when tab became hidden for smart sync
const VISIBILITY_SYNC_DEBOUNCE_MS = 1000; // Debounce for visibility change syncs
const RECENTLY_MODIFIED_TTL_MS = 2000; // Protect recently modified entities for 2 seconds
// Industry standard: 500ms-2000ms. 2s covers sync debounce (1s) + network latency with margin.
// Track recently modified entity IDs to prevent pull from overwriting fresh local changes
// This provides an additional layer of protection beyond the pending queue check.
// Map of entityId -> Date.now() timestamp of the last local write.
const recentlyModifiedEntities = new Map();
237
// Record that `entityId` was just written locally, so an in-flight pull will
// not clobber the fresh local value. Called by repositories after local writes.
export function markEntityModified(entityId) {
    const now = Date.now();
    recentlyModifiedEntities.set(entityId, now);
}
241
// Was this entity written locally within the protection TTL?
// Lazily evicts expired entries as a side effect.
function isRecentlyModified(entityId) {
    const stamp = recentlyModifiedEntities.get(entityId);
    if (!stamp)
        return false;
    if (Date.now() - stamp <= RECENTLY_MODIFIED_TTL_MS)
        return true;
    // Stale entry: drop it so the map stays small.
    recentlyModifiedEntities.delete(entityId);
    return false;
}
254
// Periodic sweep: evict protection entries older than the TTL.
function cleanupRecentlyModified() {
    const cutoff = Date.now() - RECENTLY_MODIFIED_TTL_MS;
    for (const [entityId, modifiedAt] of recentlyModifiedEntities.entries()) {
        if (modifiedAt < cutoff) {
            recentlyModifiedEntities.delete(entityId);
        }
    }
}
263
// Proper async mutex to prevent concurrent syncs.
// Uses a queue-based approach where each caller waits for the previous one.
let lockPromise = null; // resolves when the current holder releases
let lockResolve = null; // resolver paired with lockPromise
let lockAcquiredAt = null; // Date.now() when the lock was taken (staleness check)
const SYNC_LOCK_TIMEOUT_MS = 60000; // Force-release lock after 60s
// Store event listener references for cleanup
let handleOnlineRef = null;
let handleOfflineRef = null;
let handleVisibilityChangeRef = null;
// Watchdog: detect stuck syncs and auto-retry
let watchdogInterval = null;
const WATCHDOG_INTERVAL_MS = 15000; // Check every 15s
const SYNC_OPERATION_TIMEOUT_MS = 45000; // Abort sync operations after 45s
277
// Try to take the sync mutex. Returns false when another sync holds it,
// unless that hold has gone stale (> SYNC_LOCK_TIMEOUT_MS), in which case
// the stale lock is force-released and this caller takes over.
async function acquireSyncLock() {
    if (lockPromise !== null) {
        const stale = lockAcquiredAt && Date.now() - lockAcquiredAt > SYNC_LOCK_TIMEOUT_MS;
        if (!stale) {
            return false;
        }
        debugWarn(`[SYNC] Force-releasing stale sync lock (held for ${Math.round((Date.now() - lockAcquiredAt) / 1000)}s)`);
        releaseSyncLock();
    }
    // Take the lock: the promise settles when releaseSyncLock() is called.
    lockPromise = new Promise((resolve) => {
        lockResolve = resolve;
    });
    lockAcquiredAt = Date.now();
    return true;
}
295
// Release the sync mutex: settle the lock promise (waking any waiter) and
// clear all lock state. Safe to call when the lock is not held.
function releaseSyncLock() {
    lockResolve?.();
    lockResolve = null;
    lockPromise = null;
    lockAcquiredAt = null;
}
303
// Race `promise` against a deadline of `ms` milliseconds; on timeout the
// returned promise rejects with an Error naming `label`.
function withTimeout(promise, ms, label) {
    return new Promise((resolve, reject) => {
        const timer = setTimeout(() => {
            reject(new Error(`${label} timed out after ${Math.round(ms / 1000)}s`));
        }, ms);
        promise.then((value) => {
            clearTimeout(timer);
            resolve(value);
        }, (err) => {
            clearTimeout(timer);
            reject(err);
        });
    });
}
312
// Callbacks invoked after a sync cycle completes, so stores can refresh
// their views from the local DB. Registered via onSyncComplete().
const syncCompleteCallbacks = new Set();
314
// Subscribe a callback to run after each completed sync.
// Returns an unsubscribe function.
export function onSyncComplete(callback) {
    syncCompleteCallbacks.add(callback);
    debugLog(`[SYNC] Store registered for sync complete (total: ${syncCompleteCallbacks.size})`);
    return function unsubscribe() {
        syncCompleteCallbacks.delete(callback);
        debugLog(`[SYNC] Store unregistered from sync complete (total: ${syncCompleteCallbacks.size})`);
    };
}
322
// Invoke every registered sync-complete callback. One failing callback must
// not prevent the others from running, so each call is isolated.
function notifySyncComplete() {
    debugLog(`[SYNC] Notifying ${syncCompleteCallbacks.size} store callbacks to refresh`);
    syncCompleteCallbacks.forEach((callback) => {
        try {
            callback();
        }
        catch (e) {
            debugError('Sync callback error:', e);
        }
    });
}
333
+ // ============================================================
334
+ // SYNC OPERATIONS - Background sync to/from Supabase
335
+ // ============================================================
336
// Schedule a debounced sync after a local write. Repeated calls within the
// debounce window collapse into a single sync.
export function scheduleSyncPush() {
    if (syncTimeout) {
        clearTimeout(syncTimeout);
    }
    const debounceMs = getSyncDebounceMs();
    syncTimeout = setTimeout(() => {
        // EGRESS OPTIMIZATION: when realtime is healthy, other devices' changes
        // already arrive via realtime — push only, skip the pull.
        const skipPull = isRealtimeHealthy();
        if (skipPull) {
            debugLog('[SYNC] Realtime healthy - push-only mode (skipping pull)');
        }
        // false => show the syncing indicator for user-triggered writes.
        runFullSync(false, skipPull);
    }, debounceMs);
}
351
// Cache a successfully validated user id and stamp the validation time.
// EGRESS: lets subsequent sync cycles skip the getUser() network round-trip
// for up to USER_VALIDATION_INTERVAL_MS. No-op when id is null.
function cacheValidatedUser(id) {
    if (id) {
        lastValidatedUserId = id;
        lastUserValidation = Date.now();
    }
}
/**
 * Get the current user ID for sync cursor isolation, or null when there is
 * no valid session.
 * CRITICAL: validates that the session is actually valid, not just cached:
 * expired sessions are refreshed, and (at most once per hour) the token is
 * verified server-side via getUser() to catch server-side revocation.
 *
 * REFACTOR: the "cache validated user" snippet was duplicated in three
 * branches; it is now the cacheValidatedUser() helper above. Behavior and
 * all log messages are unchanged.
 * @returns {Promise<string|null>} the authenticated user's id, or null.
 */
async function getCurrentUserId() {
    try {
        const supabase = getSupabase();
        // First check if we have a session at all
        const { data: { session }, error: sessionError } = await supabase.auth.getSession();
        if (sessionError) {
            debugWarn('[SYNC] Session error:', sessionError.message);
            return null;
        }
        if (!session) {
            debugWarn('[SYNC] No active session');
            return null;
        }
        // Check if session is expired (expires_at is in epoch seconds)
        const expiresAt = session.expires_at;
        if (expiresAt && expiresAt * 1000 < Date.now()) {
            debugLog('[SYNC] Session expired, attempting refresh...');
            const { data: refreshData, error: refreshError } = await supabase.auth.refreshSession();
            if (refreshError || !refreshData.session) {
                debugWarn('[SYNC] Failed to refresh session:', refreshError?.message);
                return null;
            }
            debugLog('[SYNC] Session refreshed successfully');
            const refreshedId = refreshData.session.user?.id || null;
            cacheValidatedUser(refreshedId);
            return refreshedId;
        }
        // EGRESS OPTIMIZATION: Only validate with getUser() (network call) once per hour.
        // Between validations, trust the cached session.
        const now = Date.now();
        if (lastValidatedUserId && session.user?.id === lastValidatedUserId && (now - lastUserValidation) < USER_VALIDATION_INTERVAL_MS) {
            return session.user.id;
        }
        // Session looks valid, but also validate with getUser() (network call).
        // This catches cases where the token is revoked server-side.
        const { data: { user }, error: userError } = await supabase.auth.getUser();
        if (userError) {
            debugWarn('[SYNC] User validation failed:', userError.message);
            // Invalidate cache on error
            lastValidatedUserId = null;
            lastUserValidation = 0;
            // Try to refresh the session
            const { data: refreshData, error: refreshError } = await supabase.auth.refreshSession();
            if (refreshError || !refreshData.session) {
                debugWarn('[SYNC] Failed to refresh after user validation error');
                return null;
            }
            const refreshedId = refreshData.session.user?.id || null;
            cacheValidatedUser(refreshedId);
            return refreshedId;
        }
        // Cache successful validation
        cacheValidatedUser(user?.id || null);
        return user?.id || null;
    }
    catch (e) {
        debugError('[SYNC] Auth validation error:', e);
        return null;
    }
}
423
// Read the per-user sync cursor from localStorage. Per-user keys prevent
// cross-user cursor reuse. Returns the epoch sentinel when storage is
// unavailable or nothing is stored.
function getLastSyncCursor(userId) {
    const epoch = '1970-01-01T00:00:00.000Z';
    if (typeof localStorage === 'undefined')
        return epoch;
    const key = userId ? `lastSyncCursor_${userId}` : 'lastSyncCursor';
    const stored = localStorage.getItem(key);
    return stored ? stored : epoch;
}
430
// Persist the per-user sync cursor. No-op when localStorage is unavailable
// (e.g. server-side rendering).
function setLastSyncCursor(cursor, userId) {
    if (typeof localStorage === 'undefined')
        return;
    const key = userId ? `lastSyncCursor_${userId}` : 'lastSyncCursor';
    localStorage.setItem(key, cursor);
}
437
/**
 * Reset the sync cursor so the next sync pulls ALL data.
 * Available in browser console via window.__<prefix>Sync.resetSyncCursor()
 */
async function resetSyncCursor() {
    const userId = await getCurrentUserId();
    if (typeof localStorage === 'undefined')
        return;
    const key = userId ? `lastSyncCursor_${userId}` : 'lastSyncCursor';
    localStorage.removeItem(key);
    debugLog('[SYNC] Sync cursor reset - next sync will pull all data');
}
449
/**
 * Force a full sync: reset the cursor, clear all local entity tables (the
 * sync queue is preserved so pending local writes survive), then re-pull
 * everything from the server.
 * Available in browser console via window.__<prefix>Sync.forceFullSync()
 *
 * FIX: previously read `config.db` directly with no guard — a missing db
 * produced a cryptic TypeError on `db.table(...)`. Now uses the getDb()
 * helper, which throws the engine's informative "Database not initialized"
 * error, consistent with every other db access in this module.
 * @throws when the db is not configured or the pull fails (sync status is
 *   set to 'error' before rethrowing).
 */
async function forceFullSync() {
    debugLog('[SYNC] Starting force full sync...');
    const config = getEngineConfig();
    const db = getDb();
    await resetSyncCursor();
    // Clear local data (except sync queue - keep pending changes)
    const entityTables = config.tables.map(t => db.table(t.dexieTable));
    await db.transaction('rw', entityTables, async () => {
        for (const t of entityTables) {
            await t.clear();
        }
    });
    debugLog('[SYNC] Local data cleared, pulling from server...');
    try {
        syncStatusStore.setStatus('syncing');
        syncStatusStore.setSyncMessage('Downloading all data...');
        await pullRemoteChanges();
        syncStatusStore.setStatus('idle');
        syncStatusStore.setSyncMessage('Full sync complete');
        notifySyncComplete();
        debugLog('[SYNC] Force full sync complete');
    }
    catch (error) {
        debugError('[SYNC] Force full sync failed:', error);
        syncStatusStore.setStatus('error');
        syncStatusStore.setError('Full sync failed', String(error));
        throw error;
    }
}
482
/**
 * PULL: fetch rows changed on the server since the last per-user cursor and
 * apply them to the local DB with field-level conflict resolution.
 *
 * FIX: previously read `config.db` and `config.supabase` directly, bypassing
 * the getDb() guard (informative error when db is missing) and getSupabase()'s
 * proxy-client fallback — so a config without an explicit supabase client
 * crashed here while every other code path worked. Now uses the helpers.
 * @param {string} [minCursor] optional cursor floor (e.g. the timestamp taken
 *   after a push) so rows we just pushed are not re-fetched this cycle.
 * @returns {Promise<{bytes: number, records: number}>} egress for this pull.
 * @throws when not authenticated, on any table query error, or on timeout.
 */
async function pullRemoteChanges(minCursor) {
    const userId = await getCurrentUserId();
    // Abort if no authenticated user (avoids confusing RLS errors)
    if (!userId) {
        throw new Error('Not authenticated. Please sign in to sync.');
    }
    const config = getEngineConfig();
    const db = getDb();
    const supabase = getSupabase();
    // Use the later of stored cursor or provided minCursor
    // This prevents re-fetching records we just pushed in this sync cycle
    const storedCursor = getLastSyncCursor(userId);
    const lastSync = minCursor && minCursor > storedCursor ? minCursor : storedCursor;
    const pendingEntityIds = await getPendingEntityIds();
    debugLog(`[SYNC] Pulling changes since: ${lastSync} (stored: ${storedCursor}, min: ${minCursor || 'none'})`);
    // Track the newest updated_at we see
    let newestUpdate = lastSync;
    // Track egress for this pull
    let pullBytes = 0;
    let pullRecords = 0;
    // Pull all tables in parallel (egress optimization: reduces wall time per sync cycle)
    // Wrapped in timeout to prevent hanging if Supabase doesn't respond
    const results = await withTimeout(Promise.all(config.tables.map(table => supabase.from(table.supabaseName).select(table.columns).gt('updated_at', lastSync))), 30000, 'Pull remote changes');
    // Check for errors
    for (let i = 0; i < results.length; i++) {
        if (results[i].error)
            throw results[i].error;
    }
    // Track egress
    const tableNames = config.tables.map(t => t.supabaseName);
    for (let i = 0; i < config.tables.length; i++) {
        const egress = trackEgress(tableNames[i], results[i].data);
        pullBytes += egress.bytes;
        pullRecords += egress.records;
    }
    // Helper function to apply remote changes with field-level conflict resolution
    async function applyRemoteWithConflictResolution(entityType, remoteRecords, table) {
        for (const remote of remoteRecords || []) {
            // Skip recently modified entities (protects against race conditions)
            // Note: We no longer skip entities with pending ops - conflict resolution handles them
            if (isRecentlyModified(remote.id))
                continue;
            // Skip entities that were just processed by realtime (prevents duplicate processing)
            if (wasRecentlyProcessedByRealtime(remote.id))
                continue;
            const local = await table.get(remote.id);
            // Track newest update for cursor
            if (remote.updated_at > newestUpdate)
                newestUpdate = remote.updated_at;
            // If no local entity, just accept remote
            if (!local) {
                await table.put(remote);
                continue;
            }
            // If remote is not newer than local, skip (no conflict possible)
            if (new Date(remote.updated_at) <= new Date(local.updated_at)) {
                continue;
            }
            // Check if we have pending operations for this entity
            const hasPendingOps = pendingEntityIds.has(remote.id);
            if (!hasPendingOps) {
                // No pending ops and remote is newer - simple case, accept remote
                await table.put(remote);
            }
            else {
                // Entity has pending operations - apply field-level conflict resolution
                const pendingOps = await getPendingOpsForEntity(remote.id);
                const resolution = await resolveConflicts(entityType, remote.id, local, remote, pendingOps);
                // Store the merged entity
                await table.put(resolution.mergedEntity);
                // Store conflict history if there were conflicts
                if (resolution.hasConflicts) {
                    await storeConflictHistory(resolution);
                }
            }
        }
    }
    // Log what we're about to apply
    const pullSummary = {};
    for (let i = 0; i < config.tables.length; i++) {
        pullSummary[tableNames[i]] = results[i].data?.length || 0;
    }
    debugLog(`[SYNC] Pulled from server:`, pullSummary);
    // Apply changes to local DB with conflict handling
    const entityTables = config.tables.map(t => db.table(t.dexieTable));
    await db.transaction('rw', [...entityTables, db.table('conflictHistory')], async () => {
        for (let i = 0; i < config.tables.length; i++) {
            const data = results[i].data;
            await applyRemoteWithConflictResolution(tableNames[i], data, db.table(config.tables[i].dexieTable));
        }
    });
    // Update sync cursor (per-user)
    setLastSyncCursor(newestUpdate, userId);
    return { bytes: pullBytes, records: pullRecords };
}
580
// PUSH: Send pending operations to remote.
// Continues until the queue is empty, to catch items added during sync.
// Push errors recorded during the current sync cycle (reset by pushPendingOps).
let pushErrors = [];
584
/**
 * Push all queued local operations to the remote backend.
 *
 * Loops until the sync queue drains (or maxIterations is hit) so items
 * enqueued while a push is in flight are picked up in the same cycle.
 * Before pushing it (1) verifies auth — RLS silently no-ops writes without
 * a valid session — and (2) coalesces redundant queued ops per entity.
 *
 * @returns {Promise<{originalCount: number, coalescedCount: number, actualPushed: number}>}
 *   Queue size before coalescing, ops removed by coalescing, and ops
 *   successfully pushed and dequeued.
 * @throws {Error} When there are pending items but no authenticated user.
 */
async function pushPendingOps() {
    const maxIterations = 10; // Safety limit to prevent infinite loops
    let iterations = 0;
    let actualPushed = 0;
    const db = getDb();
    // Clear previous push errors
    pushErrors = [];
    // Get original count before coalescing
    const originalItems = await getPendingSync();
    const originalCount = originalItems.length;
    // CRITICAL: Pre-flight auth check before attempting to push
    // This catches expired/invalid sessions early, before we try operations that would fail silently
    if (originalCount > 0) {
        const userId = await getCurrentUserId();
        if (!userId) {
            debugError('[SYNC] Auth validation failed before push - session may be expired');
            const authError = {
                message: 'Session expired - please sign in again',
                table: 'auth',
                operation: 'validate',
                entityId: 'session'
            };
            // Record in both the cycle-local list and the UI store before aborting.
            pushErrors.push(authError);
            syncStatusStore.addSyncError({
                ...authError,
                timestamp: new Date().toISOString()
            });
            throw new Error('Authentication required - please sign in again');
        }
    }
    // Coalesce multiple updates to the same entity before pushing
    // This merges e.g. 50 rapid increments into 1 update request
    const coalescedCount = await coalescePendingOps();
    if (coalescedCount > 0) {
        debugLog(`[SYNC] Coalesced ${coalescedCount} redundant operations (${originalCount} -> ${originalCount - coalescedCount})`);
    }
    while (iterations < maxIterations) {
        // Re-read the queue each pass: processing can enqueue or purge items.
        const pendingItems = await getPendingSync();
        if (pendingItems.length === 0)
            break;
        iterations++;
        let processedAny = false;
        for (const item of pendingItems) {
            try {
                // Skip items that were purged from the queue during reconciliation
                // (e.g. singleton ID reconciliation deletes old queued ops)
                if (item.id) {
                    const stillQueued = await db.table('syncQueue').get(item.id);
                    if (!stillQueued) {
                        debugLog(`[SYNC] Skipping purged item: ${item.operationType} ${item.table}/${item.entityId}`);
                        continue;
                    }
                }
                debugLog(`[SYNC] Processing: ${item.operationType} ${item.table}/${item.entityId}`);
                await processSyncItem(item);
                // Only dequeue after the remote write succeeded.
                // NOTE(review): items lacking an id are pushed but never dequeued or
                // counted; relies on maxIterations to bound re-pushes — confirm queue
                // entries always carry an id.
                if (item.id) {
                    await removeSyncItem(item.id);
                    processedAny = true;
                    actualPushed++;
                    debugLog(`[SYNC] Success: ${item.operationType} ${item.table}/${item.entityId}`);
                }
            }
            catch (error) {
                debugError(`[SYNC] Failed: ${item.operationType} ${item.table}/${item.entityId}:`, error);
                // Determine if this is a transient error that will likely succeed on retry
                const transient = isTransientError(error);
                // Only show error in UI if:
                // 1. It's a persistent error (won't fix itself) OR
                // 2. It's a transient error AND this is the last retry attempt (retries >= 3)
                // This prevents momentary error flashes for network hiccups that resolve on retry
                const shouldShowError = !transient || item.retries >= 3;
                if (shouldShowError) {
                    // Capture error details for UI display
                    const errorInfo = {
                        message: extractErrorMessage(error),
                        table: item.table,
                        operation: item.operationType,
                        entityId: item.entityId
                    };
                    pushErrors.push(errorInfo);
                    // Also add to the sync status store for UI
                    syncStatusStore.addSyncError({
                        ...errorInfo,
                        timestamp: new Date().toISOString()
                    });
                }
                // Failed items stay queued; bump their retry counter for backoff.
                if (item.id) {
                    await incrementRetry(item.id);
                }
            }
        }
        // If we didn't process anything (all items in backoff), stop iterating
        if (!processedAny)
            break;
    }
    return { originalCount, coalescedCount, actualPushed };
}
681
// Detect a unique-constraint violation, i.e. the row was already created
// (typically by another device racing us on the same entity).
function isDuplicateKeyError(error) {
    // '23505' = PostgreSQL unique_violation; 'PGRST409' = PostgREST conflict.
    const knownCodes = new Set(['23505', 'PGRST409']);
    if (knownCodes.has(error.code)) {
        return true;
    }
    // Fall back to scanning the message text for compatibility with other
    // backends/wrappers that don't surface a structured code.
    const text = (error.message || '').toLowerCase();
    return ['duplicate', 'unique', 'already exists'].some((needle) => text.includes(needle));
}
693
// Check if error is a "not found" error (item doesn't exist).
// Used to treat deletes of already-removed rows as success.
function isNotFoundError(error) {
    // PostgREST error code for no rows affected/found
    if (error.code === 'PGRST116')
        return true;
    // HTTP 404 - some layers report it as a string code, others numerically
    // via code/status (isTransientError already checks numeric statuses).
    if (error.code === '404' || error.code === 404 || error.status === 404)
        return true;
    // Fallback to message check
    const msg = (error.message || '').toLowerCase();
    return msg.includes('not found') || msg.includes('no rows');
}
705
// Classify an error as transient (will likely succeed on retry) or persistent (won't improve)
// Transient errors should not show UI errors until retries are exhausted
// Persistent errors should show immediately since they require user action
function isTransientError(error) {
    const msg = (error instanceof Error ? error.message : String(error)).toLowerCase();
    // Guard against thrown null/undefined: property access on them throws.
    // Fall back to an empty object so the structured checks below are no-ops.
    const errObj = (error && typeof error === 'object') ? error : {};
    // Network/connectivity issues - transient
    if (msg.includes('fetch') || msg.includes('network') || msg.includes('failed to fetch')) {
        return true;
    }
    if (msg.includes('timeout') || msg.includes('timed out')) {
        return true;
    }
    if (msg.includes('connection') || msg.includes('offline')) {
        return true;
    }
    // Rate limiting - transient (will succeed after backoff)
    if (msg.includes('rate') || msg.includes('limit') || msg.includes('too many')) {
        return true;
    }
    if (errObj.code === '429' || errObj.status === 429) {
        return true;
    }
    // Server errors (5xx) - transient
    if (msg.includes('500') || msg.includes('502') || msg.includes('503') || msg.includes('504')) {
        return true;
    }
    if (errObj.status && errObj.status >= 500 && errObj.status < 600) {
        return true;
    }
    // Service unavailable - transient
    if (msg.includes('unavailable') || msg.includes('temporarily')) {
        return true;
    }
    // Everything else (auth errors, validation errors, etc.) - persistent
    // These require user action and won't fix themselves with retries
    return false;
}
743
// Process a single sync item (intent-based operation format)
// CRITICAL: All operations use .select() to verify they succeeded
// RLS can silently block operations - returning success but affecting 0 rows
/**
 * Execute one queued sync operation against Supabase and reconcile local state.
 *
 * Supported operationType values: 'create' (insert full payload),
 * 'delete' (soft delete via tombstone), 'increment' (pushes the locally
 * computed final value), and 'set' (single- or multi-field update).
 *
 * Every write chains .select('id').maybeSingle(); a null result with no
 * error means RLS blocked the write or the row is missing.
 *
 * @param item Queued op: { table, entityId, operationType, field, value, timestamp }.
 * @throws The raw Supabase error for non-ignorable failures, or an Error
 *   when a write appears to have been silently blocked by RLS.
 */
async function processSyncItem(item) {
    const { table, entityId, operationType, field, value, timestamp } = item;
    const deviceId = getDeviceId();
    const supabase = getSupabase();
    const db = getDb();
    const dexieTable = getDexieTableName(table);
    switch (operationType) {
        case 'create': {
            // Create: insert the full payload with device_id
            const payload = value;
            const { data, error } = await supabase
                .from(table)
                .insert({ id: entityId, ...payload, device_id: deviceId })
                .select('id')
                .maybeSingle();
            // Ignore duplicate key errors (item already synced from another device)
            if (error && isDuplicateKeyError(error)) {
                // For singleton tables, reconcile local ID with server
                if (isSingletonTable(table) && payload.user_id) {
                    const { data: existing } = await supabase
                        .from(table)
                        .select(getColumns(table))
                        .eq('user_id', payload.user_id)
                        .maybeSingle();
                    if (existing) {
                        // Replace local entry: delete old ID, add with server ID
                        await db.table(dexieTable).delete(entityId);
                        await db.table(dexieTable).put(existing);
                        // Purge any queued operations referencing the old ID
                        // (pushPendingOps re-checks the queue and skips purged items)
                        await db.table('syncQueue')
                            .where('entityId')
                            .equals(entityId)
                            .delete();
                    }
                }
                break;
            }
            if (error) {
                throw error;
            }
            // If no error but also no data returned, RLS likely blocked the insert
            if (!data) {
                // Check if it already exists (could be a race condition)
                const { data: existing } = await supabase
                    .from(table)
                    .select('id')
                    .eq('id', entityId)
                    .maybeSingle();
                if (!existing) {
                    throw new Error(`Insert blocked by RLS - please re-authenticate`);
                }
                // Already exists, treat as success
            }
            break;
        }
        case 'delete': {
            // Delete: soft delete with tombstone and device_id
            const { data, error } = await supabase
                .from(table)
                .update({ deleted: true, updated_at: timestamp, device_id: deviceId })
                .eq('id', entityId)
                .select('id')
                .maybeSingle();
            // Ignore "not found" errors - item may already be deleted
            if (error && !isNotFoundError(error)) {
                throw error;
            }
            // If update returned no data, the row may not exist or RLS blocked it
            // For deletes, we treat this as success (already deleted or will be on next sync)
            if (!error && !data) {
                debugLog(`[SYNC] Delete may have been blocked or row missing: ${table}/${entityId}`);
            }
            break;
        }
        case 'increment': {
            // Increment: we need to read current value, add delta, and update
            // This is done atomically by reading from local DB (which has the current state)
            // The value we push is already the final computed value from local
            if (!field) {
                throw new Error('Increment operation requires a field');
            }
            // For increment, the local DB already has the final value after increment
            // We need to read it to get what to push to the server
            const localEntity = await db.table(dexieTable).get(entityId);
            if (!localEntity) {
                // Entity was deleted locally, skip this increment
                debugWarn(`[SYNC] Skipping increment for deleted entity: ${table}/${entityId}`);
                return;
            }
            const currentValue = localEntity[field];
            const updatePayload = {
                [field]: currentValue,
                updated_at: timestamp,
                device_id: deviceId
            };
            // Also sync completed status if this is a goal/progress increment
            if ('completed' in localEntity) {
                updatePayload.completed = localEntity.completed;
            }
            const { data, error } = await supabase
                .from(table)
                .update(updatePayload)
                .eq('id', entityId)
                .select('id')
                .maybeSingle();
            if (error)
                throw error;
            // Check if update actually affected any rows
            if (!data) {
                throw new Error(`Update blocked by RLS or row missing: ${table}/${entityId}`);
            }
            break;
        }
        case 'set': {
            // Set: update the field(s) with the new value(s) and device_id
            let updatePayload;
            if (field) {
                // Single field set
                updatePayload = {
                    [field]: value,
                    updated_at: timestamp,
                    device_id: deviceId
                };
            }
            else {
                // Multi-field set (value is the full payload)
                updatePayload = {
                    ...value,
                    updated_at: timestamp,
                    device_id: deviceId
                };
            }
            const { data, error } = await supabase
                .from(table)
                .update(updatePayload)
                .eq('id', entityId)
                .select('id')
                .maybeSingle();
            if (error)
                throw error;
            // Check if update actually affected any rows
            if (!data) {
                // For singleton tables, the local ID may not match the server.
                // Look up the server's record by user_id and re-apply the update with the correct ID.
                if (isSingletonTable(table)) {
                    const localEntity = await db.table(dexieTable).get(entityId);
                    const userId = localEntity?.user_id;
                    if (userId) {
                        const { data: serverRow } = await supabase
                            .from(table)
                            .select('*')
                            .eq('user_id', userId)
                            .maybeSingle();
                        if (serverRow) {
                            // Apply the update to the correct server row
                            const { error: retryError } = await supabase
                                .from(table)
                                .update(updatePayload)
                                .eq('id', serverRow.id)
                                .select('id')
                                .maybeSingle();
                            // Reconcile local: replace stale ID with server ID
                            // (local reconciliation runs even if the retry errored;
                            // retryError is rethrown only after local state is fixed)
                            await db.table(dexieTable).delete(entityId);
                            // Merge our pending changes into the server row
                            const merged = { ...serverRow, ...updatePayload, id: serverRow.id };
                            await db.table(dexieTable).put(merged);
                            // Purge any remaining queued operations referencing the old ID
                            await db.table('syncQueue')
                                .where('entityId')
                                .equals(entityId)
                                .delete();
                            if (retryError)
                                throw retryError;
                            break;
                        }
                    }
                }
                throw new Error(`Update blocked by RLS or row missing: ${table}/${entityId}`);
            }
            break;
        }
        default:
            throw new Error(`Unknown operation type: ${operationType}`);
    }
}
931
// Pull the most informative human-readable message out of an arbitrary
// thrown value: Error instances, Supabase/PostgreSQL error objects
// ({ message, details, hint, code }), wrapper shapes, or primitives.
function extractErrorMessage(error) {
    if (error instanceof Error) {
        return error.message;
    }
    if (error !== null && typeof error === 'object') {
        const err = error;
        if (typeof err.message === 'string' && err.message) {
            // Append details/hint when present for extra context.
            const parts = [err.message];
            if (typeof err.details === 'string' && err.details) {
                parts.push(` - ${err.details}`);
            }
            if (typeof err.hint === 'string' && err.hint) {
                parts.push(` (${err.hint})`);
            }
            return parts.join('');
        }
        // Wrapper shapes used by some libraries.
        if (typeof err.error === 'string' && err.error) {
            return err.error;
        }
        if (typeof err.description === 'string' && err.description) {
            return err.description;
        }
        // Last resort: serialize the whole object.
        try {
            return JSON.stringify(error);
        }
        catch {
            return '[Unable to parse error]';
        }
    }
    // Primitives (string, number, null, undefined, ...)
    return String(error);
}
971
// Translate a raw error into a short, user-facing status message.
// Non-Error values get a generic fallback; unrecognized Error messages are
// surfaced directly (truncated to 100 chars for the UI).
function parseErrorMessage(error) {
    if (!(error instanceof Error)) {
        return 'An unexpected error occurred';
    }
    const msg = error.message.toLowerCase();
    const matchesAny = (...needles) => needles.some((needle) => msg.includes(needle));
    // Network errors
    if (matchesAny('fetch', 'network', 'failed to fetch')) {
        return 'Network connection lost. Changes saved locally.';
    }
    if (matchesAny('timeout', 'timed out')) {
        return 'Server took too long to respond. Will retry.';
    }
    // Auth errors
    if (matchesAny('jwt', 'token', 'unauthorized', '401')) {
        return 'Session expired. Please sign in again.';
    }
    // Rate limiting
    if (matchesAny('rate', 'limit', '429')) {
        return 'Too many requests. Will retry shortly.';
    }
    // Server errors
    if (matchesAny('500', '502', '503', '504')) {
        return 'Server is temporarily unavailable.';
    }
    // No known category: show the message itself, truncated if long.
    return error.message.length > 100
        ? `${error.message.substring(0, 100)}...`
        : error.message;
}
1002
// Full sync: push first (so our changes are persisted), then pull
// quiet: if true, don't update UI status at all (for background periodic syncs)
/**
 * Run a full sync cycle: push local pending ops first, then pull remote changes.
 *
 * Guards, in order: offline check, pending-auth-validation check, auth check
 * (RLS silently blocks writes without a session), then an atomic sync lock to
 * prevent concurrent cycles. The lock is always released in `finally`.
 *
 * @param {boolean} quiet    When true, skip all UI status updates (background syncs).
 * @param {boolean} skipPull When true, push only (used when realtime is healthy).
 * @returns {Promise<void>} Never rejects; failures are reported via
 *   syncStatusStore (non-quiet only) and debug logs.
 */
export async function runFullSync(quiet = false, skipPull = false) {
    if (typeof navigator === 'undefined' || !navigator.onLine) {
        if (!quiet) {
            syncStatusStore.setStatus('offline');
            syncStatusStore.setSyncMessage("You're offline. Changes will sync when reconnected.");
        }
        return;
    }
    // SECURITY: If we were offline and came back online, auth must be validated first
    // This prevents syncing potentially unauthorized data from an invalid offline session
    if (needsAuthValidation()) {
        debugLog('[SYNC] Waiting for auth validation before syncing (was offline)');
        if (!quiet) {
            syncStatusStore.setStatus('idle');
            syncStatusStore.setSyncMessage('Validating credentials...');
        }
        return;
    }
    // CRITICAL: Validate auth before attempting any sync operations
    // Without valid auth, Supabase RLS silently blocks writes (returns no error but 0 rows affected)
    // This causes the "sync succeeded but nothing synced" bug
    const userId = await getCurrentUserId();
    if (!userId) {
        debugWarn('[SYNC] No authenticated user - cannot sync. RLS would silently block all writes.');
        if (!quiet) {
            syncStatusStore.setStatus('error');
            syncStatusStore.setError('Not signed in', 'Please sign in to sync your data.');
            syncStatusStore.setSyncMessage('Sign in required to sync');
        }
        return;
    }
    // Atomically acquire sync lock to prevent concurrent syncs
    const acquired = await acquireSyncLock();
    if (!acquired)
        return;
    const config = getEngineConfig();
    // Track sync cycle for egress monitoring
    const cycleStart = Date.now();
    const trigger = quiet ? 'periodic' : 'user';
    let pushedItems = 0;
    let cycleEgressBytes = 0;
    let cycleEgressRecords = 0;
    let pushSucceeded = false;
    let pullSucceeded = false;
    try {
        // Only show "syncing" indicator for non-quiet syncs
        if (!quiet) {
            syncStatusStore.setStatus('syncing');
            syncStatusStore.setSyncMessage('Preparing changes...');
        }
        // Push first so local changes are persisted
        // Note: pushPendingOps coalesces before pushing, so actual requests are lower
        const pushStats = await withTimeout(pushPendingOps(), SYNC_OPERATION_TIMEOUT_MS, 'Push pending ops');
        pushedItems = pushStats.actualPushed;
        pushSucceeded = true;
        // EGRESS OPTIMIZATION: Skip pull when realtime is healthy and this is a push-triggered sync
        let pullEgress = { bytes: 0, records: 0 };
        if (skipPull) {
            debugLog('[SYNC] Skipping pull (realtime healthy, push-only mode)');
            pullSucceeded = true;
        }
        else {
            if (!quiet) {
                syncStatusStore.setSyncMessage('Downloading latest data...');
            }
            // Pull remote changes - retry up to 3 times if push succeeded
            // Uses stored cursor to get all changes since last sync
            // Conflict resolution handles our own pushed changes via device_id check
            let pullAttempts = 0;
            const maxPullAttempts = 3;
            let lastPullError = null;
            while (pullAttempts < maxPullAttempts && !pullSucceeded) {
                try {
                    // Don't pass postPushCursor - we want ALL changes since stored cursor
                    // The conflict resolution handles our own pushed changes via device_id check
                    pullEgress = await withTimeout(pullRemoteChanges(), SYNC_OPERATION_TIMEOUT_MS, 'Pull remote changes');
                    pullSucceeded = true;
                }
                catch (pullError) {
                    lastPullError = pullError;
                    pullAttempts++;
                    debugWarn(`[SYNC] Pull attempt ${pullAttempts}/${maxPullAttempts} failed:`, pullError);
                    if (pullAttempts < maxPullAttempts) {
                        // Wait before retry (exponential backoff: 1s, 2s)
                        await new Promise((resolve) => setTimeout(resolve, pullAttempts * 1000));
                    }
                }
            }
            if (!pullSucceeded && lastPullError) {
                throw lastPullError;
            }
        }
        // Store egress for logging
        cycleEgressBytes = pullEgress.bytes;
        cycleEgressRecords = pullEgress.records;
        // Update status only for non-quiet syncs
        if (!quiet) {
            const remaining = await getPendingSync();
            syncStatusStore.setPendingCount(remaining.length);
            // Only show error status if:
            // 1. We have push errors that were deemed serious enough to show, OR
            // 2. Remaining items have been retrying for a while (retries >= 2)
            // This prevents "error" flash for items that will succeed on next retry
            const hasSignificantErrors = pushErrors.length > 0;
            const hasStaleRetries = remaining.some((item) => item.retries >= 2);
            const showErrorStatus = remaining.length > 0 && (hasSignificantErrors || hasStaleRetries);
            syncStatusStore.setStatus(showErrorStatus ? 'error' : 'idle');
            syncStatusStore.setLastSyncTime(new Date().toISOString());
            // Update message based on actual error state
            if (showErrorStatus) {
                syncStatusStore.setSyncMessage(`${remaining.length} change${remaining.length === 1 ? '' : 's'} failed to sync`);
                // Show error details
                if (hasSignificantErrors) {
                    // Show the latest specific error
                    const latestError = pushErrors[pushErrors.length - 1];
                    syncStatusStore.setError(`Failed to sync ${latestError.table} (${latestError.operation})`, latestError.message);
                }
                else {
                    // Items in retry backoff - no specific errors this cycle
                    // Show pending retry info instead of clearing error details
                    const retryInfo = remaining
                        .map((item) => `${item.table} (${item.operationType})`)
                        .slice(0, 3);
                    const moreCount = remaining.length - retryInfo.length;
                    const details = moreCount > 0 ? `${retryInfo.join(', ')} and ${moreCount} more` : retryInfo.join(', ');
                    syncStatusStore.setError(`${remaining.length} change${remaining.length === 1 ? '' : 's'} pending retry`, `Affected: ${details}. Will retry automatically.`);
                }
            }
            else if (remaining.length > 0) {
                // Items exist but don't show error status yet (still early in retry cycle)
                // Show a neutral "syncing" message instead of error
                syncStatusStore.setSyncMessage('Syncing changes...');
                syncStatusStore.setError(null);
            }
            else {
                syncStatusStore.setSyncMessage('Everything is synced!');
                syncStatusStore.setError(null);
            }
        }
        // Notify stores that sync is complete so they can refresh from local
        notifySyncComplete();
        lastSuccessfulSyncTimestamp = Date.now();
    }
    catch (error) {
        debugError('Sync failed:', error);
        // Only show errors for user-initiated syncs (non-quiet)
        // Background syncs fail silently - they'll retry automatically
        if (!quiet) {
            const friendlyMessage = parseErrorMessage(error);
            const rawMessage = extractErrorMessage(error);
            syncStatusStore.setStatus('error');
            syncStatusStore.setError(friendlyMessage, rawMessage);
            syncStatusStore.setSyncMessage(friendlyMessage);
        }
        // If push succeeded but pull failed, still notify so UI refreshes with pushed data
        if (pushSucceeded && !pullSucceeded) {
            notifySyncComplete();
        }
    }
    finally {
        // Log sync cycle stats for egress monitoring
        logSyncCycle({
            trigger,
            pushedItems,
            pulledTables: pullSucceeded && !skipPull ? config.tables.length : 0,
            pulledRecords: cycleEgressRecords,
            egressBytes: cycleEgressBytes,
            durationMs: Date.now() - cycleStart
        });
        releaseSyncLock();
    }
}
1176
/**
 * Reconcile orphaned local changes with remote.
 *
 * After re-login, local IndexedDB may have items that were modified offline
 * but whose sync queue entries were lost (e.g. cleared by a previous bug).
 * This scans all tables for items modified after the last sync cursor and
 * re-queues them so they get pushed on the next sync.
 *
 * Only runs when the sync queue is empty (otherwise normal sync handles it).
 */
async function reconcileLocalWithRemote() {
    const db = getDb();
    const config = getEngineConfig();
    // A non-empty queue means normal sync will take care of these changes.
    const queuedCount = await db.table('syncQueue').count();
    if (queuedCount > 0) {
        return 0;
    }
    const userId = await getCurrentUserId();
    if (!userId) {
        return 0;
    }
    const cursor = getLastSyncCursor(userId);
    let requeued = 0;
    for (const tableConfig of config.tables) {
        const rows = await db.table(tableConfig.dexieTable).toArray();
        // Anything touched after the cursor never made it to the server.
        const orphans = rows.filter((row) => row.updated_at && row.updated_at > cursor);
        for (const row of orphans) {
            const { id: _unused, ...payload } = row;
            await queueSyncOperation({
                table: tableConfig.supabaseName,
                entityId: row.id,
                operationType: row.deleted ? 'delete' : 'create',
                value: row.deleted ? undefined : payload,
            });
            requeued++;
        }
    }
    if (requeued > 0) {
        debugLog(`[SYNC] Reconciliation: re-queued ${requeued} orphaned items for sync`);
    }
    return requeued;
}
1218
// Initial hydration: if local DB is empty, pull everything from remote
/**
 * Seed an empty local DB with a full pull from Supabase.
 *
 * If local data already exists, this releases the lock and falls back to
 * reconciliation + a normal full sync instead of re-pulling everything.
 * The sync lock is held for the whole pull; note the `hasLocalData` branch
 * releases it manually before returning (the `finally` only covers the pull).
 */
async function hydrateFromRemote() {
    if (typeof navigator === 'undefined' || !navigator.onLine)
        return;
    // Atomically acquire sync lock to prevent concurrent syncs/hydrations
    const acquired = await acquireSyncLock();
    if (!acquired)
        return;
    const config = getEngineConfig();
    const db = config.db;
    const supabase = config.supabase;
    // Get user ID for sync cursor isolation
    const userId = await getCurrentUserId();
    // Abort if no authenticated user (can't hydrate without auth)
    if (!userId) {
        releaseSyncLock();
        return;
    }
    // Mark that we've attempted hydration (even if local has data)
    _hasHydrated = true;
    // Check if local DB has any data
    let hasLocalData = false;
    for (const table of config.tables) {
        const count = await db.table(table.dexieTable).count();
        if (count > 0) {
            hasLocalData = true;
            break;
        }
    }
    if (hasLocalData) {
        // Local has data, release lock and do a normal sync
        releaseSyncLock();
        // Check for orphaned changes (local data modified after last sync, but empty queue)
        await reconcileLocalWithRemote();
        await runFullSync();
        return;
    }
    // Local is empty, do a full pull (we already hold the lock)
    syncStatusStore.setStatus('syncing');
    syncStatusStore.setSyncMessage('Loading your data...');
    try {
        // Pull all non-deleted records from each table (explicit columns for egress optimization)
        // Filter deleted = false OR deleted IS NULL to exclude tombstones
        const results = await Promise.all(config.tables.map(table => supabase.from(table.supabaseName).select(table.columns).or('deleted.is.null,deleted.eq.false')));
        // Check for errors
        for (const r of results) {
            if (r.error)
                throw r.error;
        }
        // Track egress for initial hydration
        for (let i = 0; i < config.tables.length; i++) {
            trackEgress(config.tables[i].supabaseName, results[i].data);
        }
        let totalRecords = 0;
        for (const r of results) {
            totalRecords += r.data?.length || 0;
        }
        debugLog(`[SYNC] Initial hydration: ${totalRecords} records (${formatBytes(egressStats.totalBytes)})`);
        // Calculate the max updated_at from all pulled data to use as sync cursor
        // This prevents missing changes that happened during hydration
        let maxUpdatedAt = '1970-01-01T00:00:00.000Z';
        for (const r of results) {
            for (const item of (r.data || [])) {
                const updatedAt = item.updated_at;
                if (updatedAt && updatedAt > maxUpdatedAt) {
                    maxUpdatedAt = updatedAt;
                }
            }
        }
        // Store everything locally (single transaction across all entity tables)
        const entityTables = config.tables.map(t => db.table(t.dexieTable));
        await db.transaction('rw', entityTables, async () => {
            for (let i = 0; i < config.tables.length; i++) {
                const data = results[i].data;
                if (data && data.length > 0) {
                    await db.table(config.tables[i].dexieTable).bulkPut(data);
                }
            }
        });
        // Set sync cursor to MAX of pulled data timestamps (prevents missing concurrent changes)
        setLastSyncCursor(maxUpdatedAt, userId);
        syncStatusStore.setStatus('idle');
        syncStatusStore.setLastSyncTime(new Date().toISOString());
        syncStatusStore.setSyncMessage('Everything is synced!');
        syncStatusStore.setError(null);
        // Notify stores
        notifySyncComplete();
    }
    catch (error) {
        debugError('Hydration failed:', error);
        const friendlyMessage = parseErrorMessage(error);
        const rawMessage = extractErrorMessage(error);
        syncStatusStore.setStatus('error');
        syncStatusStore.setError(friendlyMessage, rawMessage);
        syncStatusStore.setSyncMessage(friendlyMessage);
        // Reset _hasHydrated so next read attempt can retry hydration
        _hasHydrated = false;
    }
    finally {
        releaseSyncLock();
    }
}
1320
// ============================================================
// TOMBSTONE CLEANUP
// ============================================================
// Clean up old tombstones (deleted records) from local DB AND Supabase
// This prevents indefinite accumulation of soft-deleted records
// Minimum time between server-side sweeps; cleanupServerTombstones no-ops
// (unless forced) when called again within this window.
const CLEANUP_INTERVAL_MS = 86400000; // 24 hours - only run server cleanup once per day
// Epoch ms of the last server-side sweep; 0 = never ran in this session.
let lastServerCleanup = 0;
1327
// Remove soft-deleted rows older than the retention window from the
// LOCAL IndexedDB so tombstones don't accumulate indefinitely.
// Returns the number of rows removed; failures are logged, never thrown.
async function cleanupLocalTombstones() {
    const maxAgeDays = getTombstoneMaxAgeDays();
    const cutoff = new Date();
    cutoff.setDate(cutoff.getDate() - maxAgeDays);
    const cutoffIso = cutoff.toISOString();
    const config = getEngineConfig();
    const db = config.db;
    let removed = 0;
    try {
        const stores = config.tables.map((t) => db.table(t.dexieTable));
        // One transaction across every entity table keeps the sweep atomic.
        await db.transaction('rw', stores, async () => {
            for (const tableConfig of config.tables) {
                const isExpiredTombstone = (item) => item.deleted === true && item.updated_at < cutoffIso;
                const count = await db.table(tableConfig.dexieTable).filter(isExpiredTombstone).delete();
                if (count > 0) {
                    debugLog(`[Tombstone] Cleaned ${count} old records from local ${tableConfig.dexieTable}`);
                    removed += count;
                }
            }
        });
        if (removed > 0) {
            debugLog(`[Tombstone] Local cleanup complete: ${removed} total records removed`);
        }
    }
    catch (error) {
        // Best-effort maintenance: log and continue rather than breaking sync.
        debugError('[Tombstone] Failed to cleanup local tombstones:', error);
    }
    return removed;
}
1359
// Hard-delete expired tombstones from SUPABASE. Rate-limited to one run per
// CLEANUP_INTERVAL_MS (24h) unless `force` is true; skipped while offline.
// Returns the number of rows removed; failures are logged, never thrown.
async function cleanupServerTombstones(force = false) {
    const now = Date.now();
    // Respect the daily throttle unless explicitly forced.
    if (!force && now - lastServerCleanup < CLEANUP_INTERVAL_MS) {
        return 0;
    }
    if (typeof navigator === 'undefined' || !navigator.onLine) {
        return 0;
    }
    const maxAgeDays = getTombstoneMaxAgeDays();
    const cutoff = new Date();
    cutoff.setDate(cutoff.getDate() - maxAgeDays);
    const cutoffIso = cutoff.toISOString();
    const config = getEngineConfig();
    const supabase = config.supabase;
    let removed = 0;
    try {
        for (const tableConfig of config.tables) {
            // .select('id') makes the API return the deleted rows so we can count them.
            const { data, error } = await supabase
                .from(tableConfig.supabaseName)
                .delete()
                .eq('deleted', true)
                .lt('updated_at', cutoffIso)
                .select('id');
            if (error) {
                debugError(`[Tombstone] Failed to cleanup ${tableConfig.supabaseName}:`, error.message);
            }
            else if (data && data.length > 0) {
                debugLog(`[Tombstone] Cleaned ${data.length} old records from server ${tableConfig.supabaseName}`);
                removed += data.length;
            }
        }
        // Record the sweep time even when nothing qualified for removal.
        lastServerCleanup = now;
        if (removed > 0) {
            debugLog(`[Tombstone] Server cleanup complete: ${removed} total records removed`);
        }
    }
    catch (error) {
        debugError('[Tombstone] Failed to cleanup server tombstones:', error);
    }
    return removed;
}
1401
// Combined cleanup function
/**
 * Runs both tombstone sweeps: local (IndexedDB) and server (Supabase).
 * The two sweeps touch independent stores and each catches its own errors
 * (returning 0 on failure), so they are safe to run concurrently.
 *
 * @returns {Promise<{local: number, server: number}>} records removed per side
 */
async function cleanupOldTombstones() {
    // Independent stores: run in parallel instead of serially awaiting each.
    const [local, server] = await Promise.all([
        cleanupLocalTombstones(),
        cleanupServerTombstones(),
    ]);
    return { local, server };
}
1407
// Debug function to check tombstone status and manually trigger cleanup
/**
 * Logs a report of tombstoned (soft-deleted) records in both IndexedDB and
 * Supabase: per-table totals, how many are past the cleanup cutoff, and the
 * oldest tombstone's timestamp. Optionally triggers the cleanup itself.
 *
 * @param {{ cleanup?: boolean, force?: boolean }} [options]
 *   cleanup - when true, runs local + server cleanup after reporting.
 *   force   - when true, bypasses the server cleanup's 24h throttle.
 */
async function debugTombstones(options) {
    const tombstoneMaxAgeDays = getTombstoneMaxAgeDays();
    const cutoffDate = new Date();
    cutoffDate.setDate(cutoffDate.getDate() - tombstoneMaxAgeDays);
    const cutoffStr = cutoffDate.toISOString();
    const config = getEngineConfig();
    const db = config.db;
    const supabase = config.supabase;
    debugLog('=== TOMBSTONE DEBUG ===');
    debugLog(`Cutoff date (${tombstoneMaxAgeDays} days ago): ${cutoffStr}`);
    debugLog(`Last server cleanup: ${lastServerCleanup ? new Date(lastServerCleanup).toISOString() : 'Never'}`);
    debugLog('');
    // Check local tombstones
    debugLog('--- LOCAL TOMBSTONES (IndexedDB) ---');
    let totalLocalTombstones = 0;
    let totalLocalEligible = 0;
    for (const tableConfig of config.tables) {
        const table = db.table(tableConfig.dexieTable);
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const allDeleted = await table.filter((item) => item.deleted === true).toArray();
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const eligible = allDeleted.filter((item) => item.updated_at < cutoffStr);
        if (allDeleted.length > 0) {
            debugLog(` ${tableConfig.dexieTable}: ${allDeleted.length} tombstones (${eligible.length} eligible for cleanup)`);
            totalLocalTombstones += allDeleted.length;
            totalLocalEligible += eligible.length;
            // Show oldest tombstone (allDeleted is guaranteed non-empty here)
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            const oldest = allDeleted.reduce((a, b) => (a.updated_at < b.updated_at ? a : b));
            debugLog(` Oldest: ${oldest.updated_at}`);
        }
    }
    debugLog(` TOTAL: ${totalLocalTombstones} tombstones (${totalLocalEligible} eligible)`);
    debugLog('');
    // Check server tombstones (if online).
    // Guard navigator for non-browser contexts, matching cleanupServerTombstones.
    if (typeof navigator !== 'undefined' && navigator.onLine) {
        debugLog('--- SERVER TOMBSTONES (Supabase) ---');
        let totalServerTombstones = 0;
        let totalServerEligible = 0;
        for (const tableConfig of config.tables) {
            const { data: allDeleted, error } = await supabase
                .from(tableConfig.supabaseName)
                .select('id,updated_at')
                .eq('deleted', true);
            if (error) {
                debugLog(` ${tableConfig.supabaseName}: ERROR - ${error.message}`);
                continue;
            }
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            const eligible = (allDeleted || []).filter((item) => item.updated_at < cutoffStr);
            if (allDeleted && allDeleted.length > 0) {
                debugLog(` ${tableConfig.supabaseName}: ${allDeleted.length} tombstones (${eligible.length} eligible for cleanup)`);
                totalServerTombstones += allDeleted.length;
                totalServerEligible += eligible.length;
                // Show oldest tombstone
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                const oldest = allDeleted.reduce((a, b) => (a.updated_at < b.updated_at ? a : b));
                debugLog(` Oldest: ${oldest.updated_at}`);
            }
        }
        debugLog(` TOTAL: ${totalServerTombstones} tombstones (${totalServerEligible} eligible)`);
    }
    else {
        debugLog('--- SERVER TOMBSTONES: Offline, skipping ---');
    }
    debugLog('');
    // Run cleanup if requested
    if (options?.cleanup) {
        debugLog('--- RUNNING CLEANUP ---');
        const localDeleted = await cleanupLocalTombstones();
        // force=true bypasses the 24h server-cleanup throttle.
        const serverDeleted = await cleanupServerTombstones(Boolean(options?.force));
        debugLog(`Cleanup complete: ${localDeleted} local, ${serverDeleted} server records removed`);
    }
    else {
        debugLog('To run cleanup, call: debugTombstones({ cleanup: true })');
        debugLog('To force server cleanup (bypass 24h limit): debugTombstones({ cleanup: true, force: true })');
    }
    debugLog('========================');
}
1491
// ============================================================
// LIFECYCLE
// ============================================================
// Store cleanup functions for realtime subscriptions
// Unsubscribe callback returned by onRealtimeDataUpdate(); null while inactive.
let realtimeDataUnsubscribe = null;
// Unsubscribe callback returned by onConnectionStateChange(); null while inactive.
let realtimeConnectionUnsubscribe = null;
// Handle returned by supabase.auth.onAuthStateChange(); torn down via
// .data.subscription.unsubscribe(). Null while inactive.
let authStateUnsubscribe = null;
1498
/**
 * Boots the offline-first sync engine in the browser. No-op when `window` is
 * undefined (SSR).
 *
 * Side effects: tears down any listeners/timers from a previous call (so the
 * function is safe to call repeatedly), subscribes to Supabase auth state
 * changes, registers isOnline connect/disconnect handlers, installs window
 * 'online'/'offline' and document 'visibilitychange' listeners, starts
 * realtime subscriptions, schedules the periodic sync + cleanup interval and
 * a stuck-sync watchdog, kicks off initial hydration/cleanup, and — in debug
 * mode — exposes console utilities on `window.__<prefix>Sync`.
 */
export async function startSyncEngine() {
    if (typeof window === 'undefined')
        return;
    const supabase = getSupabase();
    // Clean up any existing listeners and intervals first (prevents duplicates if called multiple times)
    if (handleOnlineRef) {
        window.removeEventListener('online', handleOnlineRef);
    }
    if (handleOfflineRef) {
        window.removeEventListener('offline', handleOfflineRef);
    }
    if (handleVisibilityChangeRef) {
        document.removeEventListener('visibilitychange', handleVisibilityChangeRef);
    }
    if (syncInterval) {
        clearInterval(syncInterval);
        syncInterval = null;
    }
    if (syncTimeout) {
        clearTimeout(syncTimeout);
        syncTimeout = null;
    }
    if (visibilityDebounceTimeout) {
        clearTimeout(visibilityDebounceTimeout);
        visibilityDebounceTimeout = null;
    }
    if (watchdogInterval) {
        clearInterval(watchdogInterval);
        watchdogInterval = null;
    }
    if (realtimeDataUnsubscribe) {
        realtimeDataUnsubscribe();
        realtimeDataUnsubscribe = null;
    }
    if (realtimeConnectionUnsubscribe) {
        realtimeConnectionUnsubscribe();
        realtimeConnectionUnsubscribe = null;
    }
    if (authStateUnsubscribe) {
        authStateUnsubscribe.data.subscription.unsubscribe();
        authStateUnsubscribe = null;
    }
    // Initialize debug window utilities now that config is available
    initDebugWindowUtilities();
    // Initialize network status monitoring (idempotent)
    isOnline.init();
    // Subscribe to auth state changes - critical for iOS PWA where sessions can expire
    authStateUnsubscribe = supabase.auth.onAuthStateChange(async (event, session) => {
        debugLog(`[SYNC] Auth state change: ${event}`);
        if (event === 'SIGNED_OUT') {
            // User signed out - stop realtime and show error
            debugWarn('[SYNC] User signed out - stopping sync');
            stopRealtimeSubscriptions();
            syncStatusStore.setStatus('error');
            syncStatusStore.setError('Signed out', 'Please sign in to sync your data.');
        }
        else if (event === 'SIGNED_IN' || event === 'TOKEN_REFRESHED') {
            // User signed in or token refreshed - restart sync
            debugLog('[SYNC] Auth restored - resuming sync');
            if (navigator.onLine) {
                // Clear any auth errors
                syncStatusStore.reset();
                // Restart realtime
                if (session?.user?.id) {
                    startRealtimeSubscriptions(session.user.id);
                }
                // Run a sync to push any pending changes
                runFullSync(false);
            }
        }
        // Delegate to app-level callback
        const config = getEngineConfig();
        if (config.onAuthStateChange) {
            config.onAuthStateChange(event, session);
        }
    });
    // Register disconnect handler: create offline session from cached credentials
    isOnline.onDisconnect(async () => {
        debugLog('[Engine] Gone offline - creating offline session if credentials cached');
        try {
            const currentSession = await getSession();
            if (!currentSession?.user?.id) {
                debugLog('[Engine] No active Supabase session - skipping offline session creation');
                return;
            }
            const credentials = await getOfflineCredentials();
            if (!credentials) {
                debugLog('[Engine] No cached credentials - skipping offline session creation');
                return;
            }
            // SECURITY: Only create offline session if credentials match current user
            if (credentials.userId !== currentSession.user.id || credentials.email !== currentSession.user.email) {
                debugWarn('[Engine] Cached credentials do not match current user - skipping offline session creation');
                return;
            }
            // Don't overwrite a still-valid offline session.
            const existingSession = await getValidOfflineSession();
            if (!existingSession) {
                await createOfflineSession(credentials.userId);
                debugLog('[Engine] Offline session created from cached credentials');
            }
        }
        catch (e) {
            debugError('[Engine] Failed to create offline session:', e);
        }
    });
    // Register reconnect handler: re-validate credentials with Supabase
    isOnline.onReconnect(async () => {
        debugLog('[Engine] Back online - validating credentials');
        const config = getEngineConfig();
        try {
            // Re-validate with Supabase with 15s timeout
            const timeoutPromise = new Promise((resolve) => setTimeout(() => resolve(null), 15000));
            const validationPromise = (async () => {
                const { data: { user }, error } = await getSupabase().auth.getUser();
                if (error || !user)
                    return null;
                return user;
            })();
            // Whichever settles first wins: a validated user, or null on timeout/failure.
            const user = await Promise.race([validationPromise, timeoutPromise]);
            if (user) {
                markAuthValidated();
                debugLog('[Engine] Auth validated on reconnect');
                // Trigger sync after successful auth validation
                runFullSync(false);
            }
            else {
                debugWarn('[Engine] Auth validation failed on reconnect');
                if (config.onAuthKicked) {
                    // Stop engine and clear data
                    await clearPendingSyncQueue();
                    config.onAuthKicked('Session expired. Please sign in again.');
                }
            }
        }
        catch (e) {
            debugError('[Engine] Reconnect auth check failed:', e);
            if (config.onAuthKicked) {
                config.onAuthKicked('Failed to verify session. Please sign in again.');
            }
        }
    });
    // Reset sync status to clean state (clears any stale error from previous session)
    // This prevents error flash when navigating back after a previous sync failure
    syncStatusStore.reset();
    // IMPORTANT: If starting while offline, mark that auth validation is needed
    // This ensures we don't attempt to sync until credentials are validated on reconnect
    // Fixes race condition where sync engine's 'online' handler fires before auth check
    if (!navigator.onLine) {
        markOffline();
    }
    // Handle online event - run sync and start realtime when connection restored
    handleOnlineRef = async () => {
        // EGRESS OPTIMIZATION: Skip sync if last successful sync was recent
        // iOS PWA triggers frequent network transitions - avoid redundant full syncs
        const timeSinceLastSync = Date.now() - lastSuccessfulSyncTimestamp;
        if (timeSinceLastSync < getOnlineReconnectCooldownMs()) {
            debugLog(`[SYNC] Skipping online-reconnect sync (last sync ${Math.round(timeSinceLastSync / 1000)}s ago)`);
        }
        else {
            runFullSync(false);
        }
        // Always restart realtime subscriptions regardless of cooldown
        const userId = await getCurrentUserId();
        if (userId) {
            startRealtimeSubscriptions(userId);
        }
    };
    window.addEventListener('online', handleOnlineRef);
    // Handle offline event - immediately update status indicator and mark for auth validation
    handleOfflineRef = () => {
        markOffline(); // Mark that auth needs validation when we come back online
        syncStatusStore.setStatus('offline');
        syncStatusStore.setSyncMessage("You're offline. Changes will sync when reconnected.");
        // Pause realtime - stops reconnection attempts until we come back online
        pauseRealtime();
    };
    window.addEventListener('offline', handleOfflineRef);
    // Track visibility and sync when returning to tab (with smart timing)
    handleVisibilityChangeRef = () => {
        const wasHidden = !isTabVisible;
        isTabVisible = !document.hidden;
        syncStatusStore.setTabVisible(isTabVisible);
        // Track when tab becomes hidden
        if (!isTabVisible) {
            tabHiddenAt = Date.now();
            return;
        }
        // If tab just became visible, check if we should sync
        if (wasHidden && isTabVisible && navigator.onLine) {
            // Only sync if user was away for > configured minutes AND realtime is not healthy
            // If realtime is connected, we're already up-to-date
            const awayDuration = tabHiddenAt ? Date.now() - tabHiddenAt : 0;
            tabHiddenAt = null;
            if (awayDuration < getVisibilitySyncMinAwayMs()) {
                // User was only away briefly, skip sync
                return;
            }
            // Skip sync if realtime is healthy (we're already up-to-date)
            if (isRealtimeHealthy()) {
                return;
            }
            // Clear any pending visibility sync
            if (visibilityDebounceTimeout) {
                clearTimeout(visibilityDebounceTimeout);
            }
            // Debounce to prevent rapid syncs when user quickly switches tabs
            visibilityDebounceTimeout = setTimeout(() => {
                visibilityDebounceTimeout = null;
                runFullSync(true); // Quiet - no error shown if it fails
            }, VISIBILITY_SYNC_DEBOUNCE_MS);
        }
    };
    document.addEventListener('visibilitychange', handleVisibilityChangeRef);
    // Set initial visibility state
    isTabVisible = !document.hidden;
    syncStatusStore.setTabVisible(isTabVisible);
    // Setup realtime subscriptions
    const userId = await getCurrentUserId();
    if (userId && navigator.onLine) {
        // Subscribe to realtime data updates - refresh stores when remote changes arrive
        realtimeDataUnsubscribe = onRealtimeDataUpdate((table, entityId) => {
            debugLog(`[SYNC] Realtime update received: ${table}/${entityId} - refreshing stores`);
            // Notify stores to refresh from local DB
            notifySyncComplete();
        });
        // Subscribe to realtime connection state changes
        realtimeConnectionUnsubscribe = onConnectionStateChange((connectionState) => {
            // Update sync store with realtime connection state
            syncStatusStore.setRealtimeState(connectionState);
            // Note: 'error' state means max reconnect attempts exhausted
            // Polling will automatically pick up the slack (periodic sync runs when realtime unhealthy)
        });
        // Start realtime subscriptions
        startRealtimeSubscriptions(userId);
    }
    // Start periodic sync (quiet mode - don't show indicator unless needed)
    // Reduced frequency when realtime is healthy
    syncInterval = setInterval(async () => {
        // Only run periodic sync if tab is visible and online
        // Skip if realtime is healthy (reduces egress significantly)
        if (navigator.onLine && isTabVisible && !isRealtimeHealthy()) {
            runFullSync(true); // Quiet background sync
        }
        // Cleanup old tombstones, conflict history, failed sync items, and recently modified cache
        await cleanupOldTombstones();
        await cleanupConflictHistory();
        cleanupRecentlyModified();
        cleanupRealtimeTracking();
        const failedResult = await cleanupFailedItems();
        // Notify user if items permanently failed
        if (failedResult.count > 0) {
            syncStatusStore.setStatus('error');
            syncStatusStore.setError(`${failedResult.count} change(s) could not be synced and were discarded.`, `Affected: ${failedResult.tables.join(', ')}`);
            syncStatusStore.setSyncMessage(`${failedResult.count} change(s) failed to sync`);
        }
    }, getSyncIntervalMs());
    // Initial sync: hydrate if empty, otherwise push pending
    if (navigator.onLine) {
        hydrateFromRemote();
    }
    // Run initial cleanup (fire-and-forget; each cleanup handles its own errors)
    cleanupOldTombstones();
    cleanupConflictHistory();
    cleanupRealtimeTracking();
    cleanupFailedItems().then((failedResult) => {
        if (failedResult.count > 0) {
            syncStatusStore.setStatus('error');
            syncStatusStore.setError(`${failedResult.count} change(s) could not be synced and were discarded.`, `Affected: ${failedResult.tables.join(', ')}`);
        }
    });
    // Watchdog: detect stuck syncs and auto-retry
    if (watchdogInterval) {
        clearInterval(watchdogInterval);
    }
    watchdogInterval = setInterval(() => {
        // If the sync lock has been held for too long, force-release and retry
        if (lockAcquiredAt && Date.now() - lockAcquiredAt > SYNC_LOCK_TIMEOUT_MS) {
            debugWarn(`[SYNC] Watchdog: sync lock stuck for ${Math.round((Date.now() - lockAcquiredAt) / 1000)}s - force-releasing and retrying`);
            releaseSyncLock();
            syncStatusStore.setStatus('idle');
            // Auto-retry after force-release
            if (navigator.onLine) {
                runFullSync(true);
            }
        }
    }, WATCHDOG_INTERVAL_MS);
    // Expose debug utilities to window for console access
    if (typeof window !== 'undefined' && isDebugMode()) {
        const prefix = getPrefix();
        const supabase = getSupabase();
        window[`__${prefix}Tombstones`] = debugTombstones;
        // Sync debug tools: window.__<prefix>Sync.forceFullSync(), .resetSyncCursor(), etc.
        window[`__${prefix}Sync`] = {
            forceFullSync,
            resetSyncCursor,
            sync: () => runFullSync(false),
            getStatus: () => ({
                cursor: typeof localStorage !== 'undefined'
                    ? localStorage.getItem('lastSyncCursor') ||
                        Object.entries(localStorage)
                            .filter(([k]) => k.startsWith('lastSyncCursor_'))
                            .map(([k, v]) => ({ [k]: v }))[0]
                    : 'N/A',
                pendingOps: getPendingSync().then((ops) => ops.length)
            }),
            checkConnection: async () => {
                try {
                    const config = getEngineConfig();
                    const firstTable = config.tables[0]?.supabaseName;
                    if (!firstTable)
                        return { connected: false, error: 'No tables configured' };
                    const { data, error } = await supabase.from(firstTable).select('id').limit(1);
                    if (error)
                        return { connected: false, error: error.message };
                    return { connected: true, records: data?.length || 0 };
                }
                catch (e) {
                    return { connected: false, error: String(e) };
                }
            },
            realtimeStatus: () => ({
                state: getConnectionState(),
                healthy: isRealtimeHealthy()
            })
        };
        debugLog(`[SYNC] Debug utilities available at window.__${prefix}Sync`);
    }
}
1826
/**
 * Tears down everything startSyncEngine() installed: watchdog and sync
 * timers, window/document event listeners, realtime/auth subscription
 * callbacks, and the sync lock. Safe to call when the engine is not running;
 * no-op when `window` is undefined (SSR).
 */
export async function stopSyncEngine() {
    if (typeof window === 'undefined')
        return;
    // Halt the stuck-sync watchdog.
    if (watchdogInterval != null) {
        clearInterval(watchdogInterval);
        watchdogInterval = null;
    }
    // Detach DOM listeners so repeated start/stop cycles cannot leak handlers.
    if (handleOnlineRef != null) {
        window.removeEventListener('online', handleOnlineRef);
        handleOnlineRef = null;
    }
    if (handleOfflineRef != null) {
        window.removeEventListener('offline', handleOfflineRef);
        handleOfflineRef = null;
    }
    if (handleVisibilityChangeRef != null) {
        document.removeEventListener('visibilitychange', handleVisibilityChangeRef);
        handleVisibilityChangeRef = null;
    }
    // Drop the realtime subscription callbacks.
    if (realtimeDataUnsubscribe != null) {
        realtimeDataUnsubscribe();
        realtimeDataUnsubscribe = null;
    }
    if (realtimeConnectionUnsubscribe != null) {
        realtimeConnectionUnsubscribe();
        realtimeConnectionUnsubscribe = null;
    }
    // Unhook the Supabase auth-state listener.
    if (authStateUnsubscribe != null) {
        authStateUnsubscribe.data.subscription.unsubscribe();
        authStateUnsubscribe = null;
    }
    // Shut down the realtime channels themselves.
    await stopRealtimeSubscriptions();
    // Cancel any remaining scheduled sync work.
    if (syncTimeout != null) {
        clearTimeout(syncTimeout);
        syncTimeout = null;
    }
    if (syncInterval != null) {
        clearInterval(syncInterval);
        syncInterval = null;
    }
    if (visibilityDebounceTimeout != null) {
        clearTimeout(visibilityDebounceTimeout);
        visibilityDebounceTimeout = null;
    }
    // Release the sync lock and force a fresh hydration on the next start.
    releaseSyncLock();
    _hasHydrated = false;
}
1877
// Clear local cache (for logout)
/**
 * Wipes every local Dexie table (entity data plus syncQueue/conflictHistory)
 * in one read-write transaction, removes the user's sync cursor from
 * localStorage, and resets the hydration flag.
 */
export async function clearLocalCache() {
    const { db, tables } = getEngineConfig();
    // Capture the user id before clearing so their per-user cursor can be removed.
    const userId = await getCurrentUserId();
    const entityTables = tables.map((t) => db.table(t.dexieTable));
    const metaTables = [db.table('syncQueue'), db.table('conflictHistory')];
    const allTables = [...entityTables, ...metaTables];
    // Clear entity tables first, then the sync metadata, atomically.
    await db.transaction('rw', allTables, async () => {
        for (const table of allTables) {
            await table.clear();
        }
    });
    // Reset sync cursor (user-specific) and hydration flag
    if (typeof localStorage !== 'undefined') {
        // Remove user-specific cursor if we have userId
        if (userId) {
            localStorage.removeItem(`lastSyncCursor_${userId}`);
        }
        // Also remove legacy cursor for cleanup
        localStorage.removeItem('lastSyncCursor');
    }
    _hasHydrated = false;
}
1903
+ //# sourceMappingURL=engine.js.map