stellar-drive 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +607 -0
- package/dist/actions/remoteChange.d.ts +204 -0
- package/dist/actions/remoteChange.d.ts.map +1 -0
- package/dist/actions/remoteChange.js +424 -0
- package/dist/actions/remoteChange.js.map +1 -0
- package/dist/actions/truncateTooltip.d.ts +56 -0
- package/dist/actions/truncateTooltip.d.ts.map +1 -0
- package/dist/actions/truncateTooltip.js +312 -0
- package/dist/actions/truncateTooltip.js.map +1 -0
- package/dist/auth/crypto.d.ts +41 -0
- package/dist/auth/crypto.d.ts.map +1 -0
- package/dist/auth/crypto.js +50 -0
- package/dist/auth/crypto.js.map +1 -0
- package/dist/auth/deviceVerification.d.ts +283 -0
- package/dist/auth/deviceVerification.d.ts.map +1 -0
- package/dist/auth/deviceVerification.js +575 -0
- package/dist/auth/deviceVerification.js.map +1 -0
- package/dist/auth/displayUtils.d.ts +98 -0
- package/dist/auth/displayUtils.d.ts.map +1 -0
- package/dist/auth/displayUtils.js +145 -0
- package/dist/auth/displayUtils.js.map +1 -0
- package/dist/auth/loginGuard.d.ts +134 -0
- package/dist/auth/loginGuard.d.ts.map +1 -0
- package/dist/auth/loginGuard.js +276 -0
- package/dist/auth/loginGuard.js.map +1 -0
- package/dist/auth/offlineCredentials.d.ts +105 -0
- package/dist/auth/offlineCredentials.d.ts.map +1 -0
- package/dist/auth/offlineCredentials.js +176 -0
- package/dist/auth/offlineCredentials.js.map +1 -0
- package/dist/auth/offlineSession.d.ts +96 -0
- package/dist/auth/offlineSession.d.ts.map +1 -0
- package/dist/auth/offlineSession.js +145 -0
- package/dist/auth/offlineSession.js.map +1 -0
- package/dist/auth/resolveAuthState.d.ts +85 -0
- package/dist/auth/resolveAuthState.d.ts.map +1 -0
- package/dist/auth/resolveAuthState.js +249 -0
- package/dist/auth/resolveAuthState.js.map +1 -0
- package/dist/auth/singleUser.d.ts +498 -0
- package/dist/auth/singleUser.d.ts.map +1 -0
- package/dist/auth/singleUser.js +1282 -0
- package/dist/auth/singleUser.js.map +1 -0
- package/dist/bin/commands.d.ts +14 -0
- package/dist/bin/commands.d.ts.map +1 -0
- package/dist/bin/commands.js +68 -0
- package/dist/bin/commands.js.map +1 -0
- package/dist/bin/install-pwa.d.ts +41 -0
- package/dist/bin/install-pwa.d.ts.map +1 -0
- package/dist/bin/install-pwa.js +4594 -0
- package/dist/bin/install-pwa.js.map +1 -0
- package/dist/config.d.ts +249 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +395 -0
- package/dist/config.js.map +1 -0
- package/dist/conflicts.d.ts +306 -0
- package/dist/conflicts.d.ts.map +1 -0
- package/dist/conflicts.js +807 -0
- package/dist/conflicts.js.map +1 -0
- package/dist/crdt/awareness.d.ts +128 -0
- package/dist/crdt/awareness.d.ts.map +1 -0
- package/dist/crdt/awareness.js +284 -0
- package/dist/crdt/awareness.js.map +1 -0
- package/dist/crdt/channel.d.ts +165 -0
- package/dist/crdt/channel.d.ts.map +1 -0
- package/dist/crdt/channel.js +522 -0
- package/dist/crdt/channel.js.map +1 -0
- package/dist/crdt/config.d.ts +58 -0
- package/dist/crdt/config.d.ts.map +1 -0
- package/dist/crdt/config.js +123 -0
- package/dist/crdt/config.js.map +1 -0
- package/dist/crdt/helpers.d.ts +104 -0
- package/dist/crdt/helpers.d.ts.map +1 -0
- package/dist/crdt/helpers.js +116 -0
- package/dist/crdt/helpers.js.map +1 -0
- package/dist/crdt/offline.d.ts +58 -0
- package/dist/crdt/offline.d.ts.map +1 -0
- package/dist/crdt/offline.js +130 -0
- package/dist/crdt/offline.js.map +1 -0
- package/dist/crdt/persistence.d.ts +65 -0
- package/dist/crdt/persistence.d.ts.map +1 -0
- package/dist/crdt/persistence.js +171 -0
- package/dist/crdt/persistence.js.map +1 -0
- package/dist/crdt/provider.d.ts +109 -0
- package/dist/crdt/provider.d.ts.map +1 -0
- package/dist/crdt/provider.js +543 -0
- package/dist/crdt/provider.js.map +1 -0
- package/dist/crdt/store.d.ts +111 -0
- package/dist/crdt/store.d.ts.map +1 -0
- package/dist/crdt/store.js +158 -0
- package/dist/crdt/store.js.map +1 -0
- package/dist/crdt/types.d.ts +281 -0
- package/dist/crdt/types.d.ts.map +1 -0
- package/dist/crdt/types.js +26 -0
- package/dist/crdt/types.js.map +1 -0
- package/dist/data.d.ts +502 -0
- package/dist/data.d.ts.map +1 -0
- package/dist/data.js +862 -0
- package/dist/data.js.map +1 -0
- package/dist/database.d.ts +153 -0
- package/dist/database.d.ts.map +1 -0
- package/dist/database.js +325 -0
- package/dist/database.js.map +1 -0
- package/dist/debug.d.ts +87 -0
- package/dist/debug.d.ts.map +1 -0
- package/dist/debug.js +135 -0
- package/dist/debug.js.map +1 -0
- package/dist/demo.d.ts +131 -0
- package/dist/demo.d.ts.map +1 -0
- package/dist/demo.js +168 -0
- package/dist/demo.js.map +1 -0
- package/dist/deviceId.d.ts +47 -0
- package/dist/deviceId.d.ts.map +1 -0
- package/dist/deviceId.js +106 -0
- package/dist/deviceId.js.map +1 -0
- package/dist/diagnostics.d.ts +292 -0
- package/dist/diagnostics.d.ts.map +1 -0
- package/dist/diagnostics.js +378 -0
- package/dist/diagnostics.js.map +1 -0
- package/dist/engine.d.ts +230 -0
- package/dist/engine.d.ts.map +1 -0
- package/dist/engine.js +2636 -0
- package/dist/engine.js.map +1 -0
- package/dist/entries/actions.d.ts +16 -0
- package/dist/entries/actions.d.ts.map +1 -0
- package/dist/entries/actions.js +29 -0
- package/dist/entries/actions.js.map +1 -0
- package/dist/entries/auth.d.ts +19 -0
- package/dist/entries/auth.d.ts.map +1 -0
- package/dist/entries/auth.js +50 -0
- package/dist/entries/auth.js.map +1 -0
- package/dist/entries/config.d.ts +15 -0
- package/dist/entries/config.d.ts.map +1 -0
- package/dist/entries/config.js +20 -0
- package/dist/entries/config.js.map +1 -0
- package/dist/entries/crdt.d.ts +32 -0
- package/dist/entries/crdt.d.ts.map +1 -0
- package/dist/entries/crdt.js +52 -0
- package/dist/entries/crdt.js.map +1 -0
- package/dist/entries/kit.d.ts +22 -0
- package/dist/entries/kit.d.ts.map +1 -0
- package/dist/entries/kit.js +58 -0
- package/dist/entries/kit.js.map +1 -0
- package/dist/entries/stores.d.ts +22 -0
- package/dist/entries/stores.d.ts.map +1 -0
- package/dist/entries/stores.js +57 -0
- package/dist/entries/stores.js.map +1 -0
- package/dist/entries/types.d.ts +23 -0
- package/dist/entries/types.d.ts.map +1 -0
- package/dist/entries/types.js +12 -0
- package/dist/entries/types.js.map +1 -0
- package/dist/entries/utils.d.ts +12 -0
- package/dist/entries/utils.d.ts.map +1 -0
- package/dist/entries/utils.js +42 -0
- package/dist/entries/utils.js.map +1 -0
- package/dist/entries/vite.d.ts +20 -0
- package/dist/entries/vite.d.ts.map +1 -0
- package/dist/entries/vite.js +26 -0
- package/dist/entries/vite.js.map +1 -0
- package/dist/index.d.ts +77 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +234 -0
- package/dist/index.js.map +1 -0
- package/dist/kit/auth.d.ts +80 -0
- package/dist/kit/auth.d.ts.map +1 -0
- package/dist/kit/auth.js +75 -0
- package/dist/kit/auth.js.map +1 -0
- package/dist/kit/confirm.d.ts +111 -0
- package/dist/kit/confirm.d.ts.map +1 -0
- package/dist/kit/confirm.js +169 -0
- package/dist/kit/confirm.js.map +1 -0
- package/dist/kit/loads.d.ts +187 -0
- package/dist/kit/loads.d.ts.map +1 -0
- package/dist/kit/loads.js +208 -0
- package/dist/kit/loads.js.map +1 -0
- package/dist/kit/server.d.ts +175 -0
- package/dist/kit/server.d.ts.map +1 -0
- package/dist/kit/server.js +297 -0
- package/dist/kit/server.js.map +1 -0
- package/dist/kit/sw.d.ts +176 -0
- package/dist/kit/sw.d.ts.map +1 -0
- package/dist/kit/sw.js +320 -0
- package/dist/kit/sw.js.map +1 -0
- package/dist/queue.d.ts +306 -0
- package/dist/queue.d.ts.map +1 -0
- package/dist/queue.js +925 -0
- package/dist/queue.js.map +1 -0
- package/dist/realtime.d.ts +280 -0
- package/dist/realtime.d.ts.map +1 -0
- package/dist/realtime.js +1031 -0
- package/dist/realtime.js.map +1 -0
- package/dist/runtime/runtimeConfig.d.ts +110 -0
- package/dist/runtime/runtimeConfig.d.ts.map +1 -0
- package/dist/runtime/runtimeConfig.js +260 -0
- package/dist/runtime/runtimeConfig.js.map +1 -0
- package/dist/schema.d.ts +150 -0
- package/dist/schema.d.ts.map +1 -0
- package/dist/schema.js +891 -0
- package/dist/schema.js.map +1 -0
- package/dist/stores/authState.d.ts +204 -0
- package/dist/stores/authState.d.ts.map +1 -0
- package/dist/stores/authState.js +336 -0
- package/dist/stores/authState.js.map +1 -0
- package/dist/stores/factories.d.ts +140 -0
- package/dist/stores/factories.d.ts.map +1 -0
- package/dist/stores/factories.js +157 -0
- package/dist/stores/factories.js.map +1 -0
- package/dist/stores/network.d.ts +48 -0
- package/dist/stores/network.d.ts.map +1 -0
- package/dist/stores/network.js +261 -0
- package/dist/stores/network.js.map +1 -0
- package/dist/stores/remoteChanges.d.ts +417 -0
- package/dist/stores/remoteChanges.d.ts.map +1 -0
- package/dist/stores/remoteChanges.js +626 -0
- package/dist/stores/remoteChanges.js.map +1 -0
- package/dist/stores/sync.d.ts +165 -0
- package/dist/stores/sync.d.ts.map +1 -0
- package/dist/stores/sync.js +275 -0
- package/dist/stores/sync.js.map +1 -0
- package/dist/supabase/auth.d.ts +219 -0
- package/dist/supabase/auth.d.ts.map +1 -0
- package/dist/supabase/auth.js +459 -0
- package/dist/supabase/auth.js.map +1 -0
- package/dist/supabase/client.d.ts +88 -0
- package/dist/supabase/client.d.ts.map +1 -0
- package/dist/supabase/client.js +313 -0
- package/dist/supabase/client.js.map +1 -0
- package/dist/supabase/validate.d.ts +118 -0
- package/dist/supabase/validate.d.ts.map +1 -0
- package/dist/supabase/validate.js +208 -0
- package/dist/supabase/validate.js.map +1 -0
- package/dist/sw/build/vite-plugin.d.ts +149 -0
- package/dist/sw/build/vite-plugin.d.ts.map +1 -0
- package/dist/sw/build/vite-plugin.js +517 -0
- package/dist/sw/build/vite-plugin.js.map +1 -0
- package/dist/sw/sw.js +664 -0
- package/dist/types.d.ts +363 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +18 -0
- package/dist/types.js.map +1 -0
- package/dist/utils.d.ts +85 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +156 -0
- package/dist/utils.js.map +1 -0
- package/package.json +117 -0
- package/src/components/DeferredChangesBanner.svelte +477 -0
- package/src/components/DemoBanner.svelte +110 -0
- package/src/components/SyncStatus.svelte +1732 -0
package/dist/realtime.js
ADDED
|
@@ -0,0 +1,1031 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Real-Time Subscription Manager -- Supabase Realtime WebSocket Layer
|
|
3
|
+
*
|
|
4
|
+
* Phase 5 of multi-device sync: Implements Supabase Realtime subscriptions
|
|
5
|
+
* for instant multi-device synchronization.
|
|
6
|
+
*
|
|
7
|
+
* ## Architecture
|
|
8
|
+
*
|
|
9
|
+
* This module manages a single Supabase Realtime channel per authenticated user,
|
|
10
|
+
* listening for PostgreSQL changes (INSERT, UPDATE, DELETE) across all configured
|
|
11
|
+
* entity tables. When a change arrives from another device, it is applied to the
|
|
12
|
+
* local Dexie (IndexedDB) store and subscribers are notified so the UI can react.
|
|
13
|
+
*
|
|
14
|
+
* ```
|
|
15
|
+
* Supabase Postgres --(CDC)--> Supabase Realtime Server
|
|
16
|
+
* |
|
|
17
|
+
* WebSocket
|
|
18
|
+
* |
|
|
19
|
+
* This module
|
|
20
|
+
* |
|
|
21
|
+
* +-------------+-------------+
|
|
22
|
+
* | |
|
|
23
|
+
* Local Dexie DB UI Notification
|
|
24
|
+
* (conflict-resolved) (animation / refresh)
|
|
25
|
+
* ```
|
|
26
|
+
*
|
|
27
|
+
* ## Echo Suppression
|
|
28
|
+
*
|
|
29
|
+
* Every write to Supabase includes a `device_id` field. When a realtime event
|
|
30
|
+
* arrives, we compare its `device_id` against our own. If they match, the event
|
|
31
|
+
* originated from this device and is silently discarded. This prevents the
|
|
32
|
+
* "echo" problem where a device processes its own outgoing changes a second time.
|
|
33
|
+
*
|
|
34
|
+
* ## Deduplication with Polling
|
|
35
|
+
*
|
|
36
|
+
* The sync engine also runs periodic polling as a fallback. To prevent the same
|
|
37
|
+
* remote change from being applied twice (once via realtime, once via poll), this
|
|
38
|
+
* module maintains a short-lived `recentlyProcessedByRealtime` map. The polling
|
|
39
|
+
* path in `engine.ts` checks this map before processing a change.
|
|
40
|
+
*
|
|
41
|
+
* ## Reconnection Strategy
|
|
42
|
+
*
|
|
43
|
+
* On WebSocket disconnection the module uses exponential backoff (1s, 2s, 4s, ...)
|
|
44
|
+
* up to {@link MAX_RECONNECT_ATTEMPTS} (5) attempts. If the browser is offline,
|
|
45
|
+
* reconnection is paused entirely -- no timers fire until a `navigator.onLine`
|
|
46
|
+
* event restores connectivity. A `reconnectScheduled` flag prevents duplicate
|
|
47
|
+
* reconnection timers from stacking up when multiple channel events fire in
|
|
48
|
+
* quick succession.
|
|
49
|
+
*
|
|
50
|
+
* ## Soft Deletes and Animations
|
|
51
|
+
*
|
|
52
|
+
* When a soft delete is detected (UPDATE with `deleted=true`), the module
|
|
53
|
+
* records the deletion in {@link remoteChangesStore} *before* writing to Dexie.
|
|
54
|
+
* This ordering is intentional: it allows the UI layer to play a removal
|
|
55
|
+
* animation before the reactive store filters out the deleted record.
|
|
56
|
+
*
|
|
57
|
+
* ## Security Considerations
|
|
58
|
+
*
|
|
59
|
+
* - **Row-Level Security (RLS):** No client-side user ID filter is applied to
|
|
60
|
+
* the channel subscription. All access control is enforced by Supabase RLS
|
|
61
|
+
* policies at the database level. This is a deliberate security decision:
|
|
62
|
+
* client-side filters can be bypassed, whereas RLS operates inside Postgres
|
|
63
|
+
* and cannot be circumvented by a malicious client.
|
|
64
|
+
* - **Device ID trust boundary:** The `device_id` field is used only for echo
|
|
65
|
+
* suppression and conflict tiebreaking, **not** for authorization. A spoofed
|
|
66
|
+
* `device_id` could cause an event to be incorrectly suppressed on another
|
|
67
|
+
* device, but it cannot escalate privileges or access unauthorized data.
|
|
68
|
+
* - **Channel naming:** The channel name includes the user ID to ensure
|
|
69
|
+
* Supabase routes CDC events correctly. This is a routing hint, not a
|
|
70
|
+
* security boundary -- RLS is the actual enforcement mechanism.
|
|
71
|
+
*
|
|
72
|
+
* @see {@link ./engine.ts} for the orchestrating sync engine and polling loop
|
|
73
|
+
* @see {@link ./conflicts.ts} for the conflict resolution algorithm
|
|
74
|
+
* @see {@link ./queue.ts} for the pending operations queue
|
|
75
|
+
* @see {@link ./stores/remoteChanges.ts} for UI change-tracking and animations
|
|
76
|
+
* @see {@link ./deviceId.ts} for per-device identity generation
|
|
77
|
+
*/
|
|
78
|
+
import { debugLog, debugWarn, debugError, isDebugMode } from './debug';
|
|
79
|
+
import { getEngineConfig, getDexieTableFor } from './config';
|
|
80
|
+
import { getDeviceId } from './deviceId';
|
|
81
|
+
import { resolveConflicts, storeConflictHistory, getPendingOpsForEntity } from './conflicts';
|
|
82
|
+
import { getPendingEntityIds } from './queue';
|
|
83
|
+
import { remoteChangesStore } from './stores/remoteChanges';
|
|
84
|
+
import { isDemoMode } from './demo';
|
|
85
|
+
// =============================================================================
// CONSTANTS
// =============================================================================
/**
 * How long (in ms) a processed entity is considered "recent" for
 * realtime-vs-polling deduplication.
 *
 * Must match the TTL used in engine.ts for `recentlyModifiedEntities` so that
 * the two deduplication windows overlap correctly.
 *
 * **Why 2 seconds?** The window must span the typical latency gap between a
 * realtime WebSocket push and the next polling cycle. If the poll fires within
 * 2s of the realtime event, the entity is still in the dedup map and the poll
 * result is skipped.
 *
 * @see {@link ./engine.ts} -- `RECENTLY_MODIFIED_TTL_MS`
 */
const RECENTLY_MODIFIED_TTL_MS = 2000;
/**
 * Maximum number of reconnection attempts before the module gives up and
 * falls back to polling-only mode.
 *
 * **Why 5?** With exponential backoff (1s, 2s, 4s, 8s, 16s) the total wait
 * before giving up is ~31 seconds, which covers most transient network
 * hiccups without prolonged retry noise.
 */
const MAX_RECONNECT_ATTEMPTS = 5;
/**
 * Base delay (ms) for exponential backoff between reconnection attempts.
 * Actual delay = RECONNECT_BASE_DELAY * 2^(attemptIndex).
 */
const RECONNECT_BASE_DELAY = 1000;
|
|
115
|
+
// =============================================================================
// MODULE-LEVEL STATE
// =============================================================================
/**
 * Entities recently processed via realtime, keyed by entity ID with the
 * processing timestamp (ms epoch) as the value.
 *
 * Intentionally separate from engine.ts's `recentlyModifiedEntities` (which
 * tracks *local* writes): this map tracks *remote* changes received over the
 * WebSocket so the polling path can skip them.
 *
 * **Memory note:** entries are lazily evicted on read (see
 * {@link wasRecentlyProcessedByRealtime}) and actively swept by
 * {@link cleanupRealtimeTracking}; worst case the map holds one entry per
 * entity modified within the last {@link RECENTLY_MODIFIED_TTL_MS} ms.
 */
const recentlyProcessedByRealtime = new Map();
/**
 * Singleton connection state, initialized to a clean "disconnected" baseline.
 *
 * **Why a singleton?** A browser tab should never hold more than one realtime
 * WebSocket connection for the same user; multiple connections would cause
 * duplicate event processing and wasted bandwidth. Module-level state
 * enforces that invariant.
 */
const state = {
    channel: null,               // active Supabase Realtime channel, or null when torn down
    connectionState: 'disconnected',
    userId: null,                // authenticated user ID the channel is scoped to
    deviceId: '',                // this device's identity, used for echo suppression
    lastError: null,             // last human-readable connection error, or null
    reconnectAttempts: 0,        // backoff counter, capped at MAX_RECONNECT_ATTEMPTS
    reconnectTimeout: null       // pending reconnect timer handle, if any
};
// =============================================================================
// CALLBACK REGISTRIES
// =============================================================================
/**
 * Listeners fired on every connection state transition; each receives the new
 * connection state. A `Set` is used so the same callback reference cannot be
 * registered twice, preventing duplicate notifications.
 */
const connectionCallbacks = new Set();
/**
 * Listeners fired after a remote change has been applied to the local Dexie
 * database (so re-queries inside a callback see fresh data). Consumers such
 * as Svelte stores use this to trigger reactive re-queries.
 */
const dataUpdateCallbacks = new Set();
// =============================================================================
// CONCURRENCY GUARDS
// =============================================================================
/**
 * Cooperative lock preventing concurrent start/stop operations. Channel setup
 * and teardown are async; overlapping calls could leave the module in an
 * inconsistent state. Not a true mutex -- JavaScript's single-threaded model
 * means there is no race between checking and setting the flag.
 */
let operationInProgress = false;
/**
 * Prevents duplicate reconnection timers. Supabase may emit both
 * `CHANNEL_ERROR` and `CLOSED` for one disconnection; without this flag each
 * event would schedule its own timer. Cleared in the setTimeout callback
 * (normal reconnect), on teardown, and on the offline transition.
 */
let reconnectScheduled = false;
|
|
193
|
+
// =============================================================================
|
|
194
|
+
// PUBLIC API -- SUBSCRIPTION HOOKS
|
|
195
|
+
// =============================================================================
|
|
196
|
+
/**
 * Register a listener for realtime connection state transitions.
 *
 * The listener fires once immediately with the current state (so subscribers
 * learn the baseline without waiting for the next transition -- the same
 * contract Svelte stores follow), then again on every subsequent change.
 *
 * @param callback - Invoked with the new {@link RealtimeConnectionState}.
 * @returns An unsubscribe function that removes the listener.
 *
 * @example
 * ```ts
 * const unsub = onConnectionStateChange((state) => {
 *   if (state === 'error') showReconnectBanner();
 * });
 * // Later, to stop listening:
 * unsub();
 * ```
 */
export function onConnectionStateChange(callback) {
    connectionCallbacks.add(callback);
    // Synchronous initial delivery of the current baseline state.
    callback(state.connectionState);
    const unsubscribe = () => connectionCallbacks.delete(callback);
    return unsubscribe;
}
|
|
223
|
+
/**
 * Register a listener for data update notifications.
 *
 * Listeners fire *after* the remote change has been written to the local
 * Dexie database, so re-querying inside the callback returns fresh data.
 * Unlike {@link onConnectionStateChange}, no immediate call is made on
 * registration -- there is no "current" data event to replay.
 *
 * @param callback - Invoked with the Supabase table name and entity ID.
 * @returns An unsubscribe function that removes the listener.
 *
 * @example
 * ```ts
 * const unsub = onRealtimeDataUpdate((table, entityId) => {
 *   if (table === 'habits') refreshHabitStore();
 * });
 * ```
 *
 * @see {@link notifyDataUpdate} for the internal dispatch function
 */
export function onRealtimeDataUpdate(callback) {
    dataUpdateCallbacks.add(callback);
    const unsubscribe = () => dataUpdateCallbacks.delete(callback);
    return unsubscribe;
}
|
|
245
|
+
// =============================================================================
|
|
246
|
+
// PUBLIC API -- STATE QUERIES
|
|
247
|
+
// =============================================================================
|
|
248
|
+
/**
 * Report whether an entity was handled by a realtime event within the
 * deduplication window ({@link RECENTLY_MODIFIED_TTL_MS}).
 *
 * Called by `engine.ts` during polling so the same remote change is not
 * applied twice (once via realtime, once via the poll response).
 *
 * **Side effect:** an expired entry is evicted on access, keeping the map
 * small during activity bursts and complementing the periodic sweep in
 * {@link cleanupRealtimeTracking}.
 *
 * @param entityId - UUID of the entity to check.
 * @returns `true` when the entity is still inside the dedup window.
 *
 * @example
 * ```ts
 * if (wasRecentlyProcessedByRealtime(entity.id)) {
 *   // Skip -- realtime already handled this change
 *   continue;
 * }
 * ```
 *
 * @see {@link ./engine.ts} -- polling path
 */
export function wasRecentlyProcessedByRealtime(entityId) {
    const processedAt = recentlyProcessedByRealtime.get(entityId);
    if (!processedAt) {
        return false;
    }
    const expired = Date.now() - processedAt > RECENTLY_MODIFIED_TTL_MS;
    if (expired) {
        // Lazy eviction: drop the stale entry so the map does not grow.
        recentlyProcessedByRealtime.delete(entityId);
    }
    return !expired;
}
|
|
282
|
+
/**
 * Report whether the realtime connection is currently healthy.
 *
 * @returns `true` only while the WebSocket channel is in the `'connected'` state.
 */
export function isRealtimeHealthy() {
    const { connectionState } = state;
    return connectionState === 'connected';
}
|
|
290
|
+
/**
 * Produce a snapshot of realtime-internal state for diagnostics.
 *
 * The leading `_` signals that this exposes module-private state and should
 * only be consumed by the diagnostics module.
 *
 * @returns A plain object of current realtime state values.
 */
export function _getRealtimeDiagnostics() {
    const { connectionState, reconnectAttempts, lastError, userId, deviceId } = state;
    return {
        connectionState,
        healthy: connectionState === 'connected',
        reconnectAttempts,
        lastError,
        userId,
        deviceId,
        recentlyProcessedCount: recentlyProcessedByRealtime.size,
        operationInProgress,
        reconnectScheduled
    };
}
|
|
311
|
+
/**
 * Sweep expired entries out of the recently-processed tracking map.
 *
 * Invoked periodically by the sync engine's maintenance loop to keep memory
 * bounded in long-running sessions.
 *
 * **Why an explicit sweep?** Lazy eviction in
 * {@link wasRecentlyProcessedByRealtime} only runs when an entity is looked
 * up; an entity processed by realtime but never polled would otherwise keep
 * its entry forever.
 *
 * @see {@link RECENTLY_MODIFIED_TTL_MS}
 */
export function cleanupRealtimeTracking() {
    // Anything processed before this instant has outlived its TTL.
    const cutoff = Date.now() - RECENTLY_MODIFIED_TTL_MS;
    for (const [entityId, processedAt] of recentlyProcessedByRealtime) {
        if (processedAt < cutoff) {
            // Deleting during iteration is safe for Map.
            recentlyProcessedByRealtime.delete(entityId);
        }
    }
}
|
|
332
|
+
// =============================================================================
|
|
333
|
+
// INTERNAL HELPERS -- STATE NOTIFICATIONS
|
|
334
|
+
// =============================================================================
|
|
335
|
+
/**
 * Transition the connection state and fan the change out to all listeners.
 *
 * Errors thrown by individual callbacks are caught and logged so that one
 * misbehaving listener can neither break the notification chain nor crash
 * realtime lifecycle management.
 *
 * @param newState - The {@link RealtimeConnectionState} to transition to.
 * @param error - Optional human-readable error message stored in {@link state.lastError}.
 */
function setConnectionState(newState, error) {
    state.connectionState = newState;
    state.lastError = error || null;
    connectionCallbacks.forEach((callback) => {
        try {
            callback(newState);
        }
        catch (e) {
            // Catch-and-continue: remaining subscribers must still be notified.
            debugError('[Realtime] Connection callback error:', e);
        }
    });
}
|
|
359
|
+
/**
 * Fan a data-update event out to all registered subscribers.
 *
 * Called after a remote change has been written to Dexie, so callbacks may
 * safely re-query local data. Errors thrown by individual callbacks are
 * caught and logged so one broken subscriber cannot block the rest.
 *
 * @param table - Supabase table name where the change originated (e.g. `'habits'`).
 * @param entityId - UUID of the changed entity.
 *
 * @see {@link onRealtimeDataUpdate} for the public subscription API
 */
function notifyDataUpdate(table, entityId) {
    debugLog(`[Realtime] Notifying ${dataUpdateCallbacks.size} subscribers of update: ${table}/${entityId}`);
    dataUpdateCallbacks.forEach((callback) => {
        try {
            callback(table, entityId);
        }
        catch (e) {
            debugError('[Realtime] Data update callback error:', e);
        }
    });
}
|
|
381
|
+
// =============================================================================
|
|
382
|
+
// INTERNAL HELPERS -- ECHO & DEDUP FILTERS
|
|
383
|
+
// =============================================================================
|
|
384
|
+
/**
 * Decide whether a change event originated from this device ("echo").
 *
 * Supabase Realtime delivers *all* changes matching the channel filter,
 * including this device's own writes; comparing the payload's `device_id`
 * against ours suppresses those echoes.
 *
 * **Security note:** this comparison is a performance optimization (skipping
 * redundant local writes), **not** an authorization mechanism. A spoofed
 * `device_id` can only cause an event to be skipped on the spoofing device;
 * RLS enforces row-level access at the database.
 *
 * @param record - The `new` record from the realtime payload, or `null`
 *   (e.g. no record available on the event).
 * @returns `true` if the record's `device_id` matches this device.
 *
 * @see {@link ./deviceId.ts} -- where the device identity is generated
 */
function isOwnDeviceChange(record) {
    // A missing record cannot be attributed to any device; treat as remote.
    if (!record) {
        return false;
    }
    return record.device_id === state.deviceId;
}
|
|
408
|
+
/**
|
|
409
|
+
* Check if an entity was recently processed by this realtime handler.
|
|
410
|
+
*
|
|
411
|
+
* This is the *internal* counterpart of the exported
|
|
412
|
+
* {@link wasRecentlyProcessedByRealtime}. It is called inside
|
|
413
|
+
* {@link handleRealtimeChange} to short-circuit duplicate events that may
|
|
414
|
+
* arrive in rapid succession (e.g. due to Supabase retries).
|
|
415
|
+
*
|
|
416
|
+
* **Why a separate function?** The internal version is used in the hot path
|
|
417
|
+
* of change processing, while the exported version is used by the polling
|
|
418
|
+
* engine. Keeping them separate makes it clear which is the internal guard
|
|
419
|
+
* and which is the cross-module dedup check.
|
|
420
|
+
*
|
|
421
|
+
* @param entityId - The UUID of the entity to check.
|
|
422
|
+
* @returns `true` if the entity is within the deduplication window.
|
|
423
|
+
*/
|
|
424
|
+
function wasRecentlyProcessed(entityId) {
    const stamp = recentlyProcessedByRealtime.get(entityId);
    // Unknown entity (or falsy timestamp) -> not a duplicate.
    if (!stamp) {
        return false;
    }
    // Entries older than the TTL are evicted lazily here, which keeps the
    // tracking map from growing without bound.
    const withinWindow = Date.now() - stamp <= RECENTLY_MODIFIED_TTL_MS;
    if (!withinWindow) {
        recentlyProcessedByRealtime.delete(entityId);
    }
    return withinWindow;
}
|
|
435
|
+
// =============================================================================
|
|
436
|
+
// CORE CHANGE HANDLER
|
|
437
|
+
// =============================================================================
|
|
438
|
+
/**
|
|
439
|
+
* Process an incoming realtime change event from Supabase.
|
|
440
|
+
*
|
|
441
|
+
* This is the central routing function for all realtime events. It:
|
|
442
|
+
* 1. Extracts the entity ID and event type from the payload.
|
|
443
|
+
* 2. Applies echo suppression and deduplication filters.
|
|
444
|
+
* 3. Looks up the matching Dexie table via the engine config.
|
|
445
|
+
* 4. Delegates to the appropriate branch: INSERT/UPDATE or DELETE.
|
|
446
|
+
* 5. Records the change in {@link remoteChangesStore} for UI animations.
|
|
447
|
+
* 6. Marks the entity as recently processed to prevent polling duplication.
|
|
448
|
+
* 7. Notifies data-update subscribers.
|
|
449
|
+
*
|
|
450
|
+
* For INSERT/UPDATE events with pending local operations, the function
|
|
451
|
+
* delegates to {@link resolveConflicts} to produce a merged entity.
|
|
452
|
+
*
|
|
453
|
+
* **Error handling:** All errors are caught at the top level and logged.
|
|
454
|
+
* A failure to process one event must not crash the WebSocket listener or
|
|
455
|
+
* prevent subsequent events from being handled.
|
|
456
|
+
*
|
|
457
|
+
* **Ordering contract with remoteChangesStore:**
|
|
458
|
+
* For delete operations (both soft and hard), the change is recorded in
|
|
459
|
+
* remoteChangesStore **before** writing to Dexie. This ordering is critical
|
|
460
|
+
* for exit animations -- see the soft delete and hard delete sections below.
|
|
461
|
+
*
|
|
462
|
+
* @param table - The Supabase table name (e.g. `'habits'`, `'entries'`).
|
|
463
|
+
* @param payload - The raw Supabase realtime change payload.
|
|
464
|
+
*
|
|
465
|
+
* @throws Never throws -- all errors are caught internally and logged.
|
|
466
|
+
*
|
|
467
|
+
* @see {@link resolveConflicts} for the conflict resolution algorithm
|
|
468
|
+
* @see {@link remoteChangesStore} for how the UI animates remote changes
|
|
469
|
+
*/
|
|
470
|
+
async function handleRealtimeChange(table, payload) {
    const eventType = payload.eventType;
    const newRecord = payload.new;
    const oldRecord = payload.old;
    /* DELETEs only populate `old`, INSERTs only `new`; UPDATEs populate both.
       Take the entity ID from whichever side is available. */
    const entityId = (newRecord?.id || oldRecord?.id);
    debugLog(`[Realtime] Received ${eventType} on ${table}:`, entityId);
    if (!entityId) {
        debugWarn('[Realtime] Change without entity ID:', table, eventType);
        return;
    }
    /* ---- Echo suppression ----
       Skip CDC events that originated from this device; without this, every
       local write would be re-applied when it round-trips through the
       WebSocket, causing redundant Dexie writes and UI flicker. */
    if (isOwnDeviceChange(newRecord)) {
        debugLog(`[Realtime] Skipping own device change: ${table}/${entityId}`);
        return;
    }
    /* ---- Deduplication ----
       Guard against Supabase delivering the same CDC event multiple times
       (server-side reconnection / rebalancing). */
    if (wasRecentlyProcessed(entityId)) {
        debugLog(`[Realtime] Skipping recently processed: ${table}/${entityId}`);
        return;
    }
    debugLog(`[Realtime] Processing remote change: ${eventType} ${table}/${entityId}`);
    /* Resolve the table config ONCE; it is reused below for the optional
       onRemoteChange hook instead of searching the table list a second time. */
    const tableConfig = getEngineConfig().tables.find((t) => t.supabaseName === table);
    const dexieTable = tableConfig ? getDexieTableFor(tableConfig) : undefined;
    if (!dexieTable) {
        debugWarn('[Realtime] Unknown table:', table);
        return;
    }
    try {
        switch (eventType) {
            // -----------------------------------------------------------------
            // INSERT / UPDATE -- the bulk of the logic lives here
            // -----------------------------------------------------------------
            case 'INSERT':
            case 'UPDATE': {
                if (!newRecord)
                    return;
                /* NOTE(review): result intentionally unused; the call is kept
                   because isEditing may register edit-tracking state inside
                   remoteChangesStore -- confirm before removing entirely. */
                const _isBeingEdited = remoteChangesStore.isEditing(entityId, table);
                /* Fetch the local version so we can diff fields and detect
                   conflicts. */
                const localEntity = await getEngineConfig().db.table(dexieTable).get(entityId);
                /* Collect fields whose values actually differ. Metadata fields
                   (updated_at, _version) are skipped: they change on every sync
                   and would produce noisy, misleading highlight animations. */
                const changedFields = [];
                if (localEntity) {
                    for (const key of Object.keys(newRecord)) {
                        if (key === 'updated_at' || key === '_version')
                            continue;
                        /* JSON.stringify comparison handles nested objects and
                           arrays; for primitives it is equivalent to ===, and
                           undefined fields are omitted (desired here). */
                        if (JSON.stringify(localEntity[key]) !== JSON.stringify(newRecord[key])) {
                            changedFields.push(key);
                        }
                    }
                }
                /* ---- Soft delete detection ----
                   An UPDATE flipping `deleted` false->true. The exit animation
                   must be set up BEFORE the Dexie write: reactive stores filter
                   out deleted records immediately, which removes the DOM
                   element and would otherwise prevent any exit transition. */
                const isSoftDelete = newRecord.deleted === true && localEntity && !localEntity.deleted;
                if (isSoftDelete) {
                    debugLog(`[Realtime] Soft delete detected for ${table}/${entityId}`);
                    /* Wildcard ['*'] tells the UI the entire row is affected. */
                    remoteChangesStore.recordRemoteChange(entityId, table, ['*'], true, 'DELETE');
                    await remoteChangesStore.markPendingDelete(entityId, table);
                    /* Now persist the soft-deleted record; reactive stores refresh. */
                    await getEngineConfig().db.table(dexieTable).put(newRecord);
                    recentlyProcessedByRealtime.set(entityId, Date.now());
                    notifyDataUpdate(table, entityId);
                    break;
                }
                /* ---- Conflict resolution path ----
                   1. No local entity  -> simple insert (no conflict possible)
                   2. No pending ops   -> accept remote only if newer (LWW)
                   3. Pending ops      -> full merge via resolveConflicts, so
                      unsynced local work is never silently discarded. */
                const pendingEntityIds = await getPendingEntityIds();
                const hasPendingOps = pendingEntityIds.has(entityId);
                let applied = false;
                if (!localEntity) {
                    /* Branch 1: another device created a new entity. */
                    await getEngineConfig().db.table(dexieTable).put(newRecord);
                    applied = true;
                }
                else if (!hasPendingOps) {
                    /* Branch 2: only overwrite if the remote timestamp is
                       strictly newer; if a local write just happened but has
                       not been pushed yet, keep it to avoid regressing the UI. */
                    const localUpdatedAt = new Date(localEntity.updated_at).getTime();
                    const remoteUpdatedAt = new Date(newRecord.updated_at).getTime();
                    if (remoteUpdatedAt > localUpdatedAt) {
                        await getEngineConfig().db.table(dexieTable).put(newRecord);
                        applied = true;
                    }
                }
                else {
                    /* Branch 3: merge -- preserves non-conflicting local edits
                       while incorporating remote state (see conflicts.ts). */
                    const pendingOps = await getPendingOpsForEntity(entityId);
                    const resolution = await resolveConflicts(table, entityId, localEntity, newRecord, pendingOps);
                    await getEngineConfig().db.table(dexieTable).put(resolution.mergedEntity);
                    applied = true;
                    /* Persist conflict history for auditability / undo; only
                       when real field-level conflicts were detected. */
                    if (resolution.hasConflicts) {
                        await storeConflictHistory(resolution);
                    }
                }
                /* ---- Value delta for counter animations ----
                   If `current_value` changed, compute the delta so the UI can
                   show an increment/decrement animation (e.g. "+1" / "-3"). */
                let valueDelta;
                if (changedFields.includes('current_value') && localEntity) {
                    const oldValue = localEntity.current_value || 0;
                    const newValue = newRecord.current_value || 0;
                    valueDelta = newValue - oldValue;
                }
                /* ---- UI change notification ----
                   Only notify when there are visible changes or the entity is
                   entirely new, preventing spurious highlights for
                   metadata-only updates. If the entity is being edited in a
                   form, the store defers the notification internally. */
                if (changedFields.length > 0 || !localEntity) {
                    remoteChangesStore.recordRemoteChange(entityId, table, changedFields.length > 0 ? changedFields : ['*'], applied, eventType, valueDelta);
                    /* Fire the optional per-table hook (toasts, badge updates).
                       Reuses the tableConfig resolved at the top of this
                       function -- previously this re-ran the identical
                       tables.find() a second time. */
                    if (tableConfig?.onRemoteChange) {
                        tableConfig.onRemoteChange(table, newRecord);
                    }
                }
                /* Bridge to the polling path's deduplication. */
                recentlyProcessedByRealtime.set(entityId, Date.now());
                notifyDataUpdate(table, entityId);
                break;
            }
            // -----------------------------------------------------------------
            // DELETE -- hard-delete path (rare in soft-delete systems)
            // -----------------------------------------------------------------
            case 'DELETE': {
                /* Hard deletes are uncommon (admin purges, cleanup jobs, or
                   entity types that use hard deletes) but must be handled. */
                if (oldRecord) {
                    /* Same ordering contract as the soft-delete path: record +
                       await the delete animation BEFORE the Dexie removal,
                       because the reactive framework drops the DOM element as
                       soon as the record disappears from Dexie. */
                    remoteChangesStore.recordRemoteChange(entityId, table, ['*'], true, 'DELETE');
                    await remoteChangesStore.markPendingDelete(entityId, table);
                    await getEngineConfig().db.table(dexieTable).delete(entityId);
                    recentlyProcessedByRealtime.set(entityId, Date.now());
                    notifyDataUpdate(table, entityId);
                }
                break;
            }
        }
    }
    catch (error) {
        /* Top-level catch: one bad event must never crash the WebSocket
           listener; the channel continues processing subsequent events. */
        debugError(`[Realtime] Error handling ${eventType} on ${table}:`, error);
    }
}
|
|
681
|
+
// =============================================================================
|
|
682
|
+
// RECONNECTION LOGIC
|
|
683
|
+
// =============================================================================
|
|
684
|
+
/**
|
|
685
|
+
* Schedule a reconnection attempt using exponential backoff.
|
|
686
|
+
*
|
|
687
|
+
* Behavior:
|
|
688
|
+
* - If the browser is offline (`navigator.onLine === false`), reconnection is
|
|
689
|
+
* skipped entirely. The sync engine's `online` event handler will re-trigger
|
|
690
|
+
* subscription start when connectivity returns.
|
|
691
|
+
* - If the maximum number of attempts has been reached, the module gives up and
|
|
692
|
+
* transitions to `'error'` state; the polling fallback remains active.
|
|
693
|
+
* - A `reconnectScheduled` flag prevents duplicate timers from being created
|
|
694
|
+
* when multiple channel events (e.g. CHANNEL_ERROR + CLOSED) fire in quick
|
|
695
|
+
* succession for the same disconnection.
|
|
696
|
+
*
|
|
697
|
+
* **Backoff schedule:** 1s, 2s, 4s, 8s, 16s (geometric progression).
|
|
698
|
+
* Total wait across all 5 attempts: ~31 seconds.
|
|
699
|
+
*
|
|
700
|
+
* @see {@link MAX_RECONNECT_ATTEMPTS}
|
|
701
|
+
* @see {@link RECONNECT_BASE_DELAY}
|
|
702
|
+
* @see {@link startRealtimeSubscriptions} -- called by the timer callback
|
|
703
|
+
*/
|
|
704
|
+
function scheduleReconnect() {
    /* Duplicate guard: CHANNEL_ERROR and CLOSED can both fire for the same
       disconnection; only the first caller gets to arm the timer. */
    if (reconnectScheduled) {
        debugLog('[Realtime] Reconnect skipped: timer already scheduled (duplicate guard)');
        return;
    }
    /* Discard any stale timer handle before arming a new one, so a pending
       timer can never fire against superseded state. */
    if (state.reconnectTimeout) {
        clearTimeout(state.reconnectTimeout);
        state.reconnectTimeout = null;
    }
    /* While the network is down there is nothing to reconnect to; the sync
       engine's `online` listener restarts subscriptions when it returns. */
    const browserOffline = typeof navigator !== 'undefined' && !navigator.onLine;
    if (browserOffline) {
        debugLog('[Realtime] Offline - waiting for online event to reconnect');
        setConnectionState('disconnected');
        return;
    }
    /* Attempt budget exhausted: give up on realtime and let the polling
       fallback carry the sync load. */
    if (state.reconnectAttempts >= MAX_RECONNECT_ATTEMPTS) {
        debugLog('[Realtime] Max reconnect attempts reached, falling back to polling');
        setConnectionState('error', 'Max reconnection attempts reached');
        return;
    }
    reconnectScheduled = true;
    /* Geometric backoff: base * 2^attempts (1s, 2s, 4s, 8s, 16s). */
    const backoffMs = RECONNECT_BASE_DELAY * 2 ** state.reconnectAttempts;
    debugLog(`[Realtime] Scheduling reconnect attempt ${state.reconnectAttempts + 1} in ${backoffMs}ms`);
    state.reconnectTimeout = setTimeout(async () => {
        reconnectScheduled = false;
        /* The network may have dropped during the backoff wait; don't burn an
           attempt on a connection that is guaranteed to fail. */
        if (typeof navigator !== 'undefined' && !navigator.onLine) {
            debugLog('[Realtime] Went offline during backoff, cancelling reconnect');
            return;
        }
        state.reconnectAttempts += 1;
        if (state.userId) {
            await startRealtimeSubscriptions(state.userId);
        }
    }, backoffMs);
}
|
|
747
|
+
// =============================================================================
|
|
748
|
+
// LIFECYCLE -- START / STOP / PAUSE
|
|
749
|
+
// =============================================================================
|
|
750
|
+
/**
|
|
751
|
+
* Internal teardown: remove the channel and reset connection state.
|
|
752
|
+
*
|
|
753
|
+
* Does **not** acquire the `operationInProgress` lock -- callers are
|
|
754
|
+
* responsible for holding it. This allows {@link startRealtimeSubscriptions}
|
|
755
|
+
* to call it mid-operation without deadlocking.
|
|
756
|
+
*
|
|
757
|
+
* **Why separate from the public `stopRealtimeSubscriptions`?** The public
|
|
758
|
+
* version acquires the concurrency lock and clears session-level state (userId,
|
|
759
|
+
* tracking map). This internal version only handles the channel teardown,
|
|
760
|
+
* making it safe to call from within `startRealtimeSubscriptions` which
|
|
761
|
+
* already holds the lock.
|
|
762
|
+
*
|
|
763
|
+
* @see {@link stopRealtimeSubscriptions} -- the public API that wraps this
|
|
764
|
+
*/
|
|
765
|
+
async function stopRealtimeSubscriptionsInternal() {
    /* Kill any pending reconnect timer and reset the scheduling flag first,
       so a late-firing timer cannot reconnect with torn-down state. */
    if (state.reconnectTimeout) {
        clearTimeout(state.reconnectTimeout);
        state.reconnectTimeout = null;
    }
    reconnectScheduled = false;
    /* Unsubscribe from the Supabase channel: sends an unsubscribe message
       over the WebSocket and cleans up local channel state. */
    if (state.channel) {
        try {
            await getEngineConfig().supabase.removeChannel(state.channel);
        }
        catch (err) {
            /* Best-effort teardown: log and keep cleaning up even if the
               server-side unsubscribe fails (e.g. the socket is already dead). */
            debugError('[Realtime] Error removing channel:', err);
        }
        state.channel = null;
    }
    state.reconnectAttempts = 0;
    setConnectionState('disconnected');
}
|
|
791
|
+
/**
|
|
792
|
+
* Start realtime subscriptions for an authenticated user.
|
|
793
|
+
*
|
|
794
|
+
* Creates a single Supabase Realtime channel and registers PostgreSQL change
|
|
795
|
+
* listeners for every table defined in the engine config.
|
|
796
|
+
*
|
|
797
|
+
* **Security:** Access control is enforced by Supabase RLS policies at the
|
|
798
|
+
* database level -- no client-side `user_id` filter is applied to the channel
|
|
799
|
+
* subscription. The Realtime server evaluates RLS policies for each CDC event
|
|
800
|
+
* and only delivers events the user is authorized to see.
|
|
801
|
+
*
|
|
802
|
+
* This function is idempotent: if the channel is already connected for the
|
|
803
|
+
* same user, it returns immediately. If a different user is provided, the
|
|
804
|
+
* existing channel is torn down first.
|
|
805
|
+
*
|
|
806
|
+
* **Channel multiplexing:** One channel is created for all tables rather than
|
|
807
|
+
* one per table. This is more efficient because Supabase multiplexes all
|
|
808
|
+
* subscriptions over a single WebSocket connection regardless, so separate
|
|
809
|
+
* channels would only add overhead without improving parallelism.
|
|
810
|
+
*
|
|
811
|
+
* @param userId - The authenticated user's UUID. Used to construct a unique
|
|
812
|
+
* channel name (`{prefix}_sync_{userId}`).
|
|
813
|
+
*
|
|
814
|
+
* @throws Never throws -- all errors are caught internally. On failure, the
|
|
815
|
+
* connection state transitions to `'error'` and reconnection is
|
|
816
|
+
* scheduled automatically.
|
|
817
|
+
*
|
|
818
|
+
* @example
|
|
819
|
+
* ```ts
|
|
820
|
+
* // After login:
|
|
821
|
+
* await startRealtimeSubscriptions(session.user.id);
|
|
822
|
+
* ```
|
|
823
|
+
*
|
|
824
|
+
* @see {@link stopRealtimeSubscriptions} to cleanly tear down the channel
|
|
825
|
+
* @see {@link getEngineConfig} for the table configuration consumed here
|
|
826
|
+
*/
|
|
827
|
+
export async function startRealtimeSubscriptions(userId) {
    /* Realtime needs a browser for WebSocket (SSR bails early) and is a
       no-op in demo mode. */
    if (typeof window === 'undefined' || isDemoMode()) {
        return;
    }
    /* While offline, connecting is guaranteed to fail; the sync engine's
       `online` handler calls us again when connectivity is restored. */
    if (!navigator.onLine) {
        debugLog('[Realtime] Offline - skipping subscription start');
        return;
    }
    /* Idempotency: already connected for this exact user -> nothing to do. */
    if (state.channel && state.userId === userId && state.connectionState === 'connected') {
        return;
    }
    /* Serialize start/stop: rapid login/logout cycles must not interleave
       the async channel teardown/creation steps. */
    if (operationInProgress) {
        debugLog('[Realtime] Start blocked: operation already in progress (concurrent start/stop guard)');
        return;
    }
    operationInProgress = true;
    try {
        /* Always tear down any existing channel first -- covers user switches
           (logout + login) and recovery from an error state. */
        await stopRealtimeSubscriptionsInternal();
        state.userId = userId;
        state.deviceId = getDeviceId();
        setConnectionState('connecting');
        const engineCfg = getEngineConfig();
        const tableNames = engineCfg.tables.map((t) => t.supabaseName);
        /* ---- Channel creation ----
           One multiplexed channel for all tables: Supabase shares a single
           WebSocket regardless, so per-table channels would only add overhead.
           The user ID in the name keeps channels unique across tabs with
           different logged-in users. Access control is enforced by RLS at the
           database level, not by the channel name. */
        const channelTopic = `${engineCfg.prefix}_sync_${userId}`;
        state.channel = engineCfg.supabase.channel(channelTopic);
        debugLog(`[Realtime] Setting up subscriptions for ${tableNames.length} tables`);
        /* ---- Register table listeners ----
           Listen for all CDC events ('*' = INSERT, UPDATE, DELETE) on every
           configured table. No client-side `filter`: RLS already restricts
           which rows are delivered, and a redundant filter here could drift
           out of sync with the policy definitions. */
        for (const tableName of tableNames) {
            state.channel = state.channel.on('postgres_changes', {
                event: '*',
                schema: 'public',
                table: tableName
            }, (rawPayload) => {
                debugLog(`[Realtime] Raw payload received for ${tableName}:`, rawPayload.eventType);
                /* Fire-and-forget: handleRealtimeChange catches its own errors,
                   so nothing propagates into the Supabase client's event loop. */
                handleRealtimeChange(tableName, rawPayload).catch((handlerError) => {
                    debugError(`[Realtime] Error processing ${tableName} change:`, handlerError);
                });
            });
        }
        /* ---- Activate the channel ----
           Supabase may emit several statuses for one underlying event (e.g.
           CHANNEL_ERROR shortly followed by CLOSED for a single disconnect). */
        state.channel.subscribe((channelStatus, channelErr) => {
            switch (channelStatus) {
                case 'SUBSCRIBED':
                    debugLog('[Realtime] Connected and subscribed');
                    /* Reset the backoff so the next disconnection starts fresh
                       with the base delay. */
                    state.reconnectAttempts = 0;
                    reconnectScheduled = false;
                    setConnectionState('connected');
                    break;
                case 'CHANNEL_ERROR':
                    debugError('[Realtime] Channel error:', channelErr?.message || 'unknown', channelErr);
                    setConnectionState('error', channelErr?.message || 'Channel error');
                    scheduleReconnect();
                    break;
                case 'TIMED_OUT':
                    debugWarn('[Realtime] Connection timed out');
                    setConnectionState('error', 'Connection timed out');
                    scheduleReconnect();
                    break;
                case 'CLOSED':
                    debugLog('[Realtime] Channel closed');
                    /* Reconnect only for unintentional closes: state is not
                       already 'disconnected', a user is still present, and no
                       timer was already armed by a preceding CHANNEL_ERROR
                       (otherwise we'd stack duplicate reconnect timers). */
                    if (state.connectionState !== 'disconnected' && state.userId && !reconnectScheduled) {
                        setConnectionState('disconnected');
                        scheduleReconnect();
                    }
                    else if (isDebugMode()) {
                        debugLog(`[Realtime] CLOSED reconnect suppressed: state=${state.connectionState}, userId=${!!state.userId}, reconnectScheduled=${reconnectScheduled}`);
                    }
                    break;
            }
        });
    }
    catch (startError) {
        debugError('[Realtime] Failed to start subscriptions:', startError);
        setConnectionState('error', startError instanceof Error ? startError.message : 'Failed to connect');
        scheduleReconnect();
    }
    finally {
        /* Always release the concurrency lock, even on error; otherwise a
           failed start would block every future start/stop forever. */
        operationInProgress = false;
    }
}
|
|
951
|
+
/**
 * Tear down realtime subscriptions and reset per-session state.
 *
 * Public teardown entry point: takes the shared operation lock, hands the
 * actual channel cleanup off to {@link stopRealtimeSubscriptionsInternal},
 * nulls out the stored user ID, and empties the dedup-tracking map.
 *
 * **When to call:** user logout or application shutdown. For a transient
 * network drop prefer {@link pauseRealtime}, which keeps the userId around
 * so the connection can resume on its own.
 *
 * @throws Never throws -- channel-removal failures are caught and logged.
 *
 * @example
 * ```ts
 * // On logout:
 * await stopRealtimeSubscriptions();
 * ```
 *
 * @see {@link startRealtimeSubscriptions} to re-establish the connection
 * @see {@link pauseRealtime} for temporary disconnection (offline)
 */
export async function stopRealtimeSubscriptions() {
    /* Bail out early if another start/stop sequence currently holds the
       concurrency lock -- overlapping teardowns are not allowed. */
    if (operationInProgress) {
        debugLog('[Realtime] Stop blocked: operation already in progress (concurrent start/stop guard)');
        return;
    }
    operationInProgress = true;
    try {
        await stopRealtimeSubscriptionsInternal();
        state.userId = null;
        /* Wipe the dedup map so no entry survives into another user's
           session on the same device -- a stale entry from user A could
           otherwise make user B's session skip a legitimate change. */
        recentlyProcessedByRealtime.clear();
    }
    finally {
        /* Release the lock no matter what happened above. */
        operationInProgress = false;
    }
}
|
|
992
|
+
/**
 * Pause realtime subscriptions when the browser goes offline.
 *
 * In contrast to {@link stopRealtimeSubscriptions}, `state.userId` is left
 * intact -- the user is still authenticated; the server is just
 * unreachable. Once connectivity returns, the sync engine re-invokes
 * {@link startRealtimeSubscriptions} with that same user ID.
 *
 * Effects:
 * - Any pending reconnect timer is cancelled.
 * - The reconnect attempt counter is zeroed, so the next online event
 *   starts with a fresh set of attempts.
 * - The connection state transitions to `'disconnected'`.
 *
 * **Why not stopRealtimeSubscriptionsInternal?** Offline transitions are
 * frequently momentary (a brief WiFi dropout). Keeping the userId and
 * skipping `removeChannel()` (which would try to push an unsubscribe over
 * an already-dead WebSocket) is both cheaper and immune to errors from
 * network calls made while offline.
 *
 * @see {@link ./engine.ts} -- invoked from the `offline` event handler
 */
export function pauseRealtime() {
    /* Drop the scheduled-reconnect flag and kill any live timer; a timer
       armed before the offline event would otherwise fire mid-outage and
       burn a reconnect attempt for nothing. */
    reconnectScheduled = false;
    if (state.reconnectTimeout) {
        clearTimeout(state.reconnectTimeout);
        state.reconnectTimeout = null;
    }
    /* Zero the counter so each online transition gets the whole budget of
       5 backoff attempts -- more forgiving on a flapping network than
       carrying the count over between outages. */
    state.reconnectAttempts = 0;
    setConnectionState('disconnected');
    debugLog('[Realtime] Paused - waiting for online event');
}
|
|
1031
|
+
//# sourceMappingURL=realtime.js.map
|