@prabhask5/stellar-engine 1.1.7 → 1.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -1
- package/dist/actions/remoteChange.d.ts +143 -18
- package/dist/actions/remoteChange.d.ts.map +1 -1
- package/dist/actions/remoteChange.js +182 -58
- package/dist/actions/remoteChange.js.map +1 -1
- package/dist/actions/truncateTooltip.d.ts +26 -12
- package/dist/actions/truncateTooltip.d.ts.map +1 -1
- package/dist/actions/truncateTooltip.js +89 -34
- package/dist/actions/truncateTooltip.js.map +1 -1
- package/dist/auth/admin.d.ts +40 -3
- package/dist/auth/admin.d.ts.map +1 -1
- package/dist/auth/admin.js +45 -5
- package/dist/auth/admin.js.map +1 -1
- package/dist/auth/crypto.d.ts +55 -5
- package/dist/auth/crypto.d.ts.map +1 -1
- package/dist/auth/crypto.js +58 -5
- package/dist/auth/crypto.js.map +1 -1
- package/dist/auth/deviceVerification.d.ts +236 -20
- package/dist/auth/deviceVerification.d.ts.map +1 -1
- package/dist/auth/deviceVerification.js +293 -40
- package/dist/auth/deviceVerification.js.map +1 -1
- package/dist/auth/displayUtils.d.ts +98 -0
- package/dist/auth/displayUtils.d.ts.map +1 -0
- package/dist/auth/displayUtils.js +133 -0
- package/dist/auth/displayUtils.js.map +1 -0
- package/dist/auth/loginGuard.d.ts +108 -14
- package/dist/auth/loginGuard.d.ts.map +1 -1
- package/dist/auth/loginGuard.js +153 -31
- package/dist/auth/loginGuard.js.map +1 -1
- package/dist/auth/offlineCredentials.d.ts +132 -15
- package/dist/auth/offlineCredentials.d.ts.map +1 -1
- package/dist/auth/offlineCredentials.js +167 -23
- package/dist/auth/offlineCredentials.js.map +1 -1
- package/dist/auth/offlineLogin.d.ts +96 -10
- package/dist/auth/offlineLogin.d.ts.map +1 -1
- package/dist/auth/offlineLogin.js +82 -15
- package/dist/auth/offlineLogin.js.map +1 -1
- package/dist/auth/offlineSession.d.ts +83 -9
- package/dist/auth/offlineSession.d.ts.map +1 -1
- package/dist/auth/offlineSession.js +104 -13
- package/dist/auth/offlineSession.js.map +1 -1
- package/dist/auth/resolveAuthState.d.ts +70 -8
- package/dist/auth/resolveAuthState.d.ts.map +1 -1
- package/dist/auth/resolveAuthState.js +142 -46
- package/dist/auth/resolveAuthState.js.map +1 -1
- package/dist/auth/singleUser.d.ts +390 -37
- package/dist/auth/singleUser.d.ts.map +1 -1
- package/dist/auth/singleUser.js +500 -99
- package/dist/auth/singleUser.js.map +1 -1
- package/dist/bin/install-pwa.d.ts +18 -2
- package/dist/bin/install-pwa.d.ts.map +1 -1
- package/dist/bin/install-pwa.js +801 -25
- package/dist/bin/install-pwa.js.map +1 -1
- package/dist/config.d.ts +132 -12
- package/dist/config.d.ts.map +1 -1
- package/dist/config.js +87 -9
- package/dist/config.js.map +1 -1
- package/dist/conflicts.d.ts +246 -23
- package/dist/conflicts.d.ts.map +1 -1
- package/dist/conflicts.js +495 -46
- package/dist/conflicts.js.map +1 -1
- package/dist/data.d.ts +338 -18
- package/dist/data.d.ts.map +1 -1
- package/dist/data.js +385 -34
- package/dist/data.js.map +1 -1
- package/dist/database.d.ts +72 -14
- package/dist/database.d.ts.map +1 -1
- package/dist/database.js +120 -29
- package/dist/database.js.map +1 -1
- package/dist/debug.d.ts +77 -1
- package/dist/debug.d.ts.map +1 -1
- package/dist/debug.js +88 -1
- package/dist/debug.js.map +1 -1
- package/dist/deviceId.d.ts +38 -7
- package/dist/deviceId.d.ts.map +1 -1
- package/dist/deviceId.js +68 -10
- package/dist/deviceId.js.map +1 -1
- package/dist/engine.d.ts +175 -3
- package/dist/engine.d.ts.map +1 -1
- package/dist/engine.js +756 -109
- package/dist/engine.js.map +1 -1
- package/dist/entries/actions.d.ts +13 -0
- package/dist/entries/actions.d.ts.map +1 -1
- package/dist/entries/actions.js +26 -1
- package/dist/entries/actions.js.map +1 -1
- package/dist/entries/auth.d.ts +16 -0
- package/dist/entries/auth.d.ts.map +1 -1
- package/dist/entries/auth.js +73 -1
- package/dist/entries/auth.js.map +1 -1
- package/dist/entries/config.d.ts +12 -0
- package/dist/entries/config.d.ts.map +1 -1
- package/dist/entries/config.js +18 -1
- package/dist/entries/config.js.map +1 -1
- package/dist/entries/kit.d.ts +11 -0
- package/dist/entries/kit.d.ts.map +1 -1
- package/dist/entries/kit.js +52 -2
- package/dist/entries/kit.js.map +1 -1
- package/dist/entries/stores.d.ts +11 -0
- package/dist/entries/stores.d.ts.map +1 -1
- package/dist/entries/stores.js +43 -2
- package/dist/entries/stores.js.map +1 -1
- package/dist/entries/types.d.ts +10 -0
- package/dist/entries/types.d.ts.map +1 -1
- package/dist/entries/types.js +10 -0
- package/dist/entries/types.js.map +1 -1
- package/dist/entries/utils.d.ts +6 -0
- package/dist/entries/utils.d.ts.map +1 -1
- package/dist/entries/utils.js +22 -1
- package/dist/entries/utils.js.map +1 -1
- package/dist/entries/vite.d.ts +17 -0
- package/dist/entries/vite.d.ts.map +1 -1
- package/dist/entries/vite.js +24 -1
- package/dist/entries/vite.js.map +1 -1
- package/dist/index.d.ts +31 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +175 -20
- package/dist/index.js.map +1 -1
- package/dist/kit/auth.d.ts +60 -5
- package/dist/kit/auth.d.ts.map +1 -1
- package/dist/kit/auth.js +45 -4
- package/dist/kit/auth.js.map +1 -1
- package/dist/kit/confirm.d.ts +93 -12
- package/dist/kit/confirm.d.ts.map +1 -1
- package/dist/kit/confirm.js +103 -16
- package/dist/kit/confirm.js.map +1 -1
- package/dist/kit/loads.d.ts +150 -23
- package/dist/kit/loads.d.ts.map +1 -1
- package/dist/kit/loads.js +140 -24
- package/dist/kit/loads.js.map +1 -1
- package/dist/kit/server.d.ts +142 -10
- package/dist/kit/server.d.ts.map +1 -1
- package/dist/kit/server.js +158 -15
- package/dist/kit/server.js.map +1 -1
- package/dist/kit/sw.d.ts +152 -23
- package/dist/kit/sw.d.ts.map +1 -1
- package/dist/kit/sw.js +182 -26
- package/dist/kit/sw.js.map +1 -1
- package/dist/queue.d.ts +274 -0
- package/dist/queue.d.ts.map +1 -1
- package/dist/queue.js +556 -38
- package/dist/queue.js.map +1 -1
- package/dist/realtime.d.ts +241 -27
- package/dist/realtime.d.ts.map +1 -1
- package/dist/realtime.js +633 -109
- package/dist/realtime.js.map +1 -1
- package/dist/runtime/runtimeConfig.d.ts +91 -8
- package/dist/runtime/runtimeConfig.d.ts.map +1 -1
- package/dist/runtime/runtimeConfig.js +146 -19
- package/dist/runtime/runtimeConfig.js.map +1 -1
- package/dist/stores/authState.d.ts +150 -11
- package/dist/stores/authState.d.ts.map +1 -1
- package/dist/stores/authState.js +169 -17
- package/dist/stores/authState.js.map +1 -1
- package/dist/stores/network.d.ts +39 -0
- package/dist/stores/network.d.ts.map +1 -1
- package/dist/stores/network.js +169 -16
- package/dist/stores/network.js.map +1 -1
- package/dist/stores/remoteChanges.d.ts +327 -52
- package/dist/stores/remoteChanges.d.ts.map +1 -1
- package/dist/stores/remoteChanges.js +337 -75
- package/dist/stores/remoteChanges.js.map +1 -1
- package/dist/stores/sync.d.ts +130 -0
- package/dist/stores/sync.d.ts.map +1 -1
- package/dist/stores/sync.js +167 -7
- package/dist/stores/sync.js.map +1 -1
- package/dist/supabase/auth.d.ts +325 -18
- package/dist/supabase/auth.d.ts.map +1 -1
- package/dist/supabase/auth.js +374 -26
- package/dist/supabase/auth.js.map +1 -1
- package/dist/supabase/client.d.ts +79 -6
- package/dist/supabase/client.d.ts.map +1 -1
- package/dist/supabase/client.js +158 -15
- package/dist/supabase/client.js.map +1 -1
- package/dist/supabase/validate.d.ts +101 -7
- package/dist/supabase/validate.d.ts.map +1 -1
- package/dist/supabase/validate.js +117 -8
- package/dist/supabase/validate.js.map +1 -1
- package/dist/sw/build/vite-plugin.d.ts +55 -10
- package/dist/sw/build/vite-plugin.d.ts.map +1 -1
- package/dist/sw/build/vite-plugin.js +77 -18
- package/dist/sw/build/vite-plugin.js.map +1 -1
- package/dist/sw/sw.js +99 -44
- package/dist/types.d.ts +150 -26
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +12 -10
- package/dist/types.js.map +1 -1
- package/dist/utils.d.ts +55 -13
- package/dist/utils.d.ts.map +1 -1
- package/dist/utils.js +83 -22
- package/dist/utils.js.map +1 -1
- package/package.json +1 -1
package/dist/realtime.js
CHANGED
|
@@ -1,18 +1,79 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* Real-Time Subscription Manager
|
|
2
|
+
* @fileoverview Real-Time Subscription Manager -- Supabase Realtime WebSocket Layer
|
|
3
3
|
*
|
|
4
4
|
* Phase 5 of multi-device sync: Implements Supabase Realtime subscriptions
|
|
5
5
|
* for instant multi-device synchronization.
|
|
6
6
|
*
|
|
7
|
-
*
|
|
8
|
-
*
|
|
9
|
-
*
|
|
10
|
-
*
|
|
11
|
-
*
|
|
12
|
-
*
|
|
13
|
-
*
|
|
14
|
-
*
|
|
15
|
-
*
|
|
7
|
+
* ## Architecture
|
|
8
|
+
*
|
|
9
|
+
* This module manages a single Supabase Realtime channel per authenticated user,
|
|
10
|
+
* listening for PostgreSQL changes (INSERT, UPDATE, DELETE) across all configured
|
|
11
|
+
* entity tables. When a change arrives from another device, it is applied to the
|
|
12
|
+
* local Dexie (IndexedDB) store and subscribers are notified so the UI can react.
|
|
13
|
+
*
|
|
14
|
+
* ```
|
|
15
|
+
* Supabase Postgres --(CDC)--> Supabase Realtime Server
|
|
16
|
+
* |
|
|
17
|
+
* WebSocket
|
|
18
|
+
* |
|
|
19
|
+
* This module
|
|
20
|
+
* |
|
|
21
|
+
* +-------------+-------------+
|
|
22
|
+
* | |
|
|
23
|
+
* Local Dexie DB UI Notification
|
|
24
|
+
* (conflict-resolved) (animation / refresh)
|
|
25
|
+
* ```
|
|
26
|
+
*
|
|
27
|
+
* ## Echo Suppression
|
|
28
|
+
*
|
|
29
|
+
* Every write to Supabase includes a `device_id` field. When a realtime event
|
|
30
|
+
* arrives, we compare its `device_id` against our own. If they match, the event
|
|
31
|
+
* originated from this device and is silently discarded. This prevents the
|
|
32
|
+
* "echo" problem where a device processes its own outgoing changes a second time.
|
|
33
|
+
*
|
|
34
|
+
* ## Deduplication with Polling
|
|
35
|
+
*
|
|
36
|
+
* The sync engine also runs periodic polling as a fallback. To prevent the same
|
|
37
|
+
* remote change from being applied twice (once via realtime, once via poll), this
|
|
38
|
+
* module maintains a short-lived `recentlyProcessedByRealtime` map. The polling
|
|
39
|
+
* path in `engine.ts` checks this map before processing a change.
|
|
40
|
+
*
|
|
41
|
+
* ## Reconnection Strategy
|
|
42
|
+
*
|
|
43
|
+
* On WebSocket disconnection the module uses exponential backoff (1s, 2s, 4s, ...)
|
|
44
|
+
* up to {@link MAX_RECONNECT_ATTEMPTS} (5) attempts. If the browser is offline,
|
|
45
|
+
* reconnection is paused entirely -- no timers fire until a `navigator.onLine`
|
|
46
|
+
* event restores connectivity. A `reconnectScheduled` flag prevents duplicate
|
|
47
|
+
* reconnection timers from stacking up when multiple channel events fire in
|
|
48
|
+
* quick succession.
|
|
49
|
+
*
|
|
50
|
+
* ## Soft Deletes and Animations
|
|
51
|
+
*
|
|
52
|
+
* When a soft delete is detected (UPDATE with `deleted=true`), the module
|
|
53
|
+
* records the deletion in {@link remoteChangesStore} *before* writing to Dexie.
|
|
54
|
+
* This ordering is intentional: it allows the UI layer to play a removal
|
|
55
|
+
* animation before the reactive store filters out the deleted record.
|
|
56
|
+
*
|
|
57
|
+
* ## Security Considerations
|
|
58
|
+
*
|
|
59
|
+
* - **Row-Level Security (RLS):** No client-side user ID filter is applied to
|
|
60
|
+
* the channel subscription. All access control is enforced by Supabase RLS
|
|
61
|
+
* policies at the database level. This is a deliberate security decision:
|
|
62
|
+
* client-side filters can be bypassed, whereas RLS operates inside Postgres
|
|
63
|
+
* and cannot be circumvented by a malicious client.
|
|
64
|
+
* - **Device ID trust boundary:** The `device_id` field is used only for echo
|
|
65
|
+
* suppression and conflict tiebreaking, **not** for authorization. A spoofed
|
|
66
|
+
* `device_id` could cause an event to be incorrectly suppressed on another
|
|
67
|
+
* device, but it cannot escalate privileges or access unauthorized data.
|
|
68
|
+
* - **Channel naming:** The channel name includes the user ID to ensure
|
|
69
|
+
* Supabase routes CDC events correctly. This is a routing hint, not a
|
|
70
|
+
* security boundary -- RLS is the actual enforcement mechanism.
|
|
71
|
+
*
|
|
72
|
+
* @see {@link ./engine.ts} for the orchestrating sync engine and polling loop
|
|
73
|
+
* @see {@link ./conflicts.ts} for the conflict resolution algorithm
|
|
74
|
+
* @see {@link ./queue.ts} for the pending operations queue
|
|
75
|
+
* @see {@link ./stores/remoteChanges.ts} for UI change-tracking and animations
|
|
76
|
+
* @see {@link ./deviceId.ts} for per-device identity generation
|
|
16
77
|
*/
|
|
17
78
|
import { debugLog, debugWarn, debugError } from './debug';
|
|
18
79
|
import { getEngineConfig, getDexieTableFor } from './config';
|
|
@@ -20,11 +81,60 @@ import { getDeviceId } from './deviceId';
|
|
|
20
81
|
import { resolveConflicts, storeConflictHistory, getPendingOpsForEntity } from './conflicts';
|
|
21
82
|
import { getPendingEntityIds } from './queue';
|
|
22
83
|
import { remoteChangesStore } from './stores/remoteChanges';
|
|
23
|
-
//
|
|
84
|
+
// =============================================================================
|
|
85
|
+
// CONSTANTS
|
|
86
|
+
// =============================================================================
|
|
87
|
+
/**
|
|
88
|
+
* How long (in ms) a processed entity is considered "recent."
|
|
89
|
+
* Must match the TTL used in engine.ts for `recentlyModifiedEntities`
|
|
90
|
+
* so that the deduplication windows overlap correctly.
|
|
91
|
+
*
|
|
92
|
+
* **Why 2 seconds?** This window must be long enough to span the typical
|
|
93
|
+
* latency gap between a realtime WebSocket push and the next polling cycle.
|
|
94
|
+
* If the poll fires within 2s of the realtime event, the entity will still
|
|
95
|
+
* be in the dedup map and the poll result will be skipped.
|
|
96
|
+
*
|
|
97
|
+
* @see {@link ./engine.ts} -- `RECENTLY_MODIFIED_TTL_MS`
|
|
98
|
+
*/
|
|
24
99
|
const RECENTLY_MODIFIED_TTL_MS = 2000;
|
|
25
|
-
|
|
26
|
-
|
|
100
|
+
/**
|
|
101
|
+
* Maximum number of reconnection attempts before the module gives up
|
|
102
|
+
* and falls back to polling-only mode.
|
|
103
|
+
*
|
|
104
|
+
* **Why 5?** With exponential backoff (1s, 2s, 4s, 8s, 16s) the total
|
|
105
|
+
* wait before giving up is ~31 seconds, which covers most transient
|
|
106
|
+
* network hiccups without annoying the user with prolonged retry noise.
|
|
107
|
+
*/
|
|
108
|
+
const MAX_RECONNECT_ATTEMPTS = 5;
|
|
109
|
+
/**
|
|
110
|
+
* Base delay for exponential backoff between reconnection attempts.
|
|
111
|
+
* Actual delay = RECONNECT_BASE_DELAY * 2^(attemptIndex).
|
|
112
|
+
*/
|
|
113
|
+
const RECONNECT_BASE_DELAY = 1000;
|
|
114
|
+
// =============================================================================
|
|
115
|
+
// MODULE-LEVEL STATE
|
|
116
|
+
// =============================================================================
|
|
117
|
+
/**
|
|
118
|
+
* Tracks entities that realtime has recently processed, keyed by entity ID
|
|
119
|
+
* with the timestamp of processing as the value.
|
|
120
|
+
*
|
|
121
|
+
* This is intentionally separate from `engine.ts`'s `recentlyModifiedEntities`
|
|
122
|
+
* (which tracks *local* writes). This map tracks *remote* changes received via
|
|
123
|
+
* WebSocket so that the polling path can skip them.
|
|
124
|
+
*
|
|
125
|
+
* **Memory note:** Entries are lazily evicted on read (see {@link wasRecentlyProcessed})
|
|
126
|
+
* and actively cleaned by {@link cleanupRealtimeTracking}. In the worst case the map
|
|
127
|
+
* holds one entry per entity modified within the last {@link RECENTLY_MODIFIED_TTL_MS}.
|
|
128
|
+
*/
|
|
27
129
|
const recentlyProcessedByRealtime = new Map();
|
|
130
|
+
/**
|
|
131
|
+
* Singleton state instance. Initialized to a clean "disconnected" baseline.
|
|
132
|
+
*
|
|
133
|
+
* **Why a singleton?** A browser tab should never have more than one WebSocket
|
|
134
|
+
* connection to Supabase Realtime for the same user. Multiple connections would
|
|
135
|
+
* cause duplicate event processing and wasted bandwidth. The singleton pattern
|
|
136
|
+
* enforces this at the module level.
|
|
137
|
+
*/
|
|
28
138
|
const state = {
|
|
29
139
|
channel: null,
|
|
30
140
|
connectionState: 'disconnected',
|
|
@@ -34,43 +144,140 @@ const state = {
|
|
|
34
144
|
reconnectAttempts: 0,
|
|
35
145
|
reconnectTimeout: null
|
|
36
146
|
};
|
|
37
|
-
//
|
|
147
|
+
// =============================================================================
|
|
148
|
+
// CALLBACK REGISTRIES
|
|
149
|
+
// =============================================================================
|
|
150
|
+
/**
|
|
151
|
+
* Registered listeners that fire whenever the connection state transitions.
|
|
152
|
+
* Each callback receives the new {@link RealtimeConnectionState}.
|
|
153
|
+
*
|
|
154
|
+
* **Why a Set?** Using a `Set` ensures the same callback reference cannot be
|
|
155
|
+
* registered twice, which prevents duplicate notifications if consumer code
|
|
156
|
+
* accidentally calls `onConnectionStateChange` more than once with the same fn.
|
|
157
|
+
*/
|
|
38
158
|
const connectionCallbacks = new Set();
|
|
159
|
+
/**
|
|
160
|
+
* Registered listeners that fire after a remote change has been applied to the
|
|
161
|
+
* local Dexie database. Consumers (e.g. Svelte stores) use this to trigger
|
|
162
|
+
* reactive re-queries.
|
|
163
|
+
*
|
|
164
|
+
* **Ordering guarantee:** Callbacks are invoked *after* the Dexie write has
|
|
165
|
+
* completed, so any re-query inside the callback will return the updated data.
|
|
166
|
+
*/
|
|
39
167
|
const dataUpdateCallbacks = new Set();
|
|
40
|
-
//
|
|
41
|
-
|
|
42
|
-
//
|
|
43
|
-
|
|
44
|
-
|
|
168
|
+
// =============================================================================
|
|
169
|
+
// CONCURRENCY GUARDS
|
|
170
|
+
// =============================================================================
|
|
171
|
+
/**
|
|
172
|
+
* Mutex-like flag preventing concurrent `start` / `stop` operations.
|
|
173
|
+
* Because channel setup and teardown are async, overlapping calls could leave
|
|
174
|
+
* the module in an inconsistent state without this guard.
|
|
175
|
+
*
|
|
176
|
+
* **Not a true mutex:** This is a cooperative lock -- it relies on callers
|
|
177
|
+
* checking the flag and bailing out. Since JavaScript is single-threaded,
|
|
178
|
+
* there is no race between the check and the set, making this safe.
|
|
179
|
+
*/
|
|
45
180
|
let operationInProgress = false;
|
|
46
|
-
|
|
181
|
+
/**
|
|
182
|
+
* Prevents duplicate reconnection timers from being scheduled.
|
|
183
|
+
* Supabase may emit both `CHANNEL_ERROR` and `CLOSED` events for the same
|
|
184
|
+
* disconnection; without this flag each event would schedule its own timer.
|
|
185
|
+
*
|
|
186
|
+
* **Reset points:** This flag is cleared in three places:
|
|
187
|
+
* 1. Inside the setTimeout callback (normal reconnect flow)
|
|
188
|
+
* 2. In {@link stopRealtimeSubscriptionsInternal} (teardown)
|
|
189
|
+
* 3. In {@link pauseRealtime} (offline transition)
|
|
190
|
+
*/
|
|
47
191
|
let reconnectScheduled = false;
|
|
192
|
+
// =============================================================================
|
|
193
|
+
// PUBLIC API -- SUBSCRIPTION HOOKS
|
|
194
|
+
// =============================================================================
|
|
48
195
|
/**
|
|
49
|
-
* Subscribe to connection state changes
|
|
196
|
+
* Subscribe to connection state changes.
|
|
197
|
+
*
|
|
198
|
+
* The callback is invoked immediately with the current state upon registration,
|
|
199
|
+
* then again on every subsequent transition.
|
|
200
|
+
*
|
|
201
|
+
* @param callback - Function invoked with the new {@link RealtimeConnectionState}.
|
|
202
|
+
* @returns An unsubscribe function. Call it to remove the listener.
|
|
203
|
+
*
|
|
204
|
+
* @example
|
|
205
|
+
* ```ts
|
|
206
|
+
* const unsub = onConnectionStateChange((state) => {
|
|
207
|
+
* if (state === 'error') showReconnectBanner();
|
|
208
|
+
* });
|
|
209
|
+
* // Later, to stop listening:
|
|
210
|
+
* unsub();
|
|
211
|
+
* ```
|
|
50
212
|
*/
|
|
51
213
|
export function onConnectionStateChange(callback) {
|
|
52
214
|
connectionCallbacks.add(callback);
|
|
53
|
-
|
|
215
|
+
/* Deliver the current state immediately so the subscriber doesn't have to
|
|
216
|
+
wait for the next transition to learn the baseline. This pattern is common
|
|
217
|
+
in observable/store implementations (e.g., Svelte stores call subscribers
|
|
218
|
+
on subscription). */
|
|
54
219
|
callback(state.connectionState);
|
|
55
220
|
return () => connectionCallbacks.delete(callback);
|
|
56
221
|
}
|
|
57
222
|
/**
|
|
58
|
-
* Subscribe to data update notifications
|
|
223
|
+
* Subscribe to data update notifications.
|
|
224
|
+
*
|
|
225
|
+
* Callbacks fire *after* the remote change has been written to the local Dexie
|
|
226
|
+
* database, so re-querying inside the callback will return fresh data.
|
|
227
|
+
*
|
|
228
|
+
* @param callback - Function invoked with the Supabase table name and entity ID.
|
|
229
|
+
* @returns An unsubscribe function. Call it to remove the listener.
|
|
230
|
+
*
|
|
231
|
+
* @example
|
|
232
|
+
* ```ts
|
|
233
|
+
* const unsub = onRealtimeDataUpdate((table, entityId) => {
|
|
234
|
+
* if (table === 'habits') refreshHabitStore();
|
|
235
|
+
* });
|
|
236
|
+
* ```
|
|
237
|
+
*
|
|
238
|
+
* @see {@link notifyDataUpdate} for the internal dispatch function
|
|
59
239
|
*/
|
|
60
240
|
export function onRealtimeDataUpdate(callback) {
|
|
61
241
|
dataUpdateCallbacks.add(callback);
|
|
62
242
|
return () => dataUpdateCallbacks.delete(callback);
|
|
63
243
|
}
|
|
244
|
+
// =============================================================================
|
|
245
|
+
// PUBLIC API -- STATE QUERIES
|
|
246
|
+
// =============================================================================
|
|
64
247
|
/**
|
|
65
|
-
* Get current realtime connection state.
|
|
66
|
-
*
|
|
248
|
+
* Get the current realtime connection state.
|
|
249
|
+
*
|
|
250
|
+
* Primarily used by debug utilities exposed on `window.__stellarDebug`.
|
|
251
|
+
*
|
|
252
|
+
* @returns The current {@link RealtimeConnectionState}.
|
|
253
|
+
*
|
|
254
|
+
* @see {@link ./debug.ts} for the debug surface that consumes this
|
|
67
255
|
*/
|
|
68
256
|
export function getConnectionState() {
|
|
69
257
|
return state.connectionState;
|
|
70
258
|
}
|
|
71
259
|
/**
|
|
72
|
-
* Check
|
|
73
|
-
*
|
|
260
|
+
* Check whether an entity was recently processed via a realtime event.
|
|
261
|
+
*
|
|
262
|
+
* Called by `engine.ts` during polling to avoid applying the same remote
|
|
263
|
+
* change twice (once from realtime, once from the poll response).
|
|
264
|
+
*
|
|
265
|
+
* **Side effect:** Expired entries are lazily evicted on access. This keeps
|
|
266
|
+
* the map from growing during bursts of activity, complementing the
|
|
267
|
+
* periodic cleanup in {@link cleanupRealtimeTracking}.
|
|
268
|
+
*
|
|
269
|
+
* @param entityId - The UUID of the entity to check.
|
|
270
|
+
* @returns `true` if the entity was processed within the last {@link RECENTLY_MODIFIED_TTL_MS} ms.
|
|
271
|
+
*
|
|
272
|
+
* @example
|
|
273
|
+
* ```ts
|
|
274
|
+
* if (wasRecentlyProcessedByRealtime(entity.id)) {
|
|
275
|
+
* // Skip -- realtime already handled this change
|
|
276
|
+
* continue;
|
|
277
|
+
* }
|
|
278
|
+
* ```
|
|
279
|
+
*
|
|
280
|
+
* @see {@link ./engine.ts} -- polling path
|
|
74
281
|
*/
|
|
75
282
|
export function wasRecentlyProcessedByRealtime(entityId) {
|
|
76
283
|
const processedAt = recentlyProcessedByRealtime.get(entityId);
|
|
@@ -84,7 +291,45 @@ export function wasRecentlyProcessedByRealtime(entityId) {
|
|
|
84
291
|
return true;
|
|
85
292
|
}
|
|
86
293
|
/**
|
|
87
|
-
*
|
|
294
|
+
* Check if the realtime connection is healthy (connected and not in an error state).
|
|
295
|
+
*
|
|
296
|
+
* @returns `true` when the WebSocket channel is in the `'connected'` state.
|
|
297
|
+
*/
|
|
298
|
+
export function isRealtimeHealthy() {
|
|
299
|
+
return state.connectionState === 'connected';
|
|
300
|
+
}
|
|
301
|
+
/**
|
|
302
|
+
* Remove expired entries from the recently-processed tracking map.
|
|
303
|
+
*
|
|
304
|
+
* Called periodically by the sync engine's maintenance loop to prevent
|
|
305
|
+
* unbounded memory growth in long-running sessions.
|
|
306
|
+
*
|
|
307
|
+
* **Why explicit cleanup?** Lazy eviction in {@link wasRecentlyProcessedByRealtime}
|
|
308
|
+
* only fires when an entity is looked up. If an entity is processed by realtime
|
|
309
|
+
* but never polled (e.g., a table not included in the current poll cycle),
|
|
310
|
+
* its entry would persist indefinitely without this active sweep.
|
|
311
|
+
*
|
|
312
|
+
* @see {@link RECENTLY_MODIFIED_TTL_MS}
|
|
313
|
+
*/
|
|
314
|
+
export function cleanupRealtimeTracking() {
|
|
315
|
+
const now = Date.now();
|
|
316
|
+
for (const [entityId, processedAt] of recentlyProcessedByRealtime) {
|
|
317
|
+
if (now - processedAt > RECENTLY_MODIFIED_TTL_MS) {
|
|
318
|
+
recentlyProcessedByRealtime.delete(entityId);
|
|
319
|
+
}
|
|
320
|
+
}
|
|
321
|
+
}
|
|
322
|
+
// =============================================================================
|
|
323
|
+
// INTERNAL HELPERS -- STATE NOTIFICATIONS
|
|
324
|
+
// =============================================================================
|
|
325
|
+
/**
|
|
326
|
+
* Transition the connection state and notify all registered listeners.
|
|
327
|
+
*
|
|
328
|
+
* Errors thrown by individual callbacks are caught and logged so that one
|
|
329
|
+
* misbehaving listener cannot break the notification chain.
|
|
330
|
+
*
|
|
331
|
+
* @param newState - The {@link RealtimeConnectionState} to transition to.
|
|
332
|
+
* @param error - Optional human-readable error message stored in {@link state.lastError}.
|
|
88
333
|
*/
|
|
89
334
|
function setConnectionState(newState, error) {
|
|
90
335
|
state.connectionState = newState;
|
|
@@ -94,12 +339,23 @@ function setConnectionState(newState, error) {
|
|
|
94
339
|
callback(newState);
|
|
95
340
|
}
|
|
96
341
|
catch (e) {
|
|
342
|
+
/* Catch-and-continue: a broken subscriber must not prevent other
|
|
343
|
+
subscribers from being notified, nor should it crash the realtime
|
|
344
|
+
lifecycle management. */
|
|
97
345
|
debugError('[Realtime] Connection callback error:', e);
|
|
98
346
|
}
|
|
99
347
|
}
|
|
100
348
|
}
|
|
101
349
|
/**
|
|
102
|
-
*
|
|
350
|
+
* Dispatch a data-update event to all registered subscribers.
|
|
351
|
+
*
|
|
352
|
+
* Called after a remote change has been written to Dexie. Errors thrown by
|
|
353
|
+
* individual callbacks are caught and logged.
|
|
354
|
+
*
|
|
355
|
+
* @param table - The Supabase table name where the change originated (e.g. `'habits'`).
|
|
356
|
+
* @param entityId - The UUID of the changed entity.
|
|
357
|
+
*
|
|
358
|
+
* @see {@link onRealtimeDataUpdate} for the public subscription API
|
|
103
359
|
*/
|
|
104
360
|
function notifyDataUpdate(table, entityId) {
|
|
105
361
|
debugLog(`[Realtime] Notifying ${dataUpdateCallbacks.size} subscribers of update: ${table}/${entityId}`);
|
|
@@ -112,8 +368,26 @@ function notifyDataUpdate(table, entityId) {
|
|
|
112
368
|
}
|
|
113
369
|
}
|
|
114
370
|
}
|
|
371
|
+
// =============================================================================
|
|
372
|
+
// INTERNAL HELPERS -- ECHO & DEDUP FILTERS
|
|
373
|
+
// =============================================================================
|
|
115
374
|
/**
|
|
116
|
-
*
|
|
375
|
+
* Determine whether a change event originated from this device.
|
|
376
|
+
*
|
|
377
|
+
* Supabase Realtime delivers *all* changes matching the channel filter,
|
|
378
|
+
* including changes made by the current device. We compare the `device_id`
|
|
379
|
+
* field in the payload against our own to suppress these "echoes."
|
|
380
|
+
*
|
|
381
|
+
* **Security note:** The `device_id` comparison is used purely for performance
|
|
382
|
+
* optimization (avoiding redundant local writes). It is **not** a security
|
|
383
|
+
* mechanism. A spoofed `device_id` could only cause an event to be skipped
|
|
384
|
+
* on the spoofing device -- it cannot grant access to other users' data
|
|
385
|
+
* because RLS enforces row-level access at the database level.
|
|
386
|
+
*
|
|
387
|
+
* @param record - The `new` record from the realtime payload, or `null`.
|
|
388
|
+
* @returns `true` if the record's `device_id` matches this device.
|
|
389
|
+
*
|
|
390
|
+
* @see {@link ./deviceId.ts} -- where the device identity is generated
|
|
117
391
|
*/
|
|
118
392
|
function isOwnDeviceChange(record) {
|
|
119
393
|
if (!record)
|
|
@@ -122,7 +396,20 @@ function isOwnDeviceChange(record) {
|
|
|
122
396
|
return recordDeviceId === state.deviceId;
|
|
123
397
|
}
|
|
124
398
|
/**
|
|
125
|
-
* Check if entity was recently processed by realtime
|
|
399
|
+
* Check if an entity was recently processed by this realtime handler.
|
|
400
|
+
*
|
|
401
|
+
* This is the *internal* counterpart of the exported
|
|
402
|
+
* {@link wasRecentlyProcessedByRealtime}. It is called inside
|
|
403
|
+
* {@link handleRealtimeChange} to short-circuit duplicate events that may
|
|
404
|
+
* arrive in rapid succession (e.g. due to Supabase retries).
|
|
405
|
+
*
|
|
406
|
+
* **Why a separate function?** The internal version is used in the hot path
|
|
407
|
+
* of change processing, while the exported version is used by the polling
|
|
408
|
+
* engine. Keeping them separate makes it clear which is the internal guard
|
|
409
|
+
* and which is the cross-module dedup check.
|
|
410
|
+
*
|
|
411
|
+
* @param entityId - The UUID of the entity to check.
|
|
412
|
+
* @returns `true` if the entity is within the deduplication window.
|
|
126
413
|
*/
|
|
127
414
|
function wasRecentlyProcessed(entityId) {
|
|
128
415
|
const processedAt = recentlyProcessedByRealtime.get(entityId);
|
|
@@ -135,8 +422,40 @@ function wasRecentlyProcessed(entityId) {
|
|
|
135
422
|
}
|
|
136
423
|
return true;
|
|
137
424
|
}
|
|
425
|
+
// =============================================================================
|
|
426
|
+
// CORE CHANGE HANDLER
|
|
427
|
+
// =============================================================================
|
|
138
428
|
/**
|
|
139
|
-
*
|
|
429
|
+
* Process an incoming realtime change event from Supabase.
|
|
430
|
+
*
|
|
431
|
+
* This is the central routing function for all realtime events. It:
|
|
432
|
+
* 1. Extracts the entity ID and event type from the payload.
|
|
433
|
+
* 2. Applies echo suppression and deduplication filters.
|
|
434
|
+
* 3. Looks up the matching Dexie table via the engine config.
|
|
435
|
+
* 4. Delegates to the appropriate branch: INSERT/UPDATE or DELETE.
|
|
436
|
+
* 5. Records the change in {@link remoteChangesStore} for UI animations.
|
|
437
|
+
* 6. Marks the entity as recently processed to prevent polling duplication.
|
|
438
|
+
* 7. Notifies data-update subscribers.
|
|
439
|
+
*
|
|
440
|
+
* For INSERT/UPDATE events with pending local operations, the function
|
|
441
|
+
* delegates to {@link resolveConflicts} to produce a merged entity.
|
|
442
|
+
*
|
|
443
|
+
* **Error handling:** All errors are caught at the top level and logged.
|
|
444
|
+
* A failure to process one event must not crash the WebSocket listener or
|
|
445
|
+
* prevent subsequent events from being handled.
|
|
446
|
+
*
|
|
447
|
+
* **Ordering contract with remoteChangesStore:**
|
|
448
|
+
* For delete operations (both soft and hard), the change is recorded in
|
|
449
|
+
* remoteChangesStore **before** writing to Dexie. This ordering is critical
|
|
450
|
+
* for exit animations -- see the soft delete and hard delete sections below.
|
|
451
|
+
*
|
|
452
|
+
* @param table - The Supabase table name (e.g. `'habits'`, `'entries'`).
|
|
453
|
+
* @param payload - The raw Supabase realtime change payload.
|
|
454
|
+
*
|
|
455
|
+
* @throws Never throws -- all errors are caught internally and logged.
|
|
456
|
+
*
|
|
457
|
+
* @see {@link resolveConflicts} for the conflict resolution algorithm
|
|
458
|
+
* @see {@link remoteChangesStore} for how the UI animates remote changes
|
|
140
459
|
*/
|
|
141
460
|
async function handleRealtimeChange(table, payload) {
|
|
142
461
|
const eventType = payload.eventType;
|
|
@@ -144,19 +463,26 @@ async function handleRealtimeChange(table, payload) {
|
|
|
144
463
|
const newRecord = payload.new;
|
|
145
464
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
146
465
|
const oldRecord = payload.old;
|
|
147
|
-
|
|
466
|
+
/* For DELETEs, Supabase only populates `old`; for INSERTs only `new`.
|
|
467
|
+
UPDATEs populate both. We need the ID from whichever is available. */
|
|
148
468
|
const entityId = (newRecord?.id || oldRecord?.id);
|
|
149
469
|
debugLog(`[Realtime] Received ${eventType} on ${table}:`, entityId);
|
|
150
470
|
if (!entityId) {
|
|
151
471
|
debugWarn('[Realtime] Change without entity ID:', table, eventType);
|
|
152
472
|
return;
|
|
153
473
|
}
|
|
154
|
-
|
|
474
|
+
/* ---- Echo suppression ----
|
|
475
|
+
Skip events that originated from this device. Without this check, every
|
|
476
|
+
local write would be processed a second time when the CDC event arrives
|
|
477
|
+
back through the WebSocket, causing redundant Dexie writes and UI flicker. */
|
|
155
478
|
if (isOwnDeviceChange(newRecord)) {
|
|
156
479
|
debugLog(`[Realtime] Skipping own device change: ${table}/${entityId}`);
|
|
157
480
|
return;
|
|
158
481
|
}
|
|
159
|
-
|
|
482
|
+
/* ---- Deduplication ----
|
|
483
|
+
Skip events for entities we already processed very recently. This guards
|
|
484
|
+
against Supabase delivering the same CDC event multiple times (which can
|
|
485
|
+
happen during server-side reconnection or rebalancing). */
|
|
160
486
|
if (wasRecentlyProcessed(entityId)) {
|
|
161
487
|
debugLog(`[Realtime] Skipping recently processed: ${table}/${entityId}`);
|
|
162
488
|
return;
|
|
@@ -170,106 +496,165 @@ async function handleRealtimeChange(table, payload) {
|
|
|
170
496
|
}
|
|
171
497
|
try {
|
|
172
498
|
switch (eventType) {
|
|
499
|
+
// -----------------------------------------------------------------------
|
|
500
|
+
// INSERT / UPDATE -- the bulk of the logic lives here
|
|
501
|
+
// -----------------------------------------------------------------------
|
|
173
502
|
case 'INSERT':
|
|
174
503
|
case 'UPDATE': {
|
|
175
504
|
if (!newRecord)
|
|
176
505
|
return;
|
|
177
|
-
|
|
506
|
+
/* Check if entity is being edited in a manual-save form. When true,
|
|
507
|
+
remoteChangesStore will defer the incoming change until the form is
|
|
508
|
+
closed, preventing jarring mid-edit overwrites. This is a UX decision:
|
|
509
|
+
we prioritize the active editing experience over instant sync. */
|
|
178
510
|
const _isBeingEdited = remoteChangesStore.isEditing(entityId, table);
|
|
179
|
-
|
|
511
|
+
/* Fetch the local version so we can diff fields and detect conflicts. */
|
|
180
512
|
const localEntity = await getEngineConfig().db.table(dexieTable).get(entityId);
|
|
181
|
-
|
|
513
|
+
/* Build a list of fields whose values actually differ between local
|
|
514
|
+
and remote. We skip metadata fields (updated_at, _version) because
|
|
515
|
+
they always change and would produce noisy animations. These fields
|
|
516
|
+
are managed by the sync engine, not the user, so highlighting them
|
|
517
|
+
would be misleading. */
|
|
182
518
|
const changedFields = [];
|
|
183
519
|
if (localEntity && newRecord) {
|
|
184
520
|
for (const key of Object.keys(newRecord)) {
|
|
185
521
|
if (key === 'updated_at' || key === '_version')
|
|
186
522
|
continue;
|
|
523
|
+
/* JSON.stringify comparison handles nested objects/arrays correctly.
|
|
524
|
+
For primitives it is equivalent to ===, with the caveat that
|
|
525
|
+
undefined fields are omitted (which is the desired behavior). */
|
|
187
526
|
if (JSON.stringify(localEntity[key]) !== JSON.stringify(newRecord[key])) {
|
|
188
527
|
changedFields.push(key);
|
|
189
528
|
}
|
|
190
529
|
}
|
|
191
530
|
}
|
|
192
|
-
|
|
193
|
-
|
|
531
|
+
/* ---- Soft delete detection ----
|
|
532
|
+
A soft delete manifests as an UPDATE where `deleted` transitions from
|
|
533
|
+
false to true. We handle this specially: the UI animation (fade-out)
|
|
534
|
+
must play BEFORE the record is written to Dexie, because reactive
|
|
535
|
+
stores will immediately filter out deleted records, removing the DOM
|
|
536
|
+
element and preventing any exit animation.
|
|
537
|
+
|
|
538
|
+
**Why not use CSS `animation-fill-mode: forwards`?** Because the DOM
|
|
539
|
+
element is removed entirely by the reactive framework (Svelte's
|
|
540
|
+
{#each} block), not just hidden. Once the Dexie write triggers a
|
|
541
|
+
store update, the element is gone from the DOM. */
|
|
194
542
|
const isSoftDelete = newRecord.deleted === true && localEntity && !localEntity.deleted;
|
|
195
543
|
if (isSoftDelete) {
|
|
196
544
|
debugLog(`[Realtime] Soft delete detected for ${table}/${entityId}`);
|
|
197
|
-
|
|
545
|
+
/* Record + await the delete animation before touching the DB.
|
|
546
|
+
The wildcard ['*'] signals the UI that the entire row is affected. */
|
|
198
547
|
remoteChangesStore.recordRemoteChange(entityId, table, ['*'], true, 'DELETE');
|
|
199
548
|
await remoteChangesStore.markPendingDelete(entityId, table);
|
|
200
|
-
|
|
549
|
+
/* Now persist the soft-deleted record; reactive stores refresh. */
|
|
201
550
|
await getEngineConfig().db.table(dexieTable).put(newRecord);
|
|
202
551
|
recentlyProcessedByRealtime.set(entityId, Date.now());
|
|
203
552
|
notifyDataUpdate(table, entityId);
|
|
204
553
|
break;
|
|
205
554
|
}
|
|
206
|
-
|
|
555
|
+
/* ---- Conflict resolution path ----
|
|
556
|
+
Three branches depending on local state:
|
|
557
|
+
1. No local entity -> simple insert (no conflict possible)
|
|
558
|
+
2. No pending ops -> accept remote if newer (last-write-wins)
|
|
559
|
+
3. Pending ops exist -> full conflict resolution via resolveConflicts
|
|
560
|
+
|
|
561
|
+
**Why check pending ops?** If the user has unsynced local changes,
|
|
562
|
+
blindly accepting the remote version would silently discard the
|
|
563
|
+
user's work. The conflict resolver preserves local pending changes
|
|
564
|
+
while incorporating non-conflicting remote updates. */
|
|
207
565
|
const pendingEntityIds = await getPendingEntityIds();
|
|
208
566
|
const hasPendingOps = pendingEntityIds.has(entityId);
|
|
209
567
|
let applied = false;
|
|
210
568
|
if (!localEntity) {
|
|
211
|
-
|
|
569
|
+
/* Branch 1: Entity doesn't exist locally -- just insert.
|
|
570
|
+
This happens when another device creates a new entity. */
|
|
212
571
|
await getEngineConfig().db.table(dexieTable).put(newRecord);
|
|
213
572
|
applied = true;
|
|
214
573
|
}
|
|
215
574
|
else if (!hasPendingOps) {
|
|
216
|
-
|
|
575
|
+
/* Branch 2: No unsynced local changes -- simple timestamp comparison.
|
|
576
|
+
Only overwrite if the remote timestamp is strictly newer. If the
|
|
577
|
+
local version is newer (possible if a local write just happened
|
|
578
|
+
but hasn't been pushed yet), we keep the local version to avoid
|
|
579
|
+
regressing the UI. */
|
|
217
580
|
const localUpdatedAt = new Date(localEntity.updated_at).getTime();
|
|
218
581
|
const remoteUpdatedAt = new Date(newRecord.updated_at).getTime();
|
|
219
582
|
if (remoteUpdatedAt > localUpdatedAt) {
|
|
220
|
-
// Remote is newer, accept it
|
|
221
583
|
await getEngineConfig().db.table(dexieTable).put(newRecord);
|
|
222
584
|
applied = true;
|
|
223
585
|
}
|
|
224
586
|
}
|
|
225
587
|
else {
|
|
226
|
-
|
|
588
|
+
/* Branch 3: Pending local operations exist -- we must merge.
|
|
589
|
+
The conflict resolver produces a merged entity that preserves
|
|
590
|
+
non-conflicting local edits while incorporating the remote state.
|
|
591
|
+
See conflicts.ts for the three-tier resolution algorithm. */
|
|
227
592
|
const pendingOps = await getPendingOpsForEntity(entityId);
|
|
228
593
|
const resolution = await resolveConflicts(table, entityId, localEntity, newRecord, pendingOps);
|
|
229
|
-
// Store merged entity
|
|
230
594
|
await getEngineConfig().db.table(dexieTable).put(resolution.mergedEntity);
|
|
231
595
|
applied = true;
|
|
232
|
-
|
|
596
|
+
/* Persist conflict history for auditability and potential undo.
|
|
597
|
+
Only stored when actual field-level conflicts were detected
|
|
598
|
+
(not for clean auto-merges). */
|
|
233
599
|
if (resolution.hasConflicts) {
|
|
234
600
|
await storeConflictHistory(resolution);
|
|
235
601
|
}
|
|
236
602
|
}
|
|
237
|
-
|
|
603
|
+
/* ---- Value delta for counter animations ----
|
|
604
|
+
If the `current_value` field changed, compute the delta so the UI
|
|
605
|
+
can show an increment/decrement animation (e.g. "+1" / "-3").
|
|
606
|
+
This is specific to counter-style entities (e.g., habit streaks,
|
|
607
|
+
goal progress). */
|
|
238
608
|
let valueDelta;
|
|
239
609
|
if (changedFields.includes('current_value') && localEntity && newRecord) {
|
|
240
610
|
const oldValue = localEntity.current_value || 0;
|
|
241
611
|
const newValue = newRecord.current_value || 0;
|
|
242
612
|
valueDelta = newValue - oldValue;
|
|
243
613
|
}
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
614
|
+
/* ---- UI change notification ----
|
|
615
|
+
Record the change in remoteChangesStore so the UI can highlight
|
|
616
|
+
the affected row / field. If the entity is currently being edited
|
|
617
|
+
in a form, the store defers the notification until editing ends.
|
|
618
|
+
|
|
619
|
+
We only notify when there are actual visible changes (changedFields > 0)
|
|
620
|
+
or when the entity is entirely new (!localEntity). This prevents
|
|
621
|
+
spurious highlight animations for metadata-only updates. */
|
|
247
622
|
if (changedFields.length > 0 || !localEntity) {
|
|
248
623
|
remoteChangesStore.recordRemoteChange(entityId, table, changedFields.length > 0 ? changedFields : ['*'], applied, eventType, valueDelta);
|
|
249
|
-
|
|
624
|
+
/* Fire the optional per-table hook so consumers can run custom
|
|
625
|
+
side-effects (e.g. toast notifications, badge updates). */
|
|
250
626
|
const tblConfig = getEngineConfig().tables.find((t) => t.supabaseName === table);
|
|
251
627
|
if (tblConfig?.onRemoteChange) {
|
|
252
628
|
tblConfig.onRemoteChange(table, newRecord);
|
|
253
629
|
}
|
|
254
630
|
}
|
|
255
|
-
|
|
631
|
+
/* Mark as recently processed so the polling path skips this entity.
|
|
632
|
+
This is the bridge between realtime and polling deduplication. */
|
|
256
633
|
recentlyProcessedByRealtime.set(entityId, Date.now());
|
|
257
|
-
// Notify subscribers
|
|
258
634
|
notifyDataUpdate(table, entityId);
|
|
259
635
|
break;
|
|
260
636
|
}
|
|
637
|
+
// -----------------------------------------------------------------------
|
|
638
|
+
// DELETE -- hard-delete path (rare in soft-delete systems)
|
|
639
|
+
// -----------------------------------------------------------------------
|
|
261
640
|
case 'DELETE': {
|
|
262
|
-
|
|
263
|
-
|
|
641
|
+
/* In a soft-delete system most deletions arrive as UPDATEs with
|
|
642
|
+
`deleted=true` (handled above). A hard DELETE is uncommon but must
|
|
643
|
+
still be handled for correctness -- it can occur when:
|
|
644
|
+
- An admin purges records directly in the database
|
|
645
|
+
- A scheduled cleanup job removes old soft-deleted rows
|
|
646
|
+
- The application uses hard deletes for certain entity types */
|
|
264
647
|
if (oldRecord) {
|
|
265
|
-
|
|
648
|
+
/* Record delete animation BEFORE removing from DB, same ordering
|
|
649
|
+
rationale as the soft-delete path above: the reactive framework
|
|
650
|
+
will remove the DOM element immediately on Dexie deletion, so
|
|
651
|
+
the animation must be set up first. */
|
|
266
652
|
remoteChangesStore.recordRemoteChange(entityId, table, ['*'], true, 'DELETE');
|
|
267
|
-
|
|
268
|
-
|
|
653
|
+
/* Wait for the pending-delete animation to complete so the UI has
|
|
654
|
+
time to play an exit transition before the DOM element disappears. */
|
|
269
655
|
await remoteChangesStore.markPendingDelete(entityId, table);
|
|
270
|
-
|
|
656
|
+
/* Now remove the record from Dexie (triggers reactive DOM removal). */
|
|
271
657
|
await getEngineConfig().db.table(dexieTable).delete(entityId);
|
|
272
|
-
// Mark as recently processed
|
|
273
658
|
recentlyProcessedByRealtime.set(entityId, Date.now());
|
|
274
659
|
notifyDataUpdate(table, entityId);
|
|
275
660
|
}
|
|
@@ -278,15 +663,38 @@ async function handleRealtimeChange(table, payload) {
|
|
|
278
663
|
}
|
|
279
664
|
}
|
|
280
665
|
catch (error) {
|
|
666
|
+
/* Top-level catch ensures one bad event never crashes the WebSocket
|
|
667
|
+
listener. The channel continues processing subsequent events. */
|
|
281
668
|
debugError(`[Realtime] Error handling ${eventType} on ${table}:`, error);
|
|
282
669
|
}
|
|
283
670
|
}
|
|
671
|
+
// =============================================================================
|
|
672
|
+
// RECONNECTION LOGIC
|
|
673
|
+
// =============================================================================
|
|
284
674
|
/**
|
|
285
|
-
* Schedule reconnection
|
|
286
|
-
*
|
|
675
|
+
* Schedule a reconnection attempt using exponential backoff.
|
|
676
|
+
*
|
|
677
|
+
* Behavior:
|
|
678
|
+
* - If the browser is offline (`navigator.onLine === false`), reconnection is
|
|
679
|
+
* skipped entirely. The sync engine's `online` event handler will re-trigger
|
|
680
|
+
* subscription start when connectivity returns.
|
|
681
|
+
* - If the maximum number of attempts has been reached, the module gives up and
|
|
682
|
+
* transitions to `'error'` state; the polling fallback remains active.
|
|
683
|
+
* - A `reconnectScheduled` flag prevents duplicate timers from being created
|
|
684
|
+
* when multiple channel events (e.g. CHANNEL_ERROR + CLOSED) fire in quick
|
|
685
|
+
* succession for the same disconnection.
|
|
686
|
+
*
|
|
687
|
+
* **Backoff schedule:** 1s, 2s, 4s, 8s, 16s (geometric progression).
|
|
688
|
+
* Total wait across all 5 attempts: ~31 seconds.
|
|
689
|
+
*
|
|
690
|
+
* @see {@link MAX_RECONNECT_ATTEMPTS}
|
|
691
|
+
* @see {@link RECONNECT_BASE_DELAY}
|
|
692
|
+
* @see {@link startRealtimeSubscriptions} -- called by the timer callback
|
|
287
693
|
*/
|
|
288
694
|
function scheduleReconnect() {
|
|
289
|
-
|
|
695
|
+
/* Guard: prevent duplicate scheduling from multiple event callbacks.
|
|
696
|
+
Supabase can emit CHANNEL_ERROR followed closely by CLOSED for the same
|
|
697
|
+
disconnection event; both would call this function without this guard. */
|
|
290
698
|
if (reconnectScheduled) {
|
|
291
699
|
return;
|
|
292
700
|
}
|
|
@@ -294,7 +702,9 @@ function scheduleReconnect() {
|
|
|
294
702
|
clearTimeout(state.reconnectTimeout);
|
|
295
703
|
state.reconnectTimeout = null;
|
|
296
704
|
}
|
|
297
|
-
|
|
705
|
+
/* No point burning reconnect attempts while the network is down.
|
|
706
|
+
The sync engine listens for the browser's `online` event and will call
|
|
707
|
+
startRealtimeSubscriptions() when connectivity returns. */
|
|
298
708
|
if (typeof navigator !== 'undefined' && !navigator.onLine) {
|
|
299
709
|
debugLog('[Realtime] Offline - waiting for online event to reconnect');
|
|
300
710
|
setConnectionState('disconnected');
|
|
@@ -306,11 +716,13 @@ function scheduleReconnect() {
|
|
|
306
716
|
return;
|
|
307
717
|
}
|
|
308
718
|
reconnectScheduled = true;
|
|
719
|
+
/* Exponential backoff: 1s, 2s, 4s, 8s, 16s */
|
|
309
720
|
const delay = RECONNECT_BASE_DELAY * Math.pow(2, state.reconnectAttempts);
|
|
310
721
|
debugLog(`[Realtime] Scheduling reconnect attempt ${state.reconnectAttempts + 1} in ${delay}ms`);
|
|
311
722
|
state.reconnectTimeout = setTimeout(async () => {
|
|
312
723
|
reconnectScheduled = false;
|
|
313
|
-
|
|
724
|
+
/* Re-check online status in case we went offline during the backoff wait.
|
|
725
|
+
This avoids wasting a reconnect attempt on a network that's now down. */
|
|
314
726
|
if (typeof navigator !== 'undefined' && !navigator.onLine) {
|
|
315
727
|
debugLog('[Realtime] Went offline during backoff, cancelling reconnect');
|
|
316
728
|
return;
|
|
@@ -321,22 +733,43 @@ function scheduleReconnect() {
|
|
|
321
733
|
}
|
|
322
734
|
}, delay);
|
|
323
735
|
}
|
|
736
|
+
// =============================================================================
|
|
737
|
+
// LIFECYCLE -- START / STOP / PAUSE
|
|
738
|
+
// =============================================================================
|
|
324
739
|
/**
|
|
325
|
-
* Internal
|
|
740
|
+
* Internal teardown: remove the channel and reset connection state.
|
|
741
|
+
*
|
|
742
|
+
* Does **not** acquire the `operationInProgress` lock -- callers are
|
|
743
|
+
* responsible for holding it. This allows {@link startRealtimeSubscriptions}
|
|
744
|
+
* to call it mid-operation without deadlocking.
|
|
745
|
+
*
|
|
746
|
+
* **Why separate from the public `stopRealtimeSubscriptions`?** The public
|
|
747
|
+
* version acquires the concurrency lock and clears session-level state (userId,
|
|
748
|
+
* tracking map). This internal version only handles the channel teardown,
|
|
749
|
+
* making it safe to call from within `startRealtimeSubscriptions` which
|
|
750
|
+
* already holds the lock.
|
|
751
|
+
*
|
|
752
|
+
* @see {@link stopRealtimeSubscriptions} -- the public API that wraps this
|
|
326
753
|
*/
|
|
327
754
|
async function stopRealtimeSubscriptionsInternal() {
|
|
328
|
-
|
|
755
|
+
/* Clear any pending reconnect timer and reset the scheduling flag.
|
|
756
|
+
If we don't clear these, a pending timer could fire after the channel
|
|
757
|
+
is torn down and attempt to reconnect with stale state. */
|
|
329
758
|
if (state.reconnectTimeout) {
|
|
330
759
|
clearTimeout(state.reconnectTimeout);
|
|
331
760
|
state.reconnectTimeout = null;
|
|
332
761
|
}
|
|
333
762
|
reconnectScheduled = false;
|
|
334
|
-
|
|
763
|
+
/* Unsubscribe from the Supabase channel. This sends an unsubscribe
|
|
764
|
+
message over the WebSocket and cleans up the local channel state. */
|
|
335
765
|
if (state.channel) {
|
|
336
766
|
try {
|
|
337
767
|
await getEngineConfig().supabase.removeChannel(state.channel);
|
|
338
768
|
}
|
|
339
769
|
catch (error) {
|
|
770
|
+
/* Log but don't rethrow -- we're tearing down and must continue
|
|
771
|
+
cleanup even if the server-side unsubscribe fails (e.g., the
|
|
772
|
+
WebSocket is already dead). */
|
|
340
773
|
debugError('[Realtime] Error removing channel:', error);
|
|
341
774
|
}
|
|
342
775
|
state.channel = null;
|
|
@@ -345,40 +778,91 @@ async function stopRealtimeSubscriptionsInternal() {
|
|
|
345
778
|
setConnectionState('disconnected');
|
|
346
779
|
}
|
|
347
780
|
/**
|
|
348
|
-
* Start realtime subscriptions for
|
|
781
|
+
* Start realtime subscriptions for an authenticated user.
|
|
782
|
+
*
|
|
783
|
+
* Creates a single Supabase Realtime channel and registers PostgreSQL change
|
|
784
|
+
* listeners for every table defined in the engine config.
|
|
785
|
+
*
|
|
786
|
+
* **Security:** Access control is enforced by Supabase RLS policies at the
|
|
787
|
+
* database level -- no client-side `user_id` filter is applied to the channel
|
|
788
|
+
* subscription. The Realtime server evaluates RLS policies for each CDC event
|
|
789
|
+
* and only delivers events the user is authorized to see.
|
|
790
|
+
*
|
|
791
|
+
* This function is idempotent: if the channel is already connected for the
|
|
792
|
+
* same user, it returns immediately. If a different user is provided, the
|
|
793
|
+
* existing channel is torn down first.
|
|
794
|
+
*
|
|
795
|
+
* **Channel multiplexing:** One channel is created for all tables rather than
|
|
796
|
+
* one per table. This is more efficient because Supabase multiplexes all
|
|
797
|
+
* subscriptions over a single WebSocket connection regardless, so separate
|
|
798
|
+
* channels would only add overhead without improving parallelism.
|
|
799
|
+
*
|
|
800
|
+
* @param userId - The authenticated user's UUID. Used to construct a unique
|
|
801
|
+
* channel name (`{prefix}_sync_{userId}`).
|
|
802
|
+
*
|
|
803
|
+
* @throws Never throws -- all errors are caught internally. On failure, the
|
|
804
|
+
* connection state transitions to `'error'` and reconnection is
|
|
805
|
+
* scheduled automatically.
|
|
806
|
+
*
|
|
807
|
+
* @example
|
|
808
|
+
* ```ts
|
|
809
|
+
* // After login:
|
|
810
|
+
* await startRealtimeSubscriptions(session.user.id);
|
|
811
|
+
* ```
|
|
812
|
+
*
|
|
813
|
+
* @see {@link stopRealtimeSubscriptions} to cleanly tear down the channel
|
|
814
|
+
* @see {@link getEngineConfig} for the table configuration consumed here
|
|
349
815
|
*/
|
|
350
816
|
export async function startRealtimeSubscriptions(userId) {
|
|
817
|
+
/* SSR guard: realtime requires a browser environment for WebSocket.
|
|
818
|
+
In SSR contexts (e.g., SvelteKit server-side rendering), `window` is
|
|
819
|
+
undefined and we must bail early to avoid runtime errors. */
|
|
351
820
|
if (typeof window === 'undefined')
|
|
352
821
|
return;
|
|
353
|
-
|
|
822
|
+
/* Don't attempt connection while offline; the sync engine's `online` event
|
|
823
|
+
will call us again when connectivity is restored. Attempting to connect
|
|
824
|
+
while offline would waste a reconnect attempt on an inevitable failure. */
|
|
354
825
|
if (!navigator.onLine) {
|
|
355
826
|
debugLog('[Realtime] Offline - skipping subscription start');
|
|
356
827
|
return;
|
|
357
828
|
}
|
|
358
|
-
|
|
829
|
+
/* Idempotency: skip if already connected for this user. This prevents
|
|
830
|
+
unnecessary channel teardown/recreation when the caller doesn't track
|
|
831
|
+
whether we're already connected. */
|
|
359
832
|
if (state.channel && state.userId === userId && state.connectionState === 'connected') {
|
|
360
833
|
return;
|
|
361
834
|
}
|
|
362
|
-
|
|
835
|
+
/* Concurrency guard: prevent overlapping start/stop sequences. Without
|
|
836
|
+
this, rapid login/logout cycles could interleave async channel operations
|
|
837
|
+
and leave the module in an inconsistent state. */
|
|
363
838
|
if (operationInProgress) {
|
|
364
839
|
debugLog('[Realtime] Operation already in progress, skipping');
|
|
365
840
|
return;
|
|
366
841
|
}
|
|
367
842
|
operationInProgress = true;
|
|
368
843
|
try {
|
|
369
|
-
|
|
844
|
+
/* Tear down any existing channel before creating a new one. This handles
|
|
845
|
+
the case where we're switching users (logout + login) or recovering
|
|
846
|
+
from an error state. */
|
|
370
847
|
await stopRealtimeSubscriptionsInternal();
|
|
371
848
|
state.userId = userId;
|
|
372
849
|
state.deviceId = getDeviceId();
|
|
373
850
|
setConnectionState('connecting');
|
|
374
851
|
const config = getEngineConfig();
|
|
375
852
|
const realtimeTables = config.tables.map((t) => t.supabaseName);
|
|
376
|
-
|
|
377
|
-
|
|
853
|
+
/* ---- Channel creation ----
|
|
854
|
+
One channel per user, listening to all configured tables. This is more
|
|
855
|
+
efficient than one channel per table because Supabase multiplexes all
|
|
856
|
+
subscriptions over a single WebSocket connection regardless. The channel
|
|
857
|
+
name includes the user ID to ensure uniqueness across browser tabs that
|
|
858
|
+
might have different users logged in. */
|
|
378
859
|
const channelName = `${config.prefix}_sync_${userId}`;
|
|
379
860
|
state.channel = config.supabase.channel(channelName);
|
|
380
|
-
|
|
381
|
-
|
|
861
|
+
/* ---- Register table listeners ----
|
|
862
|
+
We subscribe to `event: '*'` (INSERT, UPDATE, DELETE) on each table.
|
|
863
|
+
No `filter` parameter is used because RLS policies enforce row-level
|
|
864
|
+
security at the database level. Adding a client-side filter would be
|
|
865
|
+
redundant and could fall out of sync with the RLS policy definitions. */
|
|
382
866
|
debugLog(`[Realtime] Setting up subscriptions for ${realtimeTables.length} tables`);
|
|
383
867
|
for (const table of realtimeTables) {
|
|
384
868
|
state.channel = state.channel.on('postgres_changes', {
|
|
@@ -387,16 +871,24 @@ export async function startRealtimeSubscriptions(userId) {
|
|
|
387
871
|
table: table
|
|
388
872
|
}, (payload) => {
|
|
389
873
|
debugLog(`[Realtime] Raw payload received for ${table}:`, payload.eventType);
|
|
874
|
+
/* Fire-and-forget: the handler runs asynchronously. Errors are caught
|
|
875
|
+
inside handleRealtimeChange so they don't propagate to the Supabase
|
|
876
|
+
client's event loop. */
|
|
390
877
|
handleRealtimeChange(table, payload).catch((error) => {
|
|
391
878
|
debugError(`[Realtime] Error processing ${table} change:`, error);
|
|
392
879
|
});
|
|
393
880
|
});
|
|
394
881
|
}
|
|
395
|
-
|
|
882
|
+
/* ---- Activate the channel ----
|
|
883
|
+
The status callback handles lifecycle transitions. Note that Supabase
|
|
884
|
+
may emit multiple statuses for the same underlying event (e.g.,
|
|
885
|
+
CHANNEL_ERROR followed by CLOSED for a single disconnection). */
|
|
396
886
|
state.channel.subscribe((status, err) => {
|
|
397
887
|
switch (status) {
|
|
398
888
|
case 'SUBSCRIBED':
|
|
399
889
|
debugLog('[Realtime] Connected and subscribed');
|
|
890
|
+
/* Reset backoff counter on successful connection so the next
|
|
891
|
+
disconnection starts fresh with a 1s delay. */
|
|
400
892
|
state.reconnectAttempts = 0;
|
|
401
893
|
reconnectScheduled = false;
|
|
402
894
|
setConnectionState('connected');
|
|
@@ -415,10 +907,13 @@ export async function startRealtimeSubscriptions(userId) {
|
|
|
415
907
|
break;
|
|
416
908
|
case 'CLOSED':
|
|
417
909
|
debugLog('[Realtime] Channel closed');
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
910
|
+
/* Only reconnect if:
|
|
911
|
+
1. This wasn't an intentional disconnect (state would be 'disconnected').
|
|
912
|
+
2. We still have a user to reconnect for.
|
|
913
|
+
3. A reconnect isn't already scheduled (prevents duplicate timers
|
|
914
|
+
when CHANNEL_ERROR fires shortly before CLOSED).
|
|
915
|
+
Without condition 3, we would schedule two overlapping reconnect
|
|
916
|
+
timers for a single disconnection event. */
|
|
422
917
|
if (state.connectionState !== 'disconnected' && state.userId && !reconnectScheduled) {
|
|
423
918
|
setConnectionState('disconnected');
|
|
424
919
|
scheduleReconnect();
|
|
@@ -433,14 +928,35 @@ export async function startRealtimeSubscriptions(userId) {
|
|
|
433
928
|
scheduleReconnect();
|
|
434
929
|
}
|
|
435
930
|
finally {
|
|
931
|
+
/* Always release the concurrency lock, even on error. Without this,
|
|
932
|
+
a failed start would permanently block all future start/stop attempts. */
|
|
436
933
|
operationInProgress = false;
|
|
437
934
|
}
|
|
438
935
|
}
|
|
439
936
|
/**
|
|
440
|
-
* Stop realtime subscriptions
|
|
937
|
+
* Stop realtime subscriptions and clean up all state.
|
|
938
|
+
*
|
|
939
|
+
* This is the public-facing teardown API. It acquires the concurrency lock,
|
|
940
|
+
* delegates to {@link stopRealtimeSubscriptionsInternal}, clears the user ID,
|
|
941
|
+
* and wipes the recently-processed tracking map.
|
|
942
|
+
*
|
|
943
|
+
* **When to call:** On user logout or app shutdown. For temporary connectivity
|
|
944
|
+
* loss, use {@link pauseRealtime} instead (it preserves the userId so
|
|
945
|
+
* reconnection can resume automatically).
|
|
946
|
+
*
|
|
947
|
+
* @throws Never throws -- errors during channel removal are caught and logged.
|
|
948
|
+
*
|
|
949
|
+
* @example
|
|
950
|
+
* ```ts
|
|
951
|
+
* // On logout:
|
|
952
|
+
* await stopRealtimeSubscriptions();
|
|
953
|
+
* ```
|
|
954
|
+
*
|
|
955
|
+
* @see {@link startRealtimeSubscriptions} to re-establish the connection
|
|
956
|
+
* @see {@link pauseRealtime} for temporary disconnection (offline)
|
|
441
957
|
*/
|
|
442
958
|
export async function stopRealtimeSubscriptions() {
|
|
443
|
-
|
|
959
|
+
/* Concurrency guard: prevent overlapping start/stop sequences. */
|
|
444
960
|
if (operationInProgress) {
|
|
445
961
|
debugLog('[Realtime] Operation already in progress, skipping stop');
|
|
446
962
|
return;
|
|
@@ -449,7 +965,9 @@ export async function stopRealtimeSubscriptions() {
|
|
|
449
965
|
try {
|
|
450
966
|
await stopRealtimeSubscriptionsInternal();
|
|
451
967
|
state.userId = null;
|
|
452
|
-
|
|
968
|
+
/* Clear tracking so stale entries don't leak across sessions. Without
|
|
969
|
+
this, a dedup entry from user A's session could cause user B's session
|
|
970
|
+
(if they log in on the same device) to skip a legitimate change. */
|
|
453
971
|
recentlyProcessedByRealtime.clear();
|
|
454
972
|
}
|
|
455
973
|
finally {
|
|
@@ -457,36 +975,42 @@ export async function stopRealtimeSubscriptions() {
|
|
|
457
975
|
}
|
|
458
976
|
}
|
|
459
977
|
/**
|
|
460
|
-
* Pause realtime
|
|
461
|
-
*
|
|
978
|
+
* Pause realtime subscriptions when the browser goes offline.
|
|
979
|
+
*
|
|
980
|
+
* Unlike {@link stopRealtimeSubscriptions}, this does **not** clear
|
|
981
|
+
* `state.userId` -- the user is still authenticated, we just can't reach
|
|
982
|
+
* the server. When the browser comes back online, the sync engine calls
|
|
983
|
+
* {@link startRealtimeSubscriptions} with the same user ID.
|
|
984
|
+
*
|
|
985
|
+
* Key behaviors:
|
|
986
|
+
* - Cancels any pending reconnect timers.
|
|
987
|
+
* - Resets the reconnect attempt counter so we get a fresh set of attempts
|
|
988
|
+
* when connectivity returns.
|
|
989
|
+
* - Transitions to `'disconnected'` state.
|
|
990
|
+
*
|
|
991
|
+
* **Why not call stopRealtimeSubscriptionsInternal?** Because the offline
|
|
992
|
+
* transition is often transient (e.g., brief WiFi dropout). We want to
|
|
993
|
+
* preserve the userId and avoid the overhead of `removeChannel()` (which
|
|
994
|
+
* tries to send an unsubscribe message over the dead WebSocket). Simply
|
|
995
|
+
* clearing the reconnect state and transitioning to `'disconnected'` is
|
|
996
|
+
* faster and avoids potential errors from network calls during offline.
|
|
997
|
+
*
|
|
998
|
+
* @see {@link ./engine.ts} -- calls this from the `offline` event handler
|
|
462
999
|
*/
|
|
463
1000
|
export function pauseRealtime() {
    /* Drop any scheduled reconnect timer so it cannot fire while the
       browser is offline; a stale timer would burn a reconnect attempt
       on a network that is known to be down. */
    const pendingTimer = state.reconnectTimeout;
    if (pendingTimer) {
        clearTimeout(pendingTimer);
        state.reconnectTimeout = null;
    }
    reconnectScheduled = false;
    /* Give the next online transition a fresh backoff budget rather than
       carrying over attempts from before the offline period. */
    state.reconnectAttempts = 0;
    setConnectionState('disconnected');
    debugLog('[Realtime] Paused - waiting for online event');
}
|
|
475
|
-
/**
 * Report whether the realtime connection is currently healthy.
 *
 * "Healthy" means the channel is in the `'connected'` state; any other
 * state (`'connecting'`, `'disconnected'`, `'error'`) is unhealthy.
 *
 * @returns {boolean} True when the connection state is `'connected'`.
 */
export function isRealtimeHealthy() {
    const current = state.connectionState;
    return current === 'connected';
}
|
|
481
|
-
/**
 * Evict expired entries from the recently-processed dedup map.
 *
 * Entries older than {@link RECENTLY_MODIFIED_TTL_MS} are removed so the
 * map does not grow without bound over a long-lived session. Deleting
 * from a `Map` while iterating it with `for...of` is safe per the spec.
 */
export function cleanupRealtimeTracking() {
    /* Precompute the cutoff once instead of subtracting inside the loop. */
    const cutoff = Date.now() - RECENTLY_MODIFIED_TTL_MS;
    for (const [entityId, processedAt] of recentlyProcessedByRealtime.entries()) {
        if (processedAt < cutoff) {
            recentlyProcessedByRealtime.delete(entityId);
        }
    }
}
|
|
492
1016
|
//# sourceMappingURL=realtime.js.map
|