@electric-sql/client 1.0.14 → 1.1.1

This diff shows the content of publicly released package versions as it appears in their respective public registries, and is provided for informational purposes only.
package/src/client.ts CHANGED
@@ -44,6 +44,7 @@ import {
   FORCE_DISCONNECT_AND_REFRESH,
   PAUSE_STREAM,
   EXPERIMENTAL_LIVE_SSE_QUERY_PARAM,
+  LIVE_SSE_QUERY_PARAM,
   ELECTRIC_PROTOCOL_QUERY_PARAMS,
   LOG_MODE_QUERY_PARAM,
   SUBSET_PARAM_WHERE,
@@ -273,10 +274,15 @@ export interface ShapeStreamOptions<T = never> {
   subscribe?: boolean
 
   /**
-   * Experimental support for Server-Sent Events (SSE) for live updates.
+   * @deprecated No longer experimental, use {@link liveSse} instead.
    */
   experimentalLiveSse?: boolean
 
+  /**
+   * Use Server-Sent Events (SSE) for live updates.
+   */
+  liveSse?: boolean
+
   /**
    * Initial data loading mode
    */
@@ -372,7 +378,7 @@ function canonicalShapeKey(url: URL): string {
  * ```
  * const stream = new ShapeStream({
  *   url: `http://localhost:3000/v1/shape`,
- *   experimentalLiveSse: true
+ *   liveSse: true
  * })
  * ```
  *
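The updated docstring example, expanded into a runnable sketch. The endpoint and table name are placeholders, `params.table` and the `isChangeMessage` helper follow the existing 1.x client API, and `experimentalLiveSse` keeps working as a deprecated alias for `liveSse`:

import { ShapeStream, isChangeMessage } from '@electric-sql/client'

const stream = new ShapeStream({
  url: `http://localhost:3000/v1/shape`, // placeholder Electric endpoint
  params: { table: `items` },            // placeholder table
  liveSse: true,                         // was `experimentalLiveSse: true` before 1.1
})

stream.subscribe((messages) => {
  for (const message of messages) {
    if (isChangeMessage(message)) {
      console.log(message.headers.operation, message.value)
    }
  }
})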
@@ -598,7 +604,13 @@ export class ShapeStream<T extends Row<unknown> = Row>
           const newShapeHandle =
             e.headers[SHAPE_HANDLE_HEADER] || `${this.#shapeHandle!}-next`
           this.#reset(newShapeHandle)
-          await this.#publish(e.json as Message<T>[])
+
+          // The must-refetch control message may arrive on its own or in a
+          // list, depending on whether it came from an SSE request or a long
+          // poll. Handle both cases for safety here, but the 409 handling is
+          // worth revisiting.
+          await this.#publish(
+            (Array.isArray(e.json) ? e.json : [e.json]) as Message<T>[]
+          )
           return this.#requestShape()
         } else {
           // Notify subscribers
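Because a 409 can now surface the must-refetch control message either on its own (SSE) or in a list (long poll), the stream normalises it before publishing, so subscribers always receive an array. A hedged sketch of a subscriber relying on that; `isControlMessage` is the package's exported type guard, while the handler body is illustrative:

import { isControlMessage, type Message, type Row } from '@electric-sql/client'

// Illustrative subscriber: control messages can be filtered out the same way
// whether the batch came from an SSE event or a long-poll response.
function handleBatch(messages: Message<Row>[]): void {
  for (const message of messages) {
    if (isControlMessage(message) && message.headers.control === `must-refetch`) {
      console.log(`shape rotated - discard the local copy and refetch`)
    }
  }
}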
@@ -815,13 +827,15 @@ export class ShapeStream<T extends Row<unknown> = Row>
     headers: Record<string, string>
     resumingFromPause?: boolean
   }): Promise<void> {
+    const useSse = this.options.liveSse ?? this.options.experimentalLiveSse
     if (
       this.#isUpToDate &&
-      this.options.experimentalLiveSse &&
+      useSse &&
       !this.#isRefreshing &&
       !opts.resumingFromPause
     ) {
       opts.fetchUrl.searchParams.set(EXPERIMENTAL_LIVE_SSE_QUERY_PARAM, `true`)
+      opts.fetchUrl.searchParams.set(LIVE_SSE_QUERY_PARAM, `true`)
       return this.#requestShapeSSE(opts)
     }
 
package/src/constants.ts CHANGED
@@ -13,7 +13,11 @@ export const TABLE_QUERY_PARAM = `table`
 export const WHERE_QUERY_PARAM = `where`
 export const REPLICA_PARAM = `replica`
 export const WHERE_PARAMS_PARAM = `params`
+/**
+ * @deprecated Use {@link LIVE_SSE_QUERY_PARAM} instead.
+ */
 export const EXPERIMENTAL_LIVE_SSE_QUERY_PARAM = `experimental_live_sse`
+export const LIVE_SSE_QUERY_PARAM = `live_sse`
 export const FORCE_DISCONNECT_AND_REFRESH = `force-disconnect-and-refresh`
 export const PAUSE_STREAM = `pause-stream`
 export const LOG_MODE_QUERY_PARAM = `log`
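For reference, this is roughly what the two constants translate to on the wire; the shape URL and table are placeholders, and both parameters are set during the deprecation window, as the client.ts hunk above shows:

// Sketch only - not the client's internal request builder.
const fetchUrl = new URL(`http://localhost:3000/v1/shape?table=items`)
fetchUrl.searchParams.set(`experimental_live_sse`, `true`) // EXPERIMENTAL_LIVE_SSE_QUERY_PARAM (deprecated)
fetchUrl.searchParams.set(`live_sse`, `true`)              // LIVE_SSE_QUERY_PARAM (its replacement)

console.log(fetchUrl.toString())
// http://localhost:3000/v1/shape?table=items&experimental_live_sse=true&live_sse=true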
package/src/fetch.ts CHANGED
@@ -28,17 +28,39 @@ export interface BackoffOptions {
   initialDelay: number
   /**
    * Maximum retry delay in milliseconds
+   * After reaching this, delay stays constant (e.g., retry every 60s)
    */
   maxDelay: number
   multiplier: number
   onFailedAttempt?: () => void
   debug?: boolean
+  /**
+   * Maximum number of retry attempts before giving up.
+   * Set to Infinity (default) for indefinite retries - needed for offline scenarios
+   * where clients may go offline and come back later.
+   *
+   * The retry budget provides protection against retry storms even with infinite retries.
+   */
+  maxRetries?: number
+  /**
+   * Percentage of requests that can be retries (0.1 = 10%)
+   *
+   * This is the primary load shedding mechanism. It limits the *rate* of retries,
+   * not the total count. Even with infinite retries, at most 10% of your traffic
+   * will be retries, preventing retry storms from amplifying server load.
+   *
+   * The budget resets every 60 seconds, so a temporary spike of errors won't
+   * permanently exhaust the budget.
+   */
+  retryBudgetPercent?: number
 }
 
 export const BackoffDefaults = {
   initialDelay: 100,
-  maxDelay: 10_000,
+  maxDelay: 60_000, // Cap at 60s - reasonable for long-lived connections
   multiplier: 1.3,
+  maxRetries: Infinity, // Retry forever - clients may go offline and come back
+  retryBudgetPercent: 0.1, // 10% retry budget prevents amplification
 }
 
 export function createFetchWithBackoff(
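The new defaults raise the delay cap from 10s to 60s and add two knobs: maxRetries (default Infinity, matching the previous always-retry behaviour) and retryBudgetPercent, which throttles the rate of retries. A configuration sketch, assuming the two-argument createFetchWithBackoff(fetchClient, backoffOptions) shape implied by the surrounding code and that the helpers are importable from the package entry point (they are defined in src/fetch.ts); the option values are illustrative:

import { createFetchWithBackoff, BackoffDefaults } from '@electric-sql/client'

// Tighten the defaults for a short-lived task: finite retries, a lower delay
// cap, and the same 10% retry budget so failures can't amplify server load.
const fetchWithBackoff = createFetchWithBackoff(fetch, {
  ...BackoffDefaults,
  maxDelay: 30_000,        // wait at most 30s between attempts (default is now 60s)
  maxRetries: 20,          // give up after 20 attempts (default is Infinity)
  retryBudgetPercent: 0.1, // at most ~10% of attempts may be retries per minute
})

const response = await fetchWithBackoff(`http://localhost:3000/v1/shape?table=items`)
console.log(response.status)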
@@ -51,7 +73,38 @@ export function createFetchWithBackoff(
     multiplier,
     debug = false,
     onFailedAttempt,
+    maxRetries = Infinity,
+    retryBudgetPercent = 0.1,
   } = backoffOptions
+
+  // Retry budget tracking (closure-scoped)
+  // Resets every minute to prevent retry storms
+  let totalRequests = 0
+  let totalRetries = 0
+  let budgetResetTime = Date.now() + 60_000
+
+  function checkRetryBudget(percent: number): boolean {
+    const now = Date.now()
+    if (now > budgetResetTime) {
+      totalRequests = 0
+      totalRetries = 0
+      budgetResetTime = now + 60_000
+    }
+
+    totalRequests++
+
+    // Allow retries for first 10 requests to avoid cold start issues
+    if (totalRequests < 10) return true
+
+    const currentRetryRate = totalRetries / totalRequests
+    const hasCapacity = currentRetryRate < percent
+
+    if (hasCapacity) {
+      totalRetries++
+    }
+
+    return hasCapacity
+  }
   return async (...args: Parameters<typeof fetch>): Promise<Response> => {
     const url = args[0]
     const options = args[1]
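A standalone illustration of the budget arithmetic (not the package's code): every failed attempt asks the budget for permission, the first few attempts are exempt, and after that permission is granted only while the observed retry rate stays under the configured fraction.

// `checked` plays the role of totalRequests and `granted` of totalRetries
// within one 60-second window.
let checked = 0
let granted = 0
const percent = 0.1

function mayRetry(): boolean {
  checked++
  if (checked < 10) return true // cold-start exemption, not counted as a retry
  const hasCapacity = granted / checked < percent
  if (hasCapacity) granted++
  return hasCapacity
}

let allowed = 0
for (let i = 0; i < 100; i++) {
  if (mayRetry()) allowed++
}
console.log({ checked, granted, allowed })
// => { checked: 100, granted: 10, allowed: 19 }
// 9 exempt cold-start retries, then roughly one granted retry per ten failed attempts.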
@@ -59,16 +112,14 @@ export function createFetchWithBackoff(
     let delay = initialDelay
     let attempt = 0
 
-    /* eslint-disable no-constant-condition -- we re-fetch the shape log
-     * continuously until we get a non-ok response. For recoverable errors,
-     * we retry the fetch with exponential backoff. Users can pass in an
-     * AbortController to abort the fetching an any point.
-     * */
     while (true) {
-      /* eslint-enable no-constant-condition */
       try {
         const result = await fetchClient(...args)
-        if (result.ok) return result
+        if (result.ok) {
+          // Reset backoff on successful request
+          delay = initialDelay
+          return result
+        }
 
         const err = await FetchError.fromResponse(result, url.toString())
 
@@ -86,17 +137,77 @@ export function createFetchWithBackoff(
           // Any client errors cannot be backed off on, leave it to the caller to handle.
           throw e
         } else {
-          // Exponentially backoff on errors.
-          // Wait for the current delay duration
-          await new Promise((resolve) => setTimeout(resolve, delay))
+          // Check retry budget and max retries
+          attempt++
+          if (attempt >= maxRetries) {
+            if (debug) {
+              console.log(
+                `Max retries reached (${attempt}/${maxRetries}), giving up`
+              )
+            }
+            throw e
+          }
 
-          // Increase the delay for the next attempt
-          delay = Math.min(delay * multiplier, maxDelay)
+          // Check retry budget - this is our primary load shedding mechanism
+          // It limits the *rate* of retries (10% of traffic) not the count
+          // This prevents retry storms even with infinite retries
+          if (!checkRetryBudget(retryBudgetPercent)) {
+            if (debug) {
+              console.log(
+                `Retry budget exhausted (attempt ${attempt}), backing off`
+              )
+            }
+            // Wait for maxDelay before checking budget again
+            // This prevents tight retry loops when budget is exhausted
+            await new Promise((resolve) => setTimeout(resolve, maxDelay))
+            // Don't throw - continue retrying after the wait
+            // This allows offline clients to eventually reconnect
+            continue
+          }
+
+          // Calculate wait time honoring server-driven backoff as a floor
+          // Precedence: max(serverMinimum, min(clientMaxDelay, backoffWithJitter))
+
+          // 1. Parse server-provided Retry-After (if present)
+          let serverMinimumMs = 0
+          if (e instanceof FetchError && e.headers) {
+            const retryAfter = e.headers[`retry-after`]
+            if (retryAfter) {
+              const retryAfterSec = Number(retryAfter)
+              if (Number.isFinite(retryAfterSec) && retryAfterSec > 0) {
+                // Retry-After in seconds
+                serverMinimumMs = retryAfterSec * 1000
+              } else {
+                // Retry-After as HTTP date
+                const retryDate = Date.parse(retryAfter)
+                if (!isNaN(retryDate)) {
+                  // Handle clock skew: clamp to non-negative, cap at reasonable max
+                  const deltaMs = retryDate - Date.now()
+                  serverMinimumMs = Math.max(0, Math.min(deltaMs, 3600_000)) // Cap at 1 hour
+                }
+              }
+            }
+          }
+
+          // 2. Calculate client backoff with full jitter
+          const jitter = Math.random() * delay
+          const clientBackoffMs = Math.min(jitter, maxDelay)
+
+          // 3. Server minimum is the floor, client cap is the ceiling
+          const waitMs = Math.max(serverMinimumMs, clientBackoffMs)
 
           if (debug) {
-            attempt++
-            console.log(`Retry attempt #${attempt} after ${delay}ms`)
+            const source = serverMinimumMs > 0 ? `server+client` : `client`
+            console.log(
+              `Retry attempt #${attempt} after ${waitMs}ms (${source}, serverMin=${serverMinimumMs}ms, clientBackoff=${clientBackoffMs}ms)`
+            )
           }
+
+          // Wait for the calculated duration
+          await new Promise((resolve) => setTimeout(resolve, waitMs))
+
+          // Increase the delay for the next attempt (capped at maxDelay)
+          delay = Math.min(delay * multiplier, maxDelay)
         }
       }
     }
@@ -118,6 +229,10 @@ export function createFetchWithConsumedMessages(fetchClient: typeof fetch) {
       const text = await res.text()
       return new Response(text, res)
     } catch (err) {
+      if (args[1]?.signal?.aborted) {
+        throw new FetchBackoffAbortError()
+      }
+
       throw new FetchError(
         res.status,
         undefined,