@pol-studios/powersync 1.0.7 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/README.md +933 -0
  2. package/dist/CacheSettingsManager-uz-kbnRH.d.ts +461 -0
  3. package/dist/attachments/index.d.ts +709 -6
  4. package/dist/attachments/index.js +133 -5
  5. package/dist/chunk-24RDMMCL.js +44 -0
  6. package/dist/chunk-24RDMMCL.js.map +1 -0
  7. package/dist/chunk-4TXTAEF2.js +2060 -0
  8. package/dist/chunk-4TXTAEF2.js.map +1 -0
  9. package/dist/chunk-63PXSPIN.js +358 -0
  10. package/dist/chunk-63PXSPIN.js.map +1 -0
  11. package/dist/chunk-654ERHA7.js +1 -0
  12. package/dist/{chunk-BREGB4WL.js → chunk-BRXQNASY.js} +287 -335
  13. package/dist/chunk-BRXQNASY.js.map +1 -0
  14. package/dist/{chunk-DHYUBVP7.js → chunk-CAB26E6F.js} +20 -9
  15. package/dist/chunk-CAB26E6F.js.map +1 -0
  16. package/dist/{chunk-H772V6XQ.js → chunk-CUCAYK7Z.js} +7 -43
  17. package/dist/chunk-CUCAYK7Z.js.map +1 -0
  18. package/dist/{chunk-4C3RY5SU.js → chunk-HWSNV45P.js} +76 -1
  19. package/dist/chunk-HWSNV45P.js.map +1 -0
  20. package/dist/{chunk-HFOFLW5F.js → chunk-KN2IZERF.js} +139 -6
  21. package/dist/chunk-KN2IZERF.js.map +1 -0
  22. package/dist/{chunk-UEYRTLKE.js → chunk-P4HZA6ZT.js} +20 -9
  23. package/dist/chunk-P4HZA6ZT.js.map +1 -0
  24. package/dist/chunk-T4AO7JIG.js +1 -0
  25. package/dist/{chunk-XQAJM2MW.js → chunk-VACPAAQZ.js} +33 -2
  26. package/dist/{chunk-XQAJM2MW.js.map → chunk-VACPAAQZ.js.map} +1 -1
  27. package/dist/{chunk-53WH2JJV.js → chunk-WN5ZJ3E2.js} +5 -8
  28. package/dist/chunk-WN5ZJ3E2.js.map +1 -0
  29. package/dist/chunk-XAEII4ZX.js +456 -0
  30. package/dist/chunk-XAEII4ZX.js.map +1 -0
  31. package/dist/chunk-XOY2CJ67.js +289 -0
  32. package/dist/chunk-XOY2CJ67.js.map +1 -0
  33. package/dist/chunk-YHTZ7VMV.js +1 -0
  34. package/dist/{chunk-MKD2VCX3.js → chunk-Z6VOBGTU.js} +8 -8
  35. package/dist/chunk-Z6VOBGTU.js.map +1 -0
  36. package/dist/chunk-ZM4ENYMF.js +230 -0
  37. package/dist/chunk-ZM4ENYMF.js.map +1 -0
  38. package/dist/connector/index.d.ts +56 -3
  39. package/dist/connector/index.js +8 -5
  40. package/dist/core/index.d.ts +12 -1
  41. package/dist/core/index.js +3 -2
  42. package/dist/error/index.js +0 -1
  43. package/dist/index.d.ts +12 -10
  44. package/dist/index.js +191 -29
  45. package/dist/index.native.d.ts +11 -9
  46. package/dist/index.native.js +191 -29
  47. package/dist/index.web.d.ts +11 -9
  48. package/dist/index.web.js +191 -29
  49. package/dist/maintenance/index.js +0 -1
  50. package/dist/platform/index.js +0 -2
  51. package/dist/platform/index.js.map +1 -1
  52. package/dist/platform/index.native.js +1 -2
  53. package/dist/platform/index.web.js +0 -1
  54. package/dist/pol-attachment-queue-BVAIueoP.d.ts +817 -0
  55. package/dist/provider/index.d.ts +38 -34
  56. package/dist/provider/index.js +11 -12
  57. package/dist/react/index.d.ts +372 -0
  58. package/dist/react/index.js +25 -0
  59. package/dist/storage/index.d.ts +3 -3
  60. package/dist/storage/index.js +22 -8
  61. package/dist/storage/index.native.d.ts +3 -3
  62. package/dist/storage/index.native.js +21 -7
  63. package/dist/storage/index.web.d.ts +3 -3
  64. package/dist/storage/index.web.js +21 -7
  65. package/dist/storage/upload/index.d.ts +7 -8
  66. package/dist/storage/upload/index.js +3 -3
  67. package/dist/storage/upload/index.native.d.ts +7 -8
  68. package/dist/storage/upload/index.native.js +4 -3
  69. package/dist/storage/upload/index.web.d.ts +1 -4
  70. package/dist/storage/upload/index.web.js +3 -3
  71. package/dist/supabase-connector-T9vHq_3i.d.ts +202 -0
  72. package/dist/sync/index.js +3 -3
  73. package/dist/{supabase-connector-qLm-WHkM.d.ts → types-B212hgfA.d.ts} +48 -170
  74. package/dist/{types-BVacP54t.d.ts → types-CyvBaAl8.d.ts} +12 -4
  75. package/dist/types-D0WcHrq6.d.ts +234 -0
  76. package/package.json +18 -4
  77. package/dist/CacheSettingsManager-1exbOC6S.d.ts +0 -261
  78. package/dist/chunk-4C3RY5SU.js.map +0 -1
  79. package/dist/chunk-53WH2JJV.js.map +0 -1
  80. package/dist/chunk-BREGB4WL.js.map +0 -1
  81. package/dist/chunk-DGUM43GV.js +0 -11
  82. package/dist/chunk-DHYUBVP7.js.map +0 -1
  83. package/dist/chunk-GKF7TOMT.js +0 -1
  84. package/dist/chunk-H772V6XQ.js.map +0 -1
  85. package/dist/chunk-HFOFLW5F.js.map +0 -1
  86. package/dist/chunk-KGSFAE5B.js +0 -1
  87. package/dist/chunk-LNL64IJZ.js +0 -1
  88. package/dist/chunk-MKD2VCX3.js.map +0 -1
  89. package/dist/chunk-UEYRTLKE.js.map +0 -1
  90. package/dist/chunk-WQ5MPAVC.js +0 -449
  91. package/dist/chunk-WQ5MPAVC.js.map +0 -1
  92. package/dist/chunk-ZEOKPWUC.js +0 -1165
  93. package/dist/chunk-ZEOKPWUC.js.map +0 -1
  94. package/dist/pol-attachment-queue-C7YNXXhK.d.ts +0 -676
  95. package/dist/types-Bgvx7-E8.d.ts +0 -187
  96. /package/dist/{chunk-DGUM43GV.js.map → chunk-654ERHA7.js.map} +0 -0
  97. /package/dist/{chunk-GKF7TOMT.js.map → chunk-T4AO7JIG.js.map} +0 -0
  98. /package/dist/{chunk-KGSFAE5B.js.map → chunk-YHTZ7VMV.js.map} +0 -0
  99. /package/dist/{chunk-LNL64IJZ.js.map → react/index.js.map} +0 -0
@@ -0,0 +1,817 @@
1
+ import { AbstractAttachmentQueue, AttachmentQueueOptions, AttachmentRecord as AttachmentRecord$1 } from '@powersync/attachments';
2
+ import { AbstractPowerSyncDatabase } from '@powersync/common';
3
+ import { PlatformAdapter } from './platform/index.js';
4
+
5
+ /**
6
+ * Attachment Queue Types for @pol-studios/powersync
7
+ *
8
+ * This module re-exports types from the official @powersync/attachments package
9
+ * and extends them with POL-specific features like:
10
+ * - FAILED_PERMANENT state for permanent upload failures
11
+ * - Upload metadata fields for retry tracking
12
+ * - Source table configuration for flexible attachment watching
13
+ * - Image compression configuration
14
+ *
15
+ * IMPORTANT: The official AttachmentState enum has different numeric values:
16
+ * Official: QUEUED_SYNC=0, QUEUED_UPLOAD=1, QUEUED_DOWNLOAD=2, SYNCED=3, ARCHIVED=4
17
+ *
18
+ * We extend this with FAILED_PERMANENT=5 for permanent upload failures.
19
+ */
20
+
21
+ /**
22
+ * Extended attachment state that includes permanent failure state.
23
+ *
24
+ * Values 0-4 match the official @powersync/attachments AttachmentState.
25
+ * Value 5 (FAILED_PERMANENT) is our extension for uploads that have
26
+ * exhausted retries or encountered unrecoverable errors.
27
+ *
28
+ * @example
29
+ * ```typescript
30
+ * import { AttachmentState } from '@powersync/attachments';
31
+ * import { PolAttachmentState } from '@pol-studios/powersync/attachments';
32
+ *
33
+ * // Use official states
34
+ * const queued = AttachmentState.QUEUED_UPLOAD;
35
+ *
36
+ * // Use our extension
37
+ * const failed = PolAttachmentState.FAILED_PERMANENT;
38
+ * ```
39
+ */
40
+ declare enum PolAttachmentState {
41
+ /** Check if the attachment needs to be uploaded or downloaded */
42
+ QUEUED_SYNC = 0,
43
+ /** Attachment to be uploaded */
44
+ QUEUED_UPLOAD = 1,
45
+ /** Attachment to be downloaded */
46
+ QUEUED_DOWNLOAD = 2,
47
+ /** Attachment has been synced */
48
+ SYNCED = 3,
49
+ /** Attachment has been orphaned, i.e. the associated record has been deleted */
50
+ ARCHIVED = 4,
51
+ /** Permanently failed (exhausted retries or unrecoverable error) - POL extension */
52
+ FAILED_PERMANENT = 5,
53
+ /** Download was skipped due to downloadFilter returning false - POL extension */
54
+ DOWNLOAD_SKIPPED = 6
55
+ }
56
+ /**
57
+ * Extended attachment record with POL-specific upload tracking fields.
58
+ *
59
+ * Extends the official AttachmentRecord with fields for:
60
+ * - Upload retry tracking
61
+ * - Error information
62
+ * - Upload metadata for onComplete callbacks
63
+ */
64
+ interface PolAttachmentRecord {
65
+ /** Unique identifier (typically storage path) */
66
+ id: string;
67
+ /** Filename for display and type inference */
68
+ filename: string;
69
+ /** Local file URI (set after download/upload) */
70
+ local_uri?: string | null;
71
+ /** File size in bytes */
72
+ size?: number;
73
+ /** MIME type of the file */
74
+ media_type?: string;
75
+ /** Timestamp when the attachment was created/modified */
76
+ timestamp?: number;
77
+ /** Current state in the queue */
78
+ state: number;
79
+ /** Local file URI in managed cache (for uploads) */
80
+ upload_source_uri?: string | null;
81
+ /** Last upload error message */
82
+ upload_error?: string | null;
83
+ /** HTTP status or error code */
84
+ upload_error_code?: string | null;
85
+ /** Number of upload retry attempts */
86
+ upload_retry_count?: number;
87
+ /** Timestamp for next retry */
88
+ upload_next_retry_at?: number;
89
+ /** JSON-serialized entity metadata for onComplete */
90
+ upload_metadata?: string | null;
91
+ /** Target storage bucket ID */
92
+ upload_bucket_id?: string | null;
93
+ }
94
+ /**
95
+ * Legacy configuration for watching a source table.
96
+ * @deprecated Use AttachmentSourceConfig with watchIds callback instead.
97
+ * This interface is kept for backward compatibility with query-builder utilities.
98
+ */
99
+ interface WatchConfig {
100
+ /** Source table name */
101
+ table: string;
102
+ /** Column containing attachment ID/path */
103
+ idColumn: string;
104
+ /** Additional columns to include in context */
105
+ selectColumns?: string[];
106
+ /** Optional WHERE clause fragment (e.g., "storagePath IS NOT NULL") */
107
+ where?: string;
108
+ /** Order by column and direction */
109
+ orderBy?: {
110
+ column: string;
111
+ direction: "ASC" | "DESC";
112
+ };
113
+ }
114
+ /**
115
+ * Legacy context for batch filtering of attachments.
116
+ * @deprecated Use SkipDownloadContext with skipDownload callback instead.
117
+ */
118
+ interface BatchFilterContext {
119
+ /** All pending attachment IDs */
120
+ ids: string[];
121
+ /** Source records from watch config (ID → record) */
122
+ records: Map<string, Record<string, unknown>>;
123
+ /** Database access for batch queries */
124
+ db: {
125
+ getAll<T>(sql: string, params?: unknown[]): Promise<T[]>;
126
+ getOptional<T>(sql: string, params?: unknown[]): Promise<T | null>;
127
+ };
128
+ }
129
+ /**
130
+ * Configuration for an attachment source.
131
+ *
132
+ * This is a minimal, framework-agnostic interface for configuring attachment sync.
133
+ * It uses a reactive callback pattern that works with any data layer.
134
+ *
135
+ * @example
136
+ * ```typescript
137
+ * const config: AttachmentSourceConfig = {
138
+ * bucket: 'project-assets',
139
+ *
140
+ * // Reactive source of attachment IDs
141
+ * watchIds: (db, onUpdate) => {
142
+ * // Using PowerSync's watch API
143
+ * db.watch('SELECT storagePath FROM photos WHERE storagePath IS NOT NULL', [], {
144
+ * onResult: (results) => {
145
+ * onUpdate(results.rows._array.map(r => r.storagePath));
146
+ * }
147
+ * });
148
+ * },
149
+ *
150
+ * // Optional: skip downloading videos
151
+ * skipDownload: async ({ ids, db }) => {
152
+ * const videos = await db.getAll<{ path: string }>(
153
+ * `SELECT storagePath as path FROM photos WHERE storagePath IN (${ids.map(() => '?').join(',')}) AND mediaType LIKE 'video/%'`,
154
+ * ids
155
+ * );
156
+ * return videos.map(v => v.path);
157
+ * }
158
+ * };
159
+ * ```
160
+ */
161
+ interface AttachmentSourceConfig {
162
+ /**
163
+ * Storage bucket name for this attachment source.
164
+ * Maps to the Supabase Storage bucket (or equivalent in other backends).
165
+ */
166
+ bucket: string;
167
+ /**
168
+ * Reactive source of attachment IDs. Called once during initialization.
169
+ *
170
+ * This callback should set up a reactive subscription that calls `onUpdate`
171
+ * whenever the set of attachment IDs changes. The queue will:
172
+ * - Queue downloads for new IDs on first emission
173
+ * - Queue downloads for IDs that appear in subsequent emissions
174
+ * - Auto-archive attachments whose IDs disappear from emissions
175
+ *
176
+ * @param db - PowerSync database instance for querying
177
+ * @param onUpdate - Callback to emit current set of attachment IDs
178
+ * @returns Optional cleanup function to dispose of the watch subscription
179
+ *
180
+ * @example
181
+ * ```typescript
182
+ * watchIds: (db, onUpdate) => {
183
+ * const abort = new AbortController();
184
+ * db.watch(
185
+ * 'SELECT storagePath FROM photos WHERE storagePath IS NOT NULL',
186
+ * [],
187
+ * { onResult: (r) => onUpdate(r.rows._array.map(row => row.storagePath)) },
188
+ * { signal: abort.signal }
189
+ * );
190
+ * return () => abort.abort(); // Cleanup function
191
+ * }
192
+ * ```
193
+ */
194
+ watchIds: (db: AbstractPowerSyncDatabase, onUpdate: (ids: string[]) => void) => (() => void) | void;
195
+ /**
196
+ * Optional batch filter to skip downloading certain attachments.
197
+ *
198
+ * Called with all pending attachment IDs. Return the IDs that should
199
+ * be SKIPPED (not downloaded). Useful for:
200
+ * - Filtering out video files on mobile
201
+ * - Skipping large files
202
+ * - Conditional download based on user preferences
203
+ *
204
+ * @param context - Contains pending IDs and database access
205
+ * @returns Promise resolving to array of IDs to skip
206
+ *
207
+ * @example
208
+ * ```typescript
209
+ * skipDownload: async ({ ids, db }) => {
210
+ * // Skip video files
211
+ * const videos = await db.getAll<{ id: string }>(
212
+ * `SELECT storagePath as id FROM photos
213
+ * WHERE storagePath IN (${ids.map(() => '?').join(',')})
214
+ * AND mediaType LIKE 'video/%'`,
215
+ * ids
216
+ * );
217
+ * return videos.map(v => v.id);
218
+ * }
219
+ * ```
220
+ */
221
+ skipDownload?: (context: SkipDownloadContext) => Promise<string[]>;
222
+ }
223
+ /**
224
+ * Context passed to the skipDownload callback.
225
+ */
226
+ interface SkipDownloadContext {
227
+ /** All pending attachment IDs to evaluate */
228
+ ids: string[];
229
+ /** PowerSync database instance for queries */
230
+ db: AbstractPowerSyncDatabase;
231
+ }
232
+ /**
233
+ * Full attachment configuration including source configs and handlers.
234
+ * This extends the source config with upload/download handlers and callbacks.
235
+ */
236
+ interface AttachmentConfig {
237
+ /** Bucket name in Supabase storage */
238
+ bucket: string;
239
+ /**
240
+ * Reactive source of attachment IDs. Called once during initialization.
241
+ * May return an optional cleanup function to dispose of the watch subscription.
242
+ * @see AttachmentSourceConfig.watchIds
243
+ */
244
+ watchIds: AttachmentSourceConfig["watchIds"];
245
+ /**
246
+ * Optional batch filter to skip downloading certain attachments.
247
+ * @see AttachmentSourceConfig.skipDownload
248
+ */
249
+ skipDownload?: AttachmentSourceConfig["skipDownload"];
250
+ /** Optional: callback when upload completes */
251
+ onUploadComplete?: (attachment: AttachmentRecord) => void;
252
+ /** Optional: callback when upload fails */
253
+ onUploadFailed?: (attachment: AttachmentRecord, error: Error) => void;
254
+ /** Optional: max cache size in bytes */
255
+ maxCacheBytes?: number;
256
+ /** Optional: remote storage adapter for downloads */
257
+ remoteStorage?: AttachmentStorageAdapter;
258
+ /** Optional: upload handler for uploads */
259
+ uploadHandler?: UploadHandler;
260
+ /**
261
+ * Optional: image compression settings for downloads.
262
+ * Uses Supabase's transform feature to resize/compress images on download.
263
+ * Only applies to supported image formats (jpg, png, webp, avif, gif, heic).
264
+ */
265
+ compression?: Partial<CompressionConfig>;
266
+ /**
267
+ * Optional: download configuration for performance tuning.
268
+ * Adjusts concurrency and timeout for downloads.
269
+ * Higher concurrency = faster but more memory/network usage.
270
+ */
271
+ download?: Partial<DownloadConfig>;
272
+ }
273
+ /**
274
+ * Interface for remote attachment storage operations (e.g., Supabase Storage).
275
+ *
276
+ * This is used by PolStorageAdapter to handle remote file operations.
277
+ * For local file operations, we use PlatformAdapter.fileSystem.
278
+ */
279
+ interface AttachmentStorageAdapter {
280
+ /**
281
+ * Download a file from remote storage.
282
+ *
283
+ * This method can return different types depending on the implementation:
284
+ *
285
+ * 1. **Blob** (legacy behavior): The file data as a Blob object.
286
+ * Used by web platforms or legacy adapters. This path loads the entire
287
+ * file into memory.
288
+ *
289
+ * 2. **Base64 string**: The file data encoded as a base64 string.
290
+ * Will be converted to Blob by the caller.
291
+ *
292
+ * 3. **file:// URI string** (optimized path): A `file://` URI pointing to a
293
+ * temp file on the local filesystem. This is the **preferred return type**
294
+ * for native platforms as it enables zero-memory-overhead downloads.
295
+ * When this is returned, PolAttachmentQueue.downloadRecord() uses direct
296
+ * file copy instead of loading the file into JS memory, avoiding ~4x
297
+ * memory overhead and OOM crashes on large files.
298
+ *
299
+ * @param filePath - The storage path of the file
300
+ * @returns The file data as a Blob, base64 string, or file:// URI to a temp file
301
+ */
302
+ downloadFile(filePath: string): Promise<Blob | string>;
303
+ /**
304
+ * Upload a file to remote storage (optional - not all queues need upload).
305
+ * @param filePath - The storage path to upload to
306
+ * @param data - The file data to upload (base64 string or Blob)
307
+ */
308
+ uploadFile?(filePath: string, data: Blob | string): Promise<void>;
309
+ /**
310
+ * Delete a file from remote storage (optional).
311
+ * @param filePath - The storage path of the file
312
+ */
313
+ deleteFile?(filePath: string): Promise<void>;
314
+ /**
315
+ * Resolve the storage bucket for a file path.
316
+ * Allows routing different files to different buckets.
317
+ * @param filePath - The file path to resolve
318
+ * @returns The bucket name
319
+ */
320
+ resolveBucket?(filePath: string): string;
321
+ }
322
+ /**
323
+ * Configuration for image compression.
324
+ */
325
+ interface CompressionConfig {
326
+ /** Enable compression (default: true) */
327
+ enabled: boolean;
328
+ /** Compression quality 0.0-1.0 (default: 0.7) */
329
+ quality: number;
330
+ /** Max width before resizing (default: 2048) */
331
+ maxWidth: number;
332
+ /** Skip files under this size in bytes (default: 100KB) */
333
+ skipSizeBytes: number;
334
+ /** Skip if already under this size in bytes (default: 300KB) */
335
+ targetSizeBytes: number;
336
+ }
337
+ /**
338
+ * Default compression configuration.
339
+ */
340
+ declare const DEFAULT_COMPRESSION_CONFIG: CompressionConfig;
341
+ /**
342
+ * Configuration for the upload engine.
343
+ */
344
+ interface UploadConfig {
345
+ /** Maximum concurrent uploads (default: 3) */
346
+ concurrency: number;
347
+ /** Upload timeout per file in ms (default: 120000) */
348
+ timeoutMs: number;
349
+ /** Base retry delay in ms (default: 5000) */
350
+ baseRetryDelayMs: number;
351
+ /** Maximum retry delay in ms (default: 3600000 = 1 hour) */
352
+ maxRetryDelayMs: number;
353
+ /** Days before marking upload as stale (default: 7) */
354
+ staleDaysThreshold: number;
355
+ /** Maximum number of retry attempts before marking as FAILED_PERMANENT (default: 100) */
356
+ maxRetryCount: number;
357
+ }
358
+ /**
359
+ * Default upload configuration.
360
+ *
361
+ * Note: baseRetryDelayMs is set to 30s (not 5s) to reduce battery consumption.
362
+ * The exponential backoff sequence will be: 30s, 60s, 2min, 4min, 8min, 16min, 32min, 1hr (capped).
363
+ */
364
+ declare const DEFAULT_UPLOAD_CONFIG: UploadConfig;
365
+ /**
366
+ * Interface for handling file uploads to remote storage.
367
+ * Separate from AttachmentStorageAdapter to allow different implementations.
368
+ */
369
+ interface UploadHandler {
370
+ /**
371
+ * Upload a file to remote storage.
372
+ * @param storagePath - The storage path to upload to
373
+ * @param localFileUri - Local file URI to upload from
374
+ * @param mediaType - MIME type of the file
375
+ * @param signal - Optional AbortSignal for cancellation support
376
+ */
377
+ uploadFile(storagePath: string, localFileUri: string, mediaType: string, signal?: AbortSignal): Promise<void>;
378
+ /**
379
+ * Optional: resolve the storage bucket for a file path.
380
+ * @param storagePath - The file path to resolve
381
+ * @returns The bucket name
382
+ */
383
+ resolveBucket?(storagePath: string): string;
384
+ }
385
+ /**
386
+ * Configuration for the download engine.
387
+ *
388
+ * Note: These settings are for developers tuning performance.
389
+ * Higher concurrency can speed up downloads but uses more memory and network.
390
+ */
391
+ interface DownloadConfig {
392
+ /** Maximum concurrent downloads (default: 3) */
393
+ concurrency: number;
394
+ /** Download timeout per file in ms (default: 120000 = 2 minutes) */
395
+ timeoutMs: number;
396
+ }
397
+ /**
398
+ * Default download configuration.
399
+ */
400
+ declare const DEFAULT_DOWNLOAD_CONFIG: DownloadConfig;
401
+ /**
402
+ * Configuration for cache management.
403
+ *
404
+ * By default, cache size is unlimited. Set `maxSize` to enable size-based eviction.
405
+ */
406
+ interface CacheConfig {
407
+ /** Maximum cache size in bytes. Default: unlimited (Number.MAX_SAFE_INTEGER) */
408
+ maxSize: number;
409
+ /** Stop downloads at this percentage of max (default: 0.95 = 95%). Only applies when maxSize is set. */
410
+ downloadStopThreshold: number;
411
+ /** Trigger eviction at this percentage (default: 1.0 = 100%). Only applies when maxSize is set. */
412
+ evictionTriggerThreshold: number;
413
+ }
414
+ /**
415
+ * Default cache configuration.
416
+ *
417
+ * Cache size is unlimited by default. Users can set `maxSize` to enable
418
+ * size-based eviction when storage space is a concern.
419
+ */
420
+ declare const DEFAULT_CACHE_CONFIG: CacheConfig;
421
+ /**
422
+ * Predefined cache size presets in bytes.
423
+ * Use these constants for user-facing cache limit settings.
424
+ *
425
+ * @example
426
+ * ```typescript
427
+ * // In settings UI
428
+ * const options = [
429
+ * { label: '250 MB', value: CACHE_SIZE_PRESETS.MB_250 },
430
+ * { label: '500 MB', value: CACHE_SIZE_PRESETS.MB_500 },
431
+ * { label: '1 GB', value: CACHE_SIZE_PRESETS.GB_1 },
432
+ * { label: '2 GB', value: CACHE_SIZE_PRESETS.GB_2 },
433
+ * { label: '5 GB', value: CACHE_SIZE_PRESETS.GB_5 },
434
+ * { label: 'Unlimited', value: CACHE_SIZE_PRESETS.UNLIMITED },
435
+ * ];
436
+ *
437
+ * // In config
438
+ * cache: { maxSize: CACHE_SIZE_PRESETS.GB_1 }
439
+ * ```
440
+ */
441
+ declare const CACHE_SIZE_PRESETS: {
442
+ /** 250 MB */
443
+ readonly MB_250: number;
444
+ /** 500 MB */
445
+ readonly MB_500: number;
446
+ /** 1 GB */
447
+ readonly GB_1: number;
448
+ /** 2 GB */
449
+ readonly GB_2: number;
450
+ /** 5 GB */
451
+ readonly GB_5: number;
452
+ /** Unlimited (no cache eviction) */
453
+ readonly UNLIMITED: number;
454
+ };
455
+ /**
456
+ * Type for cache size preset keys.
457
+ */
458
+ type CacheSizePreset = keyof typeof CACHE_SIZE_PRESETS;
459
+ /**
460
+ * Type for cache size preset values.
461
+ */
462
+ type CacheSizeValue = (typeof CACHE_SIZE_PRESETS)[CacheSizePreset];
463
+ /**
464
+ * Helper to format bytes as human-readable string.
465
+ *
466
+ * @example
467
+ * ```typescript
468
+ * formatCacheSize(CACHE_SIZE_PRESETS.GB_1) // "1 GB"
469
+ * formatCacheSize(CACHE_SIZE_PRESETS.MB_500) // "500 MB"
470
+ * formatCacheSize(CACHE_SIZE_PRESETS.UNLIMITED) // "Unlimited"
471
+ * ```
472
+ */
473
+ declare function formatCacheSize(bytes: number): string;
474
+ /**
475
+ * Current phase of a download operation.
476
+ */
477
+ type DownloadPhase = "downloading" | "compressing" | "complete" | "error";
478
+ /**
479
+ * Status of an individual download.
480
+ */
481
+ interface DownloadStatus {
482
+ /** Attachment ID */
483
+ id: string;
484
+ /** Filename being downloaded */
485
+ filename: string;
486
+ /** Current phase */
487
+ phase: DownloadPhase;
488
+ }
489
+ /**
490
+ * Current phase of an upload operation.
491
+ */
492
+ type UploadPhase = "uploading" | "waiting" | "complete" | "error" | "permanent_failure";
493
+ /**
494
+ * Status of an individual upload.
495
+ */
496
+ interface UploadStatus {
497
+ /** Attachment ID */
498
+ id: string;
499
+ /** Filename being uploaded */
500
+ filename: string;
501
+ /** Current phase */
502
+ phase: UploadPhase;
503
+ /** Upload progress (0-100) */
504
+ progress?: number;
505
+ /** Error message if failed */
506
+ error?: string;
507
+ }
508
+ /**
509
+ * Why downloads are stopped (if not actively syncing).
510
+ */
511
+ type AttachmentSyncStatus = "syncing" | "paused" | "cache_full" | "complete";
512
+ /**
513
+ * Statistics about the attachment sync progress.
514
+ */
515
+ interface AttachmentSyncStats {
516
+ /** Number of attachments that have been downloaded */
517
+ syncedCount: number;
518
+ /** Total size of synced attachments in bytes */
519
+ syncedSize: number;
520
+ /** Number of attachments waiting to be downloaded */
521
+ pendingCount: number;
522
+ /** Total expected attachments (synced + pending) */
523
+ totalExpected: number;
524
+ /** Maximum cache size in bytes */
525
+ maxCacheSize: number;
526
+ /** Current compression quality (0.1 to 1.0) */
527
+ compressionQuality: number;
528
+ /** Current sync status */
529
+ status: AttachmentSyncStatus;
530
+ /** Whether downloads are paused */
531
+ isPaused: boolean;
532
+ /** Whether currently processing downloads */
533
+ isProcessing: boolean;
534
+ /** Currently active downloads */
535
+ activeDownloads: DownloadStatus[];
536
+ /** Number of uploads waiting to be processed */
537
+ pendingUploadCount: number;
538
+ /** Number of permanently failed uploads */
539
+ failedPermanentCount: number;
540
+ /** Number of uploads failing > staleDaysThreshold */
541
+ staleUploadCount: number;
542
+ /** Currently active uploads */
543
+ activeUploads: UploadStatus[];
544
+ /** Number of attachments skipped by downloadFilter */
545
+ skippedDownloadCount: number;
546
+ }
547
+ /** Row from stats query */
548
+ interface AttachmentStatsRow {
549
+ state: number;
550
+ cnt: number;
551
+ sz: number;
552
+ }
553
+ /** Row for cache file operations */
554
+ interface CacheFileRow {
555
+ id: string;
556
+ local_uri: string;
557
+ }
558
+ /** Row for eviction operations */
559
+ interface EvictRow {
560
+ id: string;
561
+ local_uri: string;
562
+ size: number;
563
+ }
564
+ /** Row for cached size queries */
565
+ interface CachedSizeRow {
566
+ total: number;
567
+ }
568
+ /** Row for ID queries */
569
+ interface IdRow {
570
+ id: string;
571
+ }
572
+ /**
573
+ * Alias for PolAttachmentRecord.
574
+ * Use PolAttachmentRecord for new code. The official @powersync/attachments
575
+ * AttachmentRecord is exported as OfficialAttachmentRecord.
576
+ */
577
+ type AttachmentRecord = PolAttachmentRecord;
578
+
579
+ /**
580
+ * POL Attachment Queue
581
+ *
582
+ * Extends the official @powersync/attachments AbstractAttachmentQueue with
583
+ * POL-specific features:
584
+ * - Reactive watchIds callback for attachment ID sources
585
+ * - Optional skipDownload callback for filtering downloads
586
+ * - Durable upload queue with exponential backoff retry
587
+ * - FAILED_PERMANENT state for unrecoverable upload errors
588
+ * - Image compression integration
589
+ * - Upload callbacks (onUploadComplete, onUploadFailed)
590
+ *
591
+ * @example
592
+ * ```typescript
593
+ * const queue = new PolAttachmentQueue({
594
+ * powersync: db,
595
+ * storage: storageAdapter,
596
+ * source: {
597
+ * bucket: 'project-assets',
598
+ * watchIds: (db, onUpdate) => {
599
+ * db.watch('SELECT storagePath FROM photos WHERE storagePath IS NOT NULL', [], {
600
+ * onResult: (r) => onUpdate(r.rows._array.map(row => row.storagePath))
601
+ * });
602
+ * },
603
+ * skipDownload: async ({ ids, db }) => {
604
+ * // Return IDs to skip downloading
605
+ * const videos = await db.getAll('SELECT storagePath FROM photos WHERE mediaType LIKE "video/%"');
606
+ * return videos.map(v => v.storagePath);
607
+ * }
608
+ * },
609
+ * });
610
+ *
611
+ * await queue.init();
612
+ * ```
613
+ */
614
+
615
+ /**
616
+ * Options for PolAttachmentQueue that extend the official AttachmentQueueOptions.
617
+ */
618
+ interface PolAttachmentQueueOptions extends AttachmentQueueOptions {
619
+ platform: PlatformAdapter;
620
+ remoteStorage: AttachmentStorageAdapter;
621
+ /** Attachment source configuration with reactive watchIds callback */
622
+ source: AttachmentSourceConfig;
623
+ uploadHandler?: UploadHandler;
624
+ uploadConfig?: Partial<UploadConfig>;
625
+ /** Download configuration for concurrency tuning */
626
+ downloadConfig?: Partial<DownloadConfig>;
627
+ onUploadComplete?: (record: PolAttachmentRecord) => Promise<void>;
628
+ onUploadFailed?: (record: PolAttachmentRecord, error: Error) => void;
629
+ compression?: Partial<CompressionConfig>;
630
+ cache?: Partial<CacheConfig>;
631
+ }
632
+ /**
633
+ * POL Attachment Queue that extends the official AbstractAttachmentQueue.
634
+ */
635
+ declare class PolAttachmentQueue extends AbstractAttachmentQueue<PolAttachmentQueueOptions> {
636
+ private readonly platform;
637
+ private readonly polLogger;
638
+ private readonly source;
639
+ private readonly remoteStorage;
640
+ private readonly uploadHandler?;
641
+ private readonly uploadConfig;
642
+ private readonly downloadConfig;
643
+ private readonly compressionConfig;
644
+ private readonly cacheConfig;
645
+ private _uploadState;
646
+ private _disposed;
647
+ private _initialized;
648
+ private _watchGeneration;
649
+ private _watchIdsCleanup;
650
+ private _watchMutex;
651
+ private _networkListenerCleanup;
652
+ private _wasConnected;
653
+ private _progressCallbacks;
654
+ private _lastNotifyTime;
655
+ private _notifyTimer;
656
+ private _cachedStats;
657
+ private _cachedStatsTimestamp;
658
+ constructor(options: PolAttachmentQueueOptions);
659
+ watchUploads(): void;
660
+ /**
661
+ * Override parent's expireCache to disable count-based cache eviction.
662
+ *
663
+ * The parent implementation deletes SYNCED/ARCHIVED records beyond cacheLimit,
664
+ * which caused a bug where downloads would reset because:
665
+ * 1. expireCache deleted records beyond cacheLimit (default 100)
666
+ * 2. watchIds still emitted those IDs
667
+ * 3. They got re-created as QUEUED_DOWNLOAD
668
+ * 4. Downloads restarted in an infinite loop
669
+ *
670
+ * Our implementation uses size-based cache limits only (via clearCache/cacheConfig.maxSize).
671
+ */
672
+ expireCache(): Promise<void>;
673
+ /**
674
+ * Override parent's watchDownloads to use concurrent downloads.
675
+ *
676
+ * The parent implementation downloads one file at a time.
677
+ * This override processes downloads in parallel batches for faster sync.
678
+ */
679
+ watchDownloads(): void;
680
+ /**
681
+ * Process pending downloads with concurrency control.
682
+ * Downloads multiple files in parallel based on downloadConfig.concurrency.
683
+ * Enforces cache size limits after each batch completes.
684
+ */
685
+ private _downloadRecordsConcurrent;
686
+ uploadAttachment(record: {
687
+ id: string;
688
+ }): Promise<boolean>;
689
+ watchAttachmentIds(): Promise<void>;
690
+ saveToQueue(record: Omit<AttachmentRecord$1, 'timestamp'>): Promise<AttachmentRecord$1>;
691
+ onAttachmentIdsChange(onUpdate: (ids: string[]) => void): void;
692
+ downloadRecord(record: AttachmentRecord$1): Promise<boolean>;
693
+ newAttachmentRecord(record?: Partial<AttachmentRecord$1>): Promise<AttachmentRecord$1>;
694
+ init(): Promise<void>;
695
+ /**
696
+ * Dispose the attachment queue and clean up resources.
697
+ * This method is synchronous to ensure callers don't need to await it,
698
+ * but it fires off async cleanup in the background for graceful shutdown.
699
+ */
700
+ dispose(): void;
701
+ /**
702
+ * Async cleanup that runs in the background after dispose().
703
+ * Waits for active upload promises to settle gracefully.
704
+ */
705
+ private _asyncCleanup;
706
+ queueUpload(options: {
707
+ storagePath: string;
708
+ sourceUri: string;
709
+ filename: string;
710
+ mediaType: string;
711
+ bucketId?: string;
712
+ metadata?: Record<string, unknown>;
713
+ }): Promise<void>;
714
+ getPendingUploads(): Promise<PolAttachmentRecord[]>;
715
+ getSoonestRetryTime(): Promise<number | null>;
716
+ getFailedPermanentUploads(): Promise<PolAttachmentRecord[]>;
717
+ getStaleUploads(): Promise<PolAttachmentRecord[]>;
718
+ getSyncedUploadsWithPendingCallback(): Promise<PolAttachmentRecord[]>;
719
+ clearUploadCallback(id: string): Promise<void>;
720
+ retryUpload(id: string): Promise<void>;
721
+ /**
722
+ * Reset all uploads in QUEUED_UPLOAD state so they retry immediately.
723
+ * This clears retry counts and errors, allowing uploads to be retried fresh.
724
+ *
725
+ * @returns The number of uploads that were reset
726
+ */
727
+ resetUploadRetries(): Promise<number>;
728
+ /**
729
+ * Trigger immediate retry of uploads WITHOUT resetting their retry count.
730
+ * This preserves backoff state for truly failing uploads while allowing
731
+ * uploads that were waiting for network to retry immediately.
732
+ *
733
+ * Only affects uploads that:
734
+ * - Are past their retry time (upload_next_retry_at <= now), OR
735
+ * - Have no retry time set (upload_next_retry_at IS NULL)
736
+ *
737
+ * @returns The number of uploads that will be retried
738
+ */
739
+ retryUploads(): Promise<number>;
740
+ deleteUpload(id: string): Promise<void>;
741
+ /**
742
+ * Re-queue an upload for an orphaned attachment.
743
+ * Use this when a database record exists with a storagePath but the file was never uploaded.
744
+ *
745
+ * This handles the case where:
746
+ * - User took a photo and the database record was created with a storagePath
747
+ * - The attachment queue record was created but disappeared before upload completed
748
+ * - The photo shows locally but was never uploaded to storage
749
+ *
750
+ * @param options.storagePath - The storage path (e.g., "projectId/fileId.jpg")
751
+ * @param options.localFileUri - URI to the local file to upload
752
+ * @param options.mediaType - MIME type of the file
753
+ * @param options.bucketId - Optional bucket ID for multi-bucket setups
754
+ * @returns true if upload was queued, false if already exists in valid state
755
+ */
756
+ requeueOrphanedUpload(options: {
757
+ storagePath: string;
758
+ localFileUri: string;
759
+ mediaType: string;
760
+ bucketId?: string;
761
+ }): Promise<boolean>;
762
+ getRecord(id: string): Promise<PolAttachmentRecord | null>;
763
+ getFailedUploads(): Promise<PolAttachmentRecord[]>;
764
+ retryFailedUpload(id: string): Promise<void>;
765
+ deleteFailedUpload(id: string): Promise<void>;
766
+ get activeUploads(): UploadStatus[];
767
+ pauseUploads(): void;
768
+ resumeUploads(): void;
769
+ clearCache(): Promise<void>;
770
+ cacheLocalFile(storagePath: string, sourceUri: string): Promise<void>;
771
+ getLocalUriForStoragePath(storagePath: string): Promise<string | null>;
772
+ /**
773
+ * Purge attachments by their IDs.
774
+ * Archives and deletes local files for the specified attachment IDs.
775
+ * Useful for edge cases where external code needs to force-remove attachments.
776
+ *
777
+ * @param ids - Array of attachment IDs to purge
778
+ */
779
+ purgeAttachments(ids: string[]): Promise<void>;
780
+ onProgress(callback: (stats: AttachmentSyncStats) => void): () => void;
781
+ getStats(): Promise<AttachmentSyncStats>;
782
+ /**
783
+ * Repair attachment sizes for synced records that have size=0.
784
+ * This backfills sizes from the actual file system for existing downloads.
785
+ * @returns Number of records repaired
786
+ */
787
+ repairAttachmentSizes(): Promise<number>;
788
+ private _getDownloadManagerDeps;
789
+ private _getUploadManagerDeps;
790
+ private _getCacheManagerDeps;
791
+ private _startUploadProcessing;
792
+ private _createTableIfNotExists;
793
+ private _migrateUploadColumns;
794
+ private _invalidateStatsCache;
795
+ private _getStatus;
796
+ private _notify;
797
+ }
798
+ /**
799
+ * Factory function options that combine AttachmentConfig with required runtime dependencies.
800
+ */
801
+ interface CreateAttachmentQueueOptions extends AttachmentConfig {
802
+ /** Remote storage adapter for downloading/uploading files */
803
+ remoteStorage: AttachmentStorageAdapter;
804
+ /** Upload handler for processing uploads */
805
+ uploadHandler?: UploadHandler;
806
+ /** Upload configuration */
807
+ uploadConfig?: Partial<UploadConfig>;
808
+ /** Download configuration (concurrency, timeout) */
809
+ downloadConfig?: Partial<DownloadConfig>;
810
+ /** Compression configuration */
811
+ compression?: Partial<CompressionConfig>;
812
+ /** Cache configuration */
813
+ cache?: Partial<CacheConfig>;
814
+ }
815
+ declare function createPolAttachmentQueue(powersync: AbstractPowerSyncDatabase, platform: PlatformAdapter, config: CreateAttachmentQueueOptions): PolAttachmentQueue;
816
+
817
+ export { type AttachmentSourceConfig as A, type BatchFilterContext as B, type CompressionConfig as C, DEFAULT_COMPRESSION_CONFIG as D, type EvictRow as E, type IdRow as I, PolAttachmentQueue as P, type SkipDownloadContext as S, type UploadConfig as U, type WatchConfig as W, type PolAttachmentQueueOptions as a, PolAttachmentState as b, createPolAttachmentQueue as c, type PolAttachmentRecord as d, type AttachmentConfig as e, type AttachmentStorageAdapter as f, DEFAULT_UPLOAD_CONFIG as g, type UploadHandler as h, type DownloadConfig as i, DEFAULT_DOWNLOAD_CONFIG as j, type CacheConfig as k, DEFAULT_CACHE_CONFIG as l, CACHE_SIZE_PRESETS as m, type CacheSizePreset as n, type CacheSizeValue as o, formatCacheSize as p, type DownloadPhase as q, type DownloadStatus as r, type UploadPhase as s, type UploadStatus as t, type AttachmentSyncStatus as u, type AttachmentSyncStats as v, type AttachmentStatsRow as w, type CacheFileRow as x, type CachedSizeRow as y, type AttachmentRecord as z };