stellar-drive 1.2.13 → 1.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/engine.js CHANGED
@@ -138,6 +138,55 @@ function getColumns(name) {
138
138
  const table = getEngineConfig().tables.find((t) => t.supabaseName === name || t.schemaKey === name);
139
139
  return table?.columns || '*';
140
140
  }
141
+ /**
142
+ * Guarantee mandatory system-field defaults on an outbound sync payload.
143
+ *
144
+ * Every synced table in Supabase has `deleted boolean not null default false`.
145
+ * When an entity was written to IndexedDB by older code paths (before the
146
+ * `deleted` column existed) OR by a caller that passed a create payload
147
+ * without explicitly setting `deleted`, the field serializes as `undefined`
148
+ * (dropped by JSON) or `null` — both of which violate the NOT NULL
149
+ * constraint server-side.
150
+ *
151
+ * Rather than scattering this fix across every payload-assembly site, call
152
+ * this helper right before `filterPayloadToSchema()` so no code path can
153
+ * forget. Mutates and returns the same object for convenience.
154
+ */
155
+ function ensureSystemFieldDefaults(payload) {
156
+ if (payload.deleted === undefined || payload.deleted === null) {
157
+ payload.deleted = false;
158
+ }
159
+ return payload;
160
+ }
161
+ /**
162
+ * Filter a payload to only include columns defined in the schema.
163
+ *
164
+ * IndexedDB rows may contain stale fields from older schema versions that no
165
+ * longer exist in Supabase. Pushing them causes PGRST204 errors ("Could not
166
+ * find column X in the schema cache"). This function strips any field not in
167
+ * the table's column list.
168
+ *
169
+ * @param tableName - The Supabase table name
170
+ * @param payload - The raw payload from IndexedDB
171
+ * @returns A new object containing only schema-defined fields
172
+ */
173
+ function filterPayloadToSchema(tableName, payload) {
174
+ const columns = getColumns(tableName);
175
+ // If no explicit column list, pass through unchanged
176
+ if (columns === '*')
177
+ return payload;
178
+ const allowed = new Set(columns.split(','));
179
+ const filtered = {};
180
+ for (const key of Object.keys(payload)) {
181
+ if (allowed.has(key)) {
182
+ filtered[key] = payload[key];
183
+ }
184
+ else {
185
+ debugLog(`[SYNC] Stripping stale field '${key}' from ${tableName} payload`);
186
+ }
187
+ }
188
+ return filtered;
189
+ }
141
190
  /**
142
191
  * Check if a Supabase table is configured as a singleton (one row per user).
143
192
  *
@@ -1116,148 +1165,322 @@ async function pushPendingOps() {
1116
1165
  // "pending" state between sync cycles instead of silently consuming them.
1117
1166
  const snapshotItems = await getPendingSync();
1118
1167
  const snapshotIds = new Set(snapshotItems.map((item) => item.id));
1119
- while (iterations < maxIterations) {
1120
- const pendingItems = (await getPendingSync()).filter((item) => snapshotIds.has(item.id));
1121
- if (pendingItems.length === 0)
1122
- break;
1123
- iterations++;
1124
- let processedAny = false;
1125
- // ── Batch creates: group by table and INSERT in bulk ──
1126
- // This is critical for performance: CSV imports with hundreds of transactions
1127
- // push in a few batch calls instead of hundreds of individual HTTP requests.
1128
- const createItems = pendingItems.filter((item) => item.operationType === 'create');
1129
- const nonCreateItems = pendingItems.filter((item) => item.operationType !== 'create');
1130
- if (createItems.length > 0) {
1131
- // Group creates by table, preserving queue order within each group.
1132
- // Bulk-read all sync queue IDs in one IndexedDB call instead of
1133
- // per-item reads (N sequential reads → 1 bulk read).
1134
- const createQueueIds = createItems.filter((item) => item.id).map((item) => item.id);
1135
- const queuedRows = await db.table('syncQueue').bulkGet(createQueueIds);
1136
- const stillQueuedIds = new Set(queuedRows
1137
- .map((row, i) => (row ? createQueueIds[i] : null))
1138
- .filter((id) => id !== null));
1139
- const createsByTable = new Map();
1140
- for (const item of createItems) {
1141
- if (item.id && !stillQueuedIds.has(item.id))
1142
- continue;
1143
- const existing = createsByTable.get(item.table) || [];
1144
- existing.push(item);
1145
- createsByTable.set(item.table, existing);
1168
+ // Start progress tracking for high-volume pushes so the UI can show
1169
+ // "1,200 of 2,500 changes synced…" instead of an opaque spinner that the
1170
+ // user interprets as "stuck". Threshold matches the realtime-suspend
1171
+ // threshold so the two cinematic "heavy sync" signals are in lockstep.
1172
+ //
1173
+ // A periodic monitor reads the live IndexedDB queue size every 400ms and
1174
+ // publishes it through the syncStatusStore. Polling IndexedDB is cheap
1175
+ // (indexed count query on a small table) and keeps the UI accurate no
1176
+ // matter which internal push path (batch, per-item fallback, singleton,
1177
+ // coalesced, duplicate-retry) the items take — we don't have to sprinkle
1178
+ // progress calls across every branch.
1179
+ const PROGRESS_THRESHOLD = 50;
1180
+ const trackProgress = snapshotItems.length >= PROGRESS_THRESHOLD;
1181
+ let progressMonitor = null;
1182
+ let lastReportedCompleted = 0;
1183
+ if (trackProgress) {
1184
+ syncStatusStore.startProgress(snapshotItems.length);
1185
+ syncStatusStore.setPendingCount(snapshotItems.length);
1186
+ progressMonitor = setInterval(async () => {
1187
+ try {
1188
+ const liveCount = await db.table('syncQueue').count();
1189
+ const completed = Math.max(0, snapshotItems.length - liveCount);
1190
+ syncStatusStore.setPendingCount(liveCount);
1191
+ const delta = completed - lastReportedCompleted;
1192
+ if (delta !== 0) {
1193
+ syncStatusStore.advanceProgress(delta);
1194
+ lastReportedCompleted = completed;
1195
+ }
1146
1196
  }
1147
- // Sort table order: parent tables before child tables to satisfy RLS FK checks.
1148
- const schema = getEngineConfig().schema;
1149
- const sortedTableEntries = [...createsByTable.entries()].sort(([tableA], [tableB]) => {
1150
- if (!schema)
1197
+ catch (err) {
1198
+ debugWarn('[SYNC] Progress monitor tick failed:', err);
1199
+ }
1200
+ }, 400);
1201
+ }
1202
+ try {
1203
+ while (iterations < maxIterations) {
1204
+ const pendingItems = (await getPendingSync()).filter((item) => snapshotIds.has(item.id));
1205
+ if (pendingItems.length === 0)
1206
+ break;
1207
+ iterations++;
1208
+ let processedAny = false;
1209
+ // ── Batch creates: group by table and INSERT in bulk ──
1210
+ // This is critical for performance: CSV imports with hundreds of transactions
1211
+ // push in a few batch calls instead of hundreds of individual HTTP requests.
1212
+ const createItems = pendingItems.filter((item) => item.operationType === 'create');
1213
+ const nonCreateItems = pendingItems.filter((item) => item.operationType !== 'create');
1214
+ if (createItems.length > 0) {
1215
+ // Group creates by table, preserving queue order within each group.
1216
+ // Bulk-read all sync queue IDs in one IndexedDB call instead of
1217
+ // per-item reads (N sequential reads → 1 bulk read).
1218
+ const createQueueIds = createItems.filter((item) => item.id).map((item) => item.id);
1219
+ const queuedRows = await db.table('syncQueue').bulkGet(createQueueIds);
1220
+ const stillQueuedIds = new Set(queuedRows
1221
+ .map((row, i) => (row ? createQueueIds[i] : null))
1222
+ .filter((id) => id !== null));
1223
+ const createsByTable = new Map();
1224
+ for (const item of createItems) {
1225
+ if (item.id && !stillQueuedIds.has(item.id))
1226
+ continue;
1227
+ const existing = createsByTable.get(item.table) || [];
1228
+ existing.push(item);
1229
+ createsByTable.set(item.table, existing);
1230
+ }
1231
+ // Sort table order: parent tables before child tables to satisfy RLS FK checks.
1232
+ const schema = getEngineConfig().schema;
1233
+ const sortedTableEntries = [...createsByTable.entries()].sort(([tableA], [tableB]) => {
1234
+ if (!schema)
1235
+ return 0;
1236
+ // Resolve schema keys from supabase names (strip prefix)
1237
+ const configA = getEngineConfig().tables.find((t) => t.supabaseName === tableA);
1238
+ const configB = getEngineConfig().tables.find((t) => t.supabaseName === tableB);
1239
+ const keyA = configA?.schemaKey || tableA;
1240
+ const keyB = configB?.schemaKey || tableB;
1241
+ const aIsChild = isChildTable(schema, keyA);
1242
+ const bIsChild = isChildTable(schema, keyB);
1243
+ if (aIsChild && !bIsChild)
1244
+ return 1;
1245
+ if (!aIsChild && bIsChild)
1246
+ return -1;
1151
1247
  return 0;
1152
- // Resolve schema keys from supabase names (strip prefix)
1153
- const configA = getEngineConfig().tables.find((t) => t.supabaseName === tableA);
1154
- const configB = getEngineConfig().tables.find((t) => t.supabaseName === tableB);
1155
- const keyA = configA?.schemaKey || tableA;
1156
- const keyB = configB?.schemaKey || tableB;
1157
- const aIsChild = isChildTable(schema, keyA);
1158
- const bIsChild = isChildTable(schema, keyB);
1159
- if (aIsChild && !bIsChild)
1160
- return 1;
1161
- if (!aIsChild && bIsChild)
1162
- return -1;
1163
- return 0;
1164
- });
1165
- for (const [tableName, items] of sortedTableEntries) {
1166
- const supabase = getSupabase();
1167
- const deviceId = getDeviceId();
1168
- // Build batch payload
1169
- const payloads = items.map((item) => ({
1170
- id: item.entityId,
1171
- ...item.value,
1172
- device_id: deviceId
1173
- }));
1174
- // Batch insert (up to 500 at a time to stay within Supabase limits)
1175
- const BATCH_SIZE = 500;
1176
- for (let i = 0; i < payloads.length; i += BATCH_SIZE) {
1177
- const batch = payloads.slice(i, i + BATCH_SIZE);
1178
- const batchItems = items.slice(i, i + BATCH_SIZE);
1179
- try {
1180
- debugLog(`[SYNC] Batch upsert ${batch.length} rows into ${tableName}`);
1181
- const { error } = await supabase
1182
- .from(tableName)
1183
- .upsert(batch, { onConflict: 'id', ignoreDuplicates: false });
1184
- if (error) {
1185
- if (error.code === '23505') {
1186
- // Duplicate key on a SECONDARY unique constraint (e.g., csv_import_hash,
1187
- // teller_transaction_id). The primary `id` column is unique by UUID
1188
- // generation, so this means another row with a different id already has
1189
- // the same value for a secondary unique field.
1190
- //
1191
- // Strategy: query Supabase for which entity IDs already exist, remove those
1192
- // from the queue (they're true duplicates), then retry with only the new ones.
1193
- // This avoids the catastrophic individual fallback (500 sequential HTTP requests).
1194
- debugLog(`[SYNC] Batch create hit secondary unique constraint for ${tableName} — filtering duplicates`);
1195
- try {
1196
- // Query which IDs from this batch already exist in Supabase
1197
- const batchEntityIds = batchItems.map((item) => item.entityId);
1198
- const { data: existingRows } = await supabase
1199
- .from(tableName)
1200
- .select('id')
1201
- .in('id', batchEntityIds);
1202
- const existingIds = new Set((existingRows || []).map((r) => r.id));
1203
- // Remove already-synced items from queue
1204
- const duplicateQueueIds = batchItems
1205
- .filter((item) => existingIds.has(item.entityId) && item.id)
1206
- .map((item) => item.id);
1207
- if (duplicateQueueIds.length > 0) {
1208
- await bulkRemoveSyncItems(duplicateQueueIds);
1209
- processedAny = true;
1210
- actualPushed += duplicateQueueIds.length;
1211
- debugLog(`[SYNC] Removed ${duplicateQueueIds.length} already-synced items from queue`);
1212
- }
1213
- // Retry with only the truly new items
1214
- const newBatch = batch.filter((row) => !existingIds.has(row.id));
1215
- const newBatchItems = batchItems.filter((item) => !existingIds.has(item.entityId));
1216
- if (newBatch.length > 0) {
1217
- const { error: retryError } = await supabase
1248
+ });
1249
+ for (const [tableName, items] of sortedTableEntries) {
1250
+ const supabase = getSupabase();
1251
+ const deviceId = getDeviceId();
1252
+ // Build batch payload filter to schema-defined columns only.
1253
+ // Ensure system field `deleted` is always present: create payloads
1254
+ // queued before the column defaulted locally (or by callers that
1255
+ // never set it) serialize with `undefined`/`null`, which violates
1256
+ // the Supabase NOT NULL constraint on `deleted`.
1257
+ const payloads = items.map((item) => {
1258
+ const rawPayload = {
1259
+ id: item.entityId,
1260
+ ...item.value,
1261
+ device_id: deviceId
1262
+ };
1263
+ ensureSystemFieldDefaults(rawPayload);
1264
+ return filterPayloadToSchema(tableName, rawPayload);
1265
+ });
1266
+ // Batch insert (up to 500 at a time to stay within Supabase limits)
1267
+ const BATCH_SIZE = 500;
1268
+ for (let i = 0; i < payloads.length; i += BATCH_SIZE) {
1269
+ const batch = payloads.slice(i, i + BATCH_SIZE);
1270
+ const batchItems = items.slice(i, i + BATCH_SIZE);
1271
+ try {
1272
+ debugLog(`[SYNC] Batch upsert ${batch.length} rows into ${tableName}`);
1273
+ const { error } = await supabase
1274
+ .from(tableName)
1275
+ .upsert(batch, { onConflict: 'id', ignoreDuplicates: false });
1276
+ if (error) {
1277
+ if (error.code === '23505') {
1278
+ // Duplicate key on a SECONDARY unique constraint (e.g., csv_import_hash,
1279
+ // teller_transaction_id). The primary `id` column is unique by UUID
1280
+ // generation, so this means another row with a different id already has
1281
+ // the same value for a secondary unique field.
1282
+ //
1283
+ // Strategy: query Supabase for which entity IDs already exist, remove those
1284
+ // from the queue (they're true duplicates), then retry with only the new ones.
1285
+ // This avoids the catastrophic individual fallback (500 sequential HTTP requests).
1286
+ debugLog(`[SYNC] Batch create hit secondary unique constraint for ${tableName} — filtering duplicates`);
1287
+ try {
1288
+ // Query which IDs from this batch already exist in Supabase
1289
+ const batchEntityIds = batchItems.map((item) => item.entityId);
1290
+ const { data: existingRows } = await supabase
1218
1291
  .from(tableName)
1219
- .upsert(newBatch, { onConflict: 'id', ignoreDuplicates: false });
1220
- if (!retryError) {
1221
- const idsToRemove = newBatchItems
1222
- .filter((item) => item.id)
1223
- .map((item) => item.id);
1224
- if (idsToRemove.length > 0) {
1225
- await bulkRemoveSyncItems(idsToRemove);
1226
- processedAny = true;
1227
- actualPushed += idsToRemove.length;
1292
+ .select('id')
1293
+ .in('id', batchEntityIds);
1294
+ const existingIds = new Set((existingRows || []).map((r) => r.id));
1295
+ // Remove already-synced items from queue
1296
+ const duplicateQueueIds = batchItems
1297
+ .filter((item) => existingIds.has(item.entityId) && item.id)
1298
+ .map((item) => item.id);
1299
+ if (duplicateQueueIds.length > 0) {
1300
+ await bulkRemoveSyncItems(duplicateQueueIds);
1301
+ processedAny = true;
1302
+ actualPushed += duplicateQueueIds.length;
1303
+ debugLog(`[SYNC] Removed ${duplicateQueueIds.length} already-synced items from queue`);
1304
+ }
1305
+ // Retry with only the truly new items
1306
+ const newBatch = batch.filter((row) => !existingIds.has(row.id));
1307
+ const newBatchItems = batchItems.filter((item) => !existingIds.has(item.entityId));
1308
+ if (newBatch.length > 0) {
1309
+ const { error: retryError } = await supabase
1310
+ .from(tableName)
1311
+ .upsert(newBatch, { onConflict: 'id', ignoreDuplicates: false });
1312
+ if (!retryError) {
1313
+ const idsToRemove = newBatchItems
1314
+ .filter((item) => item.id)
1315
+ .map((item) => item.id);
1316
+ if (idsToRemove.length > 0) {
1317
+ await bulkRemoveSyncItems(idsToRemove);
1318
+ processedAny = true;
1319
+ actualPushed += idsToRemove.length;
1320
+ }
1321
+ debugLog(`[SYNC] Batch create retry success: ${newBatch.length} new rows into ${tableName}`);
1322
+ }
1323
+ else {
1324
+ // Retry still failed — likely another secondary constraint issue.
1325
+ // Remove all items from queue to prevent infinite retry loops.
1326
+ debugError(`[SYNC] Batch create retry failed for ${tableName} — removing from queue to prevent retry storm:`, retryError);
1327
+ const allQueueIds = newBatchItems
1328
+ .filter((item) => item.id)
1329
+ .map((item) => item.id);
1330
+ if (allQueueIds.length > 0) {
1331
+ await bulkRemoveSyncItems(allQueueIds);
1332
+ processedAny = true;
1333
+ }
1228
1334
  }
1229
- debugLog(`[SYNC] Batch create retry success: ${newBatch.length} new rows into ${tableName}`);
1230
1335
  }
1231
1336
  else {
1232
- // Retry still failed — likely another secondary constraint issue.
1233
- // Remove all items from queue to prevent infinite retry loops.
1234
- debugError(`[SYNC] Batch create retry failed for ${tableName} — removing from queue to prevent retry storm:`, retryError);
1235
- const allQueueIds = newBatchItems
1236
- .filter((item) => item.id)
1237
- .map((item) => item.id);
1238
- if (allQueueIds.length > 0) {
1239
- await bulkRemoveSyncItems(allQueueIds);
1337
+ debugLog(`[SYNC] All ${batch.length} items were duplicates — batch fully resolved`);
1338
+ }
1339
+ }
1340
+ catch (filterError) {
1341
+ // If the filter query itself fails, remove items to prevent retry storm
1342
+ debugError(`[SYNC] Duplicate filter query failed for ${tableName} — removing from queue:`, filterError);
1343
+ const allQueueIds = batchItems
1344
+ .filter((item) => item.id)
1345
+ .map((item) => item.id);
1346
+ if (allQueueIds.length > 0) {
1347
+ await bulkRemoveSyncItems(allQueueIds);
1348
+ processedAny = true;
1349
+ }
1350
+ }
1351
+ }
1352
+ else {
1353
+ // Non-duplicate error — fall back to individual to identify the problem row(s).
1354
+ // Common cause: RLS on child tables when parent hasn't synced yet.
1355
+ debugError(`[SYNC] Batch upsert failed for ${tableName}:`, error);
1356
+ for (const item of batchItems) {
1357
+ try {
1358
+ await processSyncItem(item);
1359
+ if (item.id) {
1360
+ await removeSyncItem(item.id);
1240
1361
  processedAny = true;
1362
+ actualPushed++;
1241
1363
  }
1242
1364
  }
1365
+ catch (itemError) {
1366
+ handleSyncItemError(item, itemError);
1367
+ }
1243
1368
  }
1244
- else {
1245
- debugLog(`[SYNC] All ${batch.length} items were duplicates — batch fully resolved`);
1369
+ }
1370
+ }
1371
+ else {
1372
+ // Batch succeeded — bulk-remove all items from queue in one transaction
1373
+ const idsToRemove = batchItems.filter((item) => item.id).map((item) => item.id);
1374
+ if (idsToRemove.length > 0) {
1375
+ await bulkRemoveSyncItems(idsToRemove);
1376
+ processedAny = true;
1377
+ actualPushed += idsToRemove.length;
1378
+ }
1379
+ debugLog(`[SYNC] Batch upsert success: ${batch.length} rows into ${tableName}`);
1380
+ }
1381
+ }
1382
+ catch (batchError) {
1383
+ // Network-level failure — fall back to individual
1384
+ debugError(`[SYNC] Batch insert threw for ${tableName}:`, batchError);
1385
+ for (const item of batchItems) {
1386
+ try {
1387
+ await processSyncItem(item);
1388
+ if (item.id) {
1389
+ await removeSyncItem(item.id);
1390
+ processedAny = true;
1391
+ actualPushed++;
1246
1392
  }
1247
1393
  }
1248
- catch (filterError) {
1249
- // If the filter query itself fails, remove items to prevent retry storm
1250
- debugError(`[SYNC] Duplicate filter query failed for ${tableName} — removing from queue:`, filterError);
1251
- const allQueueIds = batchItems.filter((item) => item.id).map((item) => item.id);
1252
- if (allQueueIds.length > 0) {
1253
- await bulkRemoveSyncItems(allQueueIds);
1394
+ catch (itemError) {
1395
+ handleSyncItemError(item, itemError);
1396
+ }
1397
+ }
1398
+ }
1399
+ }
1400
+ }
1401
+ }
1402
+ // ── Batch non-create operations: group by table and UPSERT in bulk ──
1403
+ // For set/delete/increment on non-singleton tables, read the full local entity
1404
+ // from IndexedDB and upsert in batches. This turns N sequential HTTP requests
1405
+ // into ceil(N/500) batch calls. Singleton tables need special ID reconciliation
1406
+ // and must be processed individually.
1407
+ const batchableItems = nonCreateItems.filter((item) => !isSingletonTable(item.table));
1408
+ const individualItems = nonCreateItems.filter((item) => isSingletonTable(item.table));
1409
+ if (batchableItems.length > 0) {
1410
+ // Bulk-read sync queue IDs for the still-queued check (same optimization as creates)
1411
+ const batchQueueIds = batchableItems.filter((item) => item.id).map((item) => item.id);
1412
+ const batchQueuedRows = await db.table('syncQueue').bulkGet(batchQueueIds);
1413
+ const batchStillQueuedIds = new Set(batchQueuedRows
1414
+ .map((row, i) => (row ? batchQueueIds[i] : null))
1415
+ .filter((id) => id !== null));
1416
+ // Group by table
1417
+ const itemsByTable = new Map();
1418
+ for (const item of batchableItems) {
1419
+ if (item.id && !batchStillQueuedIds.has(item.id))
1420
+ continue;
1421
+ const existing = itemsByTable.get(item.table) || [];
1422
+ existing.push(item);
1423
+ itemsByTable.set(item.table, existing);
1424
+ }
1425
+ for (const [tableName, items] of itemsByTable) {
1426
+ const supabase = getSupabase();
1427
+ const deviceId = getDeviceId();
1428
+ const dexieTable = getDexieTableName(tableName);
1429
+ // Build batch payload from local IndexedDB state (full entity rows).
1430
+ // Bulk-read all entities in one IndexedDB call instead of per-item reads.
1431
+ const entityIds = items.map((item) => item.entityId);
1432
+ const localEntities = await db.table(dexieTable).bulkGet(entityIds);
1433
+ const entityMap = new Map();
1434
+ localEntities.forEach((entity, i) => {
1435
+ if (entity)
1436
+ entityMap.set(entityIds[i], entity);
1437
+ });
1438
+ const payloads = [];
1439
+ const validItems = [];
1440
+ for (const item of items) {
1441
+ const localEntity = entityMap.get(item.entityId);
1442
+ if (!localEntity) {
1443
+ // Entity deleted locally — for delete ops this is expected (already gone),
1444
+ // for others skip it
1445
+ if (item.operationType === 'delete') {
1446
+ // Still need to ensure server-side deletion; fall back to individual
1447
+ try {
1448
+ await processSyncItem(item);
1449
+ if (item.id) {
1450
+ await removeSyncItem(item.id);
1254
1451
  processedAny = true;
1452
+ actualPushed++;
1255
1453
  }
1256
1454
  }
1455
+ catch (itemError) {
1456
+ handleSyncItemError(item, itemError);
1457
+ }
1257
1458
  }
1258
- else {
1259
- // Non-duplicate error — fall back to individual to identify the problem row(s).
1260
- // Common cause: RLS on child tables when parent hasn't synced yet.
1459
+ continue;
1460
+ }
1461
+ // Strip internal Dexie fields, add device_id, filter to schema columns.
1462
+ // Ensure system field `deleted` is always present — IndexedDB rows created
1463
+ // before this field was set default to `undefined`, which serializes as `null`
1464
+ // and violates NOT NULL constraints on Supabase.
1465
+ const rawPayload = { ...localEntity, device_id: deviceId };
1466
+ ensureSystemFieldDefaults(rawPayload);
1467
+ delete rawPayload._version;
1468
+ payloads.push(filterPayloadToSchema(tableName, rawPayload));
1469
+ validItems.push(item);
1470
+ }
1471
+ if (payloads.length === 0)
1472
+ continue;
1473
+ const BATCH_SIZE = 500;
1474
+ for (let i = 0; i < payloads.length; i += BATCH_SIZE) {
1475
+ const batch = payloads.slice(i, i + BATCH_SIZE);
1476
+ const batchItems = validItems.slice(i, i + BATCH_SIZE);
1477
+ try {
1478
+ debugLog(`[SYNC] Batch upsert ${batch.length} rows into ${tableName}`);
1479
+ const { error } = await supabase
1480
+ .from(tableName)
1481
+ .upsert(batch, { onConflict: 'id', ignoreDuplicates: false });
1482
+ if (error) {
1483
+ // Batch failed — fall back to individual processing
1261
1484
  debugError(`[SYNC] Batch upsert failed for ${tableName}:`, error);
1262
1485
  for (const item of batchItems) {
1263
1486
  try {
@@ -1273,117 +1496,19 @@ async function pushPendingOps() {
1273
1496
  }
1274
1497
  }
1275
1498
  }
1276
- }
1277
- else {
1278
- // Batch succeeded — bulk-remove all items from queue in one transaction
1279
- const idsToRemove = batchItems.filter((item) => item.id).map((item) => item.id);
1280
- if (idsToRemove.length > 0) {
1281
- await bulkRemoveSyncItems(idsToRemove);
1282
- processedAny = true;
1283
- actualPushed += idsToRemove.length;
1284
- }
1285
- debugLog(`[SYNC] Batch upsert success: ${batch.length} rows into ${tableName}`);
1286
- }
1287
- }
1288
- catch (batchError) {
1289
- // Network-level failure — fall back to individual
1290
- debugError(`[SYNC] Batch insert threw for ${tableName}:`, batchError);
1291
- for (const item of batchItems) {
1292
- try {
1293
- await processSyncItem(item);
1294
- if (item.id) {
1295
- await removeSyncItem(item.id);
1296
- processedAny = true;
1297
- actualPushed++;
1298
- }
1299
- }
1300
- catch (itemError) {
1301
- handleSyncItemError(item, itemError);
1302
- }
1303
- }
1304
- }
1305
- }
1306
- }
1307
- }
1308
- // ── Batch non-create operations: group by table and UPSERT in bulk ──
1309
- // For set/delete/increment on non-singleton tables, read the full local entity
1310
- // from IndexedDB and upsert in batches. This turns N sequential HTTP requests
1311
- // into ceil(N/500) batch calls. Singleton tables need special ID reconciliation
1312
- // and must be processed individually.
1313
- const batchableItems = nonCreateItems.filter((item) => !isSingletonTable(item.table));
1314
- const individualItems = nonCreateItems.filter((item) => isSingletonTable(item.table));
1315
- if (batchableItems.length > 0) {
1316
- // Bulk-read sync queue IDs for the still-queued check (same optimization as creates)
1317
- const batchQueueIds = batchableItems.filter((item) => item.id).map((item) => item.id);
1318
- const batchQueuedRows = await db.table('syncQueue').bulkGet(batchQueueIds);
1319
- const batchStillQueuedIds = new Set(batchQueuedRows
1320
- .map((row, i) => (row ? batchQueueIds[i] : null))
1321
- .filter((id) => id !== null));
1322
- // Group by table
1323
- const itemsByTable = new Map();
1324
- for (const item of batchableItems) {
1325
- if (item.id && !batchStillQueuedIds.has(item.id))
1326
- continue;
1327
- const existing = itemsByTable.get(item.table) || [];
1328
- existing.push(item);
1329
- itemsByTable.set(item.table, existing);
1330
- }
1331
- for (const [tableName, items] of itemsByTable) {
1332
- const supabase = getSupabase();
1333
- const deviceId = getDeviceId();
1334
- const dexieTable = getDexieTableName(tableName);
1335
- // Build batch payload from local IndexedDB state (full entity rows).
1336
- // Bulk-read all entities in one IndexedDB call instead of per-item reads.
1337
- const entityIds = items.map((item) => item.entityId);
1338
- const localEntities = await db.table(dexieTable).bulkGet(entityIds);
1339
- const entityMap = new Map();
1340
- localEntities.forEach((entity, i) => {
1341
- if (entity)
1342
- entityMap.set(entityIds[i], entity);
1343
- });
1344
- const payloads = [];
1345
- const validItems = [];
1346
- for (const item of items) {
1347
- const localEntity = entityMap.get(item.entityId);
1348
- if (!localEntity) {
1349
- // Entity deleted locally — for delete ops this is expected (already gone),
1350
- // for others skip it
1351
- if (item.operationType === 'delete') {
1352
- // Still need to ensure server-side deletion; fall back to individual
1353
- try {
1354
- await processSyncItem(item);
1355
- if (item.id) {
1356
- await removeSyncItem(item.id);
1499
+ else {
1500
+ // Batch succeeded — bulk-remove all items from queue in one transaction
1501
+ const idsToRemove = batchItems.filter((item) => item.id).map((item) => item.id);
1502
+ if (idsToRemove.length > 0) {
1503
+ await bulkRemoveSyncItems(idsToRemove);
1357
1504
  processedAny = true;
1358
- actualPushed++;
1505
+ actualPushed += idsToRemove.length;
1359
1506
  }
1360
- }
1361
- catch (itemError) {
1362
- handleSyncItemError(item, itemError);
1507
+ debugLog(`[SYNC] Batch upsert success: ${batch.length} rows into ${tableName}`);
1363
1508
  }
1364
1509
  }
1365
- continue;
1366
- }
1367
- // Strip internal Dexie fields, add device_id
1368
- const payload = { ...localEntity, device_id: deviceId };
1369
- delete payload._version;
1370
- payloads.push(payload);
1371
- validItems.push(item);
1372
- }
1373
- if (payloads.length === 0)
1374
- continue;
1375
- const BATCH_SIZE = 500;
1376
- for (let i = 0; i < payloads.length; i += BATCH_SIZE) {
1377
- const batch = payloads.slice(i, i + BATCH_SIZE);
1378
- const batchItems = validItems.slice(i, i + BATCH_SIZE);
1379
- try {
1380
- debugLog(`[SYNC] Batch upsert ${batch.length} rows into ${tableName}`);
1381
- const { error } = await supabase
1382
- .from(tableName)
1383
- .upsert(batch, { onConflict: 'id', ignoreDuplicates: false });
1384
- if (error) {
1385
- // Batch failed — fall back to individual processing
1386
- debugError(`[SYNC] Batch upsert failed for ${tableName}:`, error);
1510
+ catch (batchError) {
1511
+ debugError(`[SYNC] Batch upsert threw for ${tableName}:`, batchError);
1387
1512
  for (const item of batchItems) {
1388
1513
  try {
1389
1514
  await processSyncItem(item);
@@ -1398,62 +1523,57 @@ async function pushPendingOps() {
1398
1523
  }
1399
1524
  }
1400
1525
  }
1401
- else {
1402
- // Batch succeeded — bulk-remove all items from queue in one transaction
1403
- const idsToRemove = batchItems.filter((item) => item.id).map((item) => item.id);
1404
- if (idsToRemove.length > 0) {
1405
- await bulkRemoveSyncItems(idsToRemove);
1406
- processedAny = true;
1407
- actualPushed += idsToRemove.length;
1408
- }
1409
- debugLog(`[SYNC] Batch upsert success: ${batch.length} rows into ${tableName}`);
1410
- }
1411
1526
  }
1412
- catch (batchError) {
1413
- debugError(`[SYNC] Batch upsert threw for ${tableName}:`, batchError);
1414
- for (const item of batchItems) {
1415
- try {
1416
- await processSyncItem(item);
1417
- if (item.id) {
1418
- await removeSyncItem(item.id);
1419
- processedAny = true;
1420
- actualPushed++;
1421
- }
1422
- }
1423
- catch (itemError) {
1424
- handleSyncItemError(item, itemError);
1425
- }
1527
+ }
1528
+ }
1529
+ // ── Process singleton table operations individually (need ID reconciliation) ──
1530
+ for (const item of individualItems) {
1531
+ try {
1532
+ if (item.id) {
1533
+ const stillQueued = await db.table('syncQueue').get(item.id);
1534
+ if (!stillQueued) {
1535
+ debugLog(`[SYNC] Skipping purged item: ${item.operationType} ${item.table}/${item.entityId}`);
1536
+ continue;
1426
1537
  }
1427
1538
  }
1539
+ debugLog(`[SYNC] Processing: ${item.operationType} ${item.table}/${item.entityId}`);
1540
+ await processSyncItem(item);
1541
+ if (item.id) {
1542
+ await removeSyncItem(item.id);
1543
+ processedAny = true;
1544
+ actualPushed++;
1545
+ debugLog(`[SYNC] Success: ${item.operationType} ${item.table}/${item.entityId}`);
1546
+ }
1547
+ }
1548
+ catch (error) {
1549
+ handleSyncItemError(item, error);
1428
1550
  }
1429
1551
  }
1552
+ // If we didn't process anything (all items in backoff), stop iterating
1553
+ if (!processedAny)
1554
+ break;
1430
1555
  }
1431
- // ── Process singleton table operations individually (need ID reconciliation) ──
1432
- for (const item of individualItems) {
1556
+ }
1557
+ finally {
1558
+ if (progressMonitor) {
1559
+ clearInterval(progressMonitor);
1560
+ // Final count flush before clearing progress — ensures the UI lands on
1561
+ // the exact remaining count rather than whatever the last tick showed.
1562
+ // Wrapped in try/catch because this runs inside finally and must not
1563
+ // mask an upstream error.
1433
1564
  try {
1434
- if (item.id) {
1435
- const stillQueued = await db.table('syncQueue').get(item.id);
1436
- if (!stillQueued) {
1437
- debugLog(`[SYNC] Skipping purged item: ${item.operationType} ${item.table}/${item.entityId}`);
1438
- continue;
1439
- }
1440
- }
1441
- debugLog(`[SYNC] Processing: ${item.operationType} ${item.table}/${item.entityId}`);
1442
- await processSyncItem(item);
1443
- if (item.id) {
1444
- await removeSyncItem(item.id);
1445
- processedAny = true;
1446
- actualPushed++;
1447
- debugLog(`[SYNC] Success: ${item.operationType} ${item.table}/${item.entityId}`);
1448
- }
1565
+ const finalLiveCount = await db.table('syncQueue').count();
1566
+ syncStatusStore.setPendingCount(finalLiveCount);
1567
+ const finalCompleted = Math.max(0, snapshotItems.length - finalLiveCount);
1568
+ const delta = finalCompleted - lastReportedCompleted;
1569
+ if (delta !== 0)
1570
+ syncStatusStore.advanceProgress(delta);
1449
1571
  }
1450
- catch (error) {
1451
- handleSyncItemError(item, error);
1572
+ catch (err) {
1573
+ debugWarn('[SYNC] Final progress flush failed:', err);
1452
1574
  }
1575
+ syncStatusStore.clearProgress();
1453
1576
  }
1454
- // If we didn't process anything (all items in backoff), stop iterating
1455
- if (!processedAny)
1456
- break;
1457
1577
  }
1458
1578
  return { originalCount, coalescedCount, actualPushed };
1459
1579
  }
@@ -1586,12 +1706,12 @@ async function processSyncItem(item) {
1586
1706
  // INSERT the full entity payload with the originating device_id.
1587
1707
  // Uses .select('id').maybeSingle() to verify the row was actually created
1588
1708
  // (RLS can silently block inserts, returning success with no data).
1589
- const payload = value;
1590
- const { data, error } = await supabase
1591
- .from(table)
1592
- .insert({ id: entityId, ...payload, device_id: deviceId })
1593
- .select('id')
1594
- .maybeSingle();
1709
+ const payload = filterPayloadToSchema(table, ensureSystemFieldDefaults({
1710
+ id: entityId,
1711
+ ...value,
1712
+ device_id: deviceId
1713
+ }));
1714
+ const { data, error } = await supabase.from(table).insert(payload).select('id').maybeSingle();
1595
1715
  // Duplicate key = another device already created this entity.
1596
1716
  // For regular tables, this is a no-op (the entity exists, which is what we wanted).
1597
1717
  // For singleton tables, we need to reconcile: the local UUID was generated offline
@@ -1693,8 +1813,10 @@ async function processSyncItem(item) {
1693
1813
  const localInc = await db.table(dexieTable).get(entityId);
1694
1814
  if (localInc) {
1695
1815
  debugLog(`[SYNC] Increment fallback to insert for missing row: ${table}/${entityId}`);
1696
- const insertPayload = { ...localInc, device_id: deviceId };
1697
- delete insertPayload._version;
1816
+ const rawInsertPayload = { ...localInc, device_id: deviceId };
1817
+ delete rawInsertPayload._version;
1818
+ ensureSystemFieldDefaults(rawInsertPayload);
1819
+ const insertPayload = filterPayloadToSchema(table, rawInsertPayload);
1698
1820
  const { error: insertError } = await supabase
1699
1821
  .from(table)
1700
1822
  .insert(insertPayload)
@@ -1777,9 +1899,11 @@ async function processSyncItem(item) {
1777
1899
  const localEntity = await db.table(dexieTable).get(entityId);
1778
1900
  if (localEntity) {
1779
1901
  debugLog(`[SYNC] Set fallback to insert for missing row: ${table}/${entityId}`);
1780
- const insertPayload = { ...localEntity, device_id: deviceId };
1902
+ const rawSetPayload = { ...localEntity, device_id: deviceId };
1781
1903
  // Remove Dexie internal keys
1782
- delete insertPayload._version;
1904
+ delete rawSetPayload._version;
1905
+ ensureSystemFieldDefaults(rawSetPayload);
1906
+ const insertPayload = filterPayloadToSchema(table, rawSetPayload);
1783
1907
  const { data: inserted, error: insertError } = await supabase
1784
1908
  .from(table)
1785
1909
  .insert(insertPayload)