supastash 0.1.44 → 0.1.46

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -227,18 +227,6 @@ await supastash
 
 ---
 
-## 🗂 Recommended Project Structure
-
-```
-src/
-├─ core/ # Supastash config, Supabase client
-├─ hooks/ # useSupatashData, useSupastashFilters etc.
-├─ types/ # Zod schemas, DB types
-├─ utils/ # Local helpers
-```
-
----
-
 ## 🔧 API Docs
 
 - [`configureSupastash()`](https://0xzekea.github.io/supastash/docs/configuration)
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/core/config/index.ts"],"names":[],"mappings":"AACA,OAAO,EACL,eAAe,EACf,0BAA0B,EAC3B,MAAM,mCAAmC,CAAC;AA0B3C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA0CG;AAEH,wBAAgB,kBAAkB,CAAC,CAAC,SAAS,0BAA0B,EACrE,MAAM,EAAE,eAAe,CAAC,CAAC,CAAC,GAAG;IAAE,gBAAgB,EAAE,CAAC,CAAA;CAAE,QAuCrD;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAgB,kBAAkB,CAChC,CAAC,SAAS,0BAA0B,KACjC,eAAe,CAAC,CAAC,CAAC,CAEtB;AAED;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,wBAAgB,qBAAqB,CACnC,CAAC,SAAS,0BAA0B,EACpC,MAAM,EAAE;IACR,SAAS,EAAE,eAAe,CAAC,CAAC,CAAC,GAAG;QAC9B,YAAY,CAAC,EAAE,MAAM,IAAI,CAAC;KAC3B,CAAC;CACH,QAEA"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/core/config/index.ts"],"names":[],"mappings":"AACA,OAAO,EACL,eAAe,EACf,0BAA0B,EAC3B,MAAM,mCAAmC,CAAC;AA2B3C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA0CG;AAEH,wBAAgB,kBAAkB,CAAC,CAAC,SAAS,0BAA0B,EACrE,MAAM,EAAE,eAAe,CAAC,CAAC,CAAC,GAAG;IAAE,gBAAgB,EAAE,CAAC,CAAA;CAAE,QAuCrD;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAgB,kBAAkB,CAChC,CAAC,SAAS,0BAA0B,KACjC,eAAe,CAAC,CAAC,CAAC,CAEtB;AAED;;;;;;;;;;;;;;;;;;;;;GAqBG;AACH,wBAAgB,qBAAqB,CACnC,CAAC,SAAS,0BAA0B,EACpC,MAAM,EAAE;IACR,SAAS,EAAE,eAAe,CAAC,CAAC,CAAC,GAAG;QAC9B,YAAY,CAAC,EAAE,MAAM,IAAI,CAAC;KAC3B,CAAC;CACH,QAEA"}
@@ -19,6 +19,7 @@ let _config = {
     syncPolicy: DEFAULT_POLICY,
     fieldEnforcement: DEFAULT_FIELDS,
     deleteConflictedRows: false,
+    pushRPCPath: undefined,
 };
 let _configured = false;
 /**
@@ -6,6 +6,12 @@ import { LocalSchemaDefinition } from "../../types/schemaManager.types";
  * Intended for offline-first apps using Supastash. Ensures consistency in structure and indexing while
  * allowing runtime control of schema migration through `deletePreviousSchema`.
  *
+ * It will also create the following indexes:
+ * - synced_at
+ * - deleted_at
+ * - created_at
+ * - updated_at
+ * if they do not exist in the schema and if columns exist in the table.
  * ---
  *
  * @param tableName - The name of the local SQLite table.
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/core/schemaManager/index.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,qBAAqB,EAAE,MAAM,iCAAiC,CAAC;AAIxE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,wBAAsB,iBAAiB,CACrC,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,qBAAqB,EAC7B,oBAAoB,UAAQ,iBAoF7B"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/core/schemaManager/index.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,qBAAqB,EAAE,MAAM,iCAAiC,CAAC;AAMxE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAoCG;AACH,wBAAsB,iBAAiB,CACrC,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,qBAAqB,EAC7B,oBAAoB,UAAQ,iBAyF7B"}
@@ -1,6 +1,7 @@
 import { getSupastashDb } from "../../db/dbInitializer";
 import { clearSchemaCache } from "../../utils/getTableSchema";
 import log, { logError } from "../../utils/logs";
+import { resetSupastashSyncStatus } from "../../utils/sync/status/services";
 /**
  * 🧱 defineLocalSchema
  *
@@ -8,6 +9,12 @@ import log, { logError } from "../../utils/logs";
  * Intended for offline-first apps using Supastash. Ensures consistency in structure and indexing while
  * allowing runtime control of schema migration through `deletePreviousSchema`.
  *
+ * It will also create the following indexes:
+ * - synced_at
+ * - deleted_at
+ * - created_at
+ * - updated_at
+ * if they do not exist in the schema and if columns exist in the table.
  * ---
  *
  * @param tableName - The name of the local SQLite table.
@@ -57,12 +64,10 @@ export async function defineLocalSchema(tableName, schema, deletePreviousSchema
         const sql = `CREATE TABLE IF NOT EXISTS ${tableName} (${__constraints ? `${schemaString}, ${__constraints}` : schemaString});`;
         if (deletePreviousSchema) {
             const dropSql = `DROP TABLE IF EXISTS ${tableName}`;
-            const clearSyncStatusSql = `DELETE FROM supastash_sync_status WHERE table_name = '${tableName}'`;
-            const clearDeleteStatusSql = `DELETE FROM supastash_deleted_status WHERE table_name = '${tableName}'`;
-            const clearLastCreatedStatusSql = `DELETE FROM supastash_last_created WHERE table_name = '${tableName}'`;
             const tryDropTable = async (attempt = 1) => {
                 try {
                     await db.execAsync(dropSql);
+                    await resetSupastashSyncStatus(tableName, undefined, "all");
                 }
                 catch (err) {
                     if (String(err).includes("table is locked") && attempt < 5) {
@@ -73,23 +78,67 @@ export async function defineLocalSchema(tableName, schema, deletePreviousSchema
                 }
             };
             await tryDropTable();
-            await db.execAsync(clearSyncStatusSql);
-            await db.execAsync(clearDeleteStatusSql);
-            await db.execAsync(clearLastCreatedStatusSql);
             clearSchemaCache(tableName);
             log(`[Supastash] Dropped table ${tableName}`);
         }
         await db.execAsync(sql);
+        const standardIndexes = [
+            "synced_at",
+            "deleted_at",
+            "created_at",
+            "updated_at",
+        ];
         // Generate and create index SQL
         if (__indices?.length) {
             for (const col of __indices) {
+                if (standardIndexes.includes(col)) {
+                    continue;
+                }
                 const indexName = `idx_${tableName}_${col}`;
                 const indexSql = `CREATE INDEX IF NOT EXISTS ${indexName} ON ${tableName}(${col});`;
                 await db.execAsync(indexSql);
             }
         }
+        await createStandardIndexes(db, tableName, standardIndexes);
     }
     catch (error) {
         logError(`[Supastash] Error defining schema for table ${tableName}`, error);
     }
 }
+async function createStandardIndexes(db, table, columns) {
+    const pragmaRows = await db.getAllAsync(`PRAGMA table_info(${table});`);
+    const existingCols = Array.isArray(pragmaRows)
+        ? pragmaRows.map((r) => r.name)
+        : [];
+    await db.execAsync("BEGIN");
+    try {
+        for (const col of columns) {
+            if (existingCols.includes(col)) {
+                const hasSameIndex = await hasSingleColumnIndex(db, table, col);
+                if (!hasSameIndex) {
+                    await db.execAsync(`CREATE INDEX IF NOT EXISTS idx_${table}_${col} ON ${table}(${col});`);
+                }
+            }
+        }
+        await db.execAsync("COMMIT");
+    }
+    catch (error) {
+        await db.execAsync("ROLLBACK");
+        logError(`[Supastash] Error creating standard indexes for ${table}`, error);
+    }
+}
+async function hasSingleColumnIndex(db, table, col) {
+    const idxList = await db.getAllAsync(`PRAGMA index_list(${table});`);
+    if (!Array.isArray(idxList))
+        return false;
+    for (const idx of idxList) {
+        const idxName = idx.name;
+        const info = await db.getAllAsync(`PRAGMA index_info(${idxName});`);
+        if (!Array.isArray(info))
+            continue;
+        // Single-column index exactly on `col`
+        if (info.length === 1 && info[0]?.name === col)
+            return true;
+    }
+    return false;
+}
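The hunks above carry the schema-side change in this release: `defineLocalSchema` now auto-indexes the four standard timestamp columns. A minimal usage sketch follows; the schema object shape (column syntax, `__indices`) is inferred from the `schemaString`/`__indices` handling in this diff, and the bare `supastash` import path is an assumption.

```ts
import { defineLocalSchema } from "supastash";

// Hypothetical table definition; column syntax assumed from the
// CREATE TABLE string built in defineLocalSchema above.
await defineLocalSchema("orders", {
  id: "TEXT PRIMARY KEY",
  status: "TEXT",
  created_at: "TEXT",
  updated_at: "TEXT",
  synced_at: "TEXT",
  deleted_at: "TEXT",
  __indices: ["status"], // timestamp columns listed here are now skipped
});
// As of 0.1.46, idx_orders_synced_at, idx_orders_deleted_at,
// idx_orders_created_at and idx_orders_updated_at are created automatically,
// each only if the column exists and no single-column index already covers it.
```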
@@ -38,7 +38,39 @@ export type SupastashConfig<T extends SupastashSQLiteClientTypes> = {
     syncPolicy?: SupastashSyncPolicy;
     fieldEnforcement?: FieldEnforcement;
     deleteConflictedRows?: boolean;
-    useCustomRPCForUpserts?: boolean;
+    /**
+     * The path to the RPC function to use for upserts.
+     * If not provided, Supastash will use the default upsert logic.
+     *
+     * The RPC function should be defined as follows:
+     * @example
+     * await supabase.rpc('push_rpc_path', {
+     *   target_table: 'users',
+     *   payload: [{ id: '1', name: 'John Doe' }],
+     *   columns: ['id', 'name'],
+     * });
+     *
+     * ⚠️ Important:
+     * Your RPC must verify data freshness before updating.
+     * Always ensure local.updated_at > remote.updated_at before performing an update to prevent overwriting newer server data.
+     *
+     * ⚠️ Return Structure:
+     * Your RPC must return an array of objects with the following shape:
+     *
+     * {
+     *   id: string; // UUID of the row
+     *   action: "updated" | "inserted" | "skipped";
+     *   reason?: string | null; // Optional reason, e.g. "stale_remote", "conflict_or_unauthorized"
+     *   record_exists: boolean; // Whether the row already exists remotely
+     * }
+     *
+     * Supastash uses this structure to reconcile local states and decide whether to retry, re-insert, or refresh each row.
+     * It's advisable to use the recommended structure from the docs: https://0xzekea.github.io/supastash/docs/sync-calls#%EF%B8%8F-pushrpcpath-custom-batch-sync-rpc
+     *
+     * @example
+     * Supastash docs: https://0xzekea.github.io/supastash/docs/sync-calls#%EF%B8%8F-pushrpcpath-custom-batch-sync-rpc
+     */
+    pushRPCPath?: string;
 };
 
 interface SupastashSQLiteDatabase {
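A sketch of wiring up the new option, together with the result-row shape the doc comment above requires the RPC to return. `configureSupastash` is the documented entry point (see the README's API Docs link); the `PushRPCResult` type name and the `push_rpc_path` function name are illustrative placeholders, and the rest of the config object is elided.

```ts
import { configureSupastash } from "supastash";

// Row shape the custom RPC must return, per the doc comment above.
// (Hypothetical type name; not exported by the package.)
type PushRPCResult = {
  id: string; // UUID of the row
  action: "updated" | "inserted" | "skipped";
  reason?: string | null; // e.g. "stale_remote", "conflict_or_unauthorized"
  record_exists: boolean; // whether the row already exists remotely
};

configureSupastash({
  // ...the rest of your existing Supastash config (client setup, etc.)...
  pushRPCPath: "push_rpc_path", // name of your Postgres function
});
```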
@@ -1 +1 @@
- {"version":3,"file":"getAllUnsyncedData.d.ts","sourceRoot":"","sources":["../../../../src/utils/sync/pushLocal/getAllUnsyncedData.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,WAAW,EAAE,MAAM,4BAA4B,CAAC;AAqEzD;;;;GAIG;AACH,wBAAsB,kBAAkB,CACtC,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,WAAW,EAAE,GAAG,IAAI,CAAC,CAe/B;AAED;;;;GAIG;AACH,wBAAsB,iBAAiB,CACrC,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,WAAW,EAAE,GAAG,IAAI,CAAC,CAM/B"}
+ {"version":3,"file":"getAllUnsyncedData.d.ts","sourceRoot":"","sources":["../../../../src/utils/sync/pushLocal/getAllUnsyncedData.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,WAAW,EAAE,MAAM,4BAA4B,CAAC;AAsEzD;;;;GAIG;AACH,wBAAsB,kBAAkB,CACtC,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,WAAW,EAAE,GAAG,IAAI,CAAC,CAe/B;AAED;;;;GAIG;AACH,wBAAsB,iBAAiB,CACrC,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,WAAW,EAAE,GAAG,IAAI,CAAC,CAM/B"}
@@ -23,7 +23,8 @@ async function getRemoteKeys(table) {
         table_name: table,
     });
     if (error) {
-        log(`[Supastash] Error getting remote keys for table ${table} on public schema: ${error.message}`);
+        log(`[Supastash] Error getting remote keys for table ${table} on public schema: ${error.message}
+You can find more information in the Supastash docs: https://0xzekea.github.io/supastash/docs/getting-started#%EF%B8%8F-server-side-setup-for-filtered-pulls`);
         numberOfErrors.set(table, (numberOfErrors.get(table) || 0) + 1);
         return null;
     }
@@ -1 +1 @@
- {"version":3,"file":"uploadChunk.d.ts","sourceRoot":"","sources":["../../../../src/utils/sync/pushLocal/uploadChunk.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,WAAW,EAAE,MAAM,4BAA4B,CAAC;AA6JzD;;;;GAIG;AACH,wBAAsB,UAAU,CAC9B,KAAK,EAAE,MAAM,EACb,eAAe,EAAE,WAAW,EAAE,EAC9B,cAAc,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE,KAAK,OAAO,CAAC,OAAO,CAAC,iBActD"}
+ {"version":3,"file":"uploadChunk.d.ts","sourceRoot":"","sources":["../../../../src/utils/sync/pushLocal/uploadChunk.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,WAAW,EAAE,MAAM,4BAA4B,CAAC;AAgMzD;;;;GAIG;AACH,wBAAsB,UAAU,CAC9B,KAAK,EAAE,MAAM,EACb,eAAe,EAAE,WAAW,EAAE,EAC9B,cAAc,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE,KAAK,OAAO,CAAC,OAAO,CAAC,iBActD"}
@@ -6,7 +6,7 @@ import log from "../../logs";
 import { supabaseClientErr } from "../../supabaseClientErr";
 import { setQueryStatus, SyncInfoUpdater } from "../queryStatus";
 import { enforceTimestamps } from "./normalize";
-import { batchUpsert, fetchRemoteHeadsChunked, filterRowsByUpdatedAt, handleRowFailure, markSynced, singleUpsert, } from "./uploadHelpers";
+import { backoff, batchUpsert, fetchRemoteHeadsChunked, filterRowsByUpdatedAt, handleRowFailure, markSynced, rpcUpsert, rpcUpsertSingle, singleUpsert, } from "./uploadHelpers";
 /**
  * Uploads a chunk of data to the remote database
  *
@@ -28,11 +28,21 @@ async function uploadChunk(table, chunk, onPushToRemote) {
         return;
     let errorCount = 0;
     let lastError = null;
+    let pending = [];
+    const hasRPCPath = !!config.pushRPCPath;
     const ids = chunk.map((row) => row.id);
-    // Fetch remote data for the current chunk
-    const remoteIds = await fetchRemoteHeadsChunked(table, ids, supabase);
-    // Loop through the initial chunk and check if the id is in the remote data
-    const toPush = filterRowsByUpdatedAt(table, chunk, remoteIds);
+    const toPush = [];
+    // If we have a RPC path, we can push the whole chunk. Server validates freshness.
+    if (hasRPCPath) {
+        toPush.push(...chunk);
+    }
+    else {
+        // Fetch remote data for the current chunk
+        const remoteIds = await fetchRemoteHeadsChunked(table, ids, supabase);
+        // Loop through the initial chunk and check if the id is in the remote data
+        const filtered = filterRowsByUpdatedAt(table, chunk, remoteIds);
+        toPush.push(...filtered);
+    }
     if (toPush.length === 0)
         return;
     const preflightOK = [];
@@ -57,16 +67,33 @@ async function uploadChunk(table, chunk, onPushToRemote) {
     }
     if (preflightOK.length === 0)
         return;
+    pending.push(...preflightOK);
     const maxBatchAttempts = config.syncPolicy?.maxBatchAttempts ?? 5;
     let attempts = 0;
-    let pending = [...preflightOK];
     while (attempts < maxBatchAttempts && pending.length > 0) {
         let batchOk = false;
+        // RPC return values
+        let completed = [];
+        let existsMap = new Map();
         if (onPushToRemote) {
            const ok = await onPushToRemote(pending);
            if (ok)
                batchOk = true;
         }
+        else if (hasRPCPath) {
+            const res = await rpcUpsert({ table, rows: pending, supabase });
+            completed = res.data.completed;
+            pending = [...res.data.skipped];
+            existsMap = res.data.existsMap;
+            batchOk = res.error == null && pending.length === 0;
+            // If there was an RPC error, we need to retry the main function
+            if (res.error) {
+                attempts++;
+                await backoff(attempts);
+                pending = [...preflightOK];
+                continue;
+            }
+        }
         else {
             const { error } = await batchUpsert(table, pending, supabase);
             if (!error)
@@ -76,16 +103,24 @@ async function uploadChunk(table, chunk, onPushToRemote) {
             await markSynced(table, pending.map((r) => r.id));
             return;
         }
+        if (completed.length > 0) {
+            await markSynced(table, completed.map((r) => r.id));
+        }
         //Batch failed -> isolate per-row offenders
         const keep = [];
         const syncedNow = [];
         for (const row of pending) {
-            const res = onPushToRemote
-                ? await (async () => {
-                    const ok = await onPushToRemote([row]);
-                    return { error: ok ? null : { code: "ROW_FAILED" } };
-                })()
-                : await singleUpsert(table, row, supabase);
+            let res = null;
+            if (onPushToRemote) {
+                const ok = await onPushToRemote([row]);
+                res = { error: ok ? null : { code: "ROW_FAILED" } };
+            }
+            else if (hasRPCPath) {
+                res = await rpcUpsertSingle({ table, row, supabase, existsMap });
+            }
+            else {
+                res = await singleUpsert(table, row, supabase);
+            }
             if (!res.error) {
                 syncedNow.push(row.id);
                 continue;
@@ -104,11 +139,7 @@ async function uploadChunk(table, chunk, onPushToRemote) {
             return;
         // Backoff before next batch round (exponential, bounded by policy)
         attempts++;
-        const schedule = config.syncPolicy?.backoffDelaysMs ?? [
-            10000, 30000, 120000, 300000, 600000,
-        ];
-        const delay = schedule[Math.min(attempts - 1, schedule.length - 1)];
-        await new Promise((r) => setTimeout(r, delay));
+        await backoff(attempts);
         pending = keep;
     }
     if (pending.length > 0) {
@@ -3,14 +3,39 @@ import { RowLike } from "../../../types/syncEngine.types";
 export declare function classifyFailure(cfg: SupastashConfig<any>, code?: string | number): "HTTP" | "UNKNOWN" | "NON_RETRYABLE" | "FK_BLOCK" | "RETRYABLE";
 declare function batchUpsert(table: string, rows: RowLike[], supabase: any): Promise<any>;
 declare function singleUpsert(table: string, row: RowLike, supabase: any): Promise<any>;
+declare function backoff(attempts: number): Promise<void>;
+declare function rpcUpsert({ table, rows, supabase, }: {
+    table: string;
+    rows: RowLike[];
+    supabase: any;
+}): Promise<{
+    data: {
+        completed: RowLike[];
+        skipped: RowLike[];
+        existsMap: Map<string, boolean>;
+    };
+    error: any;
+}>;
+declare function rpcUpsertSingle({ table, row, supabase, existsMap, }: {
+    table: string;
+    row: RowLike;
+    supabase: any;
+    existsMap: Map<string, boolean>;
+}): Promise<{
+    data: null;
+    error: any;
+} | {
+    data: any;
+    error: null;
+}>;
 declare function markSynced(table: string, ids: string[]): Promise<void>;
 declare function filterRowsByUpdatedAt(table: string, chunk: RowLike[], remoteHeads: Map<string, string>): RowLike[];
 declare function handleRowFailure(cfg: SupastashConfig<any>, table: string, row: RowLike, err: any, supabase: any): Promise<"DROP" | "KEEP" | "REPLACED">;
-export { batchUpsert, filterRowsByUpdatedAt, handleRowFailure, markSynced, singleUpsert, };
+export { backoff, batchUpsert, filterRowsByUpdatedAt, handleRowFailure, markSynced, rpcUpsert, rpcUpsertSingle, singleUpsert, };
 /**
  * Deletes local row and rewinds table watermark so normal pull will fetch server copy.
  * No server read needed.
 */
-export declare function rewindAndDropLocal(table: string, rowId: string): Promise<void>;
+export declare function rewindAndDropLocal(table: string, rowId: string, supabase: any): Promise<void>;
 export declare function fetchRemoteHeadsChunked(table: string, ids: string[], supabase: any): Promise<Map<string, string>>;
 //# sourceMappingURL=uploadHelpers.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"uploadHelpers.d.ts","sourceRoot":"","sources":["../../../../src/utils/sync/pushLocal/uploadHelpers.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,eAAe,EAAE,MAAM,sCAAsC,CAAC;AACvE,OAAO,EAAE,OAAO,EAAE,MAAM,iCAAiC,CAAC;AAS1D,wBAAgB,eAAe,CAC7B,GAAG,EAAE,eAAe,CAAC,GAAG,CAAC,EACzB,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,mEAWvB;AAED,iBAAe,WAAW,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,OAAO,EAAE,EAAE,QAAQ,EAAE,GAAG,gBAEvE;AAED,iBAAe,YAAY,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,OAAO,EAAE,QAAQ,EAAE,GAAG,gBAErE;AAeD,iBAAe,UAAU,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,iBAIrD;AAWD,iBAAS,qBAAqB,CAC5B,KAAK,EAAE,MAAM,EACb,KAAK,EAAE,OAAO,EAAE,EAChB,WAAW,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,aAiCjC;AAMD,iBAAe,gBAAgB,CAC7B,GAAG,EAAE,eAAe,CAAC,GAAG,CAAC,EACzB,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,OAAO,EACZ,GAAG,EAAE,GAAG,EACR,QAAQ,EAAE,GAAG,GACZ,OAAO,CAAC,MAAM,GAAG,MAAM,GAAG,UAAU,CAAC,CA0DvC;AAgBD,OAAO,EACL,WAAW,EACX,qBAAqB,EACrB,gBAAgB,EAChB,UAAU,EACV,YAAY,GACb,CAAC;AAEF;;;GAGG;AACH,wBAAsB,kBAAkB,CAAC,KAAK,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM,iBAUpE;AAsBD,wBAAsB,uBAAuB,CAC3C,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,MAAM,EAAE,EACb,QAAQ,EAAE,GAAG,gCAcd"}
+ {"version":3,"file":"uploadHelpers.d.ts","sourceRoot":"","sources":["../../../../src/utils/sync/pushLocal/uploadHelpers.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,eAAe,EAAE,MAAM,sCAAsC,CAAC;AACvE,OAAO,EAAE,OAAO,EAAE,MAAM,iCAAiC,CAAC;AAO1D,wBAAgB,eAAe,CAC7B,GAAG,EAAE,eAAe,CAAC,GAAG,CAAC,EACzB,IAAI,CAAC,EAAE,MAAM,GAAG,MAAM,mEAWvB;AAED,iBAAe,WAAW,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,OAAO,EAAE,EAAE,QAAQ,EAAE,GAAG,gBAEvE;AAED,iBAAe,YAAY,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,OAAO,EAAE,QAAQ,EAAE,GAAG,gBAErE;AAED,iBAAe,OAAO,CAAC,QAAQ,EAAE,MAAM,iBAOtC;AAmBD,iBAAe,SAAS,CAAC,EACvB,KAAK,EACL,IAAI,EACJ,QAAQ,GACT,EAAE;IACD,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,OAAO,EAAE,CAAC;IAChB,QAAQ,EAAE,GAAG,CAAC;CACf;;;;;;;GA+CA;AAED,iBAAe,eAAe,CAAC,EAC7B,KAAK,EACL,GAAG,EACH,QAAQ,EACR,SAAS,GACV,EAAE;IACD,KAAK,EAAE,MAAM,CAAC;IACd,GAAG,EAAE,OAAO,CAAC;IACb,QAAQ,EAAE,GAAG,CAAC;IACd,SAAS,EAAE,GAAG,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjC;;;;;;GAaA;AAMD,iBAAe,UAAU,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,MAAM,EAAE,iBAIrD;AAWD,iBAAS,qBAAqB,CAC5B,KAAK,EAAE,MAAM,EACb,KAAK,EAAE,OAAO,EAAE,EAChB,WAAW,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,aAiCjC;AAMD,iBAAe,gBAAgB,CAC7B,GAAG,EAAE,eAAe,CAAC,GAAG,CAAC,EACzB,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,OAAO,EACZ,GAAG,EAAE,GAAG,EACR,QAAQ,EAAE,GAAG,GACZ,OAAO,CAAC,MAAM,GAAG,MAAM,GAAG,UAAU,CAAC,CAkDvC;AAgBD,OAAO,EACL,OAAO,EACP,WAAW,EACX,qBAAqB,EACrB,gBAAgB,EAChB,UAAU,EACV,SAAS,EACT,eAAe,EACf,YAAY,GACb,CAAC;AAEF;;;GAGG;AACH,wBAAsB,kBAAkB,CACtC,KAAK,EAAE,MAAM,EACb,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,GAAG,iBAkBd;AASD,wBAAsB,uBAAuB,CAC3C,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,MAAM,EAAE,EACb,QAAQ,EAAE,GAAG,gCAcd"}
@@ -2,10 +2,9 @@ import { getSupastashConfig } from "../../../core/config";
 import { getSupastashDb } from "../../../db/dbInitializer";
 import { supastashEventBus } from "../../../utils/events/eventBus";
 import log, { logWarn } from "../../../utils/logs";
+import { upsertData } from "../pullFromRemote/updateLocalDb";
 import { setQueryStatus } from "../queryStatus";
 import { updateLocalSyncedAt } from "../status/syncUpdate";
-const DEFAULT_REWIND_MS = 60 * 60 * 1000; // 1 hour
-const MAX_REWIND_BACKSTOP_MS = 7 * 24 * 60 * 60 * 1000; // 7 days
 export function classifyFailure(cfg, code) {
     const p = cfg.syncPolicy ?? {};
     const n = Number(code);
@@ -28,6 +27,14 @@ async function batchUpsert(table, rows, supabase) {
 async function singleUpsert(table, row, supabase) {
     return await supabase.from(table).upsert(row).select("id").maybeSingle();
 }
+async function backoff(attempts) {
+    const config = getSupastashConfig();
+    const schedule = config.syncPolicy?.backoffDelaysMs ?? [
+        10000, 30000, 120000, 300000, 600000,
+    ];
+    const delay = schedule[Math.min(attempts - 1, schedule.length - 1)];
+    await new Promise((r) => setTimeout(r, delay));
+}
 async function fetchServerRowById(table, id, supabase) {
     const { data } = await supabase
         .from(table)
@@ -36,6 +43,72 @@ async function fetchServerRowById(table, id, supabase) {
         .maybeSingle();
     return data ?? null;
 }
+/* ────────────────────────────────────────────────────────────────────────── *
+ * RPC Upsert
+ * ────────────────────────────────────────────────────────────────────────── */
+function setPending(table, rows) {
+    for (const row of rows) {
+        setQueryStatus(row.id, table, "pending");
+    }
+}
+async function rpcUpsert({ table, rows, supabase, }) {
+    const cfg = getSupastashConfig();
+    if (!cfg.pushRPCPath) {
+        throw new Error("pushRPCPath is not configured. Please configure it in the Supastash config. You can find more information in the Supastash docs: https://0xzekea.github.io/supastash/docs/sync-calls#%EF%B8%8F-pushrpcpath-custom-batch-sync-rpc");
+    }
+    setPending(table, rows);
+    const columns = Object.keys(rows[0]);
+    const { data, error } = await supabase.rpc(cfg.pushRPCPath, {
+        target_table: table,
+        payload: rows,
+        columns,
+    });
+    const mappedRows = new Map();
+    for (const row of rows) {
+        mappedRows.set(row.id, row);
+    }
+    const skipped = [];
+    const completed = [];
+    const existsMap = new Map();
+    for (const row of data ?? []) {
+        if (row.action === "skipped") {
+            if (row.reason === "stale_remote") {
+                void acceptServerAndStopRetrying(table, row.id);
+                continue;
+            }
+            const localRow = mappedRows.get(row.id);
+            if (localRow) {
+                skipped.push(localRow);
+                existsMap.set(localRow.id, !!row.record_exists);
+            }
+        }
+        else {
+            completed.push(row);
+        }
+    }
+    return {
+        data: {
+            completed,
+            skipped,
+            existsMap,
+        },
+        error: error ?? null,
+    };
+}
+async function rpcUpsertSingle({ table, row, supabase, existsMap, }) {
+    const rowExist = existsMap.get(row.id) ?? false;
+    const { data, error } = rowExist
+        ? await supabase
+            .from(table)
+            .update(row)
+            .eq("id", row.id)
+            .select("id")
+            .maybeSingle()
+        : await supabase.from(table).insert(row).select("id").maybeSingle();
+    if (error)
+        return { data: null, error };
+    return { data, error: null };
+}
 /* ────────────────────────────────────────────────────────────────────────── *
  * Local side-effects (shared)
  * ────────────────────────────────────────────────────────────────────────── */
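A worked example of how `rpcUpsert` above partitions an RPC response; the row ids and reasons are illustrative, following the documented return shape.

```ts
// Hypothetical response from the custom RPC for three pushed rows:
const data = [
  { id: "a", action: "inserted", reason: null, record_exists: false },
  { id: "b", action: "skipped", reason: "stale_remote", record_exists: true },
  { id: "c", action: "skipped", reason: "conflict_or_unauthorized", record_exists: true },
];
// Walking the loop above:
// - "a": action !== "skipped", so it lands in `completed` and uploadChunk
//   later passes it to markSynced().
// - "b": skipped with reason "stale_remote" → acceptServerAndStopRetrying()
//   accepts the fresher server copy; the row is not retried.
// - "c": skipped for another reason → it stays in `skipped` and
//   existsMap.set("c", true), so the per-row fallback (rpcUpsertSingle)
//   issues an UPDATE rather than an INSERT on retry.
```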
@@ -87,13 +160,12 @@ async function handleRowFailure(cfg, table, row, err, supabase) {
         const action = cfg.syncPolicy?.onNonRetryable ?? "accept-server";
         if (action === "delete-local" || cfg.deleteConflictedRows) {
             logWarn(`Row ${row.id} on ${table} hit NON_RETRYABLE conflict → deleting local`, JSON.stringify(err));
-            await deleteLocalRow(table, row.id);
-            cfg.syncPolicy?.onRowDroppedLocal?.(table, row.id);
+            await deleteLocalRow(table, row.id, supabase);
             return "DROP";
         }
         else {
             logWarn(`Row ${row.id} on ${table} hit NON_RETRYABLE conflict → accepting server`, JSON.stringify(err));
-            await acceptServerAndStopRetrying(table, row.id);
+            await deleteLocalRow(table, row.id, supabase);
             cfg.syncPolicy?.onRowAcceptedServer?.(table, row.id);
             return "DROP";
         }
@@ -107,16 +179,8 @@ async function handleRowFailure(cfg, table, row, err, supabase) {
     if (klass === "HTTP" || klass === "RETRYABLE" || klass === "UNKNOWN") {
         log(`Row ${row.id} on ${table} transient/HTTP error → scheduling retry`, JSON.stringify(err));
         if (klass === "HTTP") {
-            const server = await fetchServerRowById(table, row.id, supabase);
-            if (server) {
-                await replaceLocalWithServer(table, server);
-                return "REPLACED";
-            }
-            else if (cfg.deleteConflictedRows) {
-                await deleteLocalRow(table, row.id);
-                cfg.syncPolicy?.onRowDroppedLocal?.(table, row.id);
-                return "REPLACED";
-            }
+            await deleteLocalRow(table, row.id, supabase);
+            return "REPLACED";
         }
         setQueryStatus(row.id, table, "error");
         return "KEEP";
@@ -130,33 +194,33 @@ function quoteIdent(name) {
     }
     return `"${name.replace(/"/g, '""')}"`;
 }
-async function deleteLocalRow(table, id) {
-    await rewindAndDropLocal(table, id);
+async function deleteLocalRow(table, id, supabase) {
+    await rewindAndDropLocal(table, id, supabase);
     setQueryStatus(id, table, "success");
     supastashEventBus.emit("updateSyncStatus");
 }
-export { batchUpsert, filterRowsByUpdatedAt, handleRowFailure, markSynced, singleUpsert, };
+export { backoff, batchUpsert, filterRowsByUpdatedAt, handleRowFailure, markSynced, rpcUpsert, rpcUpsertSingle, singleUpsert, };
 /**
  * Deletes local row and rewinds table watermark so normal pull will fetch server copy.
  * No server read needed.
 */
-export async function rewindAndDropLocal(table, rowId) {
-    const db = await getSupastashDb();
-    // 1) Delete local copy
-    await db.runAsync(`DELETE FROM ${quoteIdent(table)} WHERE id = ?`, [rowId]);
-    logWarn(`[Supastash] REPLACED: dropped local ${table}:${rowId}
-`);
+export async function rewindAndDropLocal(table, rowId, supabase) {
+    const server = await fetchServerRowById(table, rowId, supabase);
+    if (server) {
+        await replaceLocalWithServer(table, server);
+    }
+    else {
+        const db = await getSupastashDb();
+        // 1) Delete local copy
+        await db.runAsync(`DELETE FROM ${quoteIdent(table)} WHERE id = ?`, [rowId]);
+        const cfg = getSupastashConfig();
+        cfg.syncPolicy?.onRowDroppedLocal?.(table, rowId);
+        logWarn(`[Supastash] REPLACED: dropped local ${table}:${rowId}
+`);
+    }
 }
 async function replaceLocalWithServer(table, serverRow) {
-    const db = await getSupastashDb();
-    const cols = Object.keys(serverRow);
-    const placeholders = cols.map(() => "?").join(", ");
-    const updates = cols
-        .filter((c) => c !== "id")
-        .map((c) => `${c}=excluded.${c}`)
-        .join(", ");
-    await db.runAsync(`INSERT INTO ${quoteIdent(table)} (${cols.join(",")}) VALUES (${placeholders})
-ON CONFLICT(id) DO UPDATE SET ${updates};`, cols.map((c) => serverRow[c] ?? null));
+    await upsertData(table, serverRow);
     await updateLocalSyncedAt(table, [serverRow.id]);
     setQueryStatus(serverRow.id, table, "success");
     supastashEventBus.emit("updateSyncStatus");
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "supastash",
-  "version": "0.1.44",
+  "version": "0.1.46",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "type": "module",