@syncular/server-dialect-postgres 0.0.1-60

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
package/dist/index.d.ts ADDED
@@ -0,0 +1,103 @@
+ /**
+ * @syncular/server-dialect-postgres - PostgreSQL Server Sync Dialect
+ *
+ * Driver-agnostic PostgreSQL dialect for sync. Works with any Postgres-compatible
+ * Kysely dialect (pg, pglite, neon, etc.).
+ *
+ * Tables:
+ * - sync_commits: commit log (idempotency + ordering)
+ * - sync_table_commits: commit routing index (fast pull by table)
+ * - sync_changes: change log (JSONB scopes for filtering)
+ * - sync_client_cursors: per-client cursor tracking (pruning/observability)
+ */
+ import type { ScopeValues, StoredScopes, SyncOp } from '@syncular/core';
+ import type { DbExecutor, ServerSyncDialect } from '@syncular/server';
+ import type { SyncChangeRow, SyncCommitRow, SyncCoreDb } from '@syncular/server/schema';
+ import type { Kysely, Transaction } from 'kysely';
+ export declare class PostgresServerSyncDialect implements ServerSyncDialect {
+ readonly name: "postgres";
+ readonly supportsForUpdate = true;
+ readonly supportsSavepoints = true;
+ ensureSyncSchema<DB extends SyncCoreDb>(db: Kysely<DB>): Promise<void>;
+ executeInTransaction<DB extends SyncCoreDb, T>(db: Kysely<DB>, fn: (executor: DbExecutor<DB>) => Promise<T>): Promise<T>;
+ setRepeatableRead<DB extends SyncCoreDb>(trx: DbExecutor<DB>): Promise<void>;
+ readMaxCommitSeq<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, options?: {
+ partitionId?: string;
+ }): Promise<number>;
+ readMinCommitSeq<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, options?: {
+ partitionId?: string;
+ }): Promise<number>;
+ readCommitSeqsForPull<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, args: {
+ cursor: number;
+ limitCommits: number;
+ tables: string[];
+ partitionId?: string;
+ }): Promise<number[]>;
+ readCommits<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, commitSeqs: number[], options?: {
+ partitionId?: string;
+ }): Promise<SyncCommitRow[]>;
+ readChangesForCommits<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, args: {
+ commitSeqs: number[];
+ table: string;
+ scopes: ScopeValues;
+ partitionId?: string;
+ }): Promise<SyncChangeRow[]>;
+ readIncrementalPullRows<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, args: {
+ table: string;
+ scopes: ScopeValues;
+ cursor: number;
+ limitCommits: number;
+ partitionId?: string;
+ }): Promise<Array<{
+ commit_seq: number;
+ actor_id: string;
+ created_at: string;
+ change_id: number;
+ table: string;
+ row_id: string;
+ op: SyncOp;
+ row_json: unknown | null;
+ row_version: number | null;
+ scopes: StoredScopes;
+ }>>;
+ streamIncrementalPullRows<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, args: {
+ table: string;
+ scopes: ScopeValues;
+ cursor: number;
+ limitCommits: number;
+ batchSize?: number;
+ partitionId?: string;
+ }): AsyncGenerator<{
+ commit_seq: number;
+ actor_id: string;
+ created_at: string;
+ change_id: number;
+ table: string;
+ row_id: string;
+ op: SyncOp;
+ row_json: unknown | null;
+ row_version: number | null;
+ scopes: StoredScopes;
+ }>;
+ compactChanges<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, args: {
+ fullHistoryHours: number;
+ }): Promise<number>;
+ recordClientCursor<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, args: {
+ partitionId?: string;
+ clientId: string;
+ actorId: string;
+ cursor: number;
+ effectiveScopes: ScopeValues;
+ }): Promise<void>;
+ scopesToDb(scopes: StoredScopes): StoredScopes;
+ dbToScopes(value: unknown): StoredScopes;
+ dbToArray(value: unknown): string[];
+ arrayToDb(values: string[]): string[];
+ readAffectedTablesFromChanges<DB extends SyncCoreDb>(db: Kysely<DB> | Transaction<DB>, commitSeq: number, options?: {
+ partitionId?: string;
+ }): Promise<string[]>;
+ ensureConsoleSchema<DB extends SyncCoreDb>(db: Kysely<DB>): Promise<void>;
+ private ensureIndex;
+ }
+ export declare function createPostgresServerDialect(): PostgresServerSyncDialect;
+ //# sourceMappingURL=index.d.ts.map
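
The declarations above are the dialect's entire public surface. As a minimal usage sketch (assuming a standard `pg` Pool and a DATABASE_URL connection string; the connection wiring and surrounding server setup are illustrative, not part of this package):

  import { Pool } from 'pg';
  import { Kysely, PostgresDialect } from 'kysely';
  import type { SyncCoreDb } from '@syncular/server/schema';
  import { createPostgresServerDialect } from '@syncular/server-dialect-postgres';

  // Any Postgres-compatible Kysely dialect works here; pg is shown, pglite/neon would look the same.
  const db = new Kysely<SyncCoreDb>({
    dialect: new PostgresDialect({ pool: new Pool({ connectionString: process.env.DATABASE_URL }) }),
  });

  const dialect = createPostgresServerDialect();
  await dialect.ensureSyncSchema(db);              // creates/upgrades the sync_* tables
  const head = await dialect.readMaxCommitSeq(db); // current head of the commit log (0 if empty)
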
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"…"}
package/dist/index.js ADDED
@@ -0,0 +1,564 @@
+ /**
+ * @syncular/server-dialect-postgres - PostgreSQL Server Sync Dialect
+ *
+ * Driver-agnostic PostgreSQL dialect for sync. Works with any Postgres-compatible
+ * Kysely dialect (pg, pglite, neon, etc.).
+ *
+ * Tables:
+ * - sync_commits: commit log (idempotency + ordering)
+ * - sync_table_commits: commit routing index (fast pull by table)
+ * - sync_changes: change log (JSONB scopes for filtering)
+ * - sync_client_cursors: per-client cursor tracking (pruning/observability)
+ */
+ import { sql } from 'kysely';
+ function coerceNumber(value) {
+ if (value === null || value === undefined)
+ return null;
+ if (typeof value === 'number')
+ return Number.isFinite(value) ? value : null;
+ if (typeof value === 'bigint')
+ return Number.isFinite(Number(value)) ? Number(value) : null;
+ if (typeof value === 'string') {
+ const n = Number(value);
+ return Number.isFinite(n) ? n : null;
+ }
+ return null;
+ }
+ function coerceIsoString(value) {
+ if (typeof value === 'string')
+ return value;
+ if (value instanceof Date)
+ return value.toISOString();
+ return String(value);
+ }
+ function parseScopes(value) {
+ if (value === null || value === undefined)
+ return {};
+ if (typeof value === 'object' && !Array.isArray(value)) {
+ const result = {};
+ for (const [k, v] of Object.entries(value)) {
+ if (typeof v === 'string') {
+ result[k] = v;
+ }
+ }
+ return result;
+ }
+ return {};
+ }
+ export class PostgresServerSyncDialect {
+ name = 'postgres';
+ supportsForUpdate = true;
+ supportsSavepoints = true;
+ // ===========================================================================
+ // Schema Setup
+ // ===========================================================================
+ async ensureSyncSchema(db) {
+ await db.schema
+ .createTable('sync_commits')
+ .ifNotExists()
+ .addColumn('commit_seq', 'bigserial', (col) => col.primaryKey())
+ .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+ .addColumn('actor_id', 'text', (col) => col.notNull())
+ .addColumn('client_id', 'text', (col) => col.notNull())
+ .addColumn('client_commit_id', 'text', (col) => col.notNull())
+ .addColumn('created_at', 'timestamptz', (col) => col.notNull().defaultTo(sql `now()`))
+ .addColumn('meta', 'jsonb')
+ .addColumn('result_json', 'jsonb')
+ .addColumn('change_count', 'integer', (col) => col.notNull().defaultTo(0))
+ .addColumn('affected_tables', sql `text[]`, (col) => col.notNull().defaultTo(sql `ARRAY[]::text[]`))
+ .execute();
+ // Ensure new columns exist for dev environments that already created the table.
+ await sql `ALTER TABLE sync_commits
+ ADD COLUMN IF NOT EXISTS change_count integer NOT NULL DEFAULT 0`.execute(db);
+ await sql `ALTER TABLE sync_commits
+ ADD COLUMN IF NOT EXISTS affected_tables text[] NOT NULL DEFAULT ARRAY[]::text[]`.execute(db);
+ await sql `ALTER TABLE sync_commits
+ ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+ await sql `DROP INDEX IF EXISTS idx_sync_commits_client_commit`.execute(db);
+ await db.schema
+ .createIndex('idx_sync_commits_client_commit')
+ .ifNotExists()
+ .on('sync_commits')
+ .columns(['partition_id', 'client_id', 'client_commit_id'])
+ .unique()
+ .execute();
+ // Table-based commit routing index
+ await db.schema
+ .createTable('sync_table_commits')
+ .ifNotExists()
+ .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+ .addColumn('table', 'text', (col) => col.notNull())
+ .addColumn('commit_seq', 'bigint', (col) => col.notNull().references('sync_commits.commit_seq').onDelete('cascade'))
+ .addPrimaryKeyConstraint('sync_table_commits_pk', [
+ 'partition_id',
+ 'table',
+ 'commit_seq',
+ ])
+ .execute();
+ await sql `ALTER TABLE sync_table_commits
+ ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+ await db.schema
+ .createIndex('idx_sync_table_commits_commit_seq')
+ .ifNotExists()
+ .on('sync_table_commits')
+ .columns(['partition_id', 'commit_seq'])
+ .execute();
+ // Changes table with JSONB scopes
+ await db.schema
+ .createTable('sync_changes')
+ .ifNotExists()
+ .addColumn('change_id', 'bigserial', (col) => col.primaryKey())
+ .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+ .addColumn('commit_seq', 'bigint', (col) => col.notNull().references('sync_commits.commit_seq').onDelete('cascade'))
+ .addColumn('table', 'text', (col) => col.notNull())
+ .addColumn('row_id', 'text', (col) => col.notNull())
+ .addColumn('op', 'text', (col) => col.notNull())
+ .addColumn('row_json', 'jsonb')
+ .addColumn('row_version', 'bigint')
+ .addColumn('scopes', 'jsonb', (col) => col.notNull())
+ .execute();
+ await sql `ALTER TABLE sync_changes
+ ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+ await db.schema
+ .createIndex('idx_sync_changes_commit_seq')
+ .ifNotExists()
+ .on('sync_changes')
+ .columns(['partition_id', 'commit_seq'])
+ .execute();
+ await db.schema
+ .createIndex('idx_sync_changes_table')
+ .ifNotExists()
+ .on('sync_changes')
+ .columns(['partition_id', 'table'])
+ .execute();
+ await this.ensureIndex(db, 'idx_sync_changes_scopes', 'CREATE INDEX idx_sync_changes_scopes ON sync_changes USING GIN (scopes)');
+ await db.schema
+ .createTable('sync_client_cursors')
+ .ifNotExists()
+ .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+ .addColumn('client_id', 'text', (col) => col.notNull())
+ .addColumn('actor_id', 'text', (col) => col.notNull())
+ .addColumn('cursor', 'bigint', (col) => col.notNull().defaultTo(0))
+ .addColumn('effective_scopes', 'jsonb', (col) => col.notNull().defaultTo(sql `'{}'::jsonb`))
+ .addColumn('updated_at', 'timestamptz', (col) => col.notNull().defaultTo(sql `now()`))
+ .addPrimaryKeyConstraint('sync_client_cursors_pk', [
+ 'partition_id',
+ 'client_id',
+ ])
+ .execute();
+ await sql `ALTER TABLE sync_client_cursors
+ ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+ await db.schema
+ .createIndex('idx_sync_client_cursors_updated_at')
+ .ifNotExists()
+ .on('sync_client_cursors')
+ .columns(['updated_at'])
+ .execute();
+ await db.schema
+ .createTable('sync_snapshot_chunks')
+ .ifNotExists()
+ .addColumn('chunk_id', 'text', (col) => col.primaryKey())
+ .addColumn('partition_id', 'text', (col) => col.notNull().defaultTo('default'))
+ .addColumn('scope_key', 'text', (col) => col.notNull())
+ .addColumn('scope', 'text', (col) => col.notNull())
+ .addColumn('as_of_commit_seq', 'bigint', (col) => col.notNull())
+ .addColumn('row_cursor', 'text', (col) => col.notNull().defaultTo(''))
+ .addColumn('row_limit', 'integer', (col) => col.notNull())
+ .addColumn('encoding', 'text', (col) => col.notNull())
+ .addColumn('compression', 'text', (col) => col.notNull())
+ .addColumn('sha256', 'text', (col) => col.notNull())
+ .addColumn('byte_length', 'integer', (col) => col.notNull())
+ .addColumn('blob_hash', 'text', (col) => col.notNull().defaultTo(''))
+ .addColumn('body', 'bytea') // Deprecated: use blob storage
+ .addColumn('created_at', 'timestamptz', (col) => col.notNull().defaultTo(sql `now()`))
+ .addColumn('expires_at', 'timestamptz', (col) => col.notNull())
+ .execute();
+ await sql `ALTER TABLE sync_snapshot_chunks
+ ADD COLUMN IF NOT EXISTS partition_id text NOT NULL DEFAULT 'default'`.execute(db);
+ await db.schema
+ .createIndex('idx_sync_snapshot_chunks_expires_at')
+ .ifNotExists()
+ .on('sync_snapshot_chunks')
+ .columns(['expires_at'])
+ .execute();
+ await db.schema
+ .createIndex('idx_sync_snapshot_chunks_page_key')
+ .ifNotExists()
+ .on('sync_snapshot_chunks')
+ .columns([
+ 'partition_id',
+ 'scope_key',
+ 'scope',
+ 'as_of_commit_seq',
+ 'row_cursor',
+ 'row_limit',
+ 'encoding',
+ 'compression',
+ ])
+ .unique()
+ .execute();
+ }
+ // ===========================================================================
+ // Transaction Control
+ // ===========================================================================
+ async executeInTransaction(db, fn) {
+ return db.transaction().execute(fn);
+ }
+ async setRepeatableRead(trx) {
+ await sql `SET TRANSACTION ISOLATION LEVEL REPEATABLE READ`.execute(trx);
+ }
+ // ===========================================================================
+ // Commit/Change Log Queries
+ // ===========================================================================
+ async readMaxCommitSeq(db, options) {
+ const partitionId = options?.partitionId ?? 'default';
+ const res = await sql `
+ SELECT max(commit_seq) as max_seq
+ FROM sync_commits
+ WHERE partition_id = ${partitionId}
+ `.execute(db);
+ return coerceNumber(res.rows[0]?.max_seq) ?? 0;
+ }
+ async readMinCommitSeq(db, options) {
+ const partitionId = options?.partitionId ?? 'default';
+ const res = await sql `
+ SELECT min(commit_seq) as min_seq
+ FROM sync_commits
+ WHERE partition_id = ${partitionId}
+ `.execute(db);
+ return coerceNumber(res.rows[0]?.min_seq) ?? 0;
+ }
+ async readCommitSeqsForPull(db, args) {
+ const partitionId = args.partitionId ?? 'default';
+ if (args.tables.length === 0)
+ return [];
+ if (args.tables.length === 1) {
+ const res = await sql `
+ SELECT commit_seq
+ FROM sync_table_commits
+ WHERE partition_id = ${partitionId}
+ AND "table" = ${args.tables[0]}
+ AND commit_seq > ${args.cursor}
+ ORDER BY commit_seq ASC
+ LIMIT ${args.limitCommits}
+ `.execute(db);
+ return res.rows
+ .map((r) => coerceNumber(r.commit_seq))
+ .filter((n) => typeof n === 'number' && Number.isFinite(n) && n > args.cursor);
+ }
+ const res = await sql `
+ SELECT DISTINCT commit_seq
+ FROM sync_table_commits
+ WHERE partition_id = ${partitionId}
+ AND "table" = ANY(${args.tables}::text[])
+ AND commit_seq > ${args.cursor}
+ ORDER BY commit_seq ASC
+ LIMIT ${args.limitCommits}
+ `.execute(db);
+ return res.rows
+ .map((r) => coerceNumber(r.commit_seq))
+ .filter((n) => typeof n === 'number' && Number.isFinite(n) && n > args.cursor);
+ }
+ async readCommits(db, commitSeqs, options) {
+ const partitionId = options?.partitionId ?? 'default';
+ if (commitSeqs.length === 0)
+ return [];
+ const res = await sql `
+ SELECT commit_seq, actor_id, created_at, result_json
+ FROM sync_commits
+ WHERE commit_seq = ANY(${commitSeqs}::bigint[])
+ AND partition_id = ${partitionId}
+ ORDER BY commit_seq ASC
+ `.execute(db);
+ return res.rows.map((row) => ({
+ commit_seq: coerceNumber(row.commit_seq) ?? 0,
+ actor_id: row.actor_id,
+ created_at: coerceIsoString(row.created_at),
+ result_json: row.result_json ?? null,
+ }));
+ }
+ async readChangesForCommits(db, args) {
+ const partitionId = args.partitionId ?? 'default';
+ if (args.commitSeqs.length === 0)
+ return [];
+ // Build JSONB containment conditions for scope filtering
+ // For each scope key/value, we need: scopes->>'key' = 'value' OR scopes->>'key' IN (values)
+ const scopeConditions = [];
+ for (const [key, value] of Object.entries(args.scopes)) {
+ if (Array.isArray(value)) {
+ // OR condition for array values
+ scopeConditions.push(sql `scopes->>${key} = ANY(${value}::text[])`);
+ }
+ else {
+ scopeConditions.push(sql `scopes->>${key} = ${value}`);
+ }
+ }
+ let query = sql `
+ SELECT commit_seq, "table", row_id, op, row_json, row_version, scopes
+ FROM sync_changes
+ WHERE commit_seq = ANY(${args.commitSeqs}::bigint[])
+ AND partition_id = ${partitionId}
+ AND "table" = ${args.table}
+ `;
+ if (scopeConditions.length > 0) {
+ const scopeFilter = sql.join(scopeConditions, sql ` AND `);
+ query = sql `
+ SELECT commit_seq, "table", row_id, op, row_json, row_version, scopes
+ FROM sync_changes
+ WHERE commit_seq = ANY(${args.commitSeqs}::bigint[])
+ AND partition_id = ${partitionId}
+ AND "table" = ${args.table}
+ AND (${scopeFilter})
+ ORDER BY commit_seq ASC, change_id ASC
+ `;
+ }
+ const res = await query.execute(db);
+ return res.rows.map((row) => ({
+ commit_seq: coerceNumber(row.commit_seq) ?? 0,
+ table: row.table,
+ row_id: row.row_id,
+ op: row.op,
+ row_json: row.row_json ?? null,
+ row_version: coerceNumber(row.row_version),
+ scopes: parseScopes(row.scopes),
+ }));
+ }
+ async readIncrementalPullRows(db, args) {
+ const partitionId = args.partitionId ?? 'default';
+ const limitCommits = Math.max(1, Math.min(500, args.limitCommits));
+ // Build scope filter conditions
+ const scopeConditions = [];
+ for (const [key, value] of Object.entries(args.scopes)) {
+ if (Array.isArray(value)) {
+ scopeConditions.push(sql `c.scopes->>${key} = ANY(${value}::text[])`);
+ }
+ else {
+ scopeConditions.push(sql `c.scopes->>${key} = ${value}`);
+ }
+ }
+ const scopeFilter = scopeConditions.length > 0
+ ? sql.join(scopeConditions, sql ` AND `)
+ : sql `TRUE`;
+ const res = await sql `
+ WITH commit_seqs AS (
+ SELECT DISTINCT tc.commit_seq
+ FROM sync_table_commits tc
+ JOIN sync_commits cm ON cm.commit_seq = tc.commit_seq
+ WHERE tc.partition_id = ${partitionId}
+ AND tc."table" = ${args.table}
+ AND cm.partition_id = ${partitionId}
+ AND tc.commit_seq > ${args.cursor}
+ AND EXISTS (
+ SELECT 1
+ FROM sync_changes c
+ WHERE c.commit_seq = tc.commit_seq
+ AND c.partition_id = ${partitionId}
+ AND c."table" = ${args.table}
+ AND (${scopeFilter})
+ )
+ ORDER BY tc.commit_seq ASC
+ LIMIT ${limitCommits}
+ )
+ SELECT
+ cm.commit_seq,
+ cm.actor_id,
+ cm.created_at,
+ c.change_id,
+ c."table",
+ c.row_id,
+ c.op,
+ c.row_json,
+ c.row_version,
+ c.scopes
+ FROM commit_seqs cs
+ JOIN sync_commits cm ON cm.commit_seq = cs.commit_seq
+ JOIN sync_changes c ON c.commit_seq = cs.commit_seq
+ WHERE cm.partition_id = ${partitionId}
+ AND c.partition_id = ${partitionId}
+ AND c."table" = ${args.table}
+ AND (${scopeFilter})
+ ORDER BY cm.commit_seq ASC, c.change_id ASC
+ `.execute(db);
+ return res.rows.map((row) => ({
+ commit_seq: coerceNumber(row.commit_seq) ?? 0,
+ actor_id: row.actor_id,
+ created_at: coerceIsoString(row.created_at),
+ change_id: coerceNumber(row.change_id) ?? 0,
+ table: row.table,
+ row_id: row.row_id,
+ op: row.op,
+ row_json: row.row_json ?? null,
+ row_version: coerceNumber(row.row_version),
+ scopes: parseScopes(row.scopes),
+ }));
+ }
+ async *streamIncrementalPullRows(db, args) {
+ // PostgreSQL: use batching approach (could use pg-query-stream for true streaming)
+ const batchSize = Math.min(100, args.batchSize ?? 100);
+ let processed = 0;
+ while (processed < args.limitCommits) {
+ const batch = await this.readIncrementalPullRows(db, {
+ ...args,
+ limitCommits: Math.min(batchSize, args.limitCommits - processed),
+ cursor: args.cursor + processed,
+ });
+ if (batch.length === 0)
+ break;
+ for (const row of batch) {
+ yield row;
+ }
+ processed += batch.length;
+ if (batch.length < batchSize)
+ break;
+ }
+ }
+ async compactChanges(db, args) {
+ const cutoffIso = new Date(Date.now() - args.fullHistoryHours * 60 * 60 * 1000).toISOString();
+ const res = await sql `
+ WITH ranked AS (
+ SELECT
+ c.change_id,
+ row_number() OVER (
+ PARTITION BY c.partition_id, c."table", c.row_id, c.scopes
+ ORDER BY c.commit_seq DESC, c.change_id DESC
+ ) AS rn
+ FROM sync_changes c
+ JOIN sync_commits cm ON cm.commit_seq = c.commit_seq
+ WHERE cm.created_at < ${cutoffIso}
+ )
+ DELETE FROM sync_changes
+ WHERE change_id IN (SELECT change_id FROM ranked WHERE rn > 1)
+ `.execute(db);
+ const deletedChanges = Number(res.numAffectedRows ?? 0);
+ // Remove routing index entries that no longer have any remaining changes
+ await sql `
+ DELETE FROM sync_table_commits tc
+ USING sync_commits cm
+ WHERE cm.commit_seq = tc.commit_seq
+ AND cm.partition_id = tc.partition_id
+ AND cm.created_at < ${cutoffIso}
+ AND NOT EXISTS (
+ SELECT 1
+ FROM sync_changes c
+ WHERE c.commit_seq = tc.commit_seq
+ AND c.partition_id = tc.partition_id
+ AND c."table" = tc."table"
+ )
+ `.execute(db);
+ return deletedChanges;
+ }
+ // ===========================================================================
+ // Client Cursor Recording
+ // ===========================================================================
+ async recordClientCursor(db, args) {
+ const partitionId = args.partitionId ?? 'default';
+ const now = new Date().toISOString();
+ const scopesJson = JSON.stringify(args.effectiveScopes);
+ await sql `
+ INSERT INTO sync_client_cursors (partition_id, client_id, actor_id, cursor, effective_scopes, updated_at)
+ VALUES (${partitionId}, ${args.clientId}, ${args.actorId}, ${args.cursor}, ${scopesJson}::jsonb, ${now})
+ ON CONFLICT(partition_id, client_id) DO UPDATE SET
+ actor_id = ${args.actorId},
+ cursor = ${args.cursor},
+ effective_scopes = ${scopesJson}::jsonb,
+ updated_at = ${now}
+ `.execute(db);
+ }
+ // ===========================================================================
+ // Scope Conversion Helpers
+ // ===========================================================================
+ scopesToDb(scopes) {
+ return scopes;
+ }
+ dbToScopes(value) {
+ return parseScopes(value);
+ }
+ dbToArray(value) {
+ if (Array.isArray(value)) {
+ return value.filter((k) => typeof k === 'string');
+ }
+ return [];
+ }
+ arrayToDb(values) {
+ return values.filter((v) => v.length > 0);
+ }
+ async readAffectedTablesFromChanges(db, commitSeq, options) {
+ const partitionId = options?.partitionId ?? 'default';
+ const res = await sql `
+ SELECT DISTINCT "table"
+ FROM sync_changes
+ WHERE commit_seq = ${commitSeq}
+ AND partition_id = ${partitionId}
+ `.execute(db);
+ return res.rows
+ .map((r) => r.table)
+ .filter((t) => typeof t === 'string' && t.length > 0);
+ }
+ // ===========================================================================
+ // Console Schema (Request Events)
+ // ===========================================================================
+ async ensureConsoleSchema(db) {
+ await sql `
+ CREATE TABLE IF NOT EXISTS sync_request_events (
+ event_id BIGSERIAL PRIMARY KEY,
+ event_type TEXT NOT NULL,
+ actor_id TEXT NOT NULL,
+ client_id TEXT NOT NULL,
+ transport_path TEXT NOT NULL DEFAULT 'direct',
+ status_code INTEGER NOT NULL,
+ outcome TEXT NOT NULL,
+ duration_ms INTEGER NOT NULL,
+ commit_seq BIGINT,
+ operation_count INTEGER,
+ row_count INTEGER,
+ tables TEXT[] NOT NULL DEFAULT ARRAY[]::TEXT[],
+ error_message TEXT,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+ )
+ `.execute(db);
+ await sql `
+ ALTER TABLE sync_request_events
+ ADD COLUMN IF NOT EXISTS transport_path TEXT NOT NULL DEFAULT 'direct'
+ `.execute(db);
+ await this.ensureIndex(db, 'idx_sync_request_events_created_at', 'CREATE INDEX idx_sync_request_events_created_at ON sync_request_events(created_at DESC)');
+ await this.ensureIndex(db, 'idx_sync_request_events_event_type', 'CREATE INDEX idx_sync_request_events_event_type ON sync_request_events(event_type)');
+ await this.ensureIndex(db, 'idx_sync_request_events_client_id', 'CREATE INDEX idx_sync_request_events_client_id ON sync_request_events(client_id)');
+ // API Keys table
+ await sql `
+ CREATE TABLE IF NOT EXISTS sync_api_keys (
+ key_id TEXT PRIMARY KEY,
+ key_hash TEXT NOT NULL,
+ key_prefix TEXT NOT NULL,
+ name TEXT NOT NULL,
+ key_type TEXT NOT NULL,
+ scope_keys TEXT[] DEFAULT ARRAY[]::TEXT[],
+ actor_id TEXT,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+ expires_at TIMESTAMPTZ,
+ last_used_at TIMESTAMPTZ,
+ revoked_at TIMESTAMPTZ
+ )
+ `.execute(db);
+ await this.ensureIndex(db, 'idx_sync_api_keys_key_hash', 'CREATE INDEX idx_sync_api_keys_key_hash ON sync_api_keys(key_hash)');
+ await this.ensureIndex(db, 'idx_sync_api_keys_key_type', 'CREATE INDEX idx_sync_api_keys_key_type ON sync_api_keys(key_type)');
+ }
+ // ===========================================================================
+ // Private Helpers
+ // ===========================================================================
+ async ensureIndex(db, indexName, createSql) {
+ const exists = await sql `
+ SELECT 1 as ok
+ FROM pg_indexes
+ WHERE schemaname = 'public'
+ AND indexname = ${indexName}
+ LIMIT 1
+ `.execute(db);
+ if (exists.rows.length > 0)
+ return;
+ await sql.raw(createSql).execute(db);
+ }
+ }
+ export function createPostgresServerDialect() {
+ return new PostgresServerSyncDialect();
+ }
+ //# sourceMappingURL=index.js.map
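
For reference, a sketch of how a caller might drive an incremental pull against this dialect. The table name and scope values below are hypothetical placeholders (a real server would take both from its sync handler configuration); only the `readIncrementalPullRows` call reflects the API shown above.

  import type { Kysely } from 'kysely';
  import type { SyncCoreDb } from '@syncular/server/schema';
  import type { PostgresServerSyncDialect } from '@syncular/server-dialect-postgres';

  // Pull changes for one table in commit-sized pages, advancing the cursor to the
  // highest commit_seq seen. 'todos' and tenant_id are illustrative, not part of this package.
  async function pullTable(db: Kysely<SyncCoreDb>, dialect: PostgresServerSyncDialect): Promise<void> {
    let cursor = 0;
    for (;;) {
      const rows = await dialect.readIncrementalPullRows(db, {
        table: 'todos',
        scopes: { tenant_id: 'tenant-1' }, // matched against the JSONB scopes column
        cursor,
        limitCommits: 100,
      });
      if (rows.length === 0) break;
      for (const row of rows) {
        // apply row.op / row.row_json to the destination store here
      }
      cursor = Math.max(cursor, ...rows.map((r) => r.commit_seq));
    }
  }
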