@powersync/service-module-postgres 0.16.15 → 0.16.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +16 -0
  2. package/dist/api/PostgresRouteAPIAdapter.js +3 -2
  3. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
  4. package/dist/replication/SnapshotQuery.js +4 -3
  5. package/dist/replication/SnapshotQuery.js.map +1 -1
  6. package/dist/replication/WalStream.d.ts +3 -2
  7. package/dist/replication/WalStream.js +23 -35
  8. package/dist/replication/WalStream.js.map +1 -1
  9. package/dist/replication/replication-utils.js +5 -5
  10. package/dist/replication/replication-utils.js.map +1 -1
  11. package/dist/types/registry.js +1 -2
  12. package/dist/types/registry.js.map +1 -1
  13. package/dist/types/resolver.d.ts +0 -1
  14. package/dist/types/resolver.js +9 -14
  15. package/dist/types/resolver.js.map +1 -1
  16. package/dist/utils/migration_lib.js +1 -1
  17. package/dist/utils/migration_lib.js.map +1 -1
  18. package/dist/utils/postgres_version.js +1 -1
  19. package/dist/utils/postgres_version.js.map +1 -1
  20. package/package.json +10 -10
  21. package/src/api/PostgresRouteAPIAdapter.ts +3 -2
  22. package/src/replication/SnapshotQuery.ts +7 -3
  23. package/src/replication/WalStream.ts +31 -34
  24. package/src/replication/replication-utils.ts +5 -5
  25. package/src/types/registry.ts +1 -4
  26. package/src/types/resolver.ts +10 -14
  27. package/src/utils/migration_lib.ts +1 -1
  28. package/src/utils/postgres_version.ts +1 -1
  29. package/test/src/pg_test.test.ts +153 -61
  30. package/test/src/resuming_snapshots.test.ts +12 -6
  31. package/test/src/slow_tests.test.ts +2 -2
  32. package/test/src/types/registry.test.ts +1 -1
  33. package/test/src/wal_stream_utils.ts +1 -1
  34. package/tsconfig.tsbuildinfo +1 -1
@@ -49,7 +49,7 @@ export class Migrations {
49
49
  order by migration_id desc
50
50
  limit 1
51
51
  `)
52
- .then((results) => ({ id: results.rows[0][0] }));
52
+ .then((results) => ({ id: results.rows[0].decodeWithoutCustomTypes(0) }));
53
53
  }
54
54
  async ensureMigrationsTable(db) {
55
55
  await db.query(`create table if not exists migrations (
@@ -1 +1 @@
1
- {"version":3,"file":"migration_lib.js","sourceRoot":"","sources":["../../src/utils/migration_lib.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,qBAAqB,EAAE,MAAM,mCAAmC,CAAC;AAW1E,oEAAoE;AACpE,MAAM,OAAO,UAAU;IACb,UAAU,GAAgB,EAAE,CAAC;IAErC,GAAG,CAAC,EAAU,EAAE,IAAY,EAAE,EAAqB;QACjD,IAAI,IAAI,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,IAAI,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,EAAE,IAAI,EAAE,EAAE,CAAC;YACvF,MAAM,IAAI,qBAAqB,CAAC,6CAA6C,CAAC,CAAC;QACjF,CAAC;QACD,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,IAAI,EAAE,CAAC,CAAC;IACzC,CAAC;IAED,KAAK,CAAC,EAAE,CAAC,EAAuB;QAC9B,MAAM,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;QACxB,IAAI,CAAC;YACH,MAAM,IAAI,CAAC,qBAAqB,CAAC,EAAE,CAAC,CAAC;YACrC,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,mBAAmB,CAAC,EAAE,CAAC,CAAC;YACnD,IAAI,SAAS,GAAG,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;YAEzC,KAAK,IAAI,SAAS,IAAI,IAAI,CAAC,UAAU,EAAE,CAAC;gBACtC,IAAI,SAAS,CAAC,EAAE,IAAI,SAAS,EAAE,CAAC;oBAC9B,SAAS;gBACX,CAAC;gBACD,MAAM,SAAS,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC;gBAEvB,MAAM,EAAE,CAAC,KAAK,CAAC;oBACb,SAAS,EAAE;;;;;;;;KAQhB;oBACK,MAAM,EAAE;wBACN,EAAE,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,SAAS,CAAC,EAAE,EAAE;wBACrC,EAAE,IAAI,EAAE,SAAS,EAAE,KAAK,EAAE,SAAS,CAAC,IAAI,EAAE;qBAC3C;iBACF,CAAC,CAAC;YACL,CAAC;YAED,MAAM,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;QAC3B,CAAC;QAAC,OAAO,CAAC,EAAE,CAAC;YACX,MAAM,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;YAC3B,MAAM,CAAC,CAAC;QACV,CAAC;IACH,CAAC;IAED,mBAAmB,CAAC,EAAuB;QACzC,OAAO,EAAE;aACN,KAAK,CACJ;;;;KAIH,CACE;aACA,IAAI,CAAC,CAAC,OAAO,EAAE,EAAE,CAAC,CAAC,EAAE,EAAE,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAW,EAAE,CAAC,CAAC,CAAC;IAC/D,CAAC;IAED,KAAK,CAAC,qBAAqB,CAAC,EAAuB;QACjD,MAAM,EAAE,CAAC,KAAK,CAAC;;;;;KAKd,CAAC,CAAC;IACL,CAAC;CACF"}
1
+ {"version":3,"file":"migration_lib.js","sourceRoot":"","sources":["../../src/utils/migration_lib.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,qBAAqB,EAAE,MAAM,mCAAmC,CAAC;AAW1E,oEAAoE;AACpE,MAAM,OAAO,UAAU;IACb,UAAU,GAAgB,EAAE,CAAC;IAErC,GAAG,CAAC,EAAU,EAAE,IAAY,EAAE,EAAqB;QACjD,IAAI,IAAI,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,IAAI,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,EAAE,IAAI,EAAE,EAAE,CAAC;YACvF,MAAM,IAAI,qBAAqB,CAAC,6CAA6C,CAAC,CAAC;QACjF,CAAC;QACD,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,IAAI,EAAE,CAAC,CAAC;IACzC,CAAC;IAED,KAAK,CAAC,EAAE,CAAC,EAAuB;QAC9B,MAAM,EAAE,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;QACxB,IAAI,CAAC;YACH,MAAM,IAAI,CAAC,qBAAqB,CAAC,EAAE,CAAC,CAAC;YACrC,MAAM,OAAO,GAAG,MAAM,IAAI,CAAC,mBAAmB,CAAC,EAAE,CAAC,CAAC;YACnD,IAAI,SAAS,GAAG,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;YAEzC,KAAK,IAAI,SAAS,IAAI,IAAI,CAAC,UAAU,EAAE,CAAC;gBACtC,IAAI,SAAS,CAAC,EAAE,IAAI,SAAS,EAAE,CAAC;oBAC9B,SAAS;gBACX,CAAC;gBACD,MAAM,SAAS,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC;gBAEvB,MAAM,EAAE,CAAC,KAAK,CAAC;oBACb,SAAS,EAAE;;;;;;;;KAQhB;oBACK,MAAM,EAAE;wBACN,EAAE,IAAI,EAAE,MAAM,EAAE,KAAK,EAAE,SAAS,CAAC,EAAE,EAAE;wBACrC,EAAE,IAAI,EAAE,SAAS,EAAE,KAAK,EAAE,SAAS,CAAC,IAAI,EAAE;qBAC3C;iBACF,CAAC,CAAC;YACL,CAAC;YAED,MAAM,EAAE,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC;QAC3B,CAAC;QAAC,OAAO,CAAC,EAAE,CAAC;YACX,MAAM,EAAE,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;YAC3B,MAAM,CAAC,CAAC;QACV,CAAC;IACH,CAAC;IAED,mBAAmB,CAAC,EAAuB;QACzC,OAAO,EAAE;aACN,KAAK,CACJ;;;;KAIH,CACE;aACA,IAAI,CAAC,CAAC,OAAO,EAAE,EAAE,CAAC,CAAC,EAAE,EAAE,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,wBAAwB,CAAC,CAAC,CAAW,EAAE,CAAC,CAAC,CAAC;IACxF,CAAC;IAED,KAAK,CAAC,qBAAqB,CAAC,EAAuB;QACjD,MAAM,EAAE,CAAC,KAAK,CAAC;;;;;KAKd,CAAC,CAAC;IACL,CAAC;CACF"}
@@ -2,6 +2,6 @@ import semver from 'semver';
2
2
  export async function getServerVersion(db) {
3
3
  const result = await db.query(`SHOW server_version;`);
4
4
  // The result is usually of the form "16.2 (Debian 16.2-1.pgdg120+2)"
5
- return semver.coerce(result.rows[0][0].split(' ')[0]);
5
+ return semver.coerce(result.rows[0].decodeWithoutCustomTypes(0).split(' ')[0]);
6
6
  }
7
7
  //# sourceMappingURL=postgres_version.js.map
@@ -1 +1 @@
1
- {"version":3,"file":"postgres_version.js","sourceRoot":"","sources":["../../src/utils/postgres_version.ts"],"names":[],"mappings":"AACA,OAAO,MAAuB,MAAM,QAAQ,CAAC;AAE7C,MAAM,CAAC,KAAK,UAAU,gBAAgB,CAAC,EAAmB;IACxD,MAAM,MAAM,GAAG,MAAM,EAAE,CAAC,KAAK,CAAC,sBAAsB,CAAC,CAAC;IACtD,qEAAqE;IACrE,OAAO,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACxD,CAAC"}
1
+ {"version":3,"file":"postgres_version.js","sourceRoot":"","sources":["../../src/utils/postgres_version.ts"],"names":[],"mappings":"AACA,OAAO,MAAuB,MAAM,QAAQ,CAAC;AAE7C,MAAM,CAAC,KAAK,UAAU,gBAAgB,CAAC,EAAmB;IACxD,MAAM,MAAM,GAAG,MAAM,EAAE,CAAC,KAAK,CAAC,sBAAsB,CAAC,CAAC;IACtD,qEAAqE;IACrE,OAAO,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,wBAAwB,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACjF,CAAC"}
package/package.json CHANGED
@@ -5,7 +5,7 @@
5
5
  "publishConfig": {
6
6
  "access": "public"
7
7
  },
8
- "version": "0.16.15",
8
+ "version": "0.16.16",
9
9
  "main": "dist/index.js",
10
10
  "license": "FSL-1.1-ALv2",
11
11
  "type": "module",
@@ -28,20 +28,20 @@
28
28
  "ts-codec": "^1.3.0",
29
29
  "uri-js": "^4.4.1",
30
30
  "uuid": "^11.1.0",
31
- "@powersync/lib-service-postgres": "0.4.17",
32
- "@powersync/lib-services-framework": "0.7.13",
33
- "@powersync/service-core": "1.18.1",
34
- "@powersync/service-jpgwire": "0.21.8",
31
+ "@powersync/lib-service-postgres": "0.4.18",
32
+ "@powersync/lib-services-framework": "0.7.14",
33
+ "@powersync/service-core": "1.18.2",
34
+ "@powersync/service-jpgwire": "0.21.9",
35
35
  "@powersync/service-jsonbig": "0.17.12",
36
- "@powersync/service-sync-rules": "0.29.9",
36
+ "@powersync/service-sync-rules": "0.29.10",
37
37
  "@powersync/service-types": "0.13.3"
38
38
  },
39
39
  "devDependencies": {
40
40
  "@types/semver": "^7.5.4",
41
- "@powersync/service-core-tests": "0.12.15",
42
- "@powersync/service-module-mongodb-storage": "0.12.15",
43
- "@powersync/lib-service-postgres": "0.4.17",
44
- "@powersync/service-module-postgres-storage": "0.10.15"
41
+ "@powersync/service-core-tests": "0.12.16",
42
+ "@powersync/service-module-mongodb-storage": "0.12.16",
43
+ "@powersync/lib-service-postgres": "0.4.18",
44
+ "@powersync/service-module-postgres-storage": "0.10.16"
45
45
  },
46
46
  "scripts": {
47
47
  "build": "tsc -b",
@@ -108,7 +108,8 @@ export class PostgresRouteAPIAdapter implements api.RouteAPI {
108
108
  results: {
109
109
  columns: result.columns.map((c) => c.name),
110
110
  rows: result.rows.map((row) => {
111
- return row.map((value) => {
111
+ return row.raw.map((raw, i) => {
112
+ const value = pgwire.PgType.decode(raw, row.columns[i].typeOid);
112
113
  const sqlValue = sync_rules.applyValueContext(
113
114
  sync_rules.toSyncRulesValue(value),
114
115
  sync_rules.CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY
@@ -252,7 +253,7 @@ FROM pg_replication_slots WHERE slot_name = $1 LIMIT 1;`,
252
253
  // For those, we need to use pg_current_wal_lsn() instead.
253
254
  const { results } = await lib_postgres.retriedQuery(this.pool, `SELECT pg_current_wal_lsn() as lsn`);
254
255
 
255
- const lsn = results[0].rows[0][0];
256
+ const lsn = results[0].rows[0].decodeWithoutCustomTypes(0);
256
257
  return String(lsn);
257
258
  }
258
259
 
@@ -127,19 +127,19 @@ export class ChunkedSnapshotQuery implements SnapshotQuery {
127
127
  if (this.key.typeId == null) {
128
128
  throw new Error(`typeId required for primary key ${this.key.name}`);
129
129
  }
130
- let type: StatementParam['type'] = Number(this.key.typeId);
130
+ const type = Number(this.key.typeId);
131
131
  stream = this.connection.stream({
132
132
  statement: `SELECT * FROM ${this.table.qualifiedName} WHERE ${escapedKeyName} > $1 ORDER BY ${escapedKeyName} LIMIT ${this.chunkSize}`,
133
133
  params: [{ value: this.lastKey, type }]
134
134
  });
135
135
  }
136
136
  let primaryKeyIndex: number = -1;
137
+ let typeOid = 0;
137
138
 
138
139
  for await (let chunk of stream) {
139
140
  if (chunk.tag == 'RowDescription') {
140
141
  // We get a RowDescription for each FETCH call, but they should
141
142
  // all be the same.
142
- let i = 0;
143
143
  const pk = chunk.payload.findIndex((c) => c.name == this.key.name);
144
144
  if (pk < 0) {
145
145
  throw new Error(
@@ -147,10 +147,14 @@ export class ChunkedSnapshotQuery implements SnapshotQuery {
147
147
  );
148
148
  }
149
149
  primaryKeyIndex = pk;
150
+ typeOid = chunk.payload[pk].typeOid;
150
151
  }
151
152
 
152
153
  if (chunk.rows.length > 0) {
153
- this.lastKey = chunk.rows[chunk.rows.length - 1][primaryKeyIndex];
154
+ this.lastKey = PgType.decode(chunk.rows[chunk.rows.length - 1].raw[primaryKeyIndex], typeOid) as
155
+ | string
156
+ | bigint
157
+ | null;
154
158
  }
155
159
  yield chunk;
156
160
  }
@@ -20,16 +20,19 @@ import {
20
20
  } from '@powersync/service-core';
21
21
  import * as pgwire from '@powersync/service-jpgwire';
22
22
  import {
23
+ applyRowContext,
23
24
  applyValueContext,
24
25
  CompatibilityContext,
25
26
  DatabaseInputRow,
26
27
  SqliteInputRow,
27
28
  SqliteInputValue,
28
29
  SqliteRow,
30
+ SqliteValue,
29
31
  SqlSyncRules,
30
32
  TablePattern,
31
33
  ToastableSqliteRow,
32
- toSyncRulesRow
34
+ toSyncRulesRow,
35
+ toSyncRulesValue
33
36
  } from '@powersync/service-sync-rules';
34
37
 
35
38
  import { ReplicationMetric } from '@powersync/service-types';
@@ -44,6 +47,7 @@ import {
44
47
  SimpleSnapshotQuery,
45
48
  SnapshotQuery
46
49
  } from './SnapshotQuery.js';
50
+ import { PostgresTypeResolver } from '../types/resolver.js';
47
51
 
48
52
  export interface WalStreamOptions {
49
53
  logger?: Logger;
@@ -361,11 +365,7 @@ WHERE oid = $1::regclass`,
361
365
  params: [{ value: table.qualifiedName, type: 'varchar' }]
362
366
  });
363
367
  const row = results.rows[0];
364
- if ((row?.[0] ?? -1n) == -1n) {
365
- return -1;
366
- } else {
367
- return Number(row[0]);
368
- }
368
+ return Number(row?.decodeWithoutCustomTypes(0) ?? -1n);
369
369
  }
370
370
 
371
371
  /**
@@ -466,11 +466,25 @@ WHERE oid = $1::regclass`,
466
466
  }
467
467
  }
468
468
 
469
- static *getQueryData(results: Iterable<DatabaseInputRow>): Generator<SqliteInputRow> {
470
- for (let row of results) {
471
- yield toSyncRulesRow(row);
472
- }
469
+ static decodeRow(row: pgwire.PgRow, types: PostgresTypeResolver): SqliteInputRow {
470
+ let result: SqliteInputRow = {};
471
+
472
+ row.raw.forEach((rawValue, i) => {
473
+ const column = row.columns[i];
474
+ let mappedValue: SqliteInputValue;
475
+
476
+ if (typeof rawValue == 'string') {
477
+ mappedValue = toSyncRulesValue(types.registry.decodeDatabaseValue(rawValue, column.typeOid), false, true);
478
+ } else {
479
+ // Binary format, expose as-is.
480
+ mappedValue = rawValue;
481
+ }
482
+
483
+ result[column.name] = mappedValue;
484
+ });
485
+ return result;
473
486
  }
487
+
474
488
  private async snapshotTableInTx(
475
489
  batch: storage.BucketStorageBatch,
476
490
  db: pgwire.PgConnection,
@@ -499,7 +513,7 @@ WHERE oid = $1::regclass`,
499
513
  // 2. Wait until logical replication has caught up with all the change between A and B.
500
514
  // Calling `markSnapshotDone(LSN B)` covers that.
501
515
  const rs = await db.query(`select pg_current_wal_lsn() as lsn`);
502
- tableLsnNotBefore = rs.rows[0][0];
516
+ tableLsnNotBefore = rs.rows[0].decodeWithoutCustomTypes(0);
503
517
  // Side note: A ROLLBACK would probably also be fine here, since we only read in this transaction.
504
518
  await db.query('COMMIT');
505
519
  const [resultTable] = await batch.markSnapshotDone([table], tableLsnNotBefore);
@@ -545,8 +559,6 @@ WHERE oid = $1::regclass`,
545
559
  }
546
560
  await q.initialize();
547
561
 
548
- let columns: { i: number; name: string }[] = [];
549
- let columnMap: Record<string, number> = {};
550
562
  let hasRemainingData = true;
551
563
  while (hasRemainingData) {
552
564
  // Fetch 10k at a time.
@@ -560,31 +572,16 @@ WHERE oid = $1::regclass`,
560
572
  // There are typically 100-200 rows per chunk.
561
573
  for await (let chunk of cursor) {
562
574
  if (chunk.tag == 'RowDescription') {
563
- // We get a RowDescription for each FETCH call, but they should
564
- // all be the same.
565
- let i = 0;
566
- columns = chunk.payload.map((c) => {
567
- return { i: i++, name: c.name };
568
- });
569
- for (let column of chunk.payload) {
570
- columnMap[column.name] = column.typeOid;
571
- }
572
575
  continue;
573
576
  }
574
577
 
575
- const rows = chunk.rows.map((row) => {
576
- let q: DatabaseInputRow = {};
577
- for (let c of columns) {
578
- q[c.name] = row[c.i];
579
- }
580
- return q;
581
- });
582
- if (rows.length > 0) {
578
+ if (chunk.rows.length > 0) {
583
579
  hasRemainingData = true;
584
580
  }
585
581
 
586
- for (const inputRecord of WalStream.getQueryData(rows)) {
587
- const record = this.syncRulesRecord(this.connections.types.constructRowRecord(columnMap, inputRecord));
582
+ for (const rawRow of chunk.rows) {
583
+ const record = this.sync_rules.applyRowContext<never>(WalStream.decodeRow(rawRow, this.connections.types));
584
+
588
585
  // This auto-flushes when the batch reaches its size limit
589
586
  await batch.save({
590
587
  tag: storage.SaveOperationTag.INSERT,
@@ -596,8 +593,8 @@ WHERE oid = $1::regclass`,
596
593
  });
597
594
  }
598
595
 
599
- at += rows.length;
600
- this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(rows.length);
596
+ at += chunk.rows.length;
597
+ this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(chunk.rows.length);
601
598
 
602
599
  this.touch();
603
600
  }
@@ -32,8 +32,8 @@ export async function getPrimaryKeyColumns(
32
32
 
33
33
  return attrRows.rows.map((row) => {
34
34
  return {
35
- name: row[0] as string,
36
- typeId: row[1] as number
35
+ name: row.decodeWithoutCustomTypes(0) as string,
36
+ typeId: row.decodeWithoutCustomTypes(1) as number
37
37
  } satisfies storage.ColumnDescriptor;
38
38
  });
39
39
  }
@@ -50,8 +50,8 @@ export async function getAllColumns(db: pgwire.PgClient, relationId: number): Pr
50
50
  });
51
51
  return attrRows.rows.map((row) => {
52
52
  return {
53
- name: row[0] as string,
54
- typeId: row[1] as number
53
+ name: row.decodeWithoutCustomTypes(0) as string,
54
+ typeId: row.decodeWithoutCustomTypes(1) as number
55
55
  } satisfies storage.ColumnDescriptor;
56
56
  });
57
57
  }
@@ -71,7 +71,7 @@ FROM pg_class
71
71
  WHERE oid = $1::oid LIMIT 1`,
72
72
  params: [{ type: 'int8', value: relationId }]
73
73
  });
74
- const idType: string = rows.rows[0]?.[0];
74
+ const idType: string = rows.rows[0]?.decodeWithoutCustomTypes(0);
75
75
  if (idType == 'nothing' || idType == null) {
76
76
  return { replicationIdentity: 'nothing', replicationColumns: [] };
77
77
  } else if (idType == 'full') {
@@ -256,10 +256,7 @@ export class CustomTypeRegistry {
256
256
  case 'unknown':
257
257
  return true;
258
258
  case 'array':
259
- return (
260
- type.separatorCharCode == pgwire.CHAR_CODE_COMMA &&
261
- this.isParsedWithoutCustomTypesSupport(this.lookupType(type.innerId))
262
- );
259
+ return type.separatorCharCode == pgwire.CHAR_CODE_COMMA && pgwire.ARRAY_TO_ELEM_OID.has(type.innerId);
263
260
  default:
264
261
  return false;
265
262
  }
@@ -153,7 +153,7 @@ WHERE a.attnum > 0
153
153
  AND cn.nspname not in ('information_schema', 'pg_catalog', 'pg_toast')
154
154
  `;
155
155
 
156
- const query = await this.pool.query({ statement: sql });
156
+ const query = await this.pool.query(sql);
157
157
  let ids: number[] = [];
158
158
  for (const row of pgwire.pgwireRows(query)) {
159
159
  ids.push(Number(row.type_oid));
@@ -186,26 +186,22 @@ WHERE a.attnum > 0
186
186
  return toSyncRulesRow(record);
187
187
  }
188
188
 
189
- constructRowRecord(columnMap: Record<string, number>, tupleRaw: Record<string, any>): SqliteInputRow {
190
- const record = this.decodeTupleForTable(columnMap, tupleRaw);
191
- return toSyncRulesRow(record);
192
- }
193
-
194
189
  /**
195
190
  * We need a high level of control over how values are decoded, to make sure there is no loss
196
191
  * of precision in the process.
197
192
  */
198
193
  decodeTuple(relation: pgwire.PgoutputRelation, tupleRaw: Record<string, any>): DatabaseInputRow {
199
194
  let result: Record<string, any> = {};
200
- for (let columnName in tupleRaw) {
201
- const rawval = tupleRaw[columnName];
202
- const typeOid = (relation as any)._tupleDecoder._typeOids.get(columnName);
203
- if (typeof rawval == 'string' && typeOid) {
204
- result[columnName] = this.registry.decodeDatabaseValue(rawval, typeOid);
205
- } else {
206
- result[columnName] = rawval;
207
- }
195
+ for (const column of relation.columns) {
196
+ const rawval = tupleRaw[column.name];
197
+ result[column.name] =
198
+ rawval == null
199
+ ? // We can't decode null values, but it's important that null and undefined stay distinct because undefined
200
+ // represents a TOASTed value.
201
+ rawval
202
+ : this.registry.decodeDatabaseValue(rawval, column.typeOid);
208
203
  }
204
+
209
205
  return result;
210
206
  }
211
207
 
@@ -66,7 +66,7 @@ export class Migrations {
66
66
  limit 1
67
67
  `
68
68
  )
69
- .then((results) => ({ id: results.rows[0][0] as number }));
69
+ .then((results) => ({ id: results.rows[0].decodeWithoutCustomTypes(0) as number }));
70
70
  }
71
71
 
72
72
  async ensureMigrationsTable(db: pgwire.PgConnection) {
@@ -4,5 +4,5 @@ import semver, { type SemVer } from 'semver';
4
4
  export async function getServerVersion(db: pgwire.PgClient): Promise<SemVer | null> {
5
5
  const result = await db.query(`SHOW server_version;`);
6
6
  // The result is usually of the form "16.2 (Debian 16.2-1.pgdg120+2)"
7
- return semver.coerce(result.rows[0][0].split(' ')[0]);
7
+ return semver.coerce(result.rows[0].decodeWithoutCustomTypes(0).split(' ')[0]);
8
8
  }