@powersync/service-module-postgres 0.16.11 → 0.16.13

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -34,7 +34,7 @@ export class PostgresRouteAPIAdapter implements api.RouteAPI {
  connectionTag?: string,
  private config?: types.ResolvedConnectionConfig
  ) {
- this.typeCache = new PostgresTypeResolver(config?.typeRegistry ?? new CustomTypeRegistry(), pool);
+ this.typeCache = new PostgresTypeResolver(pool);
  this.connectionTag = connectionTag ?? sync_rules.DEFAULT_TAG;
  }
 
@@ -22,8 +22,6 @@ import { getApplicationName } from '../utils/application-name.js';
  import { CustomTypeRegistry } from '../types/registry.js';
 
  export class PostgresModule extends replication.ReplicationModule<types.PostgresConnectionConfig> {
- private customTypes: CustomTypeRegistry = new CustomTypeRegistry();
-
  constructor() {
  super({
  name: 'Postgres',
@@ -51,7 +49,7 @@ export class PostgresModule extends replication.ReplicationModule<types.Postgres
  protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator {
  const normalisedConfig = this.resolveConfig(this.decodedConfig!);
  const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules);
- const connectionFactory = new ConnectionManagerFactory(normalisedConfig, this.customTypes);
+ const connectionFactory = new ConnectionManagerFactory(normalisedConfig);
 
  return new WalStreamReplicator({
  id: this.getDefaultId(normalisedConfig.database),
@@ -69,8 +67,7 @@ export class PostgresModule extends replication.ReplicationModule<types.Postgres
  private resolveConfig(config: types.PostgresConnectionConfig): types.ResolvedConnectionConfig {
  return {
  ...config,
- ...types.normalizeConnectionConfig(config),
- typeRegistry: this.customTypes
+ ...types.normalizeConnectionConfig(config)
  };
  }
 
@@ -79,8 +76,7 @@ export class PostgresModule extends replication.ReplicationModule<types.Postgres
  const connectionManager = new PgManager(normalisedConfig, {
  idleTimeout: 30_000,
  maxSize: 1,
- applicationName: getApplicationName(),
- registry: this.customTypes
+ applicationName: getApplicationName()
  });
 
  try {
@@ -111,8 +107,7 @@ export class PostgresModule extends replication.ReplicationModule<types.Postgres
  const connectionManager = new PgManager(normalizedConfig, {
  idleTimeout: 30_000,
  maxSize: 1,
- applicationName: getApplicationName(),
- registry: new CustomTypeRegistry()
+ applicationName: getApplicationName()
  });
  const connection = await connectionManager.snapshotConnection();
  try {
@@ -1,22 +1,18 @@
- import { PgManager } from './PgManager.js';
- import { NormalizedPostgresConnectionConfig } from '../types/types.js';
- import { PgPoolOptions } from '@powersync/service-jpgwire';
  import { logger } from '@powersync/lib-services-framework';
- import { CustomTypeRegistry } from '../types/registry.js';
+ import { PgPoolOptions } from '@powersync/service-jpgwire';
+ import { NormalizedPostgresConnectionConfig } from '../types/types.js';
+ import { PgManager } from './PgManager.js';
 
  export class ConnectionManagerFactory {
  private readonly connectionManagers = new Set<PgManager>();
  public readonly dbConnectionConfig: NormalizedPostgresConnectionConfig;
 
- constructor(
- dbConnectionConfig: NormalizedPostgresConnectionConfig,
- private readonly registry: CustomTypeRegistry
- ) {
+ constructor(dbConnectionConfig: NormalizedPostgresConnectionConfig) {
  this.dbConnectionConfig = dbConnectionConfig;
  }
 
  create(poolOptions: PgPoolOptions) {
- const manager = new PgManager(this.dbConnectionConfig, { ...poolOptions, registry: this.registry });
+ const manager = new PgManager(this.dbConnectionConfig, { ...poolOptions });
  this.connectionManagers.add(manager);
 
  manager.registerListener({
@@ -1,15 +1,12 @@
+ import { BaseObserver } from '@powersync/lib-services-framework';
  import * as pgwire from '@powersync/service-jpgwire';
  import semver from 'semver';
+ import { PostgresTypeResolver } from '../types/resolver.js';
  import { NormalizedPostgresConnectionConfig } from '../types/types.js';
  import { getApplicationName } from '../utils/application-name.js';
- import { PostgresTypeResolver } from '../types/resolver.js';
  import { getServerVersion } from '../utils/postgres_version.js';
- import { CustomTypeRegistry } from '../types/registry.js';
- import { BaseObserver } from '@powersync/lib-services-framework';
 
- export interface PgManagerOptions extends pgwire.PgPoolOptions {
- registry: CustomTypeRegistry;
- }
+ export interface PgManagerOptions extends pgwire.PgPoolOptions {}
 
  /**
  * Shorter timeout for snapshot connections than for replication connections.
@@ -37,7 +34,7 @@ export class PgManager extends BaseObserver<PgManagerListener> {
  super();
  // The pool is lazy - no connections are opened until a query is performed.
  this.pool = pgwire.connectPgWirePool(this.options, poolOptions);
- this.types = new PostgresTypeResolver(poolOptions.registry, this.pool);
+ this.types = new PostgresTypeResolver(this.pool);
  }
 
  public get connectionTag() {
@@ -546,6 +546,7 @@ WHERE oid = $1::regclass`,
  await q.initialize();
 
  let columns: { i: number; name: string }[] = [];
+ let columnMap: Record<string, number> = {};
  let hasRemainingData = true;
  while (hasRemainingData) {
  // Fetch 10k at a time.
@@ -565,6 +566,9 @@ WHERE oid = $1::regclass`,
  columns = chunk.payload.map((c) => {
  return { i: i++, name: c.name };
  });
+ for (let column of chunk.payload) {
+ columnMap[column.name] = column.typeOid;
+ }
  continue;
  }
 
@@ -580,7 +584,7 @@ WHERE oid = $1::regclass`,
  }
 
  for (const inputRecord of WalStream.getQueryData(rows)) {
- const record = this.syncRulesRecord(inputRecord);
+ const record = this.syncRulesRecord(this.connections.types.constructRowRecord(columnMap, inputRecord));
  // This auto-flushes when the batch reaches its size limit
  await batch.save({
  tag: storage.SaveOperationTag.INSERT,
@@ -1,8 +1,8 @@
- import { DatabaseInputRow, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules';
  import * as pgwire from '@powersync/service-jpgwire';
- import { CustomTypeRegistry } from './registry.js';
+ import { DatabaseInputRow, SqliteInputRow, toSyncRulesRow } from '@powersync/service-sync-rules';
  import semver from 'semver';
  import { getServerVersion } from '../utils/postgres_version.js';
+ import { CustomTypeRegistry } from './registry.js';
 
  /**
  * Resolves descriptions used to decode values for custom postgres types.
@@ -11,11 +11,9 @@ import { getServerVersion } from '../utils/postgres_version.js';
  */
  export class PostgresTypeResolver {
  private cachedVersion: semver.SemVer | null = null;
+ readonly registry: CustomTypeRegistry;
 
- constructor(
- readonly registry: CustomTypeRegistry,
- private readonly pool: pgwire.PgClient
- ) {
+ constructor(private readonly pool: pgwire.PgClient) {
  this.registry = new CustomTypeRegistry();
  }
 
@@ -188,6 +186,11 @@ WHERE a.attnum > 0
  return toSyncRulesRow(record);
  }
 
+ constructRowRecord(columnMap: Record<string, number>, tupleRaw: Record<string, any>): SqliteInputRow {
+ const record = this.decodeTupleForTable(columnMap, tupleRaw);
+ return toSyncRulesRow(record);
+ }
+
  /**
  * We need a high level of control over how values are decoded, to make sure there is no loss
  * of precision in the process.
@@ -206,5 +209,23 @@ WHERE a.attnum > 0
  return result;
  }
 
+ /**
+ * We need a high level of control over how values are decoded, to make sure there is no loss
+ * of precision in the process.
+ */
+ private decodeTupleForTable(columnMap: Record<string, number>, tupleRaw: Record<string, any>): DatabaseInputRow {
+ let result: Record<string, any> = {};
+ for (let columnName in tupleRaw) {
+ const rawval = tupleRaw[columnName];
+ const typeOid = columnMap[columnName];
+ if (typeof rawval == 'string' && typeOid) {
+ result[columnName] = this.registry.decodeDatabaseValue(rawval, typeOid);
+ } else {
+ result[columnName] = rawval;
+ }
+ }
+ return result;
+ }
+
  private static minVersionForMultirange: semver.SemVer = semver.parse('14.0.0')!;
  }
@@ -1,7 +1,6 @@
  import * as lib_postgres from '@powersync/lib-service-postgres';
  import * as service_types from '@powersync/service-types';
  import * as t from 'ts-codec';
- import { CustomTypeRegistry } from './registry.js';
 
  // Maintain backwards compatibility by exporting these
  export const validatePort = lib_postgres.validatePort;
@@ -25,10 +24,7 @@ export type PostgresConnectionConfig = t.Decoded<typeof PostgresConnectionConfig
  /**
  * Resolved version of {@link PostgresConnectionConfig}
  */
- export type ResolvedConnectionConfig = PostgresConnectionConfig &
- NormalizedPostgresConnectionConfig & {
- typeRegistry: CustomTypeRegistry;
- };
+ export type ResolvedConnectionConfig = PostgresConnectionConfig & NormalizedPostgresConnectionConfig;
 
  export function isPostgresConfig(
  config: service_types.configFile.DataSourceConfig
@@ -551,7 +551,7 @@ INSERT INTO test_data(id, time, timestamp, timestamptz) VALUES (1, '17:42:01.12'
  test('test replication - multiranges', async () => {
  const db = await connectPgPool();
 
- if (!(await new PostgresTypeResolver(new CustomTypeRegistry(), db).supportsMultiRanges())) {
+ if (!(await new PostgresTypeResolver(db).supportsMultiRanges())) {
  // This test requires Postgres 14 or later.
  return;
  }
@@ -620,7 +620,7 @@ INSERT INTO test_data(id, time, timestamp, timestamptz) VALUES (1, '17:42:01.12'
  * Return all the inserts from the first transaction in the replication stream.
  */
  async function getReplicationTx(db: pgwire.PgClient, replicationStream: pgwire.ReplicationStream) {
- const typeCache = new PostgresTypeResolver(new CustomTypeRegistry(), db);
+ const typeCache = new PostgresTypeResolver(db);
  await typeCache.fetchTypesForSchema();
 
  let transformed: SqliteInputRow[] = [];
@@ -69,7 +69,7 @@ function defineSlowTests(factory: storage.TestStorageFactory) {
  });
 
  async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
- const connections = new PgManager(TEST_CONNECTION_OPTIONS, { registry: new CustomTypeRegistry() });
+ const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
  const replicationConnection = await connections.replicationConnection();
  const pool = connections.pool;
  await clearTestDb(pool);
@@ -330,7 +330,7 @@ bucket_definitions:
  await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
  i += 1;
 
- const connections = new PgManager(TEST_CONNECTION_OPTIONS, { registry: new CustomTypeRegistry() });
+ const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
  const replicationConnection = await connections.replicationConnection();
 
  abortController = new AbortController();
@@ -529,13 +529,24 @@ config:
  const { pool } = context;
  await pool.query(`DROP TABLE IF EXISTS test_data`);
  await pool.query(`CREATE TYPE composite AS (foo bool, bar int4);`);
- await pool.query(`CREATE TABLE test_data(id text primary key, description composite);`);
+ await pool.query(`CREATE TABLE test_data(id text primary key, description composite, ts timestamptz);`);
+
+ // Covered by initial replication
+ await pool.query(
+ `INSERT INTO test_data(id, description, ts) VALUES ('t1', ROW(TRUE, 1)::composite, '2025-11-17T09:11:00Z')`
+ );
 
  await context.initializeReplication();
- await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', ROW(TRUE, 2)::composite)`);
+ // Covered by streaming replication
+ await pool.query(
+ `INSERT INTO test_data(id, description, ts) VALUES ('t2', ROW(TRUE, 2)::composite, '2025-11-17T09:12:00Z')`
+ );
 
  const data = await context.getBucketData('1#stream|0[]');
- expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '{"foo":1,"bar":2}' })]);
+ expect(data).toMatchObject([
+ putOp('test_data', { id: 't1', description: '{"foo":1,"bar":1}', ts: '2025-11-17T09:11:00.000000Z' }),
+ putOp('test_data', { id: 't2', description: '{"foo":1,"bar":2}', ts: '2025-11-17T09:12:00.000000Z' })
+ ]);
  });
 
  test('custom types in primary key', async () => {
@@ -33,7 +33,7 @@ export class WalStreamTestContext implements AsyncDisposable {
  options?: { doNotClear?: boolean; walStreamOptions?: Partial<WalStreamOptions> }
  ) {
  const f = await factory({ doNotClear: options?.doNotClear });
- const connectionManager = new PgManager(TEST_CONNECTION_OPTIONS, { registry: new CustomTypeRegistry() });
+ const connectionManager = new PgManager(TEST_CONNECTION_OPTIONS, {});
 
  if (!options?.doNotClear) {
  await clearTestDb(connectionManager.pool);
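
Taken together, the hunks above move construction of the CustomTypeRegistry inside PostgresTypeResolver and pass a column-name-to-type-OID map into the initial snapshot, so custom type values are decoded there as well as during streaming replication. Below is a minimal TypeScript sketch of the resulting call pattern; only the constructors and methods visible in the hunks (PgManager, connections.types, fetchTypesForSchema, constructRowRecord) come from the package, while the config variable, the sample OID, and the sample row are illustrative placeholders.

// Sketch only: PgManager and PostgresTypeResolver are internal to this module;
// normalizedConfig, the OID 16385, and the row values below are placeholders.
const connections = new PgManager(normalizedConfig, {
  idleTimeout: 30_000,
  maxSize: 1,
  applicationName: getApplicationName()
});

// The resolver now creates its own CustomTypeRegistry; callers no longer pass one in.
await connections.types.fetchTypesForSchema();

// During the initial snapshot, the column-name -> type-OID map collected from the
// cursor metadata lets raw string values be decoded through the registry before
// sync rules are applied.
const columnMap: Record<string, number> = { description: 16385 }; // placeholder OID
const record = connections.types.constructRowRecord(columnMap, {
  id: 't1',
  description: '(t,1)' // raw text value as received from Postgres
});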