@powersync/service-core 0.0.0-dev-20240708120322 → 0.0.0-dev-20240718134716
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +42 -2
- package/dist/entry/commands/migrate-action.js +12 -4
- package/dist/entry/commands/migrate-action.js.map +1 -1
- package/dist/metrics/Metrics.d.ts +3 -4
- package/dist/metrics/Metrics.js +0 -51
- package/dist/metrics/Metrics.js.map +1 -1
- package/dist/migrations/migrations.js +8 -0
- package/dist/migrations/migrations.js.map +1 -1
- package/dist/replication/WalStream.js +8 -6
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +13 -4
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.js +14 -5
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/routes/route-register.js +1 -0
- package/dist/routes/route-register.js.map +1 -1
- package/dist/sync/RequestTracker.d.ts +9 -0
- package/dist/sync/RequestTracker.js +20 -0
- package/dist/sync/RequestTracker.js.map +1 -0
- package/dist/sync/sync.d.ts +2 -0
- package/dist/sync/sync.js +31 -11
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/util.d.ts +2 -1
- package/dist/sync/util.js +2 -3
- package/dist/sync/util.js.map +1 -1
- package/dist/util/config/collectors/config-collector.d.ts +0 -12
- package/dist/util/config/collectors/config-collector.js +0 -43
- package/dist/util/config/collectors/config-collector.js.map +1 -1
- package/dist/util/config/compound-config-collector.d.ts +29 -3
- package/dist/util/config/compound-config-collector.js +69 -22
- package/dist/util/config/compound-config-collector.js.map +1 -1
- package/package.json +4 -6
- package/src/entry/commands/migrate-action.ts +12 -4
- package/src/metrics/Metrics.ts +2 -67
- package/src/migrations/migrations.ts +8 -0
- package/src/replication/WalStream.ts +10 -6
- package/src/routes/endpoints/socket-route.ts +14 -4
- package/src/routes/endpoints/sync-stream.ts +15 -5
- package/src/routes/route-register.ts +1 -0
- package/src/sync/RequestTracker.ts +21 -0
- package/src/sync/sync.ts +41 -11
- package/src/sync/util.ts +6 -3
- package/src/util/config/collectors/config-collector.ts +0 -48
- package/src/util/config/compound-config-collector.ts +87 -23
- package/test/src/sync.test.ts +8 -0
- package/test/src/util.ts +12 -6
- package/test/src/wal_stream.test.ts +16 -21
- package/tsconfig.tsbuildinfo +1 -1
package/src/metrics/Metrics.ts
CHANGED
@@ -1,12 +1,9 @@
 import { Attributes, Counter, ObservableGauge, UpDownCounter, ValueType } from '@opentelemetry/api';
 import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';
-import { MeterProvider
-import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http';
+import { MeterProvider } from '@opentelemetry/sdk-metrics';
 import * as jpgwire from '@powersync/service-jpgwire';
-import * as util from '../util/util-index.js';
 import * as storage from '../storage/storage-index.js';
 import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js';
-import { Resource } from '@opentelemetry/resources';
 import { logger } from '@powersync/lib-services-framework';
 
 export interface MetricsOptions {
@@ -16,8 +13,6 @@ export interface MetricsOptions {
 }
 
 export class Metrics {
-  private static instance: Metrics;
-
   private prometheusExporter: PrometheusExporter;
   private meterProvider: MeterProvider;
@@ -60,7 +55,7 @@
   // Record on API pod
   public concurrent_connections: UpDownCounter<Attributes>;
 
-
+  constructor(meterProvider: MeterProvider, prometheusExporter: PrometheusExporter) {
     this.meterProvider = meterProvider;
     this.prometheusExporter = prometheusExporter;
     const meter = meterProvider.getMeter('powersync');
@@ -132,66 +127,6 @@
     this.concurrent_connections.add(0);
   }
 
-  public static getInstance(): Metrics {
-    if (!Metrics.instance) {
-      throw new Error('Metrics have not been initialised');
-    }
-
-    return Metrics.instance;
-  }
-
-  public static async initialise(options: MetricsOptions): Promise<void> {
-    if (Metrics.instance) {
-      return;
-    }
-    logger.info('Configuring telemetry.');
-
-    logger.info(
-      `
-Attention:
-PowerSync collects completely anonymous telemetry regarding usage.
-This information is used to shape our roadmap to better serve our customers.
-You can learn more, including how to opt-out if you'd not like to participate in this anonymous program, by visiting the following URL:
-https://docs.powersync.com/self-hosting/telemetry
-Anonymous telemetry is currently: ${options.disable_telemetry_sharing ? 'disabled' : 'enabled'}
-`.trim()
-    );
-
-    const configuredExporters: MetricReader[] = [];
-
-    const port: number = util.env.METRICS_PORT ?? 0;
-    const prometheusExporter = new PrometheusExporter({ port: port, preventServerStart: true });
-    configuredExporters.push(prometheusExporter);
-
-    if (!options.disable_telemetry_sharing) {
-      logger.info('Sharing anonymous telemetry');
-      const periodicExporter = new PeriodicExportingMetricReader({
-        exporter: new OTLPMetricExporter({
-          url: options.internal_metrics_endpoint
-        }),
-        exportIntervalMillis: 1000 * 60 * 5 // 5 minutes
-      });
-
-      configuredExporters.push(periodicExporter);
-    }
-
-    const meterProvider = new MeterProvider({
-      resource: new Resource({
-        ['service']: 'PowerSync',
-        ['instance_id']: options.powersync_instance_id
-      }),
-      readers: configuredExporters
-    });
-
-    if (port > 0) {
-      await prometheusExporter.startServer();
-    }
-
-    Metrics.instance = new Metrics(meterProvider, prometheusExporter);
-
-    logger.info('Telemetry configuration complete.');
-  }
-
   public async shutdown(): Promise<void> {
     await this.meterProvider.shutdown();
   }
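Note: with this change the static `Metrics.initialise()` / `Metrics.getInstance()` singleton is gone; the constructor is public and consumers resolve the instance through the service container (`container.getImplementation(Metrics)`, as the other files in this diff now do). The sketch below shows roughly how a caller might build a `Metrics` instance after this change. It simply mirrors the OpenTelemetry setup from the removed `initialise()` body; the surrounding function, its options shape, and how the instance gets registered on the container are assumptions, since that code is not part of this diff.

// Hypothetical wiring sketch (not part of this diff).
import { MeterProvider, MetricReader, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics';
import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';
import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http';
import { Resource } from '@opentelemetry/resources';
import { Metrics } from './metrics/Metrics.js';

// Option names follow the removed initialise() body; the port handling is assumed.
async function createMetrics(options: {
  disable_telemetry_sharing: boolean;
  internal_metrics_endpoint: string;
  powersync_instance_id: string;
  port: number;
}): Promise<Metrics> {
  const readers: MetricReader[] = [];

  // Prometheus scrape endpoint, started manually below.
  const prometheusExporter = new PrometheusExporter({ port: options.port, preventServerStart: true });
  readers.push(prometheusExporter);

  // Optional anonymous telemetry export every 5 minutes, as in the removed code.
  if (!options.disable_telemetry_sharing) {
    readers.push(
      new PeriodicExportingMetricReader({
        exporter: new OTLPMetricExporter({ url: options.internal_metrics_endpoint }),
        exportIntervalMillis: 1000 * 60 * 5
      })
    );
  }

  const meterProvider = new MeterProvider({
    resource: new Resource({ ['service']: 'PowerSync', ['instance_id']: options.powersync_instance_id }),
    readers
  });

  if (options.port > 0) {
    await prometheusExporter.startServer();
  }

  // The instance would then be registered on the DI container so that
  // container.getImplementation(Metrics) resolves it; that registration is not shown here.
  return new Metrics(meterProvider, prometheusExporter);
}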
package/src/migrations/migrations.ts
CHANGED

@@ -8,6 +8,7 @@ import * as locks from '../locks/locks-index.js';
 import { Direction } from './definitions.js';
 import { createMongoMigrationStore } from './store/migration-store.js';
 import { execute, writeLogsToStore } from './executor.js';
+import { logger } from '@powersync/lib-services-framework';
 
 const DEFAULT_MONGO_LOCK_COLLECTION = 'locks';
 const MONGO_LOCK_PROCESS = 'migrations';
@@ -63,6 +64,7 @@ export const migrate = async (options: MigrationOptions) => {
   const { storage } = config;
 
   const client = db.mongo.createMongoClient(storage);
+  logger.info('Connecting to MongoDB');
   await client.connect();
 
   const clientDB = client.db(storage.database);
@@ -73,6 +75,7 @@ export const migrate = async (options: MigrationOptions) => {
   });
 
   // Only one process should execute this at a time.
+  logger.info('Acquiring lock');
   const lockId = await manager.acquire();
 
   if (!lockId) {
@@ -92,6 +95,7 @@ export const migrate = async (options: MigrationOptions) => {
   process.addListener('beforeExit', releaseLock);
 
   try {
+    logger.info('Loading migrations');
    const migrations = await loadMigrations(MIGRATIONS_DIR, runner_config);
 
    // Use the provided config to connect to Mongo
@@ -99,6 +103,7 @@ export const migrate = async (options: MigrationOptions) => {
 
    const state = await store.load();
 
+    logger.info('Running migrations');
    const logStream = execute({
      direction: direction,
      migrations,
@@ -111,8 +116,11 @@ export const migrate = async (options: MigrationOptions) => {
      state
    });
  } finally {
+    logger.info('Releasing lock');
    await releaseLock();
+    logger.info('Closing database');
    await client.close(true);
    process.removeListener('beforeExit', releaseLock);
+    logger.info('Done with migrations');
  }
 };
package/src/replication/WalStream.ts
CHANGED

@@ -406,7 +406,7 @@ WHERE oid = $1::regclass`,
         await batch.save({ tag: 'insert', sourceTable: table, before: undefined, after: record });
       }
       at += rows.length;
-
+      container.getImplementation(Metrics).rows_replicated_total.add(rows.length);
 
       await touch();
     }
@@ -492,19 +492,21 @@ WHERE oid = $1::regclass`,
       return null;
     }
 
+    const metrics = container.getImplementation(Metrics);
+
     if (msg.tag == 'insert') {
-
+      metrics.rows_replicated_total.add(1);
       const baseRecord = util.constructAfterRecord(msg);
       return await batch.save({ tag: 'insert', sourceTable: table, before: undefined, after: baseRecord });
     } else if (msg.tag == 'update') {
-
+      metrics.rows_replicated_total.add(1);
       // "before" may be null if the replica id columns are unchanged
       // It's fine to treat that the same as an insert.
       const before = util.constructBeforeRecord(msg);
       const after = util.constructAfterRecord(msg);
       return await batch.save({ tag: 'update', sourceTable: table, before: before, after: after });
     } else if (msg.tag == 'delete') {
-
+      metrics.rows_replicated_total.add(1);
       const before = util.constructBeforeRecord(msg)!;
 
       return await batch.save({ tag: 'delete', sourceTable: table, before: before, after: undefined });
@@ -555,6 +557,8 @@ WHERE oid = $1::regclass`,
     // Auto-activate as soon as initial replication is done
     await this.storage.autoActivate();
 
+    const metrics = container.getImplementation(Metrics);
+
     await this.storage.startBatch({}, async (batch) => {
       // Replication never starts in the middle of a transaction
       let inTx = false;
@@ -577,7 +581,7 @@ WHERE oid = $1::regclass`,
       } else if (msg.tag == 'begin') {
         inTx = true;
       } else if (msg.tag == 'commit') {
-
+        metrics.transactions_replicated_total.add(1);
         inTx = false;
         await batch.commit(msg.lsn!);
         await this.ack(msg.lsn!, replicationStream);
@@ -602,7 +606,7 @@ WHERE oid = $1::regclass`,
         }
       }
 
-
+      metrics.chunks_replicated_total.add(1);
     }
   });
 }
package/src/routes/endpoints/socket-route.ts
CHANGED

@@ -1,4 +1,4 @@
-import { errors, logger, schema } from '@powersync/lib-services-framework';
+import { container, errors, logger, schema } from '@powersync/lib-services-framework';
 import { RequestParameters } from '@powersync/service-sync-rules';
 import { serialize } from 'bson';
 
@@ -7,6 +7,7 @@ import { streamResponse } from '../../sync/sync.js';
 import * as util from '../../util/util-index.js';
 import { SocketRouteGenerator } from '../router-socket.js';
 import { SyncRoutes } from './sync-stream.js';
+import { RequestTracker } from '../../sync/RequestTracker.js';
 
 export const syncStreamReactive: SocketRouteGenerator = (router) =>
   router.reactiveStream<util.StreamingSyncRequest, any>(SyncRoutes.STREAM, {
@@ -65,7 +66,10 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
        observer.triggerCancel();
      });
 
-
+      const metrics = container.getImplementation(Metrics);
+
+      metrics.concurrent_connections.add(1);
+      const tracker = new RequestTracker();
      try {
        for await (const data of streamResponse({
          storage,
@@ -79,6 +83,7 @@
            // RSocket handles keepalive events by default
            keep_alive: false
          },
+          tracker,
          signal: controller.signal
        })) {
          if (data == null) {
@@ -94,7 +99,7 @@
            const serialized = serialize(data) as Buffer;
            responder.onNext({ data: serialized }, false);
            requestedN--;
-
+            tracker.addDataSynced(serialized.length);
          }
 
          if (requestedN <= 0) {
@@ -126,7 +131,12 @@
        responder.onComplete();
        removeStopHandler();
        disposer();
-
+        logger.info(`Sync stream complete`, {
+          user_id: syncParams.user_id,
+          operations_synced: tracker.operationsSynced,
+          data_synced_bytes: tracker.dataSyncedBytes
+        });
+        metrics.concurrent_connections.add(-1);
      }
    }
  });
package/src/routes/endpoints/sync-stream.ts
CHANGED

@@ -1,4 +1,4 @@
-import { errors, logger, router, schema } from '@powersync/lib-services-framework';
+import { container, errors, logger, router, schema } from '@powersync/lib-services-framework';
 import { RequestParameters } from '@powersync/service-sync-rules';
 import { Readable } from 'stream';
 
@@ -8,6 +8,7 @@ import * as util from '../../util/util-index.js';
 import { Metrics } from '../../metrics/Metrics.js';
 import { authUser } from '../auth.js';
 import { routeDefinition } from '../router.js';
+import { RequestTracker } from '../../sync/RequestTracker.js';
 
 export enum SyncRoutes {
   STREAM = '/sync/stream'
@@ -42,9 +43,11 @@ export const syncStreamed = routeDefinition({
        description: 'No sync rules available'
      });
    }
+    const metrics = container.getImplementation(Metrics);
    const controller = new AbortController();
+    const tracker = new RequestTracker();
    try {
-
+      metrics.concurrent_connections.add(1);
      const stream = Readable.from(
        sync.transformToBytesTracked(
          sync.ndjson(
@@ -53,9 +56,11 @@
            params,
            syncParams,
            token: payload.context.token_payload!,
+            tracker,
            signal: controller.signal
          })
-        )
+          ),
+          tracker
        ),
        { objectMode: false, highWaterMark: 16 * 1024 }
      );
@@ -85,12 +90,17 @@
        data: stream,
        afterSend: async () => {
          controller.abort();
-
+          metrics.concurrent_connections.add(-1);
+          logger.info(`Sync stream complete`, {
+            user_id: syncParams.user_id,
+            operations_synced: tracker.operationsSynced,
+            data_synced_bytes: tracker.dataSyncedBytes
+          });
        }
      });
    } catch (ex) {
      controller.abort();
-
+      metrics.concurrent_connections.add(-1);
    }
  }
 });
package/src/routes/route-register.ts
CHANGED

@@ -63,6 +63,7 @@ export function registerFastifyRoutes(
      }
    } catch (ex) {
      const journeyError = errors.JourneyError.isJourneyError(ex) ? ex : new errors.InternalServerError(ex);
+      logger.error(`Request failed`, journeyError);
 
      response = new router.RouterResponse({
        status: journeyError.errorData.status || 500,
package/src/sync/RequestTracker.ts
ADDED

@@ -0,0 +1,21 @@
+import { container } from '@powersync/lib-services-framework';
+import { Metrics } from '../metrics/Metrics.js';
+
+/**
+ * Record sync stats per request stream.
+ */
+export class RequestTracker {
+  operationsSynced = 0;
+  dataSyncedBytes = 0;
+
+  addOperationsSynced(operations: number) {
+    this.operationsSynced += operations;
+    container.getImplementation(Metrics).operations_synced_total.add(operations);
+  }
+
+  addDataSynced(bytes: number) {
+    this.dataSyncedBytes += bytes;
+
+    container.getImplementation(Metrics).data_synced_bytes.add(bytes);
+  }
+}
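`RequestTracker` keeps per-request totals and forwards each increment to the global counters via the container. A minimal usage sketch, assuming a `Metrics` implementation has already been registered on the container (which this diff relies on but does not show):

import { RequestTracker } from './sync/RequestTracker.js';

// One tracker per sync stream.
const tracker = new RequestTracker();

// Called as data is emitted to the client; each call also bumps the
// corresponding global counter (operations_synced_total / data_synced_bytes).
tracker.addOperationsSynced(42);
tracker.addDataSynced(1024);

// Per-request totals remain available for the "Sync stream complete" log line.
console.log({
  operations_synced: tracker.operationsSynced,
  data_synced_bytes: tracker.dataSyncedBytes
});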
package/src/sync/sync.ts
CHANGED
@@ -8,9 +8,9 @@ import * as storage from '../storage/storage-index.js';
 import * as util from '../util/util-index.js';
 
 import { logger } from '@powersync/lib-services-framework';
-import { Metrics } from '../metrics/Metrics.js';
 import { mergeAsyncIterables } from './merge.js';
 import { TokenStreamOptions, tokenStream } from './util.js';
+import { RequestTracker } from './RequestTracker.js';
 
 /**
  * Maximum number of connections actively fetching data.
@@ -28,12 +28,14 @@ export interface SyncStreamParameters {
   */
  signal?: AbortSignal;
  tokenStreamOptions?: Partial<TokenStreamOptions>;
+
+  tracker: RequestTracker;
 }
 
 export async function* streamResponse(
  options: SyncStreamParameters
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
-  const { storage, params, syncParams, token, tokenStreamOptions, signal } = options;
+  const { storage, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
  // We also need to be able to abort, so we create our own controller.
  const controller = new AbortController();
  if (signal) {
@@ -49,7 +51,7 @@ export async function* streamResponse(
    }
  }
  const ki = tokenStream(token, controller.signal, tokenStreamOptions);
-  const stream = streamResponseInner(storage, params, syncParams, controller.signal);
+  const stream = streamResponseInner(storage, params, syncParams, tracker, controller.signal);
  // Merge the two streams, and abort as soon as one of the streams end.
  const merged = mergeAsyncIterables([stream, ki], controller.signal);
 
@@ -72,6 +74,7 @@ async function* streamResponseInner(
  storage: storage.BucketStorageFactory,
  params: util.StreamingSyncRequest,
  syncParams: RequestParameters,
+  tracker: RequestTracker,
  signal: AbortSignal
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
  // Bucket state of bucket id -> op_id.
@@ -109,6 +112,11 @@
    });
 
    if (allBuckets.length > 1000) {
+      logger.error(`Too many buckets`, {
+        checkpoint,
+        user_id: syncParams.user_id,
+        buckets: allBuckets.length
+      });
      // TODO: Limit number of buckets even before we get to this point
      throw new Error(`Too many buckets: ${allBuckets.length}`);
    }
@@ -137,11 +145,18 @@
      }
      bucketsToFetch = diff.updatedBuckets.map((c) => c.bucket);
 
-      let message = `Updated checkpoint: ${checkpoint} |
+      let message = `Updated checkpoint: ${checkpoint} | `;
+      message += `write: ${writeCheckpoint} | `;
      message += `buckets: ${allBuckets.length} | `;
      message += `updated: ${limitedBuckets(diff.updatedBuckets, 20)} | `;
-      message += `removed: ${limitedBuckets(diff.removedBuckets, 20)}
-      logger.info(message
+      message += `removed: ${limitedBuckets(diff.removedBuckets, 20)}`;
+      logger.info(message, {
+        checkpoint,
+        user_id: syncParams.user_id,
+        buckets: allBuckets.length,
+        updated: diff.updatedBuckets.length,
+        removed: diff.removedBuckets.length
+      });
 
      const checksum_line: util.StreamingSyncCheckpointDiff = {
        checkpoint_diff: {
@@ -156,7 +171,7 @@
    } else {
      let message = `New checkpoint: ${checkpoint} | write: ${writeCheckpoint} | `;
      message += `buckets: ${allBuckets.length} ${limitedBuckets(allBuckets, 20)}`;
-      logger.info(message);
+      logger.info(message, { checkpoint, user_id: syncParams.user_id, buckets: allBuckets.length });
      bucketsToFetch = allBuckets;
      const checksum_line: util.StreamingSyncCheckpoint = {
        checkpoint: {
@@ -172,7 +187,16 @@
 
    // This incrementally updates dataBuckets with each individual bucket position.
    // At the end of this, we can be sure that all buckets have data up to the checkpoint.
-    yield* bucketDataInBatches({
+    yield* bucketDataInBatches({
+      storage,
+      checkpoint,
+      bucketsToFetch,
+      dataBuckets,
+      raw_data,
+      binary_data,
+      signal,
+      tracker
+    });
 
    await new Promise((resolve) => setTimeout(resolve, 10));
  }
@@ -186,6 +210,7 @@ interface BucketDataRequest {
  dataBuckets: Map<string, string>;
  raw_data: boolean | undefined;
  binary_data: boolean | undefined;
+  tracker: RequestTracker;
  signal: AbortSignal;
 }
 
@@ -221,11 +246,16 @@ async function* bucketDataInBatches(request: BucketDataRequest) {
    }
  }
 
+interface BucketDataBatchResult {
+  done: boolean;
+  data: any;
+}
+
 /**
  * Extracted as a separate internal function just to avoid memory leaks.
  */
-async function* bucketDataBatch(request: BucketDataRequest) {
-  const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, signal } = request;
+async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
+  const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, tracker, signal } = request;
 
  const [_, release] = await syncSemaphore.acquire();
  try {
@@ -272,7 +302,7 @@ async function* bucketDataBatch(request: BucketDataRequest) {
      // iterator memory in case if large data sent.
      yield { data: null, done: false };
    }
-
+    tracker.addOperationsSynced(r.data.length);
 
    dataBuckets.set(r.bucket, r.next_after);
  }
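`tracker` is now a required field on `SyncStreamParameters` and is threaded down through `streamResponseInner` into `bucketDataBatch`, so any caller of `streamResponse` outside the two routes above has to be updated as well. A hedged call-site sketch follows; the wrapper function, its parameters and the token type are assumptions, only the options shape passed to `streamResponse` comes from this diff.

import { RequestParameters } from '@powersync/service-sync-rules';
import * as storage from './storage/storage-index.js';
import * as util from './util/util-index.js';
import { streamResponse } from './sync/sync.js';
import { RequestTracker } from './sync/RequestTracker.js';

async function writeSyncStream(
  bucketStorage: storage.BucketStorageFactory,
  params: util.StreamingSyncRequest,
  syncParams: RequestParameters,
  token: any, // JWT payload; exact type not shown in this diff
  send: (line: unknown) => void
) {
  const tracker = new RequestTracker();
  const controller = new AbortController();

  for await (const line of streamResponse({
    storage: bucketStorage,
    params,
    syncParams,
    token,
    tracker, // new required field
    signal: controller.signal
  })) {
    if (line == null) {
      // null lines are emitted to relieve iterator memory; nothing to send
      continue;
    }
    send(line);
  }

  return { operations_synced: tracker.operationsSynced, data_synced_bytes: tracker.dataSyncedBytes };
}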
package/src/sync/util.ts
CHANGED
@@ -1,7 +1,7 @@
 import * as timers from 'timers/promises';
 
 import * as util from '../util/util-index.js';
-import {
+import { RequestTracker } from './RequestTracker.js';
 
 export type TokenStreamOptions = {
   /**
@@ -89,10 +89,13 @@ export async function* ndjson(iterator: AsyncIterable<string | null | Record<str
   }
 }
 
-export async function* transformToBytesTracked(
+export async function* transformToBytesTracked(
+  iterator: AsyncIterable<string>,
+  tracker: RequestTracker
+): AsyncGenerator<Buffer> {
   for await (let data of iterator) {
     const encoded = Buffer.from(data, 'utf8');
-
+    tracker.addDataSynced(encoded.length);
     yield encoded;
   }
 }
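`transformToBytesTracked` now takes the tracker as a second argument and records the byte length of every encoded chunk. A condensed sketch of the pipeline used by the HTTP route above; the import paths and the `lines` source are assumptions.

import { Readable } from 'stream';
import { ndjson, transformToBytesTracked } from './sync/util.js'; // assumed export path
import { RequestTracker } from './sync/RequestTracker.js';

const tracker = new RequestTracker();

// Any async iterable of sync lines / objects to serialize as NDJSON.
declare const lines: AsyncIterable<string | null | Record<string, any>>;

const stream = Readable.from(
  transformToBytesTracked(ndjson(lines), tracker),
  { objectMode: false, highWaterMark: 16 * 1024 }
);
// tracker.dataSyncedBytes grows as the stream is consumed.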
package/src/util/config/collectors/config-collector.ts
CHANGED

@@ -1,8 +1,6 @@
-import * as t from 'ts-codec';
 import * as yaml from 'yaml';
 
 import { configFile } from '@powersync/service-types';
-import { schema } from '@powersync/lib-services-framework';
 
 import { RunnerConfig } from '../types.js';
 
@@ -23,13 +21,6 @@ export enum ConfigFileFormat {
  */
 const YAML_ENV_PREFIX = 'PS_';
 
-// ts-codec itself doesn't give great validation errors, so we use json schema for that
-const configSchemaValidator = schema
-  .parseJSONSchema(
-    t.generateJSONSchema(configFile.powerSyncConfig, { allowAdditional: true, parsers: [configFile.portParser] })
-  )
-  .validator();
-
 export abstract class ConfigCollector {
  abstract get name(): string;
 
@@ -39,45 +30,6 @@ export abstract class ConfigCollector {
   */
  abstract collectSerialized(runnerConfig: RunnerConfig): Promise<configFile.SerializedPowerSyncConfig | null>;
 
-  /**
-   * Collects the PowerSyncConfig settings.
-   * Validates and decodes the config.
-   * @returns null if this collector cannot provide a config
-   */
-  async collect(runner_config: RunnerConfig): Promise<configFile.PowerSyncConfig | null> {
-    const serialized = await this.collectSerialized(runner_config);
-    if (!serialized) {
-      return null;
-    }
-
-    /**
-     * After this point a serialized config has been found. Any failures to decode or validate
-     * will result in a hard stop.
-     */
-    const decoded = this.decode(serialized);
-    this.validate(decoded);
-    return decoded;
-  }
-
-  /**
-   * Validates input config
-   * ts-codec itself doesn't give great validation errors, so we use json schema for that
-   */
-  validate(config: configFile.PowerSyncConfig) {
-    const valid = configSchemaValidator.validate(config);
-    if (!valid.valid) {
-      throw new Error(`Failed to validate PowerSync config: ${valid.errors.join(', ')}`);
-    }
-  }
-
-  decode(encoded: configFile.SerializedPowerSyncConfig): configFile.PowerSyncConfig {
-    try {
-      return configFile.powerSyncConfig.decode(encoded);
-    } catch (ex) {
-      throw new Error(`Failed to decode PowerSync config: ${ex}`);
-    }
-  }
-
  protected parseContent(content: string, contentType?: ConfigFileFormat) {
    switch (contentType) {
      case ConfigFileFormat.YAML:
|