@powersync/service-core 1.13.4 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +61 -0
- package/LICENSE +3 -3
- package/dist/api/api-metrics.js +5 -0
- package/dist/api/api-metrics.js.map +1 -1
- package/dist/api/diagnostics.js +31 -1
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/auth/KeyStore.d.ts +19 -0
- package/dist/auth/KeyStore.js +16 -4
- package/dist/auth/KeyStore.js.map +1 -1
- package/dist/auth/RemoteJWKSCollector.d.ts +3 -0
- package/dist/auth/RemoteJWKSCollector.js +3 -1
- package/dist/auth/RemoteJWKSCollector.js.map +1 -1
- package/dist/auth/StaticSupabaseKeyCollector.d.ts +2 -1
- package/dist/auth/StaticSupabaseKeyCollector.js +1 -1
- package/dist/auth/StaticSupabaseKeyCollector.js.map +1 -1
- package/dist/auth/utils.d.ts +19 -0
- package/dist/auth/utils.js +106 -3
- package/dist/auth/utils.js.map +1 -1
- package/dist/entry/commands/compact-action.js +10 -1
- package/dist/entry/commands/compact-action.js.map +1 -1
- package/dist/metrics/open-telemetry/util.d.ts +0 -3
- package/dist/metrics/open-telemetry/util.js +19 -12
- package/dist/metrics/open-telemetry/util.js.map +1 -1
- package/dist/replication/AbstractReplicator.js +2 -2
- package/dist/replication/AbstractReplicator.js.map +1 -1
- package/dist/routes/compression.d.ts +19 -0
- package/dist/routes/compression.js +70 -0
- package/dist/routes/compression.js.map +1 -0
- package/dist/routes/configure-fastify.d.ts +40 -5
- package/dist/routes/configure-fastify.js +2 -1
- package/dist/routes/configure-fastify.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +25 -17
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-rules.js +1 -27
- package/dist/routes/endpoints/sync-rules.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.d.ts +80 -10
- package/dist/routes/endpoints/sync-stream.js +29 -11
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/routes/route-register.d.ts +4 -0
- package/dist/routes/route-register.js +29 -15
- package/dist/routes/route-register.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +1 -1
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/BucketStorageBatch.d.ts +16 -6
- package/dist/storage/BucketStorageBatch.js.map +1 -1
- package/dist/storage/ChecksumCache.d.ts +4 -19
- package/dist/storage/ChecksumCache.js +4 -0
- package/dist/storage/ChecksumCache.js.map +1 -1
- package/dist/storage/ReplicationEventPayload.d.ts +2 -2
- package/dist/storage/SourceEntity.d.ts +5 -4
- package/dist/storage/SourceTable.d.ts +22 -20
- package/dist/storage/SourceTable.js +34 -30
- package/dist/storage/SourceTable.js.map +1 -1
- package/dist/storage/SyncRulesBucketStorage.d.ts +19 -4
- package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
- package/dist/sync/BucketChecksumState.d.ts +41 -11
- package/dist/sync/BucketChecksumState.js +155 -19
- package/dist/sync/BucketChecksumState.js.map +1 -1
- package/dist/sync/RequestTracker.d.ts +7 -1
- package/dist/sync/RequestTracker.js +22 -2
- package/dist/sync/RequestTracker.js.map +1 -1
- package/dist/sync/sync.d.ts +3 -3
- package/dist/sync/sync.js +23 -42
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/util.d.ts +3 -1
- package/dist/sync/util.js +30 -2
- package/dist/sync/util.js.map +1 -1
- package/dist/util/config/compound-config-collector.js +23 -0
- package/dist/util/config/compound-config-collector.js.map +1 -1
- package/dist/util/lsn.d.ts +4 -0
- package/dist/util/lsn.js +11 -0
- package/dist/util/lsn.js.map +1 -0
- package/dist/util/protocol-types.d.ts +153 -9
- package/dist/util/protocol-types.js +41 -6
- package/dist/util/protocol-types.js.map +1 -1
- package/dist/util/util-index.d.ts +1 -0
- package/dist/util/util-index.js +1 -0
- package/dist/util/util-index.js.map +1 -1
- package/dist/util/utils.d.ts +18 -3
- package/dist/util/utils.js +33 -9
- package/dist/util/utils.js.map +1 -1
- package/package.json +16 -14
- package/src/api/api-metrics.ts +6 -0
- package/src/api/diagnostics.ts +33 -1
- package/src/auth/KeyStore.ts +28 -4
- package/src/auth/RemoteJWKSCollector.ts +5 -2
- package/src/auth/StaticSupabaseKeyCollector.ts +1 -1
- package/src/auth/utils.ts +123 -3
- package/src/entry/commands/compact-action.ts +9 -1
- package/src/metrics/open-telemetry/util.ts +23 -19
- package/src/replication/AbstractReplicator.ts +2 -2
- package/src/routes/compression.ts +75 -0
- package/src/routes/configure-fastify.ts +3 -1
- package/src/routes/endpoints/socket-route.ts +25 -16
- package/src/routes/endpoints/sync-rules.ts +1 -28
- package/src/routes/endpoints/sync-stream.ts +37 -26
- package/src/routes/route-register.ts +41 -15
- package/src/storage/BucketStorage.ts +2 -2
- package/src/storage/BucketStorageBatch.ts +23 -6
- package/src/storage/ChecksumCache.ts +8 -22
- package/src/storage/ReplicationEventPayload.ts +2 -2
- package/src/storage/SourceEntity.ts +5 -5
- package/src/storage/SourceTable.ts +48 -34
- package/src/storage/SyncRulesBucketStorage.ts +26 -7
- package/src/sync/BucketChecksumState.ts +194 -31
- package/src/sync/RequestTracker.ts +27 -2
- package/src/sync/sync.ts +53 -51
- package/src/sync/util.ts +32 -3
- package/src/util/config/compound-config-collector.ts +24 -0
- package/src/util/lsn.ts +8 -0
- package/src/util/protocol-types.ts +138 -10
- package/src/util/util-index.ts +1 -0
- package/src/util/utils.ts +59 -12
- package/test/src/auth.test.ts +323 -1
- package/test/src/checksum_cache.test.ts +6 -8
- package/test/src/routes/mocks.ts +59 -0
- package/test/src/routes/stream.test.ts +84 -0
- package/test/src/sync/BucketChecksumState.test.ts +375 -76
- package/tsconfig.tsbuildinfo +1 -1
package/src/metrics/open-telemetry/util.ts

@@ -1,15 +1,13 @@
-import { MeterProvider, MetricReader, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics';
-import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';
 import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http';
-import {
+import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';
+import { MeterProvider, MetricReader, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics';
+import { logger } from '@powersync/lib-services-framework';
 import { ServiceContext } from '../../system/ServiceContext.js';
-import { OpenTelemetryMetricsFactory } from './OpenTelemetryMetricsFactory.js';
 import { MetricsFactory } from '../metrics-interfaces.js';
-import {
+import { OpenTelemetryMetricsFactory } from './OpenTelemetryMetricsFactory.js';
 
-
-
-}
+import pkg from '../../../package.json' with { type: 'json' };
+import { resourceFromAttributes } from '@opentelemetry/resources';
 
 export function createOpenTelemetryMetricsFactory(context: ServiceContext): MetricsFactory {
   const { configuration, lifeCycleEngine, storageEngine } = context;
@@ -41,9 +39,9 @@ export function createOpenTelemetryMetricsFactory(context: ServiceContext): MetricsFactory {
     configuredExporters.push(periodicExporter);
   }
 
-  let
-  const
-
+  let resolvedInstanceId: (id: string) => void;
+  const instanceIdPromise = new Promise<string>((resolve) => {
+    resolvedInstanceId = resolve;
   });
 
   lifeCycleEngine.withLifecycle(null, {
@@ -51,20 +49,26 @@ export function createOpenTelemetryMetricsFactory(context: ServiceContext): MetricsFactory {
       const bucketStorage = storageEngine.activeBucketStorage;
       try {
         const instanceId = await bucketStorage.getPowerSyncInstanceId();
-
+        resolvedInstanceId(instanceId);
       } catch (err) {
-
+        resolvedInstanceId('Unknown');
      }
     }
   });
 
+  const resource = resourceFromAttributes({
+    ['service']: 'PowerSync',
+    ['service.version']: pkg.version,
+    ['instance_id']: instanceIdPromise
+  });
+
+  // This triggers OpenTelemetry to resolve the async attributes (instanceIdPromise).
+  // This will never reject, and we don't specifically need to wait for it.
+  resource.waitForAsyncAttributes?.();
+
   const meterProvider = new MeterProvider({
-    resource
-
-      ['service']: 'PowerSync'
-      },
-      runtimeMetadata
-    ),
+    resource,
+
     readers: configuredExporters
   });
 
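The hunk above builds the meter provider's resource up front while the instance id only becomes known once storage is ready; a promise-valued attribute bridges the two. A minimal sketch of that deferred-promise / async-attribute pattern, assuming @opentelemetry/resources v2+ and illustrative values (not the service's actual wiring):

import { resourceFromAttributes } from '@opentelemetry/resources';
import { MeterProvider } from '@opentelemetry/sdk-metrics';

// Deferred promise: the resource can reference the instance id before it is known.
let resolveInstanceId!: (id: string) => void;
const instanceIdPromise = new Promise<string>((resolve) => {
  resolveInstanceId = resolve;
});

// Promise values are treated as async attributes and resolved lazily.
const resource = resourceFromAttributes({
  service: 'PowerSync',
  instance_id: instanceIdPromise
});
// Kick off resolution of the async attributes; this never rejects.
resource.waitForAsyncAttributes?.();

const meterProvider = new MeterProvider({ resource, readers: [] });

// Later, once the storage layer can report it:
resolveInstanceId('instance-123');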
package/src/replication/AbstractReplicator.ts

@@ -10,8 +10,8 @@ import { AbstractReplicationJob } from './AbstractReplicationJob.js';
 import { ErrorRateLimiter } from './ErrorRateLimiter.js';
 import { ConnectionTestResult } from './ReplicationModule.js';
 
-//
-const PING_INTERVAL = 1_000_000_000n *
+// 1 minute
+const PING_INTERVAL = 1_000_000_000n * 60n;
 
 export interface CreateJobOptions {
   lock: storage.ReplicationLock;
package/src/routes/compression.ts (new file)

@@ -0,0 +1,75 @@
+import type Negotiator from 'negotiator';
+import { PassThrough, pipeline, Readable, Transform } from 'node:stream';
+import * as zlib from 'node:zlib';
+import { RequestTracker } from '../sync/RequestTracker.js';
+
+/**
+ * Compress a streamed response.
+ *
+ * `@fastify/compress` can do something similar, but does not appear to work as well on streamed responses.
+ * The manual implementation is simple enough, and gives us more control over the low-level details.
+ *
+ * @param negotiator Negotiator from the request, to negotiate response encoding
+ * @param stream plain-text stream
+ * @returns
+ */
+export function maybeCompressResponseStream(
+  negotiator: Negotiator,
+  stream: Readable,
+  tracker: RequestTracker
+): { stream: Readable; encodingHeaders: { 'content-encoding'?: string } } {
+  const encoding = (negotiator as any).encoding(['identity', 'gzip', 'zstd'], { preferred: 'zstd' });
+  const transform = createCompressionTransform(encoding);
+  if (transform == null) {
+    // No matching compression supported - leave stream as-is
+    return {
+      stream,
+      encodingHeaders: {}
+    };
+  } else {
+    tracker.setCompressed(encoding);
+    return {
+      stream: transformStream(stream, transform, tracker),
+      encodingHeaders: { 'content-encoding': encoding }
+    };
+  }
+}
+
+function createCompressionTransform(encoding: string | undefined): Transform | null {
+  if (encoding == 'zstd') {
+    // Available since Node v23.8.0, v22.15.0
+    // This does the actual compression in a background thread pool.
+    return zlib.createZstdCompress({
+      // We need to flush the frame after every new input chunk, to avoid delaying data
+      // in the output stream.
+      flush: zlib.constants.ZSTD_e_flush,
+      params: {
+        // Default compression level is 3. We reduce this slightly to limit CPU overhead
+        [zlib.constants.ZSTD_c_compressionLevel]: 2
+      }
+    });
+  } else if (encoding == 'gzip') {
+    return zlib.createGzip({
+      // We need to flush the frame after every new input chunk, to avoid delaying data
+      // in the output stream.
+      flush: zlib.constants.Z_SYNC_FLUSH
+    });
+  }
+  return null;
+}
+
+function transformStream(source: Readable, transform: Transform, tracker: RequestTracker) {
+  // pipe does not forward error events automatically, resulting in unhandled error
+  // events. This forwards it.
+  const out = new PassThrough();
+  const trackingTransform = new Transform({
+    transform(chunk, _encoding, callback) {
+      tracker.addCompressedDataSent(chunk.length);
+      callback(null, chunk);
+    }
+  });
+  pipeline(source, transform, trackingTransform, out, (err) => {
+    if (err) out.destroy(err);
+  });
+  return out;
+}
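The per-chunk flush settings in this new module matter because a sync stream stays open and emits lines sporadically; without a sync flush, the compressor would hold data in its internal buffer. A small self-contained sketch of that behaviour with gzip (illustrative only, not part of the package):

import { Readable } from 'node:stream';
import * as zlib from 'node:zlib';

async function* slowLines() {
  yield 'line 1\n';
  await new Promise((resolve) => setTimeout(resolve, 1000));
  yield 'line 2\n'; // without Z_SYNC_FLUSH this line could sit in the compressor until stream end
}

const gzip = zlib.createGzip({ flush: zlib.constants.Z_SYNC_FLUSH });
gzip.on('data', (chunk: Buffer) => {
  // With the sync flush, a compressed chunk is emitted promptly after each input line.
  console.log(`compressed chunk of ${chunk.length} bytes`);
});

Readable.from(slowLines()).pipe(gzip);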
package/src/routes/configure-fastify.ts

@@ -1,7 +1,7 @@
 import type fastify from 'fastify';
 import * as uuid from 'uuid';
 
-import { registerFastifyRoutes } from './route-register.js';
+import { registerFastifyNotFoundHandler, registerFastifyRoutes } from './route-register.js';
 
 import * as system from '../system/system-index.js';
 
@@ -76,6 +76,8 @@ export function configureFastifyServer(server: fastify.FastifyInstance, options:
    */
   server.register(async function (childContext) {
     registerFastifyRoutes(childContext, generateContext, routes.api?.routes ?? DEFAULT_ROUTE_OPTIONS.api.routes);
+    registerFastifyNotFoundHandler(childContext);
+
     // Limit the active concurrent requests
     childContext.addHook(
       'onRequest',
package/src/routes/endpoints/socket-route.ts

@@ -1,6 +1,5 @@
 import { ErrorCode, errors, schema } from '@powersync/lib-services-framework';
 import { RequestParameters } from '@powersync/service-sync-rules';
-import { serialize } from 'bson';
 
 import * as sync from '../../sync/sync-index.js';
 import * as util from '../../util/util-index.js';
@@ -12,7 +11,7 @@ import { APIMetric } from '@powersync/service-types';
 export const syncStreamReactive: SocketRouteGenerator = (router) =>
   router.reactiveStream<util.StreamingSyncRequest, any>(SyncRoutes.STREAM, {
     validator: schema.createTsCodecValidator(util.StreamingSyncRequest, { allowAdditional: true }),
-    handler: async ({ context, params, responder, observer, initialN, signal: upstreamSignal }) => {
+    handler: async ({ context, params, responder, observer, initialN, signal: upstreamSignal, connection }) => {
       const { service_context, logger } = context;
       const { routerEngine, metricsEngine, syncContext } = service_context;
 
@@ -59,8 +58,6 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
         return;
       }
 
-      const syncParams = new RequestParameters(context.token_payload!, params.parameters ?? {});
-
      const {
        storageEngine: { activeBucketStorage }
      } = service_context;
@@ -87,16 +84,21 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
 
      metricsEngine.getUpDownCounter(APIMetric.CONCURRENT_CONNECTIONS).add(1);
      const tracker = new sync.RequestTracker(metricsEngine);
+      if (connection.tracker.encoding) {
+        // Must be set before we start the stream
+        tracker.setCompressed(connection.tracker.encoding);
+      }
      try {
        for await (const data of sync.streamResponse({
          syncContext: syncContext,
          bucketStorage: bucketStorage,
-          syncRules:
+          syncRules: {
+            syncRules,
+            version: bucketStorage.group_id
+          },
          params: {
-            ...params
-            binary_data: true // always true for web sockets
+            ...params
          },
-          syncParams,
          token: context!.token_payload!,
          tokenStreamOptions: {
            // RSocket handles keepalive events by default
@@ -104,25 +106,21 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
          },
          tracker,
          signal,
-          logger
+          logger,
+          isEncodingAsBson: true
        })) {
          if (signal.aborted) {
            break;
          }
          if (data == null) {
-            // Empty value just to flush iterator memory
            continue;
-          } else if (typeof data == 'string') {
-            // Should not happen with binary_data: true
-            throw new Error(`Unexpected string data: ${data}`);
          }
 
          {
-
-            const serialized = serialize(data) as Buffer;
+            const serialized = sync.syncLineToBson(data);
            responder.onNext({ data: serialized }, false);
            requestedN--;
-            tracker.
+            tracker.addPlaintextDataSynced(serialized.length);
          }
 
          if (requestedN <= 0 && !signal.aborted) {
@@ -159,6 +157,17 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
          responder.onComplete();
          removeStopHandler();
          disposer();
+          if (connection.tracker.encoding) {
+            // Technically, this may not be unique to this specific stream, since there could be multiple
+            // rsocket streams on the same websocket connection. We don't have a way to track compressed bytes
+            // on individual streams, and we generally expect 1 stream per connection, so this is a reasonable
+            // approximation.
+            // If there are multiple streams, bytes written would be split arbitrarily across them, but the
+            // total should be correct.
+            // For non-compressed cases, this is tracked by the stream itself.
+            const socketBytes = connection.tracker.getBytesWritten();
+            tracker.addCompressedDataSent(socketBytes);
+          }
          logger.info(`Sync stream complete`, {
            ...tracker.getLogMeta(),
            stream_ms: Date.now() - streamStart,
package/src/routes/endpoints/sync-rules.ts

@@ -202,34 +202,7 @@ async function debugSyncRules(apiHandler: RouteAPI, sync_rules: string) {
 
     return {
       valid: true,
-      bucket_definitions: rules.
-        let all_parameter_queries = [...d.parameterQueries.values()].flat();
-        let all_data_queries = [...d.dataQueries.values()].flat();
-        return {
-          name: d.name,
-          bucket_parameters: d.bucketParameters,
-          global_parameter_queries: d.globalParameterQueries.map((q) => {
-            return {
-              sql: q.sql
-            };
-          }),
-          parameter_queries: all_parameter_queries.map((q) => {
-            return {
-              sql: q.sql,
-              table: q.sourceTable,
-              input_parameters: q.inputParameters
-            };
-          }),
-
-          data_queries: all_data_queries.map((q) => {
-            return {
-              sql: q.sql,
-              table: q.sourceTable,
-              columns: q.columnOutputNames()
-            };
-          })
-        };
-      }),
+      bucket_definitions: rules.bucketSources.map((source) => source.debugRepresentation()),
       source_tables: resolved_tables,
       data_tables: rules.debugGetOutputTables()
     };
package/src/routes/endpoints/sync-stream.ts

@@ -1,5 +1,5 @@
-import { ErrorCode, errors,
-import
+import { ErrorCode, errors, router, schema } from '@powersync/lib-services-framework';
+import Negotiator from 'negotiator';
 import { Readable } from 'stream';
 
 import * as sync from '../../sync/sync-index.js';
@@ -9,11 +9,16 @@ import { authUser } from '../auth.js';
 import { routeDefinition } from '../router.js';
 
 import { APIMetric } from '@powersync/service-types';
+import { maybeCompressResponseStream } from '../compression.js';
 
 export enum SyncRoutes {
   STREAM = '/sync/stream'
 }
 
+const ndJsonContentType = 'application/x-ndjson';
+const concatenatedBsonContentType = 'application/vnd.powersync.bson-stream';
+const supportedContentTypes = [ndJsonContentType, concatenatedBsonContentType];
+
 export const syncStreamed = routeDefinition({
   path: SyncRoutes.STREAM,
   method: router.HTTPMethod.POST,
@@ -26,12 +31,18 @@ export const syncStreamed = routeDefinition({
    const userAgent = headers['x-user-agent'] ?? headers['user-agent'];
    const clientId = payload.params.client_id;
    const streamStart = Date.now();
+    const negotiator = new Negotiator(payload.request);
+    // This falls back to JSON unless there's preference for the bson-stream in the Accept header.
+    const useBson = payload.request.headers.accept
+      ? negotiator.mediaType(supportedContentTypes) == concatenatedBsonContentType
+      : false;
 
    logger.defaultMeta = {
      ...logger.defaultMeta,
      user_agent: userAgent,
      client_id: clientId,
-      user_id: payload.context.user_id
+      user_id: payload.context.user_id,
+      bson: useBson
    };
 
    if (routerEngine.closed) {
@@ -42,9 +53,6 @@ export const syncStreamed = routeDefinition({
      });
    }
 
-    const params: util.StreamingSyncRequest = payload.params;
-    const syncParams = new RequestParameters(payload.context.token_payload!, payload.params.parameters ?? {});
-
    const bucketStorage = await storageEngine.activeBucketStorage.getActiveStorage();
 
    if (bucketStorage == null) {
@@ -61,25 +69,27 @@ export const syncStreamed = routeDefinition({
    const tracker = new sync.RequestTracker(metricsEngine);
    try {
      metricsEngine.getUpDownCounter(APIMetric.CONCURRENT_CONNECTIONS).add(1);
-      const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      const syncLines = sync.streamResponse({
+        syncContext: syncContext,
+        bucketStorage,
+        syncRules: {
+          syncRules,
+          version: bucketStorage.group_id
+        },
+        params: payload.params,
+        token: payload.context.token_payload!,
+        tracker,
+        signal: controller.signal,
+        logger,
+        isEncodingAsBson: useBson
+      });
+
+      const byteContents = useBson ? sync.bsonLines(syncLines) : sync.ndjson(syncLines);
+      const plainStream = Readable.from(sync.transformToBytesTracked(byteContents, tracker), {
+        objectMode: false,
+        highWaterMark: 16 * 1024
+      });
+      const { stream, encodingHeaders } = maybeCompressResponseStream(negotiator, plainStream, tracker);
 
      // Best effort guess on why the stream was closed.
      // We use the `??=` operator everywhere, so that we catch the first relevant
@@ -114,7 +124,8 @@ export const syncStreamed = routeDefinition({
    return new router.RouterResponse({
      status: 200,
      headers: {
-        'Content-Type':
+        'Content-Type': useBson ? concatenatedBsonContentType : ndJsonContentType,
+        ...encodingHeaders
      },
      data: stream,
      afterSend: async (details) => {
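For the HTTP route, the response format is chosen from the Accept header via the negotiator package. A small sketch of how that negotiation behaves (the request object and q-values below are made up for illustration):

import Negotiator from 'negotiator';

const request = {
  headers: { accept: 'application/vnd.powersync.bson-stream, application/x-ndjson;q=0.5' }
};
const negotiator = new Negotiator(request as any);

// Returns the client's most preferred type among those the server offers, or undefined.
const chosen = negotiator.mediaType([
  'application/x-ndjson',
  'application/vnd.powersync.bson-stream'
]);
console.log(chosen); // 'application/vnd.powersync.bson-stream'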
package/src/routes/route-register.ts

@@ -1,8 +1,17 @@
 import type fastify from 'fastify';
 import * as uuid from 'uuid';
 
-import {
+import {
+  ErrorCode,
+  errors,
+  HTTPMethod,
+  logger,
+  RouteNotFound,
+  router,
+  ServiceError
+} from '@powersync/lib-services-framework';
 import { Context, ContextProvider, RequestEndpoint, RequestEndpointHandlerPayload } from './router.js';
+import { FastifyReply } from 'fastify';
 
 export type FastifyEndpoint<I, O, C> = RequestEndpoint<I, O, C> & {
   parse?: boolean;
@@ -69,23 +78,11 @@ export function registerFastifyRoutes(
        const serviceError = errors.asServiceError(ex);
        requestLogger.error(`Request failed`, serviceError);
 
-        response =
-          status: serviceError.errorData.status || 500,
-          headers: {
-            'Content-Type': 'application/json'
-          },
-          data: {
-            error: serviceError.errorData
-          }
-        });
+        response = serviceErrorToResponse(serviceError);
      }
 
-      Object.keys(response.headers).forEach((key) => {
-        reply.header(key, response.headers[key]);
-      });
-      reply.status(response.status);
      try {
-        await reply
+        await respond(reply, response);
      } finally {
        await response.afterSend?.({ clientClosed: request.socket.closed });
        requestLogger.info(`${e.method} ${request.url}`, {
@@ -106,3 +103,32 @@ export function registerFastifyRoutes(
  });
 }
 }
+
+/**
+ * Registers a custom not-found handler to ensure 404 error responses have the same schema as other service errors.
+ */
+export function registerFastifyNotFoundHandler(app: fastify.FastifyInstance) {
+  app.setNotFoundHandler(async (request, reply) => {
+    await respond(reply, serviceErrorToResponse(new RouteNotFound(request.originalUrl, request.method)));
+  });
+}
+
+function serviceErrorToResponse(error: ServiceError): router.RouterResponse {
+  return new router.RouterResponse({
+    status: error.errorData.status || 500,
+    headers: {
+      'Content-Type': 'application/json'
+    },
+    data: {
+      error: error.errorData
+    }
+  });
+}
+
+async function respond(reply: FastifyReply, response: router.RouterResponse) {
+  Object.keys(response.headers).forEach((key) => {
+    reply.header(key, response.headers[key]);
+  });
+  reply.status(response.status);
+  await reply.send(response.data);
+}
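The new not-found handler relies on Fastify's setNotFoundHandler hook so 404s share the JSON error envelope used elsewhere. A standalone sketch of the same idea, using a hypothetical error shape rather than the service's ServiceError type:

import fastify from 'fastify';

const app = fastify();

app.setNotFoundHandler(async (request, reply) => {
  reply
    .status(404)
    .header('Content-Type', 'application/json')
    .send({
      error: {
        status: 404,
        code: 'ROUTE_NOT_FOUND', // hypothetical code, not necessarily the service's ErrorCode value
        description: `Route not found: ${request.method} ${request.url}`
      }
    });
});

app.listen({ port: 3000 });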
package/src/storage/BucketStorage.ts

@@ -39,8 +39,8 @@ export enum SyncRuleState {
 export const DEFAULT_DOCUMENT_BATCH_LIMIT = 1000;
 export const DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES = 1 * 1024 * 1024;
 
-export function mergeToast(record: ToastableSqliteRow
-  const newRecord: ToastableSqliteRow = {};
+export function mergeToast<V>(record: ToastableSqliteRow<V>, persisted: ToastableSqliteRow<V>): ToastableSqliteRow<V> {
+  const newRecord: ToastableSqliteRow<V> = {};
   for (let key in record) {
     if (typeof record[key] == 'undefined') {
       newRecord[key] = persisted[key];
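For context on the mergeToast change: TOAST-able columns that were unchanged in an update arrive as undefined, and the helper fills them in from the previously persisted row; the generic parameter now carries the value type through. An illustrative call, with a simplified stand-in type (the real ToastableSqliteRow comes from @powersync/service-sync-rules):

// Simplified stand-in for the real type.
type ToastableSqliteRow<V = unknown> = Record<string, V | undefined>;

const update: ToastableSqliteRow<string> = {
  id: '1',
  name: 'new name',
  description: undefined // unchanged TOAST column, value not included in the replicated update
};
const persisted: ToastableSqliteRow<string> = {
  id: '1',
  name: 'old name',
  description: 'large unchanged value'
};

// mergeToast(update, persisted) fills undefined columns from the persisted row,
// presumably yielding { id: '1', name: 'new name', description: 'large unchanged value' }.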
package/src/storage/BucketStorageBatch.ts

@@ -1,5 +1,11 @@
 import { ObserverClient } from '@powersync/lib-services-framework';
-import {
+import {
+  EvaluatedParameters,
+  EvaluatedRow,
+  SqliteInputRow,
+  SqliteRow,
+  ToastableSqliteRow
+} from '@powersync/service-sync-rules';
 import { BSON } from 'bson';
 import { ReplicationEventPayload } from './ReplicationEventPayload.js';
 import { SourceTable, TableSnapshotStatus } from './SourceTable.js';
@@ -60,18 +66,29 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis
   keepalive(lsn: string): Promise<boolean>;
 
   /**
-   * Set the LSN
+   * Set the LSN that replication should resume from.
+   *
+   * This can be used for:
+   * 1. Setting the LSN for a snapshot, before starting replication.
+   * 2. Setting the LSN to resume from after a replication restart, without advancing the checkpoint LSN via a commit.
   *
   * Not required if the source database keeps track of this, for example with
   * PostgreSQL logical replication slots.
   */
-
+  setResumeLsn(lsn: string): Promise<void>;
 
   /**
   * Get the last checkpoint LSN, from either commit or keepalive.
   */
   lastCheckpointLsn: string | null;
 
+  /**
+   * LSN to resume from.
+   *
+   * Not relevant for streams where the source keeps track of replication progress, such as Postgres.
+   */
+  resumeFromLsn: string | null;
+
   markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string): Promise<SourceTable[]>;
 
   updateTableProgress(table: SourceTable, progress: Partial<TableSnapshotStatus>): Promise<SourceTable>;
@@ -121,7 +138,7 @@ export interface SaveInsert {
   sourceTable: SourceTable;
   before?: undefined;
   beforeReplicaId?: undefined;
-  after:
+  after: SqliteInputRow;
   afterReplicaId: ReplicaId;
 }
 
@@ -132,7 +149,7 @@ export interface SaveUpdate {
   /**
   * This is only present when the id has changed, and will only contain replica identity columns.
   */
-  before?:
+  before?: SqliteInputRow;
   beforeReplicaId?: ReplicaId;
 
   /**
@@ -147,7 +164,7 @@ export interface SaveUpdate {
 export interface SaveDelete {
   tag: SaveOperationTag.DELETE;
   sourceTable: SourceTable;
-  before?:
+  before?: SqliteInputRow;
   beforeReplicaId: ReplicaId;
   after?: undefined;
   afterReplicaId?: undefined;
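The new setResumeLsn/resumeFromLsn pair is documented above for connectors that must track their own replication position (unlike Postgres slots). A hypothetical usage sketch, with made-up helper names standing in for a real connector:

interface ResumableBatch {
  resumeFromLsn: string | null;
  setResumeLsn(lsn: string): Promise<void>;
}

// Trivial stand-ins for a real connector's snapshot and change-stream logic.
async function snapshotAndGetLsn(): Promise<string> {
  return '0/0';
}
async function* readChanges(fromLsn: string) {
  // A real implementation would stream changes from the source database, starting after fromLsn.
  yield { lsn: fromLsn };
}

async function replicate(batch: ResumableBatch) {
  let startLsn = batch.resumeFromLsn;
  if (startLsn == null) {
    // First run: take a snapshot, then record the LSN that replication should resume from.
    startLsn = await snapshotAndGetLsn();
    await batch.setResumeLsn(startLsn);
  }
  for await (const change of readChanges(startLsn)) {
    // ... apply the change, periodically committing a checkpoint
  }
}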
package/src/storage/ChecksumCache.ts

@@ -1,40 +1,21 @@
 import { OrderedSet } from '@js-sdsl/ordered-set';
 import { LRUCache } from 'lru-cache/min';
 import { BucketChecksum } from '../util/protocol-types.js';
-import { addBucketChecksums, ChecksumMap, InternalOpId } from '../util/utils.js';
+import { addBucketChecksums, ChecksumMap, InternalOpId, PartialChecksum } from '../util/utils.js';
 
 interface ChecksumFetchContext {
   fetch(bucket: string): Promise<BucketChecksum>;
   checkpoint: InternalOpId;
 }
 
-export interface PartialChecksum {
-  bucket: string;
-  /**
-   * 32-bit unsigned hash.
-   */
-  partialChecksum: number;
-
-  /**
-   * Count of operations - informational only.
-   */
-  partialCount: number;
-
-  /**
-   * True if the queried operations contains (starts with) a CLEAR
-   * operation, indicating that the partial checksum is the full
-   * checksum, and must not be added to a previously-cached checksum.
-   */
-  isFullChecksum: boolean;
-}
-
 export interface FetchPartialBucketChecksum {
   bucket: string;
   start?: InternalOpId;
   end: InternalOpId;
 }
 
-export type
+export type PartialOrFullChecksum = PartialChecksum | BucketChecksum;
+export type PartialChecksumMap = Map<string, PartialOrFullChecksum>;
 
 export type FetchChecksums = (batch: FetchPartialBucketChecksum[]) => Promise<PartialChecksumMap>;
 
@@ -127,6 +108,11 @@ export class ChecksumCache {
    });
  }
 
+  clear() {
+    this.cache.clear();
+    this.bucketCheckpoints.clear();
+  }
+
  async getChecksums(checkpoint: InternalOpId, buckets: string[]): Promise<BucketChecksum[]> {
    const checksums = await this.getChecksumMap(checkpoint, buckets);
    // Return results in the same order as the request
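The relocated PartialChecksum interface (now imported from util/utils.js) documents the contract: a 32-bit partial checksum is combined with a previously cached value unless isFullChecksum marks it as a complete replacement. A plausible sketch of that combination rule, based only on the doc comment; the real addBucketChecksums helper may differ in detail:

interface CachedBucketChecksum {
  checksum: number; // 32-bit unsigned
  count: number;
}

interface PartialChecksum {
  partialChecksum: number;
  partialCount: number;
  isFullChecksum: boolean;
}

function applyPartial(cached: CachedBucketChecksum, partial: PartialChecksum): CachedBucketChecksum {
  if (partial.isFullChecksum) {
    // The queried range started with a CLEAR operation, so the partial value replaces the cache.
    return { checksum: partial.partialChecksum >>> 0, count: partial.partialCount };
  }
  return {
    checksum: (cached.checksum + partial.partialChecksum) >>> 0, // wrap to 32-bit unsigned
    count: cached.count + partial.partialCount
  };
}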
package/src/storage/ReplicationEventPayload.ts

@@ -4,8 +4,8 @@ import { BucketStorageBatch, SaveOp } from './BucketStorageBatch.js';
 
 export type EventData = {
   op: SaveOp;
-  before?: sync_rules.
-  after?: sync_rules.
+  before?: sync_rules.SqliteInputRow;
+  after?: sync_rules.SqliteInputRow;
 };
 
 export type ReplicationEventPayload = {
package/src/storage/SourceEntity.ts

@@ -10,17 +10,17 @@ export interface ColumnDescriptor {
   typeId?: number;
 }
 
-// TODO: This needs to be consolidated with SourceTable into something new.
 export interface SourceEntityDescriptor {
   /**
-   * The internal id of the
-   *
+   * The internal id of the source entity structure in the database.
   * If undefined, the schema and name are used as the identifier.
-   *
   * If specified, this is specifically used to detect renames.
   */
   objectId: number | string | undefined;
   schema: string;
   name: string;
-
+  /**
+   * The columns that are used to uniquely identify a record in the source entity.
+   */
+  replicaIdColumns: ColumnDescriptor[];
 }