@mauryasumit/driftdb 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +810 -0
- package/dist/db.d.ts +30 -0
- package/dist/db.d.ts.map +1 -0
- package/dist/db.js +115 -0
- package/dist/db.js.map +1 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +12 -0
- package/dist/index.js.map +1 -0
- package/dist/orm/model.d.ts +35 -0
- package/dist/orm/model.d.ts.map +1 -0
- package/dist/orm/model.js +34 -0
- package/dist/orm/model.js.map +1 -0
- package/dist/orm/query-builder.d.ts +8 -0
- package/dist/orm/query-builder.d.ts.map +1 -0
- package/dist/orm/query-builder.js +90 -0
- package/dist/orm/query-builder.js.map +1 -0
- package/dist/orm/repository.d.ts +38 -0
- package/dist/orm/repository.d.ts.map +1 -0
- package/dist/orm/repository.js +107 -0
- package/dist/orm/repository.js.map +1 -0
- package/dist/orm/schema.d.ts +20 -0
- package/dist/orm/schema.d.ts.map +1 -0
- package/dist/orm/schema.js +81 -0
- package/dist/orm/schema.js.map +1 -0
- package/dist/queue/queue.d.ts +17 -0
- package/dist/queue/queue.d.ts.map +1 -0
- package/dist/queue/queue.js +109 -0
- package/dist/queue/queue.js.map +1 -0
- package/dist/storage/s3-adapter.d.ts +21 -0
- package/dist/storage/s3-adapter.d.ts.map +1 -0
- package/dist/storage/s3-adapter.js +133 -0
- package/dist/storage/s3-adapter.js.map +1 -0
- package/dist/sync/change-log.d.ts +15 -0
- package/dist/sync/change-log.d.ts.map +1 -0
- package/dist/sync/change-log.js +78 -0
- package/dist/sync/change-log.js.map +1 -0
- package/dist/sync/engine.d.ts +31 -0
- package/dist/sync/engine.d.ts.map +1 -0
- package/dist/sync/engine.js +210 -0
- package/dist/sync/engine.js.map +1 -0
- package/dist/sync/snapshot-manager.d.ts +17 -0
- package/dist/sync/snapshot-manager.d.ts.map +1 -0
- package/dist/sync/snapshot-manager.js +91 -0
- package/dist/sync/snapshot-manager.js.map +1 -0
- package/dist/types.d.ts +120 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +3 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/compress.d.ts +3 -0
- package/dist/utils/compress.d.ts.map +1 -0
- package/dist/utils/compress.js +16 -0
- package/dist/utils/compress.js.map +1 -0
- package/dist/utils/crypto.d.ts +4 -0
- package/dist/utils/crypto.d.ts.map +1 -0
- package/dist/utils/crypto.js +35 -0
- package/dist/utils/crypto.js.map +1 -0
- package/dist/utils/id.d.ts +3 -0
- package/dist/utils/id.d.ts.map +1 -0
- package/dist/utils/id.js +13 -0
- package/dist/utils/id.js.map +1 -0
- package/dist/utils/retry.d.ts +5 -0
- package/dist/utils/retry.d.ts.map +1 -0
- package/dist/utils/retry.js +36 -0
- package/dist/utils/retry.js.map +1 -0
- package/package.json +55 -0
- package/src/db.ts +154 -0
- package/src/index.ts +24 -0
- package/src/orm/model.ts +95 -0
- package/src/orm/query-builder.ts +100 -0
- package/src/orm/repository.ts +156 -0
- package/src/orm/schema.ts +92 -0
- package/src/queue/queue.ts +138 -0
- package/src/storage/s3-adapter.ts +181 -0
- package/src/sync/change-log.ts +101 -0
- package/src/sync/engine.ts +249 -0
- package/src/sync/snapshot-manager.ts +80 -0
- package/src/types.ts +130 -0
- package/src/utils/compress.ts +14 -0
- package/src/utils/crypto.ts +33 -0
- package/src/utils/id.ts +10 -0
- package/src/utils/retry.ts +38 -0
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
import type Database from 'better-sqlite3';
|
|
2
|
+
import type { SyncJob } from '../types.js';
|
|
3
|
+
import { generateId } from '../utils/id.js';
|
|
4
|
+
import { nextRetryAt } from '../utils/retry.js';
|
|
5
|
+
import type { RetryConfig } from '../types.js';
|
|
6
|
+
|
|
7
|
+
const SCHEMA = `
|
|
8
|
+
CREATE TABLE IF NOT EXISTS _driftdb_queue (
|
|
9
|
+
id TEXT PRIMARY KEY,
|
|
10
|
+
type TEXT NOT NULL,
|
|
11
|
+
payload TEXT NOT NULL,
|
|
12
|
+
status TEXT NOT NULL DEFAULT 'pending',
|
|
13
|
+
attempts INTEGER NOT NULL DEFAULT 0,
|
|
14
|
+
nextRetryAt INTEGER NOT NULL DEFAULT 0,
|
|
15
|
+
createdAt INTEGER NOT NULL,
|
|
16
|
+
error TEXT
|
|
17
|
+
);
|
|
18
|
+
CREATE INDEX IF NOT EXISTS idx_driftdb_queue_status ON _driftdb_queue (status, nextRetryAt);
|
|
19
|
+
`;
|
|
20
|
+
|
|
21
|
+
export class SyncQueue {
|
|
22
|
+
private readonly db: Database.Database;
|
|
23
|
+
private readonly retryConfig: RetryConfig;
|
|
24
|
+
|
|
25
|
+
constructor(db: Database.Database, retryConfig: RetryConfig) {
|
|
26
|
+
this.db = db;
|
|
27
|
+
this.retryConfig = retryConfig;
|
|
28
|
+
db.exec(SCHEMA);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
enqueue(type: SyncJob['type'], payload: object): SyncJob {
|
|
32
|
+
const job: SyncJob = {
|
|
33
|
+
id: generateId(),
|
|
34
|
+
type,
|
|
35
|
+
payload: JSON.stringify(payload),
|
|
36
|
+
status: 'pending',
|
|
37
|
+
attempts: 0,
|
|
38
|
+
nextRetryAt: 0,
|
|
39
|
+
createdAt: Date.now(),
|
|
40
|
+
error: null,
|
|
41
|
+
};
|
|
42
|
+
|
|
43
|
+
this.db
|
|
44
|
+
.prepare(
|
|
45
|
+
`INSERT INTO _driftdb_queue (id, type, payload, status, attempts, nextRetryAt, createdAt, error)
|
|
46
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`
|
|
47
|
+
)
|
|
48
|
+
.run(job.id, job.type, job.payload, job.status, job.attempts, job.nextRetryAt, job.createdAt, job.error);
|
|
49
|
+
|
|
50
|
+
return job;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
dequeue(limit = 5): SyncJob[] {
|
|
54
|
+
const now = Date.now();
|
|
55
|
+
const rows = this.db
|
|
56
|
+
.prepare(
|
|
57
|
+
`SELECT * FROM _driftdb_queue
|
|
58
|
+
WHERE status IN ('pending', 'failed')
|
|
59
|
+
AND nextRetryAt <= ?
|
|
60
|
+
ORDER BY createdAt ASC
|
|
61
|
+
LIMIT ?`
|
|
62
|
+
)
|
|
63
|
+
.all(now, limit) as SyncJob[];
|
|
64
|
+
|
|
65
|
+
if (rows.length === 0) return [];
|
|
66
|
+
|
|
67
|
+
const ids = rows.map((r) => r.id);
|
|
68
|
+
const placeholders = ids.map(() => '?').join(',');
|
|
69
|
+
this.db
|
|
70
|
+
.prepare(`UPDATE _driftdb_queue SET status = 'processing' WHERE id IN (${placeholders})`)
|
|
71
|
+
.run(...ids);
|
|
72
|
+
|
|
73
|
+
return rows.map((r) => ({ ...r, status: 'processing' as const }));
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
markDone(id: string): void {
|
|
77
|
+
this.db
|
|
78
|
+
.prepare(`UPDATE _driftdb_queue SET status = 'done', error = NULL WHERE id = ?`)
|
|
79
|
+
.run(id);
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
markFailed(id: string, error: string): void {
|
|
83
|
+
const job = this.db
|
|
84
|
+
.prepare(`SELECT attempts FROM _driftdb_queue WHERE id = ?`)
|
|
85
|
+
.get(id) as Pick<SyncJob, 'attempts'> | undefined;
|
|
86
|
+
|
|
87
|
+
if (!job) return;
|
|
88
|
+
|
|
89
|
+
const attempts = job.attempts + 1;
|
|
90
|
+
const willRetry = attempts <= this.retryConfig.maxRetries;
|
|
91
|
+
const status = willRetry ? 'failed' : 'failed';
|
|
92
|
+
const retryAt = willRetry ? nextRetryAt(attempts, this.retryConfig) : 0;
|
|
93
|
+
|
|
94
|
+
this.db
|
|
95
|
+
.prepare(
|
|
96
|
+
`UPDATE _driftdb_queue
|
|
97
|
+
SET status = ?, attempts = ?, nextRetryAt = ?, error = ?
|
|
98
|
+
WHERE id = ?`
|
|
99
|
+
)
|
|
100
|
+
.run(status, attempts, retryAt, error.slice(0, 1000), id);
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
resetStuck(): void {
|
|
104
|
+
const staleThreshold = Date.now() - 5 * 60 * 1000;
|
|
105
|
+
this.db
|
|
106
|
+
.prepare(
|
|
107
|
+
`UPDATE _driftdb_queue
|
|
108
|
+
SET status = 'pending', nextRetryAt = 0
|
|
109
|
+
WHERE status = 'processing' AND createdAt < ?`
|
|
110
|
+
)
|
|
111
|
+
.run(staleThreshold);
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
pendingCount(): number {
|
|
115
|
+
const row = this.db
|
|
116
|
+
.prepare(
|
|
117
|
+
`SELECT COUNT(*) as cnt FROM _driftdb_queue WHERE status IN ('pending', 'processing', 'failed')`
|
|
118
|
+
)
|
|
119
|
+
.get() as { cnt: number };
|
|
120
|
+
return row.cnt;
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
purgeCompleted(olderThanMs = 24 * 60 * 60 * 1000): void {
|
|
124
|
+
const cutoff = Date.now() - olderThanMs;
|
|
125
|
+
this.db
|
|
126
|
+
.prepare(`DELETE FROM _driftdb_queue WHERE status = 'done' AND createdAt < ?`)
|
|
127
|
+
.run(cutoff);
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
hasPendingOfType(type: SyncJob['type']): boolean {
|
|
131
|
+
const row = this.db
|
|
132
|
+
.prepare(
|
|
133
|
+
`SELECT 1 FROM _driftdb_queue WHERE type = ? AND status IN ('pending', 'processing', 'failed') LIMIT 1`
|
|
134
|
+
)
|
|
135
|
+
.get(type) as { 1: number } | undefined;
|
|
136
|
+
return row !== undefined;
|
|
137
|
+
}
|
|
138
|
+
}
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
import {
|
|
2
|
+
S3Client,
|
|
3
|
+
GetObjectCommand,
|
|
4
|
+
PutObjectCommand,
|
|
5
|
+
ListObjectsV2Command,
|
|
6
|
+
HeadObjectCommand,
|
|
7
|
+
type S3ClientConfig,
|
|
8
|
+
} from '@aws-sdk/client-s3';
|
|
9
|
+
import { Upload } from '@aws-sdk/lib-storage';
|
|
10
|
+
import { Readable } from 'stream';
|
|
11
|
+
import type { S3Config, SyncManifest } from '../types.js';
|
|
12
|
+
import { compress, decompress } from '../utils/compress.js';
|
|
13
|
+
import { encrypt, decrypt } from '../utils/crypto.js';
|
|
14
|
+
|
|
15
|
+
export interface S3UploadOptions {
|
|
16
|
+
compress?: boolean;
|
|
17
|
+
encryptionKey?: string;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
export class S3Adapter {
|
|
21
|
+
private readonly client: S3Client;
|
|
22
|
+
private readonly bucket: string;
|
|
23
|
+
private readonly prefix: string;
|
|
24
|
+
|
|
25
|
+
constructor(config: S3Config) {
|
|
26
|
+
const clientConfig: S3ClientConfig = {
|
|
27
|
+
region: config.region,
|
|
28
|
+
};
|
|
29
|
+
|
|
30
|
+
if (config.accessKeyId && config.secretAccessKey) {
|
|
31
|
+
clientConfig.credentials = {
|
|
32
|
+
accessKeyId: config.accessKeyId,
|
|
33
|
+
secretAccessKey: config.secretAccessKey,
|
|
34
|
+
};
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
if (config.endpoint) {
|
|
38
|
+
clientConfig.endpoint = config.endpoint;
|
|
39
|
+
clientConfig.forcePathStyle = config.forcePathStyle ?? true;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
this.client = new S3Client(clientConfig);
|
|
43
|
+
this.bucket = config.bucket;
|
|
44
|
+
this.prefix = config.prefix ? config.prefix.replace(/\/$/, '') : 'driftdb';
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
private key(path: string): string {
|
|
48
|
+
return `${this.prefix}/${path}`;
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
async upload(
|
|
52
|
+
path: string,
|
|
53
|
+
data: Buffer,
|
|
54
|
+
options: S3UploadOptions = {}
|
|
55
|
+
): Promise<void> {
|
|
56
|
+
let payload = data;
|
|
57
|
+
|
|
58
|
+
if (options.compress) {
|
|
59
|
+
payload = await compress(payload);
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
if (options.encryptionKey) {
|
|
63
|
+
payload = encrypt(payload, options.encryptionKey);
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
const contentEncoding = options.compress ? 'gzip' : undefined;
|
|
67
|
+
const metadata: Record<string, string> = {};
|
|
68
|
+
if (options.compress) metadata['x-driftdb-compressed'] = '1';
|
|
69
|
+
if (options.encryptionKey) metadata['x-driftdb-encrypted'] = '1';
|
|
70
|
+
|
|
71
|
+
if (payload.length > 5 * 1024 * 1024) {
|
|
72
|
+
const upload = new Upload({
|
|
73
|
+
client: this.client,
|
|
74
|
+
params: {
|
|
75
|
+
Bucket: this.bucket,
|
|
76
|
+
Key: this.key(path),
|
|
77
|
+
Body: Readable.from(payload),
|
|
78
|
+
ContentEncoding: contentEncoding,
|
|
79
|
+
Metadata: metadata,
|
|
80
|
+
},
|
|
81
|
+
});
|
|
82
|
+
await upload.done();
|
|
83
|
+
} else {
|
|
84
|
+
await this.client.send(
|
|
85
|
+
new PutObjectCommand({
|
|
86
|
+
Bucket: this.bucket,
|
|
87
|
+
Key: this.key(path),
|
|
88
|
+
Body: payload,
|
|
89
|
+
ContentEncoding: contentEncoding,
|
|
90
|
+
Metadata: metadata,
|
|
91
|
+
})
|
|
92
|
+
);
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
async download(
|
|
97
|
+
path: string,
|
|
98
|
+
options: S3UploadOptions = {}
|
|
99
|
+
): Promise<Buffer> {
|
|
100
|
+
const response = await this.client.send(
|
|
101
|
+
new GetObjectCommand({ Bucket: this.bucket, Key: this.key(path) })
|
|
102
|
+
);
|
|
103
|
+
|
|
104
|
+
if (!response.Body) {
|
|
105
|
+
throw new Error(`Empty response body for key: ${path}`);
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
const chunks: Buffer[] = [];
|
|
109
|
+
for await (const chunk of response.Body as AsyncIterable<Uint8Array>) {
|
|
110
|
+
chunks.push(Buffer.from(chunk));
|
|
111
|
+
}
|
|
112
|
+
let data = Buffer.concat(chunks);
|
|
113
|
+
|
|
114
|
+
if (options.encryptionKey) {
|
|
115
|
+
data = Buffer.from(decrypt(data, options.encryptionKey));
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
if (options.compress) {
|
|
119
|
+
data = Buffer.from(await decompress(data));
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
return data;
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
async exists(path: string): Promise<boolean> {
|
|
126
|
+
try {
|
|
127
|
+
await this.client.send(
|
|
128
|
+
new HeadObjectCommand({ Bucket: this.bucket, Key: this.key(path) })
|
|
129
|
+
);
|
|
130
|
+
return true;
|
|
131
|
+
} catch {
|
|
132
|
+
return false;
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
async listKeys(prefix: string): Promise<string[]> {
|
|
137
|
+
const fullPrefix = this.key(prefix);
|
|
138
|
+
const keys: string[] = [];
|
|
139
|
+
let continuationToken: string | undefined;
|
|
140
|
+
|
|
141
|
+
do {
|
|
142
|
+
const response = await this.client.send(
|
|
143
|
+
new ListObjectsV2Command({
|
|
144
|
+
Bucket: this.bucket,
|
|
145
|
+
Prefix: fullPrefix,
|
|
146
|
+
ContinuationToken: continuationToken,
|
|
147
|
+
})
|
|
148
|
+
);
|
|
149
|
+
|
|
150
|
+
for (const obj of response.Contents ?? []) {
|
|
151
|
+
if (obj.Key) {
|
|
152
|
+
keys.push(obj.Key.slice(this.prefix.length + 1));
|
|
153
|
+
}
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
continuationToken = response.NextContinuationToken;
|
|
157
|
+
} while (continuationToken);
|
|
158
|
+
|
|
159
|
+
return keys;
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
async putManifest(nodeId: string, manifest: SyncManifest): Promise<void> {
|
|
163
|
+
const data = Buffer.from(JSON.stringify(manifest), 'utf8');
|
|
164
|
+
await this.upload(`nodes/${nodeId}/manifest.json`, data);
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
async getManifest(nodeId: string): Promise<SyncManifest | null> {
|
|
168
|
+
const path = `nodes/${nodeId}/manifest.json`;
|
|
169
|
+
if (!(await this.exists(path))) return null;
|
|
170
|
+
const data = await this.download(path);
|
|
171
|
+
return JSON.parse(data.toString('utf8')) as SyncManifest;
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
logKey(nodeId: string, fromSeq: number, toSeq: number): string {
|
|
175
|
+
return `nodes/${nodeId}/logs/${String(fromSeq).padStart(12, '0')}-${String(toSeq).padStart(12, '0')}.json`;
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
snapshotKey(nodeId: string, timestamp: number): string {
|
|
179
|
+
return `nodes/${nodeId}/snapshots/${timestamp}.sqlite`;
|
|
180
|
+
}
|
|
181
|
+
}
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import type Database from 'better-sqlite3';
|
|
2
|
+
import type { ChangeLogEntry, LogBatch } from '../types.js';
|
|
3
|
+
|
|
4
|
+
const SCHEMA = `
|
|
5
|
+
CREATE TABLE IF NOT EXISTS _driftdb_log (
|
|
6
|
+
sequence INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
7
|
+
timestamp INTEGER NOT NULL,
|
|
8
|
+
nodeId TEXT NOT NULL,
|
|
9
|
+
\`table\` TEXT NOT NULL,
|
|
10
|
+
operation TEXT NOT NULL,
|
|
11
|
+
data TEXT,
|
|
12
|
+
synced INTEGER NOT NULL DEFAULT 0
|
|
13
|
+
);
|
|
14
|
+
CREATE INDEX IF NOT EXISTS idx_driftdb_log_synced ON _driftdb_log (synced, sequence);
|
|
15
|
+
`;
|
|
16
|
+
|
|
17
|
+
export class ChangeLog {
|
|
18
|
+
private readonly db: Database.Database;
|
|
19
|
+
private readonly nodeId: string;
|
|
20
|
+
|
|
21
|
+
constructor(db: Database.Database, nodeId: string) {
|
|
22
|
+
this.db = db;
|
|
23
|
+
this.nodeId = nodeId;
|
|
24
|
+
db.exec(SCHEMA);
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
append(
|
|
28
|
+
table: string,
|
|
29
|
+
operation: ChangeLogEntry['operation'],
|
|
30
|
+
data: Record<string, unknown> | null
|
|
31
|
+
): number {
|
|
32
|
+
const result = this.db
|
|
33
|
+
.prepare(
|
|
34
|
+
`INSERT INTO _driftdb_log (timestamp, nodeId, \`table\`, operation, data, synced)
|
|
35
|
+
VALUES (?, ?, ?, ?, ?, 0)`
|
|
36
|
+
)
|
|
37
|
+
.run(Date.now(), this.nodeId, table, operation, data ? JSON.stringify(data) : null);
|
|
38
|
+
|
|
39
|
+
return Number(result.lastInsertRowid);
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
pendingEntries(limit: number): ChangeLogEntry[] {
|
|
43
|
+
return this.db
|
|
44
|
+
.prepare(
|
|
45
|
+
`SELECT * FROM _driftdb_log WHERE synced = 0 ORDER BY sequence ASC LIMIT ?`
|
|
46
|
+
)
|
|
47
|
+
.all(limit) as ChangeLogEntry[];
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
markSynced(fromSequence: number, toSequence: number): void {
|
|
51
|
+
this.db
|
|
52
|
+
.prepare(
|
|
53
|
+
`UPDATE _driftdb_log SET synced = 1 WHERE sequence >= ? AND sequence <= ?`
|
|
54
|
+
)
|
|
55
|
+
.run(fromSequence, toSequence);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
pendingCount(): number {
|
|
59
|
+
const row = this.db
|
|
60
|
+
.prepare(`SELECT COUNT(*) as cnt FROM _driftdb_log WHERE synced = 0`)
|
|
61
|
+
.get() as { cnt: number };
|
|
62
|
+
return row.cnt;
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
latestSyncedSequence(): number {
|
|
66
|
+
const row = this.db
|
|
67
|
+
.prepare(`SELECT MAX(sequence) as seq FROM _driftdb_log WHERE synced = 1`)
|
|
68
|
+
.get() as { seq: number | null };
|
|
69
|
+
return row.seq ?? 0;
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
buildBatch(entries: ChangeLogEntry[]): LogBatch {
|
|
73
|
+
return {
|
|
74
|
+
version: 1,
|
|
75
|
+
nodeId: this.nodeId,
|
|
76
|
+
fromSequence: entries[0]?.sequence ?? 0,
|
|
77
|
+
toSequence: entries[entries.length - 1]?.sequence ?? 0,
|
|
78
|
+
entries: entries.map((e) => ({
|
|
79
|
+
sequence: e.sequence,
|
|
80
|
+
timestamp: e.timestamp,
|
|
81
|
+
table: e.table,
|
|
82
|
+
operation: e.operation,
|
|
83
|
+
data: e.data ? (JSON.parse(e.data) as Record<string, unknown>) : null,
|
|
84
|
+
})),
|
|
85
|
+
};
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
purgeOldSynced(keepLatest = 5000): void {
|
|
89
|
+
const row = this.db
|
|
90
|
+
.prepare(
|
|
91
|
+
`SELECT sequence FROM _driftdb_log WHERE synced = 1 ORDER BY sequence DESC LIMIT 1 OFFSET ?`
|
|
92
|
+
)
|
|
93
|
+
.get(keepLatest) as { sequence: number } | undefined;
|
|
94
|
+
|
|
95
|
+
if (row) {
|
|
96
|
+
this.db
|
|
97
|
+
.prepare(`DELETE FROM _driftdb_log WHERE synced = 1 AND sequence <= ?`)
|
|
98
|
+
.run(row.sequence);
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
}
|
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
import type Database from 'better-sqlite3';
|
|
2
|
+
import type { DBConfig, SyncJob, SyncMetrics, UploadLogPayload, UploadSnapshotPayload } from '../types.js';
|
|
3
|
+
import { SyncQueue } from '../queue/queue.js';
|
|
4
|
+
import { ChangeLog } from './change-log.js';
|
|
5
|
+
import { SnapshotManager } from './snapshot-manager.js';
|
|
6
|
+
import { S3Adapter } from '../storage/s3-adapter.js';
|
|
7
|
+
import { withRetry } from '../utils/retry.js';
|
|
8
|
+
import { statSync, existsSync } from 'fs';
|
|
9
|
+
|
|
10
|
+
// Defaults applied when the corresponding DBConfig fields are unset.
const DEFAULT_SYNC_INTERVAL_MS = 5_000;
const DEFAULT_SNAPSHOT_EVERY_N_LOGS = 1_000;
const DEFAULT_MAX_BATCH_SIZE = 100;
const DEFAULT_RETRY_CONFIG = { maxRetries: 5, baseDelayMs: 500, maxDelayMs: 30_000 };

/**
 * Orchestrates background synchronization: on a timer it batches pending
 * change-log entries into upload jobs, drains the durable SyncQueue
 * (uploading log batches and snapshots to S3), and maintains the per-node
 * manifest. Without an s3Config, the engine is constructed but tick() is
 * a no-op.
 */
export class SyncEngine {
  private readonly db: Database.Database;
  private readonly config: DBConfig;
  private readonly nodeId: string;
  private readonly queue: SyncQueue;
  private readonly changeLog: ChangeLog;
  // Both are null when no s3Config was provided (local-only mode).
  private readonly snapshotManager: SnapshotManager | null;
  private readonly s3: S3Adapter | null;

  private timer: ReturnType<typeof setInterval> | null = null;
  // Re-entrancy guard: prevents overlapping ticks if one runs long.
  private isProcessing = false;

  private metrics: SyncMetrics = {
    lastSyncAt: null,
    lastSnapshotAt: null,
    pendingChanges: 0,
    dbSizeBytes: 0,
    totalSynced: 0,
    syncErrors: 0,
    isRunning: false,
  };

  constructor(db: Database.Database, nodeId: string, config: DBConfig) {
    this.db = db;
    this.config = config;
    this.nodeId = nodeId;

    // User-supplied retry settings override the defaults field-by-field.
    const retryConfig = { ...DEFAULT_RETRY_CONFIG, ...config.retryConfig };
    this.queue = new SyncQueue(db, retryConfig);
    this.changeLog = new ChangeLog(db, nodeId);

    if (config.s3Config) {
      this.s3 = new S3Adapter(config.s3Config);
      // Compression defaults ON; only an explicit `false` disables it.
      const uploadOptions = {
        compress: config.compression !== false,
        encryptionKey: config.encryption?.key,
      };
      this.snapshotManager = new SnapshotManager(
        db,
        this.s3,
        nodeId,
        config.sqlitePath,
        uploadOptions
      );
    } else {
      this.s3 = null;
      this.snapshotManager = null;
    }
  }

  /** Accessor for the change log (used by the DB layer to record writes). */
  getChangeLog(): ChangeLog {
    return this.changeLog;
  }

  /** Accessor for the underlying job queue. */
  getQueue(): SyncQueue {
    return this.queue;
  }

  /**
   * Starts the periodic sync timer (idempotent). Jobs left in
   * 'processing' by a previous crash are reset first. The timer is
   * unref()'d so it never keeps the Node process alive on its own.
   */
  start(): void {
    if (this.timer) return;
    this.metrics.isRunning = true;
    this.queue.resetStuck();

    const intervalMs = this.config.syncIntervalMs ?? DEFAULT_SYNC_INTERVAL_MS;
    this.timer = setInterval(() => {
      // Fire-and-forget: tick() handles its own errors.
      void this.tick();
    }, intervalMs);

    // Guard: unref is absent in some non-Node timer implementations.
    if (this.timer.unref) {
      this.timer.unref();
    }
  }

  /** Stops the timer. An in-flight tick (if any) is not interrupted. */
  stop(): void {
    if (this.timer) {
      clearInterval(this.timer);
      this.timer = null;
    }
    this.metrics.isRunning = false;
  }

  /** Runs one sync pass immediately, outside the timer schedule. */
  async flush(): Promise<void> {
    await this.tick();
  }

  /**
   * Snapshot of current metrics. pendingChanges and dbSizeBytes are
   * recomputed on each call; the rest are cumulative counters.
   */
  getMetrics(): Readonly<SyncMetrics> {
    return {
      ...this.metrics,
      pendingChanges: this.changeLog.pendingCount(),
      dbSizeBytes: this.getDbSizeBytes(),
    };
  }

  /** Size of the SQLite file on disk; 0 for in-memory or missing files. */
  private getDbSizeBytes(): number {
    const path = this.config.sqlitePath;
    if (path === ':memory:' || !existsSync(path)) return 0;
    try {
      return statSync(path).size;
    } catch {
      return 0;
    }
  }

  /**
   * One sync pass: stage pending log entries as a queue job, then drain
   * the queue. Skipped entirely when S3 is unconfigured or a previous
   * tick is still running.
   *
   * NOTE(review): `err` is swallowed here and only counted in
   * syncErrors — there is no logging hook, so failures are invisible
   * except through getMetrics(). Confirm this is intentional.
   */
  private async tick(): Promise<void> {
    if (this.isProcessing || !this.s3) return;
    this.isProcessing = true;

    try {
      await this.enqueuePendingLogs();
      await this.processQueue();
    } catch (err) {
      this.metrics.syncErrors++;
    } finally {
      this.isProcessing = false;
    }
  }

  /**
   * Builds a batch from unsynced change-log entries and enqueues one
   * 'upload_log' job for it. At most one upload_log job may be
   * outstanding at a time; entries stay "pending" (synced = 0) until the
   * job succeeds, so nothing is lost if this batch is skipped.
   */
  private async enqueuePendingLogs(): Promise<void> {
    const maxBatch = this.config.maxBatchSize ?? DEFAULT_MAX_BATCH_SIZE;
    const pending = this.changeLog.pendingEntries(maxBatch);
    if (pending.length === 0) return;

    const batch = this.changeLog.buildBatch(pending);
    // s3 is non-null here: tick() returns early when it is unset.
    const s3Key = this.s3!.logKey(this.nodeId, batch.fromSequence, batch.toSequence);

    const alreadyQueued = this.queue.hasPendingOfType('upload_log');
    if (!alreadyQueued) {
      const payload: UploadLogPayload = {
        fromSequence: batch.fromSequence,
        toSequence: batch.toSequence,
        s3Key,
      };
      // The full batch rides along in the payload so the job is
      // self-contained even if the log is pruned later.
      this.queue.enqueue('upload_log', { ...payload, batch });
    }
  }

  /** Claims up to 3 jobs and runs them concurrently; failures are isolated. */
  private async processQueue(): Promise<void> {
    const jobs = this.queue.dequeue(3);

    // allSettled: one failing job must not abort its siblings.
    await Promise.allSettled(
      jobs.map((job) => this.processJob(job))
    );

    this.queue.purgeCompleted();
  }

  /**
   * Executes a single queue job with retry/backoff. On success the job is
   * marked done; on exhausted retries it is marked failed with the error.
   *
   * For 'upload_log': uploads the serialized batch, marks the range
   * synced, advances the manifest's log sequence, and may trigger a
   * snapshot.
   * For 'upload_snapshot': takes a fresh snapshot and records it in the
   * manifest.
   */
  private async processJob(job: SyncJob): Promise<void> {
    const retryConfig = { ...DEFAULT_RETRY_CONFIG, ...this.config.retryConfig };
    const uploadOptions = {
      compress: this.config.compression !== false,
      encryptionKey: this.config.encryption?.key,
    };

    try {
      await withRetry(
        async () => {
          if (job.type === 'upload_log') {
            const p = JSON.parse(job.payload) as UploadLogPayload & { batch: unknown };
            const batchBuffer = Buffer.from(JSON.stringify(p.batch), 'utf8');
            await this.s3!.upload(p.s3Key, batchBuffer, uploadOptions);
            this.changeLog.markSynced(p.fromSequence, p.toSequence);

            // Read-modify-write of the manifest; Math.max guards against
            // moving the sequence backwards if jobs complete out of order.
            const manifest = await this.s3!.getManifest(this.nodeId);
            const latestSeq = Math.max(
              manifest?.latestLogSequence ?? 0,
              p.toSequence
            );
            await this.s3!.putManifest(this.nodeId, {
              nodeId: this.nodeId,
              latestSnapshotKey: manifest?.latestSnapshotKey ?? null,
              latestSnapshotTimestamp: manifest?.latestSnapshotTimestamp ?? null,
              latestLogSequence: latestSeq,
              updatedAt: Date.now(),
            });

            this.metrics.totalSynced += (p.toSequence - p.fromSequence + 1);
            this.metrics.lastSyncAt = Date.now();

            await this.maybeSnapshot(latestSeq);
          } else if (job.type === 'upload_snapshot') {
            const p = JSON.parse(job.payload) as UploadSnapshotPayload;
            if (this.snapshotManager) {
              // The snapshot manager generates its own key/timestamp, so
              // the payload's s3Key/timestamp are effectively unused.
              const { key, timestamp } = await this.snapshotManager.takeAndUpload();
              const manifest = await this.s3!.getManifest(this.nodeId);
              await this.s3!.putManifest(this.nodeId, {
                nodeId: this.nodeId,
                latestSnapshotKey: key,
                latestSnapshotTimestamp: timestamp,
                latestLogSequence: manifest?.latestLogSequence ?? 0,
                updatedAt: Date.now(),
              });
              this.metrics.lastSnapshotAt = Date.now();
            }
            // NOTE(review): p is parsed but intentionally discarded (see
            // above); `void p` suppresses the unused-variable warning.
            void p;
          }
        },
        retryConfig
      );

      this.queue.markDone(job.id);
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      this.queue.markFailed(job.id, msg);
      this.metrics.syncErrors++;
    }
  }

  /**
   * Enqueues a snapshot job when the synced sequence crosses a multiple
   * of the configured threshold (at most one snapshot job outstanding).
   *
   * NOTE(review): the exact-modulus check fires only when latestSequence
   * lands precisely on a multiple of the threshold; since batches advance
   * the sequence in jumps, thresholds can be skipped — confirm whether
   * "sequence advanced by >= N since last snapshot" was intended.
   */
  private async maybeSnapshot(latestSequence: number): Promise<void> {
    const threshold = this.config.snapshotEveryNLogs ?? DEFAULT_SNAPSHOT_EVERY_N_LOGS;
    if (latestSequence > 0 && latestSequence % threshold === 0) {
      if (!this.queue.hasPendingOfType('upload_snapshot')) {
        const payload: UploadSnapshotPayload = {
          timestamp: Date.now(),
          s3Key: this.s3!.snapshotKey(this.nodeId, Date.now()),
          dbPath: this.config.sqlitePath,
        };
        this.queue.enqueue('upload_snapshot', payload);
      }
    }
  }

  /**
   * Takes and uploads a snapshot immediately (bypassing the queue) and
   * updates the manifest. No-op when S3/snapshots are unconfigured.
   */
  async triggerSnapshot(): Promise<void> {
    if (!this.snapshotManager || !this.s3) return;
    const { key, timestamp } = await this.snapshotManager.takeAndUpload();
    const manifest = await this.s3.getManifest(this.nodeId);
    await this.s3.putManifest(this.nodeId, {
      nodeId: this.nodeId,
      latestSnapshotKey: key,
      latestSnapshotTimestamp: timestamp,
      latestLogSequence: manifest?.latestLogSequence ?? 0,
      updatedAt: Date.now(),
    });
    this.metrics.lastSnapshotAt = Date.now();
  }
}
|