@docstack/pouchdb-adapter-googledrive 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +6 -0
- package/DOCUMENTATION.md +54 -0
- package/README.md +78 -0
- package/error.log +21 -0
- package/jest.config.js +8 -0
- package/lib/adapter.d.ts +17 -0
- package/lib/adapter.js +440 -0
- package/lib/cache.d.ts +12 -0
- package/lib/cache.js +42 -0
- package/lib/drive.d.ts +69 -0
- package/lib/drive.js +573 -0
- package/lib/index.d.ts +1 -0
- package/lib/index.js +7 -0
- package/lib/types.d.ts +89 -0
- package/lib/types.js +2 -0
- package/package.json +39 -0
package/lib/drive.d.ts
ADDED
@@ -0,0 +1,69 @@
+ import { GoogleDriveAdapterOptions, ChangeEntry, IndexEntry } from './types';
+ /**
+  * DriveHandler - Lazy Loading Implementation
+  *
+  * Storage structure:
+  * /db-folder/
+  * ├── _meta.json
+  * ├── snapshot-index.json   # Map<DocId, IndexEntry>
+  * ├── snapshot-data.json    # Map<DocId, DocBody>
+  * └── changes-*.ndjson      # Append logs
+  */
+ export declare class DriveHandler {
+     private drive;
+     private folderId;
+     private folderName;
+     private parents;
+     private compactionThreshold;
+     private compactionSizeThreshold;
+     private meta;
+     private metaEtag;
+     private index;
+     private docCache;
+     private pendingChanges;
+     private currentLogSizeEstimate;
+     private listeners;
+     private pollingInterval;
+     constructor(options: GoogleDriveAdapterOptions, dbName: string);
+     get seq(): number;
+     /** Load the database (Index Only) */
+     load(): Promise<void>;
+     private filesFromLegacySnapshot;
+     /**
+      * Get a document body.
+      * Index -> Cache -> Fetch
+      */
+     get(id: string): Promise<any | null>;
+     /** Get multiple docs (Atomic-ish) used for _allDocs */
+     getMulti(ids: string[]): Promise<any[]>;
+     /** Return all keys in Index */
+     getIndexKeys(): string[];
+     /** Get metadata for a specific ID from Index */
+     getIndexEntry(id: string): IndexEntry | undefined;
+     /** Single change wrapper */
+     appendChange(change: ChangeEntry): Promise<void>;
+     /** Append changes with OCC */
+     appendChanges(changes: ChangeEntry[]): Promise<void>;
+     private tryAppendChanges;
+     /** Update Index with a new change */
+     private updateIndex;
+     private checkConflicts;
+     /** Compact: Create SnapshotIndex + SnapshotData */
+     compact(): Promise<void>;
+     private atomicUpdateMeta;
+     private findOrCreateFolder;
+     private findFile;
+     private downloadJson;
+     private downloadFileAny;
+     private downloadNdjson;
+     private writeChangeFile;
+     private saveMeta;
+     private countTotalChanges;
+     private cleanupOldFiles;
+     private startPolling;
+     private notifyListeners;
+     onChange(cb: any): void;
+     stopPolling(): void;
+     deleteFolder(): Promise<void>;
+     getNextSeq(): number;
+ }
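As a concrete illustration of the storage layout described in the header comment: the _meta.json fields below mirror the object initialized in lib/drive.js, and the change-log line shows the ChangeEntry fields (id, rev, seq, deleted, doc) that lib/drive.js reads back when replaying logs. The literal values and the <fileId> placeholders are illustrative only:

    // _meta.json — one per database folder
    { "seq": 12, "changeLogIds": ["<fileId>"], "snapshotIndexId": "<fileId>",
      "lastCompaction": 1700000000000, "dbName": "mydb" }

    // one line of a changes-*.ndjson append log
    { "id": "doc1", "rev": "3-9f2c", "seq": 12, "deleted": false,
      "doc": { "_id": "doc1", "_rev": "3-9f2c", "title": "hello" } }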
package/lib/drive.js
ADDED
@@ -0,0 +1,573 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.DriveHandler = void 0;
+ const cache_1 = require("./cache");
+ const DEFAULT_COMPACTION_THRESHOLD = 100; // entries
+ const DEFAULT_SIZE_THRESHOLD = 1024 * 1024; // 1MB
+ const DEFAULT_CACHE_SIZE = 1000; // Number of docs
+ /**
+  * DriveHandler - Lazy Loading Implementation
+  *
+  * Storage structure:
+  * /db-folder/
+  * ├── _meta.json
+  * ├── snapshot-index.json   # Map<DocId, IndexEntry>
+  * ├── snapshot-data.json    # Map<DocId, DocBody>
+  * └── changes-*.ndjson      # Append logs
+  */
+ class DriveHandler {
+     constructor(options, dbName) {
+         this.folderId = null;
+         this.meta = {
+             seq: 0,
+             changeLogIds: [],
+             snapshotIndexId: null,
+             lastCompaction: null,
+             dbName: ''
+         };
+         this.metaEtag = null;
+         // In-Memory Index: ID -> Metadata/Pointer
+         this.index = {};
+         this.pendingChanges = [];
+         this.currentLogSizeEstimate = 0;
+         this.listeners = [];
+         this.pollingInterval = null;
+         this.drive = options.drive;
+         this.folderId = options.folderId || null;
+         this.folderName = options.folderName || dbName;
+         this.parents = options.parents || [];
+         this.compactionThreshold = options.compactionThreshold || DEFAULT_COMPACTION_THRESHOLD;
+         this.compactionSizeThreshold = options.compactionSizeThreshold || DEFAULT_SIZE_THRESHOLD;
+         this.meta.dbName = dbName;
+         this.docCache = new cache_1.LRUCache(options.cacheSize || DEFAULT_CACHE_SIZE);
+         if (options.pollingIntervalMs) {
+             this.startPolling(options.pollingIntervalMs);
+         }
+     }
+     // Public getter for Sequence (used by adapter)
+     get seq() {
+         return this.meta.seq;
+     }
+     /** Load the database (Index Only) */
+     async load() {
+         if (!this.folderId) {
+             this.folderId = await this.findOrCreateFolder();
+         }
+         const metaFile = await this.findFile('_meta.json');
+         if (metaFile) {
+             this.meta = await this.downloadJson(metaFile.id);
+             this.metaEtag = metaFile.etag || null;
+         }
+         else {
+             await this.saveMeta(this.meta);
+         }
+         // Initialize Index
+         this.index = {};
+         // 1. Load Snapshot Index
+         if (this.meta.snapshotIndexId) {
+             try {
+                 // Try strictly as new format first
+                 const snapshotIdx = await this.downloadJson(this.meta.snapshotIndexId);
+                 // Check if it's actually a legacy snapshot (has 'docs' with bodies)
+                 if (snapshotIdx.docs) {
+                     // Migration Path: Handle legacy snapshot
+                     this.filesFromLegacySnapshot(snapshotIdx);
+                 }
+                 else {
+                     this.index = snapshotIdx.entries || {};
+                     // We assume seq is synced with meta usually, but use snapshot's seq as base
+                 }
+             }
+             catch (e) {
+                 console.warn('Failed to load snapshot index', e);
+                 this.index = {};
+             }
+         }
+         else if (this.meta.snapshotId) {
+             // Legacy support: field was renamed
+             try {
+                 const legacySnapshot = await this.downloadJson(this.meta.snapshotId);
+                 this.filesFromLegacySnapshot(legacySnapshot);
+             }
+             catch (e) {
+                 console.warn('Failed to load legacy snapshot', e);
+             }
+         }
+         // 2. Replay Change Logs (Metadata only updates)
+         this.pendingChanges = [];
+         this.currentLogSizeEstimate = 0;
+         for (const logId of this.meta.changeLogIds) {
+             const changes = await this.downloadNdjson(logId);
+             this.currentLogSizeEstimate += 100 * changes.length;
+             for (const change of changes) {
+                 this.updateIndex(change, logId);
+                 // We do NOT load the body into cache automatically,
+                 // but we must invalidate the cache if we had old data
+                 if (this.docCache.get(change.id)) {
+                     this.docCache.remove(change.id);
+                 }
+             }
+         }
+     }
+     // Migration helper
+     filesFromLegacySnapshot(snapshot) {
+         // Convert a legacy snapshot (docs in memory) to an Index.
+         // The legacy snapshot has no per-doc file pointer, so each doc is marked
+         // as living in the snapshot file itself. Lazy loading still needs to be
+         // able to fetch them, so we cache them ALL now (they are already
+         // downloaded) and index them; get() re-downloads the legacy snapshot
+         // if an entry has been evicted.
+         for (const [id, doc] of Object.entries(snapshot.docs)) {
+             this.index[id] = {
+                 rev: doc._rev,
+                 seq: snapshot.seq, // Approximate
+                 location: { fileId: 'LEGACY_MEMORY' } // Special validity marker
+             };
+             this.docCache.put(id, doc);
+         }
+     }
+     /**
+      * Get a document body.
+      * Index -> Cache -> Fetch
+      */
+     async get(id) {
+         const entry = this.index[id];
+         if (!entry)
+             return null;
+         if (entry.deleted)
+             return null;
+         // 1. Check Cache
+         const cached = this.docCache.get(id);
+         if (cached)
+             return cached;
+         // 2. Fetch from Drive
+         // Legacy entries live in the old snapshot file. They were cached on load
+         // but may have been evicted since, so re-download the legacy snapshot
+         // when needed; compaction will eventually rewrite them with real pointers.
+         if (entry.location.fileId === 'LEGACY_MEMORY') {
+             if (this.meta.snapshotId) {
+                 const legacy = await this.downloadJson(this.meta.snapshotId);
+                 if (legacy.docs[id]) {
+                     this.docCache.put(id, legacy.docs[id]);
+                     return legacy.docs[id];
+                 }
+             }
+             return null; // Should not happen
+         }
+         const fileId = entry.location.fileId;
+         // The pointer may reference a change file (NDJSON) or a snapshot file (JSON).
+         // We could store the type, but for now we download the file and dispatch
+         // on its shape. Optimization: if many docs share one file, we could cache
+         // that file's contents.
+         const content = await this.downloadFileAny(fileId);
+         let doc = null;
+         if (Array.isArray(content)) {
+             // It's a change log (array of entries):
+             // find the *last* entry for this ID without mutating `content`
+             const match = [...content].reverse().find((c) => c.id === id);
+             doc = match ? match.doc : null;
+         }
+         else if (content.docs) {
+             // It's a snapshot-data chunk
+             doc = content.docs[id];
+         }
+         else {
+             // Single doc file? (Not used yet)
+             doc = content;
+         }
+         if (doc) {
+             this.docCache.put(id, doc);
+             doc._rev = entry.rev; // Ensure consistent rev
+         }
+         return doc;
+     }
+     /** Get multiple docs (Atomic-ish) used for _allDocs */
+     async getMulti(ids) {
+         // Group lookups by fileId to reduce requests (instead of a naive parallel fetch)
+         const byFile = {};
+         const results = {};
+         for (const id of ids) {
+             const entry = this.index[id];
+             if (!entry || entry.deleted) {
+                 results[id] = null;
+                 continue;
+             }
+             // Check cache
+             const cached = this.docCache.get(id);
+             if (cached) {
+                 results[id] = cached;
+                 continue;
+             }
+             // Group by file
+             if (entry.location.fileId === 'LEGACY_MEMORY') {
+                 // Handle legacy separately
+                 const doc = await this.get(id); // fallback
+                 results[id] = doc;
+             }
+             else {
+                 if (!byFile[entry.location.fileId])
+                     byFile[entry.location.fileId] = [];
+                 byFile[entry.location.fileId].push(id);
+             }
+         }
+         // Fetch files
+         for (const [fileId, docIds] of Object.entries(byFile)) {
+             try {
+                 const content = await this.downloadFileAny(fileId);
+                 // Reverse a copy once per file; calling reverse() per doc would
+                 // mutate `content` and flip the order on every iteration.
+                 const reversed = Array.isArray(content) ? [...content].reverse() : null;
+                 for (const docId of docIds) {
+                     let doc = null;
+                     if (reversed) {
+                         const match = reversed.find((c) => c.id === docId);
+                         doc = match ? match.doc : null;
+                     }
+                     else if (content.docs) {
+                         doc = content.docs[docId];
+                     }
+                     if (doc) {
+                         // Add entry.rev to doc just in case
+                         if (this.index[docId])
+                             doc._rev = this.index[docId].rev;
+                         this.docCache.put(docId, doc);
+                         results[docId] = doc;
+                     }
+                     else {
+                         results[docId] = null;
+                     }
+                 }
+             }
+             catch (e) {
+                 console.error(`Failed to fetch file ${fileId} for docs ${docIds}`, e);
+                 // Return nulls
+                 docIds.forEach(id => results[id] = null);
+             }
+         }
+         return ids.map(id => results[id]);
+     }
+     /** Return all keys in Index */
+     getIndexKeys() {
+         return Object.keys(this.index);
+     }
+     /** Get metadata for a specific ID from Index */
+     getIndexEntry(id) {
+         return this.index[id];
+     }
+     /** Single change wrapper */
+     async appendChange(change) {
+         return this.appendChanges([change]);
+     }
+     /** Append changes with OCC */
+     async appendChanges(changes) {
+         const MAX_RETRIES = 5;
+         let attempt = 0;
+         while (attempt < MAX_RETRIES) {
+             try {
+                 return await this.tryAppendChanges(changes);
+             }
+             catch (err) {
+                 if (err.code === 412 || err.code === 409) {
+                     // Reload and RETRY
+                     await this.load();
+                     // Check conflicts against Index (metadata is sufficient)
+                     this.checkConflicts(changes);
+                     // Re-sequence
+                     let currentSeq = this.meta.seq;
+                     for (const change of changes) {
+                         currentSeq++;
+                         change.seq = currentSeq;
+                     }
+                     attempt++;
+                     await new Promise(r => setTimeout(r, Math.random() * 500 + 100));
+                     continue;
+                 }
+                 throw err;
+             }
+         }
+         throw new Error('Failed to append changes');
+     }
+     async tryAppendChanges(changes) {
+         // 1. Write Log File (Upload Data)
+         const fileId = await this.writeChangeFile(changes);
+         // 2. Prepare speculative meta update
+         const nextMeta = { ...this.meta };
+         nextMeta.changeLogIds = [...nextMeta.changeLogIds, fileId];
+         nextMeta.seq = changes[changes.length - 1].seq;
+         // 3. Commit Lock
+         await this.saveMeta(nextMeta, this.metaEtag);
+         // 4. Update Local State
+         this.meta = nextMeta;
+         for (const change of changes) {
+             this.updateIndex(change, fileId);
+             if (change.doc) {
+                 this.docCache.put(change.id, change.doc);
+             }
+             else if (change.deleted) {
+                 this.docCache.remove(change.id);
+             }
+         }
+         // 5. Compaction Check
+         // Count changes since the last compaction *pointer*, not just list length
+         const totalChanges = await this.countTotalChanges();
+         if (totalChanges >= this.compactionThreshold ||
+             this.currentLogSizeEstimate >= this.compactionSizeThreshold) {
+             this.compact().catch(e => console.error('Compaction failed', e));
+         }
+     }
+     /** Update Index with a new change */
+     updateIndex(change, fileId) {
+         this.index[change.id] = {
+             rev: change.rev,
+             seq: change.seq,
+             deleted: !!change.deleted,
+             location: { fileId }
+         };
+     }
+     checkConflicts(changes) {
+         for (const change of changes) {
+             const docId = change.id;
+             const newRevNum = parseInt(change.rev.split('-')[0], 10);
+             const existing = this.index[docId];
+             if (existing) {
+                 const currentRevNum = parseInt(existing.rev.split('-')[0], 10);
+                 if (currentRevNum >= newRevNum) {
+                     const err = new Error('Document update conflict');
+                     err.status = 409;
+                     err.name = 'conflict'; // PouchDB expectation
+                     throw err;
+                 }
+             }
+         }
+     }
+     /** Compact: Create SnapshotIndex + SnapshotData */
+     async compact() {
+         const snapshotSeq = this.meta.seq;
+         const oldLogIds = [...this.meta.changeLogIds];
+         const oldIndexId = this.meta.snapshotIndexId;
+         // 1. Fetch ALL active documents to build the new snapshot-data file.
+         // This is the one time we download everything that is not cached.
+         // Optimization: we could reuse existing `snapshot-data` chunks and only
+         // append new data to a new chunk, but for simplicity: Merge All.
+         const allIds = Object.keys(this.index).filter(id => !this.index[id].deleted);
+         const allDocs = await this.getMulti(allIds);
+         const snapshotData = { docs: {} };
+         allIds.forEach((id, i) => {
+             if (allDocs[i])
+                 snapshotData.docs[id] = allDocs[i];
+         });
+         // 2. Upload Data File
+         const dataContent = JSON.stringify(snapshotData);
+         const dataRes = await this.drive.files.create({
+             requestBody: {
+                 name: `snapshot-data-${Date.now()}.json`,
+                 parents: [this.folderId],
+                 mimeType: 'application/json'
+             },
+             media: { mimeType: 'application/json', body: dataContent },
+             fields: 'id'
+         });
+         const dataFileId = dataRes.data.id;
+         // 3. Create Index pointing to this Data File
+         const newIndexEntries = {};
+         for (const id of Object.keys(snapshotData.docs)) {
+             newIndexEntries[id] = {
+                 rev: this.index[id].rev,
+                 seq: this.index[id].seq,
+                 location: { fileId: dataFileId }
+             };
+         }
+         const snapshotIndex = {
+             entries: newIndexEntries,
+             seq: snapshotSeq,
+             createdAt: Date.now()
+         };
+         const indexContent = JSON.stringify(snapshotIndex);
+         const indexRes = await this.drive.files.create({
+             requestBody: {
+                 name: `snapshot-index-${Date.now()}.json`,
+                 parents: [this.folderId],
+                 mimeType: 'application/json'
+             },
+             media: { mimeType: 'application/json', body: indexContent },
+             fields: 'id'
+         });
+         const newIndexId = indexRes.data.id;
+         // 4. Update Meta
+         await this.atomicUpdateMeta((latest) => {
+             const remainingLogs = latest.changeLogIds.filter(id => !oldLogIds.includes(id));
+             return {
+                 ...latest,
+                 snapshotIndexId: newIndexId,
+                 changeLogIds: remainingLogs,
+                 lastCompaction: Date.now()
+             };
+         });
+         // 5. Cleanup
+         this.cleanupOldFiles(oldIndexId, oldLogIds); // And potentially old data files if we tracked them
+         this.currentLogSizeEstimate = 0;
+     }
+     // ... Helpers (atomicUpdateMeta, saveMeta, writeChangeFile same as before) ...
+     async atomicUpdateMeta(modifier) {
+         const MAX_RETRIES = 5;
+         let attempt = 0;
+         while (attempt < MAX_RETRIES) {
+             try {
+                 const metaFile = await this.findFile('_meta.json');
+                 if (!metaFile)
+                     throw new Error('Meta missing');
+                 const validMeta = await this.downloadJson(metaFile.id);
+                 const newMeta = modifier(validMeta);
+                 await this.saveMeta(newMeta, metaFile.etag);
+                 this.meta = newMeta;
+                 return;
+             }
+             catch (err) {
+                 if (err.code === 412 || err.code === 409) {
+                     attempt++;
+                     await new Promise(r => setTimeout(r, Math.random() * 500 + 100));
+                     continue;
+                 }
+                 throw err;
+             }
+         }
+     }
+     // Reused helpers
+     async findOrCreateFolder() {
+         const q = `name = '${this.folderName}' and mimeType = 'application/vnd.google-apps.folder' and trashed = false`;
+         const res = await this.drive.files.list({ q, spaces: 'drive', fields: 'files(id)' });
+         if (res.data.files && res.data.files.length > 0)
+             return res.data.files[0].id;
+         const createRes = await this.drive.files.create({
+             requestBody: { name: this.folderName, mimeType: 'application/vnd.google-apps.folder', parents: this.parents.length ? this.parents : undefined },
+             fields: 'id'
+         });
+         return createRes.data.id;
+     }
+     async findFile(name) {
+         const q = `name = '${name}' and '${this.folderId}' in parents and trashed = false`;
+         const res = await this.drive.files.list({ q, spaces: 'drive', fields: 'files(id, etag)' });
+         if (res.data.files && res.data.files.length > 0)
+             return { id: res.data.files[0].id, etag: res.data.files[0].etag };
+         return null;
+     }
+     async downloadJson(fileId) {
+         const res = await this.drive.files.get({ fileId, alt: 'media' });
+         return res.data;
+     }
+     async downloadFileAny(fileId) {
+         const res = await this.drive.files.get({ fileId, alt: 'media' });
+         if (typeof res.data === 'string') {
+             // NDJSON or JSON string
+             try {
+                 return JSON.parse(res.data);
+             }
+             catch {
+                 // NDJSON?
+                 const lines = res.data.trim().split('\n').filter((l) => l);
+                 return lines.map((line) => JSON.parse(line));
+             }
+         }
+         return res.data;
+     }
+     async downloadNdjson(fileId) {
+         const res = await this.drive.files.get({ fileId, alt: 'media' });
+         const content = typeof res.data === 'string' ? res.data : JSON.stringify(res.data);
+         const lines = content.trim().split('\n').filter((l) => l);
+         return lines.map((line) => JSON.parse(line));
+     }
+     async writeChangeFile(changes) {
+         const lines = changes.map(c => JSON.stringify(c)).join('\n') + '\n';
+         const startSeq = changes[0].seq;
+         const name = `changes-${startSeq}-${Math.random().toString(36).substring(7)}.ndjson`;
+         const res = await this.drive.files.create({
+             requestBody: { name, parents: [this.folderId], mimeType: 'application/x-ndjson' },
+             media: { mimeType: 'application/x-ndjson', body: lines },
+             fields: 'id'
+         });
+         this.currentLogSizeEstimate += new Blob([lines]).size;
+         return res.data.id;
+     }
+     async saveMeta(meta, expectedEtag = null) {
+         const content = JSON.stringify(meta);
+         const metaFile = await this.findFile('_meta.json');
+         if (metaFile) {
+             const res = await this.drive.files.update({
+                 fileId: metaFile.id,
+                 headers: expectedEtag ? { 'If-Match': expectedEtag } : undefined,
+                 media: { mimeType: 'application/json', body: content },
+                 fields: 'id, etag'
+             });
+             this.metaEtag = res.data.etag;
+         }
+         else {
+             const res = await this.drive.files.create({
+                 requestBody: { name: '_meta.json', parents: [this.folderId], mimeType: 'application/json' },
+                 media: { mimeType: 'application/json', body: content },
+                 fields: 'id, etag'
+             });
+             this.metaEtag = res.data.etag;
+         }
+     }
+     async countTotalChanges() {
+         // Ideally this would be meta.seq minus the snapshot's seq, but the
+         // snapshot seq is not stored in meta directly, so approximate from the
+         // pending-changes count and rely on the log-size threshold as the real
+         // compaction trigger.
+         return this.pendingChanges.length + 10; // dummy for now, rely on log size
+     }
+     async cleanupOldFiles(oldIndexId, oldLogIds) {
+         if (oldIndexId) {
+             try {
+                 await this.drive.files.delete({ fileId: oldIndexId });
+             }
+             catch { }
+         }
+         for (const id of oldLogIds) {
+             try {
+                 await this.drive.files.delete({ fileId: id });
+             }
+             catch { }
+         }
+     }
+     startPolling(intervalMs) {
+         if (this.pollingInterval)
+             clearInterval(this.pollingInterval);
+         this.pollingInterval = setInterval(async () => {
+             try {
+                 const metaFile = await this.findFile('_meta.json');
+                 if (!metaFile)
+                     return;
+                 if (metaFile.etag !== this.metaEtag) {
+                     await this.load();
+                     this.notifyListeners();
+                 }
+             }
+             catch (err) {
+                 console.error('Polling error', err);
+             }
+         }, intervalMs);
+     }
+     notifyListeners() {
+         // Listeners receive a map of { id: { _id, _rev, _deleted } } built from
+         // the Index, not full document bodies; the adapter's change feed is
+         // expected to fetch bodies lazily via get() when it needs them.
+         const changes = {};
+         for (const [id, entry] of Object.entries(this.index)) {
+             changes[id] = { _id: id, _rev: entry.rev, _deleted: entry.deleted };
+         }
+         for (const l of this.listeners)
+             l(changes);
+     }
+     // For tests/debug
+     onChange(cb) { this.listeners.push(cb); }
+     stopPolling() {
+         if (this.pollingInterval)
+             clearInterval(this.pollingInterval);
+     }
+     async deleteFolder() {
+         if (this.folderId)
+             await this.drive.files.delete({ fileId: this.folderId });
+     }
+     getNextSeq() { return this.meta.seq + 1; }
+ }
+ exports.DriveHandler = DriveHandler;
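A minimal sketch of driving this class directly, assuming an authenticated googleapis drive_v3 client; the deep import path, the `auth` setup, and the literal document values are assumptions, while the constructor signature and method names come from lib/drive.d.ts above:

    const { google } = require('googleapis');
    const { DriveHandler } = require('@docstack/pouchdb-adapter-googledrive/lib/drive');

    async function demo(auth) {
        // `auth` is assumed to be a configured OAuth2 client with Drive scope
        const drive = google.drive({ version: 'v3', auth });
        const handler = new DriveHandler({ drive, folderName: 'mydb', pollingIntervalMs: 30000 }, 'mydb');
        await handler.load(); // fetch _meta.json, load snapshot index, replay change logs
        const seq = handler.getNextSeq();
        await handler.appendChange({ id: 'doc1', rev: '1-abc', seq, doc: { _id: 'doc1', _rev: '1-abc' } });
        return handler.get('doc1'); // Index -> Cache -> Fetch
    }

On a 412/409 from the meta commit, appendChanges() reloads, re-checks conflicts, re-sequences, and retries up to five times with randomized backoff, so concurrent writers converge on a single linear seq order.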
package/lib/index.d.ts
ADDED
@@ -0,0 +1 @@
+ export default function (PouchDB: any): void;
package/lib/index.js
ADDED
@@ -0,0 +1,7 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.default = default_1;
+ const adapter_1 = require("./adapter");
+ function default_1(PouchDB) {
+     PouchDB.adapter('googledrive', (0, adapter_1.GoogleDriveAdapter)(PouchDB), true);
+ }
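The entry point registers the adapter under the name 'googledrive'. Typical wiring might look like the sketch below; passing the Drive client and tuning options through the PouchDB constructor is an assumption based on the GoogleDriveAdapterOptions declaration, since this diff does not show how lib/adapter.js resolves them:

    const PouchDB = require('pouchdb');
    const GoogleDrivePlugin = require('@docstack/pouchdb-adapter-googledrive').default;

    PouchDB.plugin(GoogleDrivePlugin); // invokes the exported function with the PouchDB constructor

    // Hypothetical wiring: `drive` is an authenticated drive_v3 client as in the
    // earlier sketch; option names follow GoogleDriveAdapterOptions
    const db = new PouchDB('mydb', { adapter: 'googledrive', drive, compactionThreshold: 100 });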