@backloghq/opslog 0.1.4 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/lock.d.ts +12 -0
- package/dist/lock.js +64 -0
- package/dist/store.d.ts +19 -1
- package/dist/store.js +151 -67
- package/dist/types.d.ts +4 -0
- package/dist/validate.js +3 -0
- package/dist/wal.js +37 -10
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -131,14 +131,42 @@ await store.open(dir, {
|
|
|
131
131
|
checkpointOnClose: true, // Checkpoint when close() is called (default: true)
|
|
132
132
|
version: 1, // Schema version
|
|
133
133
|
migrate: (record, fromVersion) => record, // Migration function
|
|
134
|
+
readOnly: false, // Open in read-only mode (default: false)
|
|
134
135
|
});
|
|
135
136
|
```
|
|
136
137
|
|
|
138
|
+
## Read-Only Mode
|
|
139
|
+
|
|
140
|
+
Open a store for reading without acquiring the write lock. Useful for dashboards, backup processes, or multiple readers alongside a single writer.
|
|
141
|
+
|
|
142
|
+
```typescript
|
|
143
|
+
const reader = new Store();
|
|
144
|
+
await reader.open("./data", { readOnly: true });
|
|
145
|
+
|
|
146
|
+
// All reads work
|
|
147
|
+
const tasks = reader.all();
|
|
148
|
+
const active = reader.filter((t) => t.status === "active");
|
|
149
|
+
|
|
150
|
+
// All mutations throw
|
|
151
|
+
await reader.set("x", value); // Error: Store is read-only
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
Read-only stores load the latest snapshot and replay ops on open. They do not checkpoint on close. Multiple read-only stores can open the same directory concurrently alongside one writer.
|
|
155
|
+
|
|
156
|
+
## Concurrency
|
|
157
|
+
|
|
158
|
+
All state-mutating operations (`set`, `delete`, `batch`, `undo`, `compact`, `archive`) are serialized through an internal async mutex. This prevents interleaving of concurrent mutations — e.g., `compact()` swapping the ops file while `set()` is appending, or `undo()` truncating while `set()` is writing.
|
|
159
|
+
|
|
160
|
+
Read operations (`get`, `all`, `filter`, `count`, `has`, `entries`) are synchronous and lock-free.
|
|
161
|
+
|
|
162
|
+
An advisory directory write lock (`.lock` file with PID) prevents two processes from opening the same store. Stale locks from crashed processes are automatically recovered.
|
|
163
|
+
|
|
137
164
|
## Crash Safety
|
|
138
165
|
|
|
139
166
|
- **Ops file**: append-only writes. A crash mid-append loses at most the last operation. Malformed lines are skipped on recovery.
|
|
140
167
|
- **Snapshots**: immutable. Written to a temp file, then atomically renamed.
|
|
141
168
|
- **Manifest**: atomically replaced via temp-file-rename. Always points to a valid snapshot.
|
|
169
|
+
- **Undo**: scans only the tail of the ops file to locate the previous record boundary, then truncates with a single `ftruncate()` call — cost proportional to the size of the last operation, not the file.
|
|
142
170
|
|
|
143
171
|
No data corruption on crash. At most one in-flight operation is lost.
|
|
144
172
|
|
package/dist/index.d.ts
CHANGED
package/dist/index.js
CHANGED
package/dist/lock.d.ts
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { FileHandle } from "node:fs/promises";
|
|
2
|
+
/**
|
|
3
|
+
* Acquire an advisory write lock on a directory.
|
|
4
|
+
* Returns a FileHandle that must be passed to releaseLock() on close.
|
|
5
|
+
* Throws if another live process holds the lock.
|
|
6
|
+
* Automatically recovers stale locks from crashed processes.
|
|
7
|
+
*/
|
|
8
|
+
export declare function acquireLock(dir: string): Promise<FileHandle>;
|
|
9
|
+
/**
|
|
10
|
+
* Release the advisory write lock.
|
|
11
|
+
*/
|
|
12
|
+
export declare function releaseLock(dir: string, fh: FileHandle): Promise<void>;
|
package/dist/lock.js
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import { open, readFile, unlink } from "node:fs/promises";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
const LOCK_FILE = ".lock";
|
|
4
|
+
/**
 * Check whether a process with the given PID is currently running.
 *
 * Sends signal 0, which performs the existence/permission check without
 * delivering any signal.
 *
 * Bug fix: `process.kill(pid, 0)` throws EPERM when the target process
 * exists but belongs to another user. The original treated any exception
 * as "dead", so a lock held by a live process under a different UID was
 * wrongly recovered as stale. EPERM proves the process is alive.
 *
 * @param {number} pid - Process ID to probe.
 * @returns {boolean} true if the process exists (even if owned by another user).
 */
function isProcessAlive(pid) {
    try {
        process.kill(pid, 0);
        return true;
    }
    catch (err) {
        // EPERM: the process exists but we lack permission to signal it.
        // Any other error (ESRCH, ERANGE, ...) means no such process.
        return err.code === "EPERM";
    }
}
|
|
13
|
+
/**
|
|
14
|
+
* Acquire an advisory write lock on a directory.
|
|
15
|
+
* Returns a FileHandle that must be passed to releaseLock() on close.
|
|
16
|
+
* Throws if another live process holds the lock.
|
|
17
|
+
* Automatically recovers stale locks from crashed processes.
|
|
18
|
+
*/
|
|
19
|
+
/**
 * Acquire an advisory write lock on a directory.
 * Returns a FileHandle that must be passed to releaseLock() on close.
 * Throws if another live process holds the lock.
 * Automatically recovers stale locks from crashed processes.
 */
export async function acquireLock(dir) {
    const lockPath = join(dir, LOCK_FILE);
    // Retry loop: each pass either wins the lock, throws because a live
    // holder exists, or clears a stale/vanished lock file and tries again.
    for (;;) {
        // Attempt an exclusive create; success means we own the lock.
        try {
            const fh = await open(lockPath, "wx");
            await fh.writeFile(String(process.pid), "utf-8");
            return fh;
        }
        catch (err) {
            if (err.code !== "EEXIST")
                throw err;
        }
        // The lock file already exists — inspect the recorded holder PID.
        let raw;
        try {
            raw = await readFile(lockPath, "utf-8");
        }
        catch {
            // Vanished between our open() and readFile() — try again.
            continue;
        }
        const holderPid = parseInt(raw, 10);
        if (!isNaN(holderPid) && isProcessAlive(holderPid)) {
            throw new Error(`Store is locked by process ${holderPid}. If this is stale, delete ${lockPath}`);
        }
        // Holder is dead (or the file held garbage) — reclaim and retry.
        try {
            await unlink(lockPath);
        }
        catch {
            // A concurrent process may have removed it first.
        }
    }
}
|
|
53
|
+
/**
|
|
54
|
+
* Release the advisory write lock.
|
|
55
|
+
*/
|
|
56
|
+
/**
 * Release the advisory write lock: close the handle, then best-effort
 * remove the lock file so other writers can acquire it.
 */
export async function releaseLock(dir, fh) {
    await fh.close();
    const lockPath = join(dir, LOCK_FILE);
    try {
        await unlink(lockPath);
    }
    catch {
        // Lock file already removed (e.g. by a concurrent stale-lock recovery).
    }
}
|
package/dist/store.d.ts
CHANGED
|
@@ -12,6 +12,15 @@ export declare class Store<T = Record<string, unknown>> {
|
|
|
12
12
|
private archivedRecordCount;
|
|
13
13
|
private batching;
|
|
14
14
|
private batchOps;
|
|
15
|
+
private _lock;
|
|
16
|
+
private lockFh;
|
|
17
|
+
/**
|
|
18
|
+
* Serialize all state-mutating operations through a promise chain.
|
|
19
|
+
* This prevents interleaving of async mutations (e.g. compact + set,
|
|
20
|
+
* undo + set) which could corrupt the WAL or in-memory state.
|
|
21
|
+
* Read operations remain synchronous and lock-free.
|
|
22
|
+
*/
|
|
23
|
+
private serialize;
|
|
15
24
|
open(dir: string, options?: StoreOptions): Promise<void>;
|
|
16
25
|
close(): Promise<void>;
|
|
17
26
|
get(id: string): T | undefined;
|
|
@@ -28,10 +37,19 @@ export declare class Store<T = Record<string, unknown>> {
|
|
|
28
37
|
getOps(since?: string): Operation<T>[];
|
|
29
38
|
compact(): Promise<void>;
|
|
30
39
|
archive(predicate: (value: T, id: string) => boolean, segment?: string): Promise<number>;
|
|
31
|
-
loadArchive(segment: string): Promise<Map<string, T>>;
|
|
32
40
|
listArchiveSegments(): string[];
|
|
41
|
+
loadArchive(segment: string): Promise<Map<string, T>>;
|
|
33
42
|
stats(): StoreStats;
|
|
43
|
+
private _set;
|
|
44
|
+
private _setSync;
|
|
45
|
+
private _delete;
|
|
46
|
+
private _deleteSync;
|
|
47
|
+
private _batch;
|
|
48
|
+
private _undo;
|
|
49
|
+
private _compact;
|
|
50
|
+
private _archive;
|
|
34
51
|
private ensureOpen;
|
|
52
|
+
private ensureWritable;
|
|
35
53
|
private applyOp;
|
|
36
54
|
private reverseOp;
|
|
37
55
|
private persistOp;
|
package/dist/store.js
CHANGED
|
@@ -4,6 +4,7 @@ import { appendOp, appendOps, readOps, truncateLastOp } from "./wal.js";
|
|
|
4
4
|
import { loadSnapshot, writeSnapshot } from "./snapshot.js";
|
|
5
5
|
import { createDefaultManifest, readManifest, writeManifest, } from "./manifest.js";
|
|
6
6
|
import { loadArchiveSegment, writeArchiveSegment, } from "./archive.js";
|
|
7
|
+
import { acquireLock, releaseLock } from "./lock.js";
|
|
7
8
|
export class Store {
|
|
8
9
|
dir = "";
|
|
9
10
|
records = new Map();
|
|
@@ -18,20 +19,43 @@ export class Store {
|
|
|
18
19
|
checkpointOnClose: true,
|
|
19
20
|
version: 1,
|
|
20
21
|
migrate: (r) => r,
|
|
22
|
+
readOnly: false,
|
|
21
23
|
};
|
|
22
24
|
archivedRecordCount = 0;
|
|
23
25
|
batching = false;
|
|
24
26
|
batchOps = [];
|
|
27
|
+
_lock = Promise.resolve();
|
|
28
|
+
lockFh = null;
|
|
29
|
+
/**
|
|
30
|
+
* Serialize all state-mutating operations through a promise chain.
|
|
31
|
+
* This prevents interleaving of async mutations (e.g. compact + set,
|
|
32
|
+
* undo + set) which could corrupt the WAL or in-memory state.
|
|
33
|
+
* Read operations remain synchronous and lock-free.
|
|
34
|
+
*/
|
|
35
|
+
serialize(fn) {
|
|
36
|
+
const prev = this._lock;
|
|
37
|
+
let resolve;
|
|
38
|
+
this._lock = new Promise((r) => {
|
|
39
|
+
resolve = r;
|
|
40
|
+
});
|
|
41
|
+
return prev.then(fn).finally(() => resolve());
|
|
42
|
+
}
|
|
25
43
|
async open(dir, options) {
|
|
26
44
|
this.dir = dir;
|
|
27
45
|
if (options) {
|
|
28
46
|
this.options = { ...this.options, ...options };
|
|
29
47
|
}
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
48
|
+
if (!this.options.readOnly) {
|
|
49
|
+
await mkdir(join(dir, "snapshots"), { recursive: true });
|
|
50
|
+
await mkdir(join(dir, "ops"), { recursive: true });
|
|
51
|
+
await mkdir(join(dir, "archive"), { recursive: true });
|
|
52
|
+
this.lockFh = await acquireLock(dir);
|
|
53
|
+
}
|
|
33
54
|
const manifest = await readManifest(dir);
|
|
34
55
|
if (!manifest) {
|
|
56
|
+
if (this.options.readOnly) {
|
|
57
|
+
throw new Error("Cannot open in readOnly mode: no existing store found");
|
|
58
|
+
}
|
|
35
59
|
// Fresh store — create empty snapshot and manifest
|
|
36
60
|
const snapshotPath = await writeSnapshot(dir, new Map(), this.options.version);
|
|
37
61
|
const opsFilename = `ops-${Date.now()}.jsonl`;
|
|
@@ -82,8 +106,12 @@ export class Store {
|
|
|
82
106
|
}
|
|
83
107
|
async close() {
|
|
84
108
|
this.ensureOpen();
|
|
85
|
-
if (this.options.checkpointOnClose && this.ops.length > 0) {
|
|
86
|
-
await this.
|
|
109
|
+
if (!this.options.readOnly && this.options.checkpointOnClose && this.ops.length > 0) {
|
|
110
|
+
await this.serialize(() => this._compact());
|
|
111
|
+
}
|
|
112
|
+
if (this.lockFh) {
|
|
113
|
+
await releaseLock(this.dir, this.lockFh);
|
|
114
|
+
this.lockFh = null;
|
|
87
115
|
}
|
|
88
116
|
this.opened = false;
|
|
89
117
|
}
|
|
@@ -93,39 +121,21 @@ export class Store {
|
|
|
93
121
|
}
|
|
94
122
|
set(id, value) {
|
|
95
123
|
this.ensureOpen();
|
|
96
|
-
|
|
97
|
-
const op = {
|
|
98
|
-
ts: new Date().toISOString(),
|
|
99
|
-
op: "set",
|
|
100
|
-
id,
|
|
101
|
-
data: value,
|
|
102
|
-
prev,
|
|
103
|
-
};
|
|
104
|
-
this.records.set(id, value);
|
|
124
|
+
this.ensureWritable();
|
|
105
125
|
if (this.batching) {
|
|
106
|
-
this.
|
|
126
|
+
this._setSync(id, value);
|
|
107
127
|
return;
|
|
108
128
|
}
|
|
109
|
-
return this.
|
|
129
|
+
return this.serialize(() => this._set(id, value));
|
|
110
130
|
}
|
|
111
131
|
delete(id) {
|
|
112
132
|
this.ensureOpen();
|
|
113
|
-
|
|
114
|
-
if (prev === undefined) {
|
|
115
|
-
throw new Error(`Record '${id}' not found`);
|
|
116
|
-
}
|
|
117
|
-
const op = {
|
|
118
|
-
ts: new Date().toISOString(),
|
|
119
|
-
op: "delete",
|
|
120
|
-
id,
|
|
121
|
-
prev,
|
|
122
|
-
};
|
|
123
|
-
this.records.delete(id);
|
|
133
|
+
this.ensureWritable();
|
|
124
134
|
if (this.batching) {
|
|
125
|
-
this.
|
|
135
|
+
this._deleteSync(id);
|
|
126
136
|
return;
|
|
127
137
|
}
|
|
128
|
-
return this.
|
|
138
|
+
return this.serialize(() => this._delete(id));
|
|
129
139
|
}
|
|
130
140
|
has(id) {
|
|
131
141
|
this.ensureOpen();
|
|
@@ -161,6 +171,107 @@ export class Store {
|
|
|
161
171
|
}
|
|
162
172
|
async batch(fn) {
|
|
163
173
|
this.ensureOpen();
|
|
174
|
+
this.ensureWritable();
|
|
175
|
+
return this.serialize(() => this._batch(fn));
|
|
176
|
+
}
|
|
177
|
+
async undo() {
|
|
178
|
+
this.ensureOpen();
|
|
179
|
+
this.ensureWritable();
|
|
180
|
+
return this.serialize(() => this._undo());
|
|
181
|
+
}
|
|
182
|
+
getHistory(id) {
|
|
183
|
+
this.ensureOpen();
|
|
184
|
+
return this.ops.filter((op) => op.id === id);
|
|
185
|
+
}
|
|
186
|
+
getOps(since) {
|
|
187
|
+
this.ensureOpen();
|
|
188
|
+
if (!since)
|
|
189
|
+
return [...this.ops];
|
|
190
|
+
return this.ops.filter((op) => op.ts > since);
|
|
191
|
+
}
|
|
192
|
+
async compact() {
|
|
193
|
+
this.ensureOpen();
|
|
194
|
+
this.ensureWritable();
|
|
195
|
+
return this.serialize(() => this._compact());
|
|
196
|
+
}
|
|
197
|
+
async archive(predicate, segment) {
|
|
198
|
+
this.ensureOpen();
|
|
199
|
+
this.ensureWritable();
|
|
200
|
+
return this.serialize(() => this._archive(predicate, segment));
|
|
201
|
+
}
|
|
202
|
+
listArchiveSegments() {
|
|
203
|
+
this.ensureOpen();
|
|
204
|
+
return [...this.archiveSegments];
|
|
205
|
+
}
|
|
206
|
+
async loadArchive(segment) {
|
|
207
|
+
this.ensureOpen();
|
|
208
|
+
const segmentPath = this.archiveSegments.find((s) => s === `archive/archive-${segment}.json`) || this.archiveSegments.find((s) => s.includes(segment));
|
|
209
|
+
if (!segmentPath)
|
|
210
|
+
throw new Error(`Archive segment '${segment}' not found`);
|
|
211
|
+
return loadArchiveSegment(this.dir, segmentPath);
|
|
212
|
+
}
|
|
213
|
+
stats() {
|
|
214
|
+
this.ensureOpen();
|
|
215
|
+
return {
|
|
216
|
+
activeRecords: this.records.size,
|
|
217
|
+
opsCount: this.ops.length,
|
|
218
|
+
archiveSegments: this.archiveSegments.length,
|
|
219
|
+
};
|
|
220
|
+
}
|
|
221
|
+
// --- Private mutation implementations ---
|
|
222
|
+
async _set(id, value) {
|
|
223
|
+
const prev = this.records.get(id) ?? null;
|
|
224
|
+
const op = {
|
|
225
|
+
ts: new Date().toISOString(),
|
|
226
|
+
op: "set",
|
|
227
|
+
id,
|
|
228
|
+
data: value,
|
|
229
|
+
prev,
|
|
230
|
+
};
|
|
231
|
+
this.records.set(id, value);
|
|
232
|
+
await this.persistOp(op);
|
|
233
|
+
}
|
|
234
|
+
_setSync(id, value) {
|
|
235
|
+
const prev = this.records.get(id) ?? null;
|
|
236
|
+
const op = {
|
|
237
|
+
ts: new Date().toISOString(),
|
|
238
|
+
op: "set",
|
|
239
|
+
id,
|
|
240
|
+
data: value,
|
|
241
|
+
prev,
|
|
242
|
+
};
|
|
243
|
+
this.records.set(id, value);
|
|
244
|
+
this.batchOps.push(op);
|
|
245
|
+
}
|
|
246
|
+
async _delete(id) {
|
|
247
|
+
const prev = this.records.get(id);
|
|
248
|
+
if (prev === undefined) {
|
|
249
|
+
throw new Error(`Record '${id}' not found`);
|
|
250
|
+
}
|
|
251
|
+
const op = {
|
|
252
|
+
ts: new Date().toISOString(),
|
|
253
|
+
op: "delete",
|
|
254
|
+
id,
|
|
255
|
+
prev,
|
|
256
|
+
};
|
|
257
|
+
this.records.delete(id);
|
|
258
|
+
await this.persistOp(op);
|
|
259
|
+
}
|
|
260
|
+
_deleteSync(id) {
|
|
261
|
+
const prev = this.records.get(id);
|
|
262
|
+
if (prev === undefined) {
|
|
263
|
+
throw new Error(`Record '${id}' not found`);
|
|
264
|
+
}
|
|
265
|
+
const op = {
|
|
266
|
+
ts: new Date().toISOString(),
|
|
267
|
+
op: "delete",
|
|
268
|
+
id,
|
|
269
|
+
prev,
|
|
270
|
+
};
|
|
271
|
+
this.records.delete(id);
|
|
272
|
+
this.batchOps.push(op);
|
|
273
|
+
}
|
|
274
|
+
async _batch(fn) {
|
|
164
275
|
this.batching = true;
|
|
165
276
|
this.batchOps = [];
|
|
166
277
|
try {
|
|
@@ -170,7 +281,7 @@ export class Store {
|
|
|
170
281
|
await appendOps(join(this.dir, this.activeOpsPath), this.batchOps);
|
|
171
282
|
this.ops.push(...this.batchOps);
|
|
172
283
|
if (this.ops.length >= this.options.checkpointThreshold) {
|
|
173
|
-
await this.
|
|
284
|
+
await this._compact();
|
|
174
285
|
}
|
|
175
286
|
}
|
|
176
287
|
}
|
|
@@ -191,8 +302,7 @@ export class Store {
|
|
|
191
302
|
this.batchOps = [];
|
|
192
303
|
}
|
|
193
304
|
}
|
|
194
|
-
async
|
|
195
|
-
this.ensureOpen();
|
|
305
|
+
async _undo() {
|
|
196
306
|
if (this.ops.length === 0)
|
|
197
307
|
return false;
|
|
198
308
|
const lastOp = this.ops[this.ops.length - 1];
|
|
@@ -201,18 +311,7 @@ export class Store {
|
|
|
201
311
|
await truncateLastOp(join(this.dir, this.activeOpsPath));
|
|
202
312
|
return true;
|
|
203
313
|
}
|
|
204
|
-
|
|
205
|
-
this.ensureOpen();
|
|
206
|
-
return this.ops.filter((op) => op.id === id);
|
|
207
|
-
}
|
|
208
|
-
getOps(since) {
|
|
209
|
-
this.ensureOpen();
|
|
210
|
-
if (!since)
|
|
211
|
-
return [...this.ops];
|
|
212
|
-
return this.ops.filter((op) => op.ts > since);
|
|
213
|
-
}
|
|
214
|
-
async compact() {
|
|
215
|
-
this.ensureOpen();
|
|
314
|
+
async _compact() {
|
|
216
315
|
const snapshotPath = await writeSnapshot(this.dir, this.records, this.version);
|
|
217
316
|
const opsFilename = `ops-${Date.now()}.jsonl`;
|
|
218
317
|
const opsPath = `ops/${opsFilename}`;
|
|
@@ -234,8 +333,7 @@ export class Store {
|
|
|
234
333
|
this.activeOpsPath = opsPath;
|
|
235
334
|
this.ops = [];
|
|
236
335
|
}
|
|
237
|
-
async
|
|
238
|
-
this.ensureOpen();
|
|
336
|
+
async _archive(predicate, segment) {
|
|
239
337
|
const toArchive = new Map();
|
|
240
338
|
for (const [id, value] of this.records) {
|
|
241
339
|
if (predicate(value, id))
|
|
@@ -252,32 +350,18 @@ export class Store {
|
|
|
252
350
|
this.records.delete(id);
|
|
253
351
|
}
|
|
254
352
|
this.archivedRecordCount += toArchive.size;
|
|
255
|
-
await this.
|
|
353
|
+
await this._compact();
|
|
256
354
|
return toArchive.size;
|
|
257
355
|
}
|
|
258
|
-
|
|
259
|
-
this.ensureOpen();
|
|
260
|
-
const segmentPath = this.archiveSegments.find((s) => s === `archive/archive-${segment}.json`) || this.archiveSegments.find((s) => s.includes(segment));
|
|
261
|
-
if (!segmentPath)
|
|
262
|
-
throw new Error(`Archive segment '${segment}' not found`);
|
|
263
|
-
return loadArchiveSegment(this.dir, segmentPath);
|
|
264
|
-
}
|
|
265
|
-
listArchiveSegments() {
|
|
266
|
-
this.ensureOpen();
|
|
267
|
-
return [...this.archiveSegments];
|
|
268
|
-
}
|
|
269
|
-
stats() {
|
|
270
|
-
this.ensureOpen();
|
|
271
|
-
return {
|
|
272
|
-
activeRecords: this.records.size,
|
|
273
|
-
opsCount: this.ops.length,
|
|
274
|
-
archiveSegments: this.archiveSegments.length,
|
|
275
|
-
};
|
|
276
|
-
}
|
|
356
|
+
// --- Helpers ---
|
|
277
357
|
ensureOpen() {
|
|
278
358
|
if (!this.opened)
|
|
279
359
|
throw new Error("Store is not open. Call open() first.");
|
|
280
360
|
}
|
|
361
|
+
ensureWritable() {
|
|
362
|
+
if (this.options.readOnly)
|
|
363
|
+
throw new Error("Store is read-only. Cannot perform mutations.");
|
|
364
|
+
}
|
|
281
365
|
applyOp(op) {
|
|
282
366
|
if (op.op === "set" && op.data !== undefined) {
|
|
283
367
|
this.records.set(op.id, op.data);
|
|
@@ -304,7 +388,7 @@ export class Store {
|
|
|
304
388
|
await appendOp(join(this.dir, this.activeOpsPath), op);
|
|
305
389
|
this.ops.push(op);
|
|
306
390
|
if (this.ops.length >= this.options.checkpointThreshold) {
|
|
307
|
-
await this.
|
|
391
|
+
await this._compact();
|
|
308
392
|
}
|
|
309
393
|
}
|
|
310
394
|
defaultPeriod() {
|
package/dist/types.d.ts
CHANGED
|
@@ -9,6 +9,8 @@ export interface Operation<T = Record<string, unknown>> {
|
|
|
9
9
|
data?: T;
|
|
10
10
|
/** Previous value (null for creates, full record for updates/deletes) */
|
|
11
11
|
prev: T | null;
|
|
12
|
+
/** Encoding format for prev field. Omitted or "full" = full record. "delta" = JSON Patch (future). */
|
|
13
|
+
encoding?: "full" | "delta";
|
|
12
14
|
}
|
|
13
15
|
export interface Snapshot<T = Record<string, unknown>> {
|
|
14
16
|
version: number;
|
|
@@ -44,6 +46,8 @@ export interface StoreOptions {
|
|
|
44
46
|
version?: number;
|
|
45
47
|
/** Migration function: called if stored version < current version */
|
|
46
48
|
migrate?: (record: unknown, fromVersion: number) => unknown;
|
|
49
|
+
/** Open in read-only mode: skips directory lock, rejects all mutations. */
|
|
50
|
+
readOnly?: boolean;
|
|
47
51
|
}
|
|
48
52
|
export interface StoreStats {
|
|
49
53
|
activeRecords: number;
|
package/dist/validate.js
CHANGED
|
@@ -21,6 +21,9 @@ export function validateOp(raw) {
|
|
|
21
21
|
throw new Error("Invalid operation: delete op must have non-null prev");
|
|
22
22
|
if (obj.op === "delete" && "data" in obj)
|
|
23
23
|
throw new Error("Invalid operation: delete op must not have data field");
|
|
24
|
+
if ("encoding" in obj && obj.encoding !== "full" && obj.encoding !== "delta") {
|
|
25
|
+
throw new Error(`Invalid operation: encoding must be "full" or "delta", got "${obj.encoding}"`);
|
|
26
|
+
}
|
|
24
27
|
return raw;
|
|
25
28
|
}
|
|
26
29
|
export function validateManifest(raw) {
|
package/dist/wal.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { appendFile, readFile,
|
|
1
|
+
import { appendFile, readFile, open } from "node:fs/promises";
|
|
2
2
|
import { validateOp } from "./validate.js";
|
|
3
3
|
export async function appendOp(path, op) {
|
|
4
4
|
await appendFile(path, JSON.stringify(op) + "\n", "utf-8");
|
|
@@ -32,18 +32,45 @@ export async function readOps(path) {
|
|
|
32
32
|
return ops;
|
|
33
33
|
}
|
|
34
34
|
/**
 * Remove the last operation line from a WAL file by scanning backwards in
 * 4KB chunks for the previous record boundary, then truncating there.
 *
 * Bug fixes over the previous version:
 * 1. The trailing-newline skip (`length - 2`) was applied to EVERY chunk,
 *    not just the tail chunk. A line boundary landing exactly on the last
 *    byte of an earlier chunk was therefore never seen; with a last op
 *    ≥ 4KB this could fall through to truncate(0) and wipe the file.
 * 2. The search ran on the decoded UTF-8 string but the truncation offset
 *    was computed as readPos + stringIndex, conflating character index
 *    with byte offset for multibyte data. We now search raw bytes (0x0a).
 *
 * @param {string} path - WAL file path.
 * @returns {Promise<boolean>} true if a line was removed, false if the
 *   file is missing or empty.
 */
export async function truncateLastOp(path) {
    let fh;
    try {
        fh = await open(path, "r+");
    }
    catch {
        return false;
    }
    try {
        const { size } = await fh.stat();
        if (size === 0)
            return false;
        let readSize = Math.min(4096, size);
        let readPos = size - readSize;
        // Only the tail chunk can end with the file's own trailing newline,
        // which must be skipped when looking for the previous boundary.
        let skipTrailing = true;
        while (true) {
            const buf = Buffer.alloc(readSize);
            // NOTE(review): assumes a regular file where read() fills the
            // requested range in one call — true for local files.
            await fh.read(buf, 0, readSize, readPos);
            const from = readSize - (skipTrailing ? 2 : 1);
            // Search raw bytes for '\n' so multibyte content cannot skew
            // the byte offset. Guard from < 0: Buffer.lastIndexOf treats a
            // negative offset as counting from the end.
            const lastNl = from >= 0 ? buf.lastIndexOf(0x0a, from) : -1;
            if (lastNl !== -1) {
                await fh.truncate(readPos + lastNl + 1);
                return true;
            }
            if (readPos === 0) {
                // No earlier boundary — the file held a single line.
                await fh.truncate(0);
                return true;
            }
            // Step one chunk further back.
            const nextSize = Math.min(4096, readPos);
            readPos -= nextSize;
            readSize = nextSize;
            skipTrailing = false;
        }
    }
    finally {
        await fh.close();
    }
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@backloghq/opslog",
|
|
3
|
-
"version": "0.1.4",
|
|
3
|
+
"version": "0.2.0",
|
|
4
4
|
"description": "Embedded event-sourced document store. Append-only operation log with immutable snapshots, zero native dependencies.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"types": "dist/index.d.ts",
|