@backloghq/opslog 0.1.4 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +105 -4
- package/dist/backend.d.ts +30 -0
- package/dist/backend.js +134 -0
- package/dist/clock.d.ts +16 -0
- package/dist/clock.js +25 -0
- package/dist/index.d.ts +5 -1
- package/dist/index.js +4 -0
- package/dist/lock.d.ts +12 -0
- package/dist/lock.js +64 -0
- package/dist/store.d.ts +36 -2
- package/dist/store.js +397 -128
- package/dist/types.d.ts +47 -0
- package/dist/validate.js +21 -0
- package/dist/wal.js +37 -10
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -54,13 +54,14 @@ State survives restarts — reopen the same directory and everything is there.
|
|
|
54
54
|
|
|
55
55
|
```
|
|
56
56
|
data/
|
|
57
|
-
manifest.json
|
|
57
|
+
manifest.json # Points to current snapshot + ops file(s)
|
|
58
58
|
snapshots/
|
|
59
|
-
snap-<timestamp>.json
|
|
59
|
+
snap-<timestamp>.json # Immutable full-state capture
|
|
60
60
|
ops/
|
|
61
|
-
ops-<timestamp>.jsonl
|
|
61
|
+
ops-<timestamp>.jsonl # Append-only operation log (single-writer)
|
|
62
|
+
agent-<id>-<timestamp>.jsonl # Per-agent operation log (multi-writer)
|
|
62
63
|
archive/
|
|
63
|
-
archive-<period>.json
|
|
64
|
+
archive-<period>.json # Old records, lazy-loaded
|
|
64
65
|
```
|
|
65
66
|
|
|
66
67
|
**Writes** append an operation (one JSON line) to the ops file. **Reads** come from an in-memory map built from the latest snapshot + ops replay. **Checkpoints** materialize current state as a new immutable snapshot.
|
|
@@ -121,6 +122,7 @@ await store.archive(predicate) // Move matching records to archive
|
|
|
121
122
|
await store.loadArchive(segment) // Lazy-load archived records
|
|
122
123
|
store.listArchiveSegments() // List available archive files
|
|
123
124
|
store.stats() // { activeRecords, opsCount, archiveSegments }
|
|
125
|
+
await store.refresh() // Reload from all agent WALs (multi-writer only)
|
|
124
126
|
```
|
|
125
127
|
|
|
126
128
|
## Options
|
|
@@ -131,14 +133,113 @@ await store.open(dir, {
|
|
|
131
133
|
checkpointOnClose: true, // Checkpoint when close() is called (default: true)
|
|
132
134
|
version: 1, // Schema version
|
|
133
135
|
migrate: (record, fromVersion) => record, // Migration function
|
|
136
|
+
readOnly: false, // Open in read-only mode (default: false)
|
|
137
|
+
agentId: "agent-A", // Enable multi-writer mode (optional)
|
|
138
|
+
backend: new FsBackend(), // Custom storage backend (optional, default: FsBackend)
|
|
134
139
|
});
|
|
135
140
|
```
|
|
136
141
|
|
|
142
|
+
## Multi-Writer Mode
|
|
143
|
+
|
|
144
|
+
Multiple agents can write to the same store concurrently. Each agent gets its own WAL file — no write contention.
|
|
145
|
+
|
|
146
|
+
```typescript
|
|
147
|
+
// Agent A (process 1 / machine 1)
|
|
148
|
+
const storeA = new Store<Task>();
|
|
149
|
+
await storeA.open("./data", { agentId: "agent-A" });
|
|
150
|
+
await storeA.set("task-1", { title: "Build API", status: "active" });
|
|
151
|
+
await storeA.close();
|
|
152
|
+
|
|
153
|
+
// Agent B (process 2 / machine 2)
|
|
154
|
+
const storeB = new Store<Task>();
|
|
155
|
+
await storeB.open("./data", { agentId: "agent-B" });
|
|
156
|
+
// B sees A's writes on open
|
|
157
|
+
storeB.get("task-1"); // { title: "Build API", status: "active" }
|
|
158
|
+
await storeB.set("task-2", { title: "Write tests", status: "active" });
|
|
159
|
+
await storeB.close();
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
### How it works
|
|
163
|
+
|
|
164
|
+
- Each agent writes to `ops/agent-{id}-{timestamp}.jsonl` — separate files, no locking needed for writes
|
|
165
|
+
- Operations carry a [Lamport clock](https://en.wikipedia.org/wiki/Lamport_timestamp) for ordering
|
|
166
|
+
- On `open()`, all agent WAL files are merge-sorted by `(clock, agentId)` for a deterministic total order
|
|
167
|
+
- Conflicts (two agents write the same key) are resolved with **last-writer-wins** by clock value
|
|
168
|
+
- `undo()` only undoes the calling agent's last operation
|
|
169
|
+
- `compact()` acquires a compaction lock, snapshots the merged state, and resets all WAL files
|
|
170
|
+
- `refresh()` re-reads all agent WALs to pick up other agents' writes
|
|
171
|
+
|
|
172
|
+
### Conflict resolution
|
|
173
|
+
|
|
174
|
+
When two agents modify the same key, the operation with the higher Lamport clock wins. If clocks are equal, the lexicographically higher agent ID wins. This is deterministic — all agents arrive at the same state regardless of replay order.
|
|
175
|
+
|
|
176
|
+
```typescript
|
|
177
|
+
// Agent A sets "shared" (clock=1)
|
|
178
|
+
await storeA.set("shared", { value: "from-A" });
|
|
179
|
+
|
|
180
|
+
// Agent B opens (sees clock=1), sets "shared" (clock=2)
|
|
181
|
+
await storeB.set("shared", { value: "from-B" });
|
|
182
|
+
|
|
183
|
+
// B wins — higher clock
|
|
184
|
+
store.get("shared"); // { value: "from-B" }
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
## Custom Storage Backend
|
|
188
|
+
|
|
189
|
+
opslog uses a pluggable `StorageBackend` interface for all I/O. The default is `FsBackend` (local filesystem). You can implement your own backend for S3, databases, or other storage systems.
|
|
190
|
+
|
|
191
|
+
```typescript
|
|
192
|
+
import { Store, FsBackend } from "@backloghq/opslog";
|
|
193
|
+
import type { StorageBackend } from "@backloghq/opslog";
|
|
194
|
+
|
|
195
|
+
// Use the default filesystem backend (implicit)
|
|
196
|
+
const store = new Store();
|
|
197
|
+
await store.open("./data");
|
|
198
|
+
|
|
199
|
+
// Or pass a custom backend explicitly
|
|
200
|
+
const store = new Store();
|
|
201
|
+
await store.open("./data", { backend: new FsBackend() });
|
|
202
|
+
|
|
203
|
+
// Or implement your own
|
|
204
|
+
class S3Backend implements StorageBackend {
|
|
205
|
+
// ... implement all methods
|
|
206
|
+
}
|
|
207
|
+
const store = new Store();
|
|
208
|
+
await store.open("s3://bucket/prefix", { backend: new S3Backend() });
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
## Read-Only Mode
|
|
212
|
+
|
|
213
|
+
Open a store for reading without acquiring the write lock. Useful for dashboards, backup processes, or multiple readers alongside a single writer.
|
|
214
|
+
|
|
215
|
+
```typescript
|
|
216
|
+
const reader = new Store();
|
|
217
|
+
await reader.open("./data", { readOnly: true });
|
|
218
|
+
|
|
219
|
+
// All reads work
|
|
220
|
+
const tasks = reader.all();
|
|
221
|
+
const active = reader.filter((t) => t.status === "active");
|
|
222
|
+
|
|
223
|
+
// All mutations throw
|
|
224
|
+
await reader.set("x", value); // Error: Store is read-only
|
|
225
|
+
```
|
|
226
|
+
|
|
227
|
+
Read-only stores load the latest snapshot and replay ops on open. They do not checkpoint on close. Multiple read-only stores can open the same directory concurrently alongside one writer.
|
|
228
|
+
|
|
229
|
+
## Concurrency
|
|
230
|
+
|
|
231
|
+
All state-mutating operations (`set`, `delete`, `batch`, `undo`, `compact`, `archive`) are serialized through an internal async mutex. This prevents interleaving of concurrent mutations — e.g., `compact()` swapping the ops file while `set()` is appending, or `undo()` truncating while `set()` is writing.
|
|
232
|
+
|
|
233
|
+
Read operations (`get`, `all`, `filter`, `count`, `has`, `entries`) are synchronous and lock-free.
|
|
234
|
+
|
|
235
|
+
An advisory directory write lock (`.lock` file with PID) prevents two processes from opening the same store. Stale locks from crashed processes are automatically recovered.
|
|
236
|
+
|
|
137
237
|
## Crash Safety
|
|
138
238
|
|
|
139
239
|
- **Ops file**: append-only writes. A crash mid-append loses at most the last operation. Malformed lines are skipped on recovery.
|
|
140
240
|
- **Snapshots**: immutable. Written to a temp file, then atomically renamed.
|
|
141
241
|
- **Manifest**: atomically replaced via temp-file-rename. Always points to a valid snapshot.
|
|
242
|
+
- **Undo**: uses `ftruncate()` — a single atomic POSIX syscall. O(1) regardless of file size.
|
|
142
243
|
|
|
143
244
|
No data corruption on crash. At most one in-flight operation is lost.
|
|
144
245
|
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import type { LockHandle, Manifest, Operation, StorageBackend } from "./types.js";
|
|
2
|
+
/** Filesystem-backed storage backend. Default backend for opslog. */
export declare class FsBackend implements StorageBackend {
    private dir;
    /** Bind to a data directory and create the snapshots/ops/archive layout (skipped when read-only). */
    initialize(dir: string, opts: {
        readOnly: boolean;
    }): Promise<void>;
    /** Release backend resources. No-op for the filesystem backend. */
    shutdown(): Promise<void>;
    /** Read manifest.json, or null when the store has never been initialized. */
    readManifest(): Promise<Manifest | null>;
    /** Atomically replace manifest.json. */
    writeManifest(manifest: Manifest): Promise<void>;
    /** Write an immutable snapshot; returns its store-relative path. */
    writeSnapshot(records: Map<string, unknown>, version: number): Promise<string>;
    /** Load a snapshot previously written by writeSnapshot. */
    loadSnapshot(relativePath: string): Promise<{
        records: Map<string, unknown>;
        version: number;
    }>;
    /** Append operations (one JSON line each) to the given WAL file. */
    appendOps(relativePath: string, ops: Operation[]): Promise<void>;
    /** Read and parse all operations from a WAL file. */
    readOps(relativePath: string): Promise<Operation[]>;
    /** Remove the last operation line; true when something was truncated. */
    truncateLastOp(relativePath: string): Promise<boolean>;
    /** Create a fresh, empty single-writer ops file; returns its relative path. */
    createOpsFile(): Promise<string>;
    /** Write an archive segment for the given period; returns its relative path. */
    writeArchiveSegment(period: string, records: Map<string, unknown>): Promise<string>;
    /** Load an archive segment previously written by writeArchiveSegment. */
    loadArchiveSegment(relativePath: string): Promise<Map<string, unknown>>;
    /** List relative paths of all archive segment files. */
    listArchiveSegments(): Promise<string[]>;
    /** Acquire the advisory single-writer directory lock. */
    acquireLock(): Promise<LockHandle>;
    /** Release a handle obtained from acquireLock. */
    releaseLock(handle: LockHandle): Promise<void>;
    /** Create an empty per-agent WAL file (multi-writer mode). */
    createAgentOpsFile(agentId: string): Promise<string>;
    /** List all WAL files (single-writer and per-agent) under ops/. */
    listOpsFiles(): Promise<string[]>;
    /** Take the exclusive compaction lock; throws if another agent holds it. */
    acquireCompactionLock(): Promise<LockHandle>;
    /** Release a handle obtained from acquireCompactionLock. */
    releaseCompactionLock(handle: LockHandle): Promise<void>;
    /** Cheap change token for the manifest (mtime-based), or null when absent. */
    getManifestVersion(): Promise<string | null>;
}
|
package/dist/backend.js
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import { mkdir, open, readdir, stat, unlink, writeFile } from "node:fs/promises";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
import { appendOp, appendOps, readOps, truncateLastOp } from "./wal.js";
|
|
4
|
+
import { loadSnapshot, writeSnapshot } from "./snapshot.js";
|
|
5
|
+
import { readManifest, writeManifest } from "./manifest.js";
|
|
6
|
+
import { loadArchiveSegment, writeArchiveSegment, listArchiveSegments as fsListArchiveSegments, } from "./archive.js";
|
|
7
|
+
import { acquireLock as fsAcquireLock, releaseLock as fsReleaseLock, } from "./lock.js";
|
|
8
|
+
/**
 * Opaque token returned by FsBackend lock acquisition.
 * Carries the open FileHandle pinning the lock file plus the directory
 * it belongs to, so release can close and unlink the right file.
 */
class FsLockHandle {
    constructor(fh, dir) {
        this.fh = fh;   // open FileHandle for the lock file
        this.dir = dir; // store directory the lock guards
    }
}
|
|
16
|
+
/** Filesystem-backed storage backend. Default backend for opslog. */
export class FsBackend {
    dir = "";
    /**
     * Bind the backend to a data directory and create the on-disk layout
     * (snapshots/, ops/, archive/). No directories are created in read-only mode.
     */
    async initialize(dir, opts) {
        this.dir = dir;
        if (!opts.readOnly) {
            await mkdir(join(dir, "snapshots"), { recursive: true });
            await mkdir(join(dir, "ops"), { recursive: true });
            await mkdir(join(dir, "archive"), { recursive: true });
        }
    }
    async shutdown() {
        // No-op for filesystem backend
    }
    // -- Manifest --
    async readManifest() {
        return readManifest(this.dir);
    }
    async writeManifest(manifest) {
        return writeManifest(this.dir, manifest);
    }
    // -- Snapshots --
    async writeSnapshot(records, version) {
        return writeSnapshot(this.dir, records, version);
    }
    async loadSnapshot(relativePath) {
        return loadSnapshot(this.dir, relativePath);
    }
    // -- WAL --
    /** Append ops to a WAL file; single ops take the cheaper appendOp path. */
    async appendOps(relativePath, ops) {
        const fullPath = join(this.dir, relativePath);
        if (ops.length === 1) {
            return appendOp(fullPath, ops[0]);
        }
        return appendOps(fullPath, ops);
    }
    async readOps(relativePath) {
        return readOps(join(this.dir, relativePath));
    }
    async truncateLastOp(relativePath) {
        return truncateLastOp(join(this.dir, relativePath));
    }
    /** Create a fresh, empty single-writer ops file; returns its relative path. */
    async createOpsFile() {
        const filename = `ops-${Date.now()}.jsonl`;
        // Fix: interpolate the generated filename (was a broken literal that
        // ignored `filename` entirely, so every store shared one bogus path).
        const relativePath = `ops/${filename}`;
        await writeFile(join(this.dir, relativePath), "", "utf-8");
        return relativePath;
    }
    // -- Archive --
    async writeArchiveSegment(period, records) {
        return writeArchiveSegment(this.dir, period, records);
    }
    async loadArchiveSegment(relativePath) {
        return loadArchiveSegment(this.dir, relativePath);
    }
    async listArchiveSegments() {
        return fsListArchiveSegments(this.dir);
    }
    // -- Locking (single-writer) --
    async acquireLock() {
        const fh = await fsAcquireLock(this.dir);
        return new FsLockHandle(fh, this.dir);
    }
    async releaseLock(handle) {
        const fsHandle = handle;
        return fsReleaseLock(fsHandle.dir, fsHandle.fh);
    }
    // -- Multi-writer extensions --
    /** Create an empty per-agent WAL file; returns its relative path. */
    async createAgentOpsFile(agentId) {
        const filename = `agent-${agentId}-${Date.now()}.jsonl`;
        // Fix: interpolate the generated filename (same broken-literal bug as
        // createOpsFile — agents would otherwise collide on one path).
        const relativePath = `ops/${filename}`;
        await writeFile(join(this.dir, relativePath), "", "utf-8");
        return relativePath;
    }
    /** List all WAL files (single-writer and per-agent) under ops/. */
    async listOpsFiles() {
        const opsDir = join(this.dir, "ops");
        try {
            const files = await readdir(opsDir);
            return files.filter((f) => f.endsWith(".jsonl")).map((f) => `ops/${f}`);
        }
        catch {
            // ops/ missing (e.g. read-only open of an empty dir) — no WALs
            return [];
        }
    }
    /**
     * Take the exclusive compaction lock via exclusive-create ("wx").
     * NOTE(review): unlike the main write lock, there is no stale-lock
     * recovery here — a crash mid-compaction leaves .compact-lock behind
     * and blocks future compaction until manually removed. Confirm intended.
     */
    async acquireCompactionLock() {
        const lockPath = join(this.dir, ".compact-lock");
        let fh;
        try {
            fh = await open(lockPath, "wx");
        }
        catch (err) {
            if (err.code === "EEXIST") {
                throw new Error("Compaction lock held by another agent", { cause: err });
            }
            throw err;
        }
        await fh.writeFile(String(process.pid), "utf-8");
        return new FsLockHandle(fh, this.dir);
    }
    async releaseCompactionLock(handle) {
        const fsHandle = handle;
        await fsHandle.fh.close();
        try {
            await unlink(join(fsHandle.dir, ".compact-lock"));
        }
        catch {
            // Already cleaned up
        }
    }
    /** Cheap change token: manifest mtime in ms, or null when the file is absent. */
    async getManifestVersion() {
        try {
            const s = await stat(join(this.dir, "manifest.json"));
            return s.mtimeMs.toString();
        }
        catch {
            return null;
        }
    }
}
|
package/dist/clock.d.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Lamport logical clock for multi-writer operation ordering.
|
|
3
|
+
* Each agent maintains its own clock. On local events, tick().
|
|
4
|
+
* On receiving remote events, merge(received) to stay ahead.
|
|
5
|
+
* Ties are broken by agent ID (lexicographic) for deterministic total order.
|
|
6
|
+
*/
|
|
7
|
+
export declare class LamportClock {
|
|
8
|
+
private counter;
|
|
9
|
+
constructor(initial?: number);
|
|
10
|
+
/** Increment and return the new value (for local events). */
|
|
11
|
+
tick(): number;
|
|
12
|
+
/** Merge with a received clock value and increment. */
|
|
13
|
+
merge(received: number): number;
|
|
14
|
+
/** Current clock value without incrementing. */
|
|
15
|
+
get current(): number;
|
|
16
|
+
}
|
package/dist/clock.js
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Lamport logical clock for multi-writer operation ordering.
 * Each agent keeps its own counter: tick() before every local event,
 * merge(received) when observing a remote event so the local clock
 * always moves past anything it has seen. Ties across agents are broken
 * by agent ID (lexicographic) elsewhere for a deterministic total order.
 */
export class LamportClock {
    constructor(initial = 0) {
        this.counter = initial;
    }
    /** Advance the clock for a local event and return the new value. */
    tick() {
        this.counter += 1;
        return this.counter;
    }
    /** Absorb a remote clock value, then advance; returns the new value. */
    merge(received) {
        const base = received > this.counter ? received : this.counter;
        this.counter = base + 1;
        return this.counter;
    }
    /** Read the clock without advancing it. */
    get current() {
        return this.counter;
    }
}
|
package/dist/index.d.ts
CHANGED
|
@@ -1,2 +1,6 @@
|
|
|
1
1
|
export { Store } from "./store.js";
|
|
2
|
-
export
|
|
2
|
+
export { FsBackend } from "./backend.js";
|
|
3
|
+
export { LamportClock } from "./clock.js";
|
|
4
|
+
export { acquireLock, releaseLock } from "./lock.js";
|
|
5
|
+
export { validateOp, validateManifest, validateSnapshot, validateArchiveSegment, } from "./validate.js";
|
|
6
|
+
export type { Operation, Snapshot, Manifest, ManifestStats, ArchiveSegment, StoreOptions, StoreStats, StorageBackend, LockHandle, } from "./types.js";
|
package/dist/index.js
CHANGED
|
@@ -1 +1,5 @@
|
|
|
1
1
|
export { Store } from "./store.js";
|
|
2
|
+
export { FsBackend } from "./backend.js";
|
|
3
|
+
export { LamportClock } from "./clock.js";
|
|
4
|
+
export { acquireLock, releaseLock } from "./lock.js";
|
|
5
|
+
export { validateOp, validateManifest, validateSnapshot, validateArchiveSegment, } from "./validate.js";
|
package/dist/lock.d.ts
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { FileHandle } from "node:fs/promises";
|
|
2
|
+
/**
|
|
3
|
+
* Acquire an advisory write lock on a directory.
|
|
4
|
+
* Returns a FileHandle that must be passed to releaseLock() on close.
|
|
5
|
+
* Throws if another live process holds the lock.
|
|
6
|
+
* Automatically recovers stale locks from crashed processes.
|
|
7
|
+
*/
|
|
8
|
+
export declare function acquireLock(dir: string): Promise<FileHandle>;
|
|
9
|
+
/**
|
|
10
|
+
* Release the advisory write lock.
|
|
11
|
+
*/
|
|
12
|
+
export declare function releaseLock(dir: string, fh: FileHandle): Promise<void>;
|
package/dist/lock.js
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import { open, readFile, unlink } from "node:fs/promises";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
const LOCK_FILE = ".lock";
/** True when a process with the given PID currently exists (signal 0 probe). */
function isProcessAlive(pid) {
    try {
        process.kill(pid, 0); // signal 0: existence check only, sends nothing
        return true;
    }
    catch {
        return false;
    }
}
/**
 * Acquire an advisory write lock on a directory.
 * Returns a FileHandle that must be passed to releaseLock() on close.
 * Throws if another live process holds the lock.
 * Automatically recovers stale locks from crashed processes.
 */
export async function acquireLock(dir) {
    const lockPath = join(dir, LOCK_FILE);
    for (;;) {
        // Attempt exclusive create; success means we own the lock.
        let fh = null;
        try {
            fh = await open(lockPath, "wx");
        }
        catch (err) {
            if (err.code !== "EEXIST")
                throw err;
        }
        if (fh !== null) {
            await fh.writeFile(String(process.pid), "utf-8");
            return fh;
        }
        // Lock file already exists — inspect the recorded holder.
        let content;
        try {
            content = await readFile(lockPath, "utf-8");
        }
        catch {
            // File vanished between our open and read — try again.
            continue;
        }
        const pid = parseInt(content, 10);
        if (!Number.isNaN(pid) && isProcessAlive(pid)) {
            throw new Error(`Store is locked by process ${pid}. If this is stale, delete ${lockPath}`);
        }
        // Holder is dead (or the file was unparseable): treat as stale, clean up, retry.
        try {
            await unlink(lockPath);
        }
        catch {
            // Another process may have already cleaned it up
        }
    }
}
/**
 * Release the advisory write lock.
 */
export async function releaseLock(dir, fh) {
    await fh.close();
    try {
        await unlink(join(dir, LOCK_FILE));
    }
    catch {
        // Already cleaned up
    }
}
|
package/dist/store.d.ts
CHANGED
|
@@ -8,11 +8,26 @@ export declare class Store<T = Record<string, unknown>> {
|
|
|
8
8
|
private version;
|
|
9
9
|
private activeOpsPath;
|
|
10
10
|
private created;
|
|
11
|
-
private
|
|
11
|
+
private coreOpts;
|
|
12
12
|
private archivedRecordCount;
|
|
13
13
|
private batching;
|
|
14
14
|
private batchOps;
|
|
15
|
+
private _lock;
|
|
16
|
+
private lockHandle;
|
|
17
|
+
private backend;
|
|
18
|
+
private agentId?;
|
|
19
|
+
private clock;
|
|
20
|
+
private manifestVersion;
|
|
21
|
+
/**
|
|
22
|
+
* Serialize all state-mutating operations through a promise chain.
|
|
23
|
+
* Prevents interleaving of async mutations. Reads remain synchronous and lock-free.
|
|
24
|
+
*/
|
|
25
|
+
private serialize;
|
|
26
|
+
private isMultiWriter;
|
|
15
27
|
open(dir: string, options?: StoreOptions): Promise<void>;
|
|
28
|
+
private initFreshStore;
|
|
29
|
+
private loadExistingStore;
|
|
30
|
+
private loadMultiWriterOps;
|
|
16
31
|
close(): Promise<void>;
|
|
17
32
|
get(id: string): T | undefined;
|
|
18
33
|
set(id: string, value: T): Promise<void> | void;
|
|
@@ -28,10 +43,29 @@ export declare class Store<T = Record<string, unknown>> {
|
|
|
28
43
|
getOps(since?: string): Operation<T>[];
|
|
29
44
|
compact(): Promise<void>;
|
|
30
45
|
archive(predicate: (value: T, id: string) => boolean, segment?: string): Promise<number>;
|
|
31
|
-
loadArchive(segment: string): Promise<Map<string, T>>;
|
|
32
46
|
listArchiveSegments(): string[];
|
|
47
|
+
loadArchive(segment: string): Promise<Map<string, T>>;
|
|
33
48
|
stats(): StoreStats;
|
|
49
|
+
/**
|
|
50
|
+
* Reload state from the backend (multi-writer mode).
|
|
51
|
+
* Re-reads the manifest, snapshot, and all agent WAL files.
|
|
52
|
+
* Use this to pick up writes from other agents.
|
|
53
|
+
*/
|
|
54
|
+
refresh(): Promise<void>;
|
|
55
|
+
private makeOp;
|
|
56
|
+
private _set;
|
|
57
|
+
private _setSync;
|
|
58
|
+
private _delete;
|
|
59
|
+
private _deleteSync;
|
|
60
|
+
private _batch;
|
|
61
|
+
private _undo;
|
|
62
|
+
private _undoMultiWriter;
|
|
63
|
+
private _compact;
|
|
64
|
+
private _compactMultiWriter;
|
|
65
|
+
private _archive;
|
|
66
|
+
private _refresh;
|
|
34
67
|
private ensureOpen;
|
|
68
|
+
private ensureWritable;
|
|
35
69
|
private applyOp;
|
|
36
70
|
private reverseOp;
|
|
37
71
|
private persistOp;
|
package/dist/store.js
CHANGED
|
@@ -1,9 +1,6 @@
|
|
|
1
|
-
import {
|
|
2
|
-
import {
|
|
3
|
-
import {
|
|
4
|
-
import { loadSnapshot, writeSnapshot } from "./snapshot.js";
|
|
5
|
-
import { createDefaultManifest, readManifest, writeManifest, } from "./manifest.js";
|
|
6
|
-
import { loadArchiveSegment, writeArchiveSegment, } from "./archive.js";
|
|
1
|
+
import { createDefaultManifest } from "./manifest.js";
|
|
2
|
+
import { FsBackend } from "./backend.js";
|
|
3
|
+
import { LamportClock } from "./clock.js";
|
|
7
4
|
export class Store {
|
|
8
5
|
dir = "";
|
|
9
6
|
records = new Map();
|
|
@@ -13,78 +10,187 @@ export class Store {
|
|
|
13
10
|
version = 1;
|
|
14
11
|
activeOpsPath = "";
|
|
15
12
|
created = "";
|
|
16
|
-
|
|
13
|
+
coreOpts = {
|
|
17
14
|
checkpointThreshold: 100,
|
|
18
15
|
checkpointOnClose: true,
|
|
19
16
|
version: 1,
|
|
20
17
|
migrate: (r) => r,
|
|
18
|
+
readOnly: false,
|
|
21
19
|
};
|
|
22
20
|
archivedRecordCount = 0;
|
|
23
21
|
batching = false;
|
|
24
22
|
batchOps = [];
|
|
23
|
+
_lock = Promise.resolve();
|
|
24
|
+
lockHandle = null;
|
|
25
|
+
backend;
|
|
26
|
+
// Multi-writer state
|
|
27
|
+
agentId;
|
|
28
|
+
clock = null;
|
|
29
|
+
manifestVersion = null;
|
|
30
|
+
/**
|
|
31
|
+
* Serialize all state-mutating operations through a promise chain.
|
|
32
|
+
* Prevents interleaving of async mutations. Reads remain synchronous and lock-free.
|
|
33
|
+
*/
|
|
34
|
+
serialize(fn) {
|
|
35
|
+
const prev = this._lock;
|
|
36
|
+
let resolve;
|
|
37
|
+
this._lock = new Promise((r) => {
|
|
38
|
+
resolve = r;
|
|
39
|
+
});
|
|
40
|
+
return prev.then(fn).finally(() => resolve());
|
|
41
|
+
}
|
|
42
|
+
isMultiWriter() {
|
|
43
|
+
return this.agentId !== undefined;
|
|
44
|
+
}
|
|
25
45
|
async open(dir, options) {
|
|
26
46
|
this.dir = dir;
|
|
27
47
|
if (options) {
|
|
28
|
-
|
|
48
|
+
const { backend, agentId, ...rest } = options;
|
|
49
|
+
this.coreOpts = { ...this.coreOpts, ...rest };
|
|
50
|
+
if (backend)
|
|
51
|
+
this.backend = backend;
|
|
52
|
+
if (agentId)
|
|
53
|
+
this.agentId = agentId;
|
|
54
|
+
}
|
|
55
|
+
this.backend ??= new FsBackend();
|
|
56
|
+
await this.backend.initialize(dir, { readOnly: this.coreOpts.readOnly });
|
|
57
|
+
// Acquire write lock (single-writer only, not readOnly)
|
|
58
|
+
if (!this.coreOpts.readOnly && !this.isMultiWriter()) {
|
|
59
|
+
this.lockHandle = await this.backend.acquireLock();
|
|
29
60
|
}
|
|
30
|
-
|
|
31
|
-
await mkdir(join(dir, "ops"), { recursive: true });
|
|
32
|
-
await mkdir(join(dir, "archive"), { recursive: true });
|
|
33
|
-
const manifest = await readManifest(dir);
|
|
61
|
+
const manifest = await this.backend.readManifest();
|
|
34
62
|
if (!manifest) {
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
await writeFile(join(dir, opsPath), "", "utf-8");
|
|
40
|
-
const newManifest = createDefaultManifest(snapshotPath, opsPath);
|
|
41
|
-
await writeManifest(dir, newManifest);
|
|
42
|
-
this.version = this.options.version;
|
|
43
|
-
this.activeOpsPath = opsPath;
|
|
44
|
-
this.created = newManifest.stats.created;
|
|
45
|
-
this.archiveSegments = [];
|
|
63
|
+
if (this.coreOpts.readOnly) {
|
|
64
|
+
throw new Error("Cannot open in readOnly mode: no existing store found");
|
|
65
|
+
}
|
|
66
|
+
await this.initFreshStore();
|
|
46
67
|
}
|
|
47
68
|
else {
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
69
|
+
await this.loadExistingStore(manifest);
|
|
70
|
+
}
|
|
71
|
+
this.manifestVersion = await this.backend.getManifestVersion();
|
|
72
|
+
this.opened = true;
|
|
73
|
+
}
|
|
74
|
+
async initFreshStore() {
|
|
75
|
+
const snapshotPath = await this.backend.writeSnapshot(new Map(), this.coreOpts.version);
|
|
76
|
+
let opsPath;
|
|
77
|
+
if (this.isMultiWriter()) {
|
|
78
|
+
opsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
79
|
+
}
|
|
80
|
+
else {
|
|
81
|
+
opsPath = await this.backend.createOpsFile();
|
|
82
|
+
}
|
|
83
|
+
const newManifest = createDefaultManifest(snapshotPath, opsPath);
|
|
84
|
+
if (this.isMultiWriter()) {
|
|
85
|
+
newManifest.activeAgentOps = { [this.agentId]: opsPath };
|
|
86
|
+
}
|
|
87
|
+
await this.backend.writeManifest(newManifest);
|
|
88
|
+
this.version = this.coreOpts.version;
|
|
89
|
+
this.activeOpsPath = opsPath;
|
|
90
|
+
this.created = newManifest.stats.created;
|
|
91
|
+
this.archiveSegments = [];
|
|
92
|
+
if (this.isMultiWriter()) {
|
|
93
|
+
this.clock = new LamportClock(0);
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
async loadExistingStore(manifest) {
|
|
97
|
+
// Load snapshot
|
|
98
|
+
let snapshotData;
|
|
99
|
+
try {
|
|
100
|
+
snapshotData = await this.backend.loadSnapshot(manifest.currentSnapshot);
|
|
101
|
+
}
|
|
102
|
+
catch (err) {
|
|
103
|
+
const isNotFound = err instanceof Error &&
|
|
104
|
+
"code" in err &&
|
|
105
|
+
err.code === "ENOENT";
|
|
106
|
+
if (isNotFound) {
|
|
107
|
+
throw new Error(`Snapshot file not found: ${manifest.currentSnapshot}. The data directory may be corrupted.`, { cause: err });
|
|
59
108
|
}
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
this.version = this.options.version;
|
|
109
|
+
throw err;
|
|
110
|
+
}
|
|
111
|
+
const { records, version: storedVersion } = snapshotData;
|
|
112
|
+
this.records = records;
|
|
113
|
+
this.version = storedVersion;
|
|
114
|
+
this.created = manifest.stats.created;
|
|
115
|
+
this.archiveSegments = manifest.archiveSegments;
|
|
116
|
+
this.archivedRecordCount = manifest.stats.archivedRecords;
|
|
117
|
+
// Migrate if needed
|
|
118
|
+
if (storedVersion < this.coreOpts.version) {
|
|
119
|
+
for (const [id, record] of this.records) {
|
|
120
|
+
this.records.set(id, this.coreOpts.migrate(record, storedVersion));
|
|
73
121
|
}
|
|
74
|
-
|
|
75
|
-
|
|
122
|
+
this.version = this.coreOpts.version;
|
|
123
|
+
}
|
|
124
|
+
if (this.isMultiWriter()) {
|
|
125
|
+
await this.loadMultiWriterOps(manifest);
|
|
126
|
+
}
|
|
127
|
+
else {
|
|
128
|
+
// Single-writer: replay ops from active ops file
|
|
129
|
+
const ops = (await this.backend.readOps(manifest.activeOps));
|
|
76
130
|
for (const op of ops) {
|
|
77
131
|
this.applyOp(op);
|
|
78
132
|
}
|
|
79
133
|
this.ops = ops;
|
|
134
|
+
this.activeOpsPath = manifest.activeOps;
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
async loadMultiWriterOps(manifest) {
|
|
138
|
+
const allOps = [];
|
|
139
|
+
// Read all agent ops files
|
|
140
|
+
if (manifest.activeAgentOps) {
|
|
141
|
+
for (const opsPath of Object.values(manifest.activeAgentOps)) {
|
|
142
|
+
const ops = (await this.backend.readOps(opsPath));
|
|
143
|
+
allOps.push(...ops);
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
// Also read legacy single-writer ops for backward compat
|
|
147
|
+
if (manifest.activeOps && !manifest.activeAgentOps) {
|
|
148
|
+
const ops = (await this.backend.readOps(manifest.activeOps));
|
|
149
|
+
allOps.push(...ops);
|
|
150
|
+
}
|
|
151
|
+
// Merge-sort by (clock, agent) for deterministic total order
|
|
152
|
+
allOps.sort((a, b) => {
|
|
153
|
+
const clockDiff = (a.clock ?? 0) - (b.clock ?? 0);
|
|
154
|
+
if (clockDiff !== 0)
|
|
155
|
+
return clockDiff;
|
|
156
|
+
return (a.agent ?? "").localeCompare(b.agent ?? "");
|
|
157
|
+
});
|
|
158
|
+
for (const op of allOps) {
|
|
159
|
+
this.applyOp(op);
|
|
160
|
+
}
|
|
161
|
+
this.ops = allOps;
|
|
162
|
+
// Initialize Lamport clock from max seen value
|
|
163
|
+
const maxClock = allOps.reduce((max, op) => Math.max(max, op.clock ?? 0), 0);
|
|
164
|
+
this.clock = new LamportClock(maxClock);
|
|
165
|
+
// Find or create our agent's ops file
|
|
166
|
+
if (manifest.activeAgentOps?.[this.agentId]) {
|
|
167
|
+
this.activeOpsPath = manifest.activeAgentOps[this.agentId];
|
|
168
|
+
}
|
|
169
|
+
else {
|
|
170
|
+
// Register this agent in the manifest
|
|
171
|
+
this.activeOpsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
172
|
+
const updatedManifest = {
|
|
173
|
+
...manifest,
|
|
174
|
+
activeAgentOps: {
|
|
175
|
+
...(manifest.activeAgentOps ?? {}),
|
|
176
|
+
[this.agentId]: this.activeOpsPath,
|
|
177
|
+
},
|
|
178
|
+
};
|
|
179
|
+
await this.backend.writeManifest(updatedManifest);
|
|
80
180
|
}
|
|
81
|
-
this.opened = true;
|
|
82
181
|
}
|
|
83
182
|
async close() {
|
|
84
183
|
this.ensureOpen();
|
|
85
|
-
if (this.
|
|
86
|
-
|
|
184
|
+
if (!this.coreOpts.readOnly &&
|
|
185
|
+
this.coreOpts.checkpointOnClose &&
|
|
186
|
+
this.ops.length > 0) {
|
|
187
|
+
await this.serialize(() => this._compact());
|
|
188
|
+
}
|
|
189
|
+
if (this.lockHandle) {
|
|
190
|
+
await this.backend.releaseLock(this.lockHandle);
|
|
191
|
+
this.lockHandle = null;
|
|
87
192
|
}
|
|
193
|
+
await this.backend.shutdown();
|
|
88
194
|
this.opened = false;
|
|
89
195
|
}
|
|
90
196
|
get(id) {
|
|
@@ -93,39 +199,21 @@ export class Store {
|
|
|
93
199
|
}
|
|
94
200
|
set(id, value) {
|
|
95
201
|
this.ensureOpen();
|
|
96
|
-
|
|
97
|
-
const op = {
|
|
98
|
-
ts: new Date().toISOString(),
|
|
99
|
-
op: "set",
|
|
100
|
-
id,
|
|
101
|
-
data: value,
|
|
102
|
-
prev,
|
|
103
|
-
};
|
|
104
|
-
this.records.set(id, value);
|
|
202
|
+
this.ensureWritable();
|
|
105
203
|
if (this.batching) {
|
|
106
|
-
this.
|
|
204
|
+
this._setSync(id, value);
|
|
107
205
|
return;
|
|
108
206
|
}
|
|
109
|
-
return this.
|
|
207
|
+
return this.serialize(() => this._set(id, value));
|
|
110
208
|
}
|
|
111
209
|
delete(id) {
|
|
112
210
|
this.ensureOpen();
|
|
113
|
-
|
|
114
|
-
if (prev === undefined) {
|
|
115
|
-
throw new Error(`Record '${id}' not found`);
|
|
116
|
-
}
|
|
117
|
-
const op = {
|
|
118
|
-
ts: new Date().toISOString(),
|
|
119
|
-
op: "delete",
|
|
120
|
-
id,
|
|
121
|
-
prev,
|
|
122
|
-
};
|
|
123
|
-
this.records.delete(id);
|
|
211
|
+
this.ensureWritable();
|
|
124
212
|
if (this.batching) {
|
|
125
|
-
this.
|
|
213
|
+
this._deleteSync(id);
|
|
126
214
|
return;
|
|
127
215
|
}
|
|
128
|
-
return this.
|
|
216
|
+
return this.serialize(() => this._delete(id));
|
|
129
217
|
}
|
|
130
218
|
has(id) {
|
|
131
219
|
this.ensureOpen();
|
|
@@ -161,21 +249,125 @@ export class Store {
|
|
|
161
249
|
}
|
|
162
250
|
async batch(fn) {
|
|
163
251
|
this.ensureOpen();
|
|
252
|
+
this.ensureWritable();
|
|
253
|
+
return this.serialize(() => this._batch(fn));
|
|
254
|
+
}
|
|
255
|
+
async undo() {
|
|
256
|
+
this.ensureOpen();
|
|
257
|
+
this.ensureWritable();
|
|
258
|
+
return this.serialize(() => this._undo());
|
|
259
|
+
}
|
|
260
|
+
getHistory(id) {
|
|
261
|
+
this.ensureOpen();
|
|
262
|
+
return this.ops.filter((op) => op.id === id);
|
|
263
|
+
}
|
|
264
|
+
getOps(since) {
|
|
265
|
+
this.ensureOpen();
|
|
266
|
+
if (!since)
|
|
267
|
+
return [...this.ops];
|
|
268
|
+
return this.ops.filter((op) => op.ts > since);
|
|
269
|
+
}
|
|
270
|
+
async compact() {
|
|
271
|
+
this.ensureOpen();
|
|
272
|
+
this.ensureWritable();
|
|
273
|
+
return this.serialize(() => this._compact());
|
|
274
|
+
}
|
|
275
|
+
async archive(predicate, segment) {
|
|
276
|
+
this.ensureOpen();
|
|
277
|
+
this.ensureWritable();
|
|
278
|
+
return this.serialize(() => this._archive(predicate, segment));
|
|
279
|
+
}
|
|
280
|
+
listArchiveSegments() {
|
|
281
|
+
this.ensureOpen();
|
|
282
|
+
return [...this.archiveSegments];
|
|
283
|
+
}
|
|
284
|
+
async loadArchive(segment) {
|
|
285
|
+
this.ensureOpen();
|
|
286
|
+
const segmentPath = this.archiveSegments.find((s) => s === `archive/archive-${segment}.json`) || this.archiveSegments.find((s) => s.includes(segment));
|
|
287
|
+
if (!segmentPath)
|
|
288
|
+
throw new Error(`Archive segment '${segment}' not found`);
|
|
289
|
+
return this.backend.loadArchiveSegment(segmentPath);
|
|
290
|
+
}
|
|
291
|
+
stats() {
|
|
292
|
+
this.ensureOpen();
|
|
293
|
+
return {
|
|
294
|
+
activeRecords: this.records.size,
|
|
295
|
+
opsCount: this.ops.length,
|
|
296
|
+
archiveSegments: this.archiveSegments.length,
|
|
297
|
+
};
|
|
298
|
+
}
|
|
299
|
+
/**
|
|
300
|
+
* Reload state from the backend (multi-writer mode).
|
|
301
|
+
* Re-reads the manifest, snapshot, and all agent WAL files.
|
|
302
|
+
* Use this to pick up writes from other agents.
|
|
303
|
+
*/
|
|
304
|
+
async refresh() {
|
|
305
|
+
this.ensureOpen();
|
|
306
|
+
if (!this.isMultiWriter()) {
|
|
307
|
+
throw new Error("refresh() is only available in multi-writer mode");
|
|
308
|
+
}
|
|
309
|
+
return this.serialize(() => this._refresh());
|
|
310
|
+
}
|
|
311
|
+
// --- Private mutation implementations ---
|
|
312
|
+
makeOp(type, id, data, prev) {
|
|
313
|
+
const op = {
|
|
314
|
+
ts: new Date().toISOString(),
|
|
315
|
+
op: type,
|
|
316
|
+
id,
|
|
317
|
+
prev,
|
|
318
|
+
};
|
|
319
|
+
if (type === "set")
|
|
320
|
+
op.data = data;
|
|
321
|
+
if (this.agentId) {
|
|
322
|
+
op.agent = this.agentId;
|
|
323
|
+
op.clock = this.clock.tick();
|
|
324
|
+
}
|
|
325
|
+
return op;
|
|
326
|
+
}
|
|
327
|
+
async _set(id, value) {
|
|
328
|
+
const prev = this.records.get(id) ?? null;
|
|
329
|
+
const op = this.makeOp("set", id, value, prev);
|
|
330
|
+
this.records.set(id, value);
|
|
331
|
+
await this.persistOp(op);
|
|
332
|
+
}
|
|
333
|
+
_setSync(id, value) {
|
|
334
|
+
const prev = this.records.get(id) ?? null;
|
|
335
|
+
const op = this.makeOp("set", id, value, prev);
|
|
336
|
+
this.records.set(id, value);
|
|
337
|
+
this.batchOps.push(op);
|
|
338
|
+
}
|
|
339
|
+
async _delete(id) {
|
|
340
|
+
const prev = this.records.get(id);
|
|
341
|
+
if (prev === undefined) {
|
|
342
|
+
throw new Error(`Record '${id}' not found`);
|
|
343
|
+
}
|
|
344
|
+
const op = this.makeOp("delete", id, undefined, prev);
|
|
345
|
+
this.records.delete(id);
|
|
346
|
+
await this.persistOp(op);
|
|
347
|
+
}
|
|
348
|
+
_deleteSync(id) {
|
|
349
|
+
const prev = this.records.get(id);
|
|
350
|
+
if (prev === undefined) {
|
|
351
|
+
throw new Error(`Record '${id}' not found`);
|
|
352
|
+
}
|
|
353
|
+
const op = this.makeOp("delete", id, undefined, prev);
|
|
354
|
+
this.records.delete(id);
|
|
355
|
+
this.batchOps.push(op);
|
|
356
|
+
}
|
|
357
|
+
async _batch(fn) {
|
|
164
358
|
this.batching = true;
|
|
165
359
|
this.batchOps = [];
|
|
166
360
|
try {
|
|
167
361
|
fn();
|
|
168
|
-
// Empty batches are no-ops — no I/O if fn() didn't call set/delete
|
|
169
362
|
if (this.batchOps.length > 0) {
|
|
170
|
-
await appendOps(
|
|
363
|
+
await this.backend.appendOps(this.activeOpsPath, this.batchOps);
|
|
171
364
|
this.ops.push(...this.batchOps);
|
|
172
|
-
if (this.ops.length >= this.
|
|
173
|
-
await this.
|
|
365
|
+
if (this.ops.length >= this.coreOpts.checkpointThreshold) {
|
|
366
|
+
await this._compact();
|
|
174
367
|
}
|
|
175
368
|
}
|
|
176
369
|
}
|
|
177
370
|
catch (err) {
|
|
178
|
-
// Rollback in-memory changes on failure
|
|
179
371
|
for (const op of this.batchOps.reverse()) {
|
|
180
372
|
try {
|
|
181
373
|
this.reverseOp(op);
|
|
@@ -191,32 +383,38 @@ export class Store {
|
|
|
191
383
|
this.batchOps = [];
|
|
192
384
|
}
|
|
193
385
|
}
|
|
194
|
-
async
|
|
195
|
-
this.
|
|
386
|
+
async _undo() {
|
|
387
|
+
if (this.isMultiWriter()) {
|
|
388
|
+
return this._undoMultiWriter();
|
|
389
|
+
}
|
|
390
|
+
// Single-writer: O(1) undo
|
|
196
391
|
if (this.ops.length === 0)
|
|
197
392
|
return false;
|
|
198
393
|
const lastOp = this.ops[this.ops.length - 1];
|
|
199
394
|
this.reverseOp(lastOp);
|
|
200
395
|
this.ops.pop();
|
|
201
|
-
await truncateLastOp(
|
|
396
|
+
await this.backend.truncateLastOp(this.activeOpsPath);
|
|
202
397
|
return true;
|
|
203
398
|
}
|
|
204
|
-
|
|
205
|
-
this
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
399
|
+
async _undoMultiWriter() {
|
|
400
|
+
// Find last op from this agent
|
|
401
|
+
const myOps = this.ops.filter((op) => op.agent === this.agentId);
|
|
402
|
+
if (myOps.length === 0)
|
|
403
|
+
return false;
|
|
404
|
+
// Truncate our WAL file
|
|
405
|
+
await this.backend.truncateLastOp(this.activeOpsPath);
|
|
406
|
+
// Re-derive state from scratch (correct but O(n))
|
|
407
|
+
await this._refresh();
|
|
408
|
+
return true;
|
|
213
409
|
}
|
|
214
|
-
async
|
|
215
|
-
this.
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
410
|
+
async _compact() {
|
|
411
|
+
if (this.isMultiWriter()) {
|
|
412
|
+
await this._compactMultiWriter();
|
|
413
|
+
return;
|
|
414
|
+
}
|
|
415
|
+
// Single-writer compaction
|
|
416
|
+
const snapshotPath = await this.backend.writeSnapshot(this.records, this.version);
|
|
417
|
+
const opsPath = await this.backend.createOpsFile();
|
|
220
418
|
const updatedManifest = {
|
|
221
419
|
version: this.version,
|
|
222
420
|
currentSnapshot: snapshotPath,
|
|
@@ -230,12 +428,46 @@ export class Store {
|
|
|
230
428
|
lastCheckpoint: new Date().toISOString(),
|
|
231
429
|
},
|
|
232
430
|
};
|
|
233
|
-
await writeManifest(
|
|
431
|
+
await this.backend.writeManifest(updatedManifest);
|
|
234
432
|
this.activeOpsPath = opsPath;
|
|
235
433
|
this.ops = [];
|
|
236
434
|
}
|
|
237
|
-
async
|
|
238
|
-
|
|
435
|
+
async _compactMultiWriter() {
|
|
436
|
+
let compactLock;
|
|
437
|
+
try {
|
|
438
|
+
compactLock = await this.backend.acquireCompactionLock();
|
|
439
|
+
}
|
|
440
|
+
catch {
|
|
441
|
+
// Another agent is compacting — skip
|
|
442
|
+
return;
|
|
443
|
+
}
|
|
444
|
+
try {
|
|
445
|
+
const snapshotPath = await this.backend.writeSnapshot(this.records, this.version);
|
|
446
|
+
const opsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
447
|
+
const updatedManifest = {
|
|
448
|
+
version: this.version,
|
|
449
|
+
currentSnapshot: snapshotPath,
|
|
450
|
+
activeOps: opsPath,
|
|
451
|
+
activeAgentOps: { [this.agentId]: opsPath },
|
|
452
|
+
archiveSegments: this.archiveSegments,
|
|
453
|
+
stats: {
|
|
454
|
+
activeRecords: this.records.size,
|
|
455
|
+
archivedRecords: this.archivedRecordCount,
|
|
456
|
+
opsCount: 0,
|
|
457
|
+
created: this.created,
|
|
458
|
+
lastCheckpoint: new Date().toISOString(),
|
|
459
|
+
},
|
|
460
|
+
};
|
|
461
|
+
await this.backend.writeManifest(updatedManifest);
|
|
462
|
+
this.activeOpsPath = opsPath;
|
|
463
|
+
this.ops = [];
|
|
464
|
+
this.manifestVersion = await this.backend.getManifestVersion();
|
|
465
|
+
}
|
|
466
|
+
finally {
|
|
467
|
+
await this.backend.releaseCompactionLock(compactLock);
|
|
468
|
+
}
|
|
469
|
+
}
|
|
470
|
+
async _archive(predicate, segment) {
|
|
239
471
|
const toArchive = new Map();
|
|
240
472
|
for (const [id, value] of this.records) {
|
|
241
473
|
if (predicate(value, id))
|
|
@@ -244,7 +476,7 @@ export class Store {
|
|
|
244
476
|
if (toArchive.size === 0)
|
|
245
477
|
return 0;
|
|
246
478
|
const period = segment ?? this.defaultPeriod();
|
|
247
|
-
const segmentPath = await writeArchiveSegment(
|
|
479
|
+
const segmentPath = await this.backend.writeArchiveSegment(period, toArchive);
|
|
248
480
|
if (!this.archiveSegments.includes(segmentPath)) {
|
|
249
481
|
this.archiveSegments.push(segmentPath);
|
|
250
482
|
}
|
|
@@ -252,32 +484,72 @@ export class Store {
|
|
|
252
484
|
this.records.delete(id);
|
|
253
485
|
}
|
|
254
486
|
this.archivedRecordCount += toArchive.size;
|
|
255
|
-
await this.
|
|
487
|
+
await this._compact();
|
|
256
488
|
return toArchive.size;
|
|
257
489
|
}
|
|
258
|
-
async
|
|
259
|
-
this.
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
this.
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
}
|
|
490
|
+
async _refresh() {
|
|
491
|
+
const manifest = await this.backend.readManifest();
|
|
492
|
+
if (!manifest)
|
|
493
|
+
throw new Error("Manifest not found during refresh");
|
|
494
|
+
const { records, version } = await this.backend.loadSnapshot(manifest.currentSnapshot);
|
|
495
|
+
this.records = records;
|
|
496
|
+
this.version = version;
|
|
497
|
+
this.archiveSegments = manifest.archiveSegments;
|
|
498
|
+
this.archivedRecordCount = manifest.stats.archivedRecords;
|
|
499
|
+
this.created = manifest.stats.created;
|
|
500
|
+
// Read all agent ops
|
|
501
|
+
const allOps = [];
|
|
502
|
+
if (manifest.activeAgentOps) {
|
|
503
|
+
for (const opsPath of Object.values(manifest.activeAgentOps)) {
|
|
504
|
+
const ops = await this.backend.readOps(opsPath);
|
|
505
|
+
allOps.push(...ops);
|
|
506
|
+
}
|
|
507
|
+
}
|
|
508
|
+
// Legacy single-writer ops
|
|
509
|
+
if (manifest.activeOps && !manifest.activeAgentOps) {
|
|
510
|
+
const ops = await this.backend.readOps(manifest.activeOps);
|
|
511
|
+
allOps.push(...ops);
|
|
512
|
+
}
|
|
513
|
+
// Merge-sort
|
|
514
|
+
allOps.sort((a, b) => {
|
|
515
|
+
const clockDiff = (a.clock ?? 0) - (b.clock ?? 0);
|
|
516
|
+
if (clockDiff !== 0)
|
|
517
|
+
return clockDiff;
|
|
518
|
+
return (a.agent ?? "").localeCompare(b.agent ?? "");
|
|
519
|
+
});
|
|
520
|
+
for (const op of allOps)
|
|
521
|
+
this.applyOp(op);
|
|
522
|
+
this.ops = allOps;
|
|
523
|
+
// Update clock
|
|
524
|
+
const maxClock = allOps.reduce((max, op) => Math.max(max, op.clock ?? 0), 0);
|
|
525
|
+
this.clock = new LamportClock(maxClock);
|
|
526
|
+
// Update our ops path if manifest changed
|
|
527
|
+
if (manifest.activeAgentOps?.[this.agentId]) {
|
|
528
|
+
this.activeOpsPath = manifest.activeAgentOps[this.agentId];
|
|
529
|
+
}
|
|
530
|
+
else {
|
|
531
|
+
// Our ops file is not in the manifest (compaction happened)
|
|
532
|
+
this.activeOpsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
533
|
+
const updatedManifest = {
|
|
534
|
+
...manifest,
|
|
535
|
+
activeAgentOps: {
|
|
536
|
+
...(manifest.activeAgentOps ?? {}),
|
|
537
|
+
[this.agentId]: this.activeOpsPath,
|
|
538
|
+
},
|
|
539
|
+
};
|
|
540
|
+
await this.backend.writeManifest(updatedManifest);
|
|
541
|
+
}
|
|
542
|
+
this.manifestVersion = await this.backend.getManifestVersion();
|
|
276
543
|
}
|
|
544
|
+
// --- Helpers ---
|
|
277
545
|
ensureOpen() {
|
|
278
546
|
if (!this.opened)
|
|
279
547
|
throw new Error("Store is not open. Call open() first.");
|
|
280
548
|
}
|
|
549
|
+
ensureWritable() {
|
|
550
|
+
if (this.coreOpts.readOnly)
|
|
551
|
+
throw new Error("Store is read-only. Cannot perform mutations.");
|
|
552
|
+
}
|
|
281
553
|
applyOp(op) {
|
|
282
554
|
if (op.op === "set" && op.data !== undefined) {
|
|
283
555
|
this.records.set(op.id, op.data);
|
|
@@ -288,23 +560,20 @@ export class Store {
|
|
|
288
560
|
}
|
|
289
561
|
reverseOp(op) {
|
|
290
562
|
if (op.prev === null) {
|
|
291
|
-
// Was a create — reverse by deleting
|
|
292
563
|
this.records.delete(op.id);
|
|
293
564
|
}
|
|
294
565
|
else if (op.op === "delete") {
|
|
295
|
-
// Was a delete — reverse by restoring
|
|
296
566
|
this.records.set(op.id, op.prev);
|
|
297
567
|
}
|
|
298
568
|
else {
|
|
299
|
-
// Was an update — reverse by restoring prev
|
|
300
569
|
this.records.set(op.id, op.prev);
|
|
301
570
|
}
|
|
302
571
|
}
|
|
303
572
|
async persistOp(op) {
|
|
304
|
-
await
|
|
573
|
+
await this.backend.appendOps(this.activeOpsPath, [op]);
|
|
305
574
|
this.ops.push(op);
|
|
306
|
-
if (this.ops.length >= this.
|
|
307
|
-
await this.
|
|
575
|
+
if (this.ops.length >= this.coreOpts.checkpointThreshold) {
|
|
576
|
+
await this._compact();
|
|
308
577
|
}
|
|
309
578
|
}
|
|
310
579
|
defaultPeriod() {
|
package/dist/types.d.ts
CHANGED
|
@@ -9,6 +9,12 @@ export interface Operation<T = Record<string, unknown>> {
|
|
|
9
9
|
data?: T;
|
|
10
10
|
/** Previous value (null for creates, full record for updates/deletes) */
|
|
11
11
|
prev: T | null;
|
|
12
|
+
/** Encoding format for prev field. Omitted or "full" = full record. "delta" = JSON Patch (future). */
|
|
13
|
+
encoding?: "full" | "delta";
|
|
14
|
+
/** Agent ID (present in multi-writer mode) */
|
|
15
|
+
agent?: string;
|
|
16
|
+
/** Lamport clock value (present in multi-writer mode) */
|
|
17
|
+
clock?: number;
|
|
12
18
|
}
|
|
13
19
|
export interface Snapshot<T = Record<string, unknown>> {
|
|
14
20
|
version: number;
|
|
@@ -19,6 +25,8 @@ export interface Manifest {
|
|
|
19
25
|
version: number;
|
|
20
26
|
currentSnapshot: string;
|
|
21
27
|
activeOps: string;
|
|
28
|
+
/** Per-agent ops file paths (multi-writer mode). Keys are agent IDs. */
|
|
29
|
+
activeAgentOps?: Record<string, string>;
|
|
22
30
|
archiveSegments: string[];
|
|
23
31
|
stats: ManifestStats;
|
|
24
32
|
}
|
|
@@ -44,9 +52,48 @@ export interface StoreOptions {
|
|
|
44
52
|
version?: number;
|
|
45
53
|
/** Migration function: called if stored version < current version */
|
|
46
54
|
migrate?: (record: unknown, fromVersion: number) => unknown;
|
|
55
|
+
/** Open in read-only mode: skips directory lock, rejects all mutations. */
|
|
56
|
+
readOnly?: boolean;
|
|
57
|
+
/** Storage backend implementation (default: FsBackend). */
|
|
58
|
+
backend?: StorageBackend;
|
|
59
|
+
/** Agent ID for multi-writer mode. Enables per-agent WAL streams and LWW conflict resolution. */
|
|
60
|
+
agentId?: string;
|
|
47
61
|
}
|
|
48
62
|
export interface StoreStats {
|
|
49
63
|
activeRecords: number;
|
|
50
64
|
opsCount: number;
|
|
51
65
|
archiveSegments: number;
|
|
52
66
|
}
|
|
67
|
+
/** Opaque lock handle returned by StorageBackend locking methods. */
|
|
68
|
+
export interface LockHandle {
|
|
69
|
+
}
|
|
70
|
+
/** Pluggable storage backend for opslog. */
|
|
71
|
+
export interface StorageBackend {
|
|
72
|
+
/** Initialize the backend (create directories, etc.). Called once during store.open(). */
|
|
73
|
+
initialize(dir: string, opts: {
|
|
74
|
+
readOnly: boolean;
|
|
75
|
+
}): Promise<void>;
|
|
76
|
+
/** Shut down the backend. Called during store.close(). */
|
|
77
|
+
shutdown(): Promise<void>;
|
|
78
|
+
readManifest(): Promise<Manifest | null>;
|
|
79
|
+
writeManifest(manifest: Manifest): Promise<void>;
|
|
80
|
+
writeSnapshot(records: Map<string, unknown>, version: number): Promise<string>;
|
|
81
|
+
loadSnapshot(relativePath: string): Promise<{
|
|
82
|
+
records: Map<string, unknown>;
|
|
83
|
+
version: number;
|
|
84
|
+
}>;
|
|
85
|
+
appendOps(relativePath: string, ops: Operation[]): Promise<void>;
|
|
86
|
+
readOps(relativePath: string): Promise<Operation[]>;
|
|
87
|
+
truncateLastOp(relativePath: string): Promise<boolean>;
|
|
88
|
+
createOpsFile(): Promise<string>;
|
|
89
|
+
writeArchiveSegment(period: string, records: Map<string, unknown>): Promise<string>;
|
|
90
|
+
loadArchiveSegment(relativePath: string): Promise<Map<string, unknown>>;
|
|
91
|
+
listArchiveSegments(): Promise<string[]>;
|
|
92
|
+
acquireLock(): Promise<LockHandle>;
|
|
93
|
+
releaseLock(handle: LockHandle): Promise<void>;
|
|
94
|
+
createAgentOpsFile(agentId: string): Promise<string>;
|
|
95
|
+
listOpsFiles(): Promise<string[]>;
|
|
96
|
+
acquireCompactionLock(): Promise<LockHandle>;
|
|
97
|
+
releaseCompactionLock(handle: LockHandle): Promise<void>;
|
|
98
|
+
getManifestVersion(): Promise<string | null>;
|
|
99
|
+
}
|
package/dist/validate.js
CHANGED
|
@@ -21,6 +21,17 @@ export function validateOp(raw) {
|
|
|
21
21
|
throw new Error("Invalid operation: delete op must have non-null prev");
|
|
22
22
|
if (obj.op === "delete" && "data" in obj)
|
|
23
23
|
throw new Error("Invalid operation: delete op must not have data field");
|
|
24
|
+
if ("encoding" in obj && obj.encoding !== "full" && obj.encoding !== "delta") {
|
|
25
|
+
throw new Error(`Invalid operation: encoding must be "full" or "delta", got "${obj.encoding}"`);
|
|
26
|
+
}
|
|
27
|
+
if ("agent" in obj && (typeof obj.agent !== "string" || obj.agent.length === 0)) {
|
|
28
|
+
throw new Error("Invalid operation: agent must be a non-empty string");
|
|
29
|
+
}
|
|
30
|
+
if ("clock" in obj) {
|
|
31
|
+
if (typeof obj.clock !== "number" || !Number.isFinite(obj.clock) || !Number.isInteger(obj.clock) || obj.clock < 0) {
|
|
32
|
+
throw new Error("Invalid operation: clock must be a non-negative integer");
|
|
33
|
+
}
|
|
34
|
+
}
|
|
24
35
|
return raw;
|
|
25
36
|
}
|
|
26
37
|
export function validateManifest(raw) {
|
|
@@ -55,6 +66,16 @@ export function validateManifest(raw) {
|
|
|
55
66
|
throw new Error("Invalid manifest: stats.created must be a non-empty string");
|
|
56
67
|
if (typeof stats.lastCheckpoint !== "string" || stats.lastCheckpoint.length === 0)
|
|
57
68
|
throw new Error("Invalid manifest: stats.lastCheckpoint must be a non-empty string");
|
|
69
|
+
if ("activeAgentOps" in obj && obj.activeAgentOps !== undefined) {
|
|
70
|
+
if (typeof obj.activeAgentOps !== "object" || obj.activeAgentOps === null || Array.isArray(obj.activeAgentOps)) {
|
|
71
|
+
throw new Error("Invalid manifest: activeAgentOps must be an object");
|
|
72
|
+
}
|
|
73
|
+
for (const [, val] of Object.entries(obj.activeAgentOps)) {
|
|
74
|
+
if (typeof val !== "string" || val.length === 0) {
|
|
75
|
+
throw new Error("Invalid manifest: activeAgentOps values must be non-empty strings");
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
}
|
|
58
79
|
return raw;
|
|
59
80
|
}
|
|
60
81
|
export function validateSnapshot(raw) {
|
package/dist/wal.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { appendFile, readFile,
|
|
1
|
+
import { appendFile, readFile, open } from "node:fs/promises";
|
|
2
2
|
import { validateOp } from "./validate.js";
|
|
3
3
|
export async function appendOp(path, op) {
|
|
4
4
|
await appendFile(path, JSON.stringify(op) + "\n", "utf-8");
|
|
@@ -32,18 +32,45 @@ export async function readOps(path) {
|
|
|
32
32
|
return ops;
|
|
33
33
|
}
|
|
34
34
|
export async function truncateLastOp(path) {
|
|
35
|
-
let
|
|
35
|
+
let fh;
|
|
36
36
|
try {
|
|
37
|
-
|
|
37
|
+
fh = await open(path, "r+");
|
|
38
38
|
}
|
|
39
39
|
catch {
|
|
40
40
|
return false;
|
|
41
41
|
}
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
42
|
+
try {
|
|
43
|
+
const { size } = await fh.stat();
|
|
44
|
+
if (size === 0)
|
|
45
|
+
return false;
|
|
46
|
+
// Read the tail of the file to find the second-to-last newline.
|
|
47
|
+
// 4KB handles operations up to ~4KB. For larger ops, read in chunks.
|
|
48
|
+
let readSize = Math.min(4096, size);
|
|
49
|
+
let readPos = size - readSize;
|
|
50
|
+
let lastNl = -1;
|
|
51
|
+
while (true) {
|
|
52
|
+
const buf = Buffer.alloc(readSize);
|
|
53
|
+
await fh.read(buf, 0, readSize, readPos);
|
|
54
|
+
const text = buf.toString("utf-8", 0, readSize);
|
|
55
|
+
// Find the second-to-last newline (skip trailing newline)
|
|
56
|
+
lastNl = text.lastIndexOf("\n", text.length - 2);
|
|
57
|
+
if (lastNl !== -1) {
|
|
58
|
+
await fh.truncate(readPos + lastNl + 1);
|
|
59
|
+
return true;
|
|
60
|
+
}
|
|
61
|
+
// No newline found in this chunk — need to read further back
|
|
62
|
+
if (readPos === 0) {
|
|
63
|
+
// Only one line in the entire file — truncate to empty
|
|
64
|
+
await fh.truncate(0);
|
|
65
|
+
return true;
|
|
66
|
+
}
|
|
67
|
+
// Read the next chunk further back
|
|
68
|
+
const nextSize = Math.min(4096, readPos);
|
|
69
|
+
readPos -= nextSize;
|
|
70
|
+
readSize = nextSize;
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
finally {
|
|
74
|
+
await fh.close();
|
|
75
|
+
}
|
|
49
76
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@backloghq/opslog",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.3.0",
|
|
4
4
|
"description": "Embedded event-sourced document store. Append-only operation log with immutable snapshots, zero native dependencies.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"types": "dist/index.d.ts",
|