@backloghq/opslog 0.2.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +77 -4
- package/dist/backend.d.ts +30 -0
- package/dist/backend.js +134 -0
- package/dist/clock.d.ts +16 -0
- package/dist/clock.js +25 -0
- package/dist/delta.d.ts +30 -0
- package/dist/delta.js +75 -0
- package/dist/index.d.ts +6 -1
- package/dist/index.js +4 -0
- package/dist/store.d.ts +39 -5
- package/dist/store.js +362 -102
- package/dist/types.d.ts +43 -0
- package/dist/validate.js +18 -0
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -54,13 +54,14 @@ State survives restarts — reopen the same directory and everything is there.
|
|
|
54
54
|
|
|
55
55
|
```
|
|
56
56
|
data/
|
|
57
|
-
manifest.json
|
|
57
|
+
manifest.json # Points to current snapshot + ops file(s)
|
|
58
58
|
snapshots/
|
|
59
|
-
snap-<timestamp>.json
|
|
59
|
+
snap-<timestamp>.json # Immutable full-state capture
|
|
60
60
|
ops/
|
|
61
|
-
ops-<timestamp>.jsonl
|
|
61
|
+
ops-<timestamp>.jsonl # Append-only operation log (single-writer)
|
|
62
|
+
agent-<id>-<timestamp>.jsonl # Per-agent operation log (multi-writer)
|
|
62
63
|
archive/
|
|
63
|
-
archive-<period>.json
|
|
64
|
+
archive-<period>.json # Old records, lazy-loaded
|
|
64
65
|
```
|
|
65
66
|
|
|
66
67
|
**Writes** append an operation (one JSON line) to the ops file. **Reads** come from an in-memory map built from the latest snapshot + ops replay. **Checkpoints** materialize current state as a new immutable snapshot.
|
|
@@ -121,6 +122,7 @@ await store.archive(predicate) // Move matching records to archive
|
|
|
121
122
|
await store.loadArchive(segment) // Lazy-load archived records
|
|
122
123
|
store.listArchiveSegments() // List available archive files
|
|
123
124
|
store.stats() // { activeRecords, opsCount, archiveSegments }
|
|
125
|
+
await store.refresh() // Reload from all agent WALs (multi-writer only)
|
|
124
126
|
```
|
|
125
127
|
|
|
126
128
|
## Options
|
|
@@ -132,9 +134,80 @@ await store.open(dir, {
|
|
|
132
134
|
version: 1, // Schema version
|
|
133
135
|
migrate: (record, fromVersion) => record, // Migration function
|
|
134
136
|
readOnly: false, // Open in read-only mode (default: false)
|
|
137
|
+
agentId: "agent-A", // Enable multi-writer mode (optional)
|
|
138
|
+
backend: new FsBackend(), // Custom storage backend (optional, default: FsBackend)
|
|
135
139
|
});
|
|
136
140
|
```
|
|
137
141
|
|
|
142
|
+
## Multi-Writer Mode
|
|
143
|
+
|
|
144
|
+
Multiple agents can write to the same store concurrently. Each agent gets its own WAL file — no write contention.
|
|
145
|
+
|
|
146
|
+
```typescript
|
|
147
|
+
// Agent A (process 1 / machine 1)
|
|
148
|
+
const storeA = new Store<Task>();
|
|
149
|
+
await storeA.open("./data", { agentId: "agent-A" });
|
|
150
|
+
await storeA.set("task-1", { title: "Build API", status: "active" });
|
|
151
|
+
await storeA.close();
|
|
152
|
+
|
|
153
|
+
// Agent B (process 2 / machine 2)
|
|
154
|
+
const storeB = new Store<Task>();
|
|
155
|
+
await storeB.open("./data", { agentId: "agent-B" });
|
|
156
|
+
// B sees A's writes on open
|
|
157
|
+
storeB.get("task-1"); // { title: "Build API", status: "active" }
|
|
158
|
+
await storeB.set("task-2", { title: "Write tests", status: "active" });
|
|
159
|
+
await storeB.close();
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
### How it works
|
|
163
|
+
|
|
164
|
+
- Each agent writes to `ops/agent-{id}-{timestamp}.jsonl` — separate files, no locking needed for writes
|
|
165
|
+
- Operations carry a [Lamport clock](https://en.wikipedia.org/wiki/Lamport_timestamp) for ordering
|
|
166
|
+
- On `open()`, all agent WAL files are merge-sorted by `(clock, agentId)` for a deterministic total order
|
|
167
|
+
- Conflicts (two agents write the same key) are resolved with **last-writer-wins** by clock value
|
|
168
|
+
- `undo()` only undoes the calling agent's last operation
|
|
169
|
+
- `compact()` acquires a compaction lock, snapshots the merged state, and resets all WAL files
|
|
170
|
+
- `refresh()` re-reads all agent WALs to pick up other agents' writes
|
|
171
|
+
|
|
172
|
+
### Conflict resolution
|
|
173
|
+
|
|
174
|
+
When two agents modify the same key, the operation with the higher Lamport clock wins. If clocks are equal, the lexicographically higher agent ID wins. This is deterministic — all agents arrive at the same state regardless of replay order.
|
|
175
|
+
|
|
176
|
+
```typescript
|
|
177
|
+
// Agent A sets "shared" (clock=1)
|
|
178
|
+
await storeA.set("shared", { value: "from-A" });
|
|
179
|
+
|
|
180
|
+
// Agent B opens (sees clock=1), sets "shared" (clock=2)
|
|
181
|
+
await storeB.set("shared", { value: "from-B" });
|
|
182
|
+
|
|
183
|
+
// B wins — higher clock
|
|
184
|
+
store.get("shared"); // { value: "from-B" }
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
## Custom Storage Backend
|
|
188
|
+
|
|
189
|
+
opslog uses a pluggable `StorageBackend` interface for all I/O. The default is `FsBackend` (local filesystem). You can implement your own backend for S3, databases, or other storage systems.
|
|
190
|
+
|
|
191
|
+
```typescript
|
|
192
|
+
import { Store, FsBackend } from "@backloghq/opslog";
|
|
193
|
+
import type { StorageBackend } from "@backloghq/opslog";
|
|
194
|
+
|
|
195
|
+
// Use the default filesystem backend (implicit)
|
|
196
|
+
const store = new Store();
|
|
197
|
+
await store.open("./data");
|
|
198
|
+
|
|
199
|
+
// Or pass a custom backend explicitly
|
|
200
|
+
const store = new Store();
|
|
201
|
+
await store.open("./data", { backend: new FsBackend() });
|
|
202
|
+
|
|
203
|
+
// Or implement your own
|
|
204
|
+
class S3Backend implements StorageBackend {
|
|
205
|
+
// ... implement all methods
|
|
206
|
+
}
|
|
207
|
+
const store = new Store();
|
|
208
|
+
await store.open("s3://bucket/prefix", { backend: new S3Backend() });
|
|
209
|
+
```
|
|
210
|
+
|
|
138
211
|
## Read-Only Mode
|
|
139
212
|
|
|
140
213
|
Open a store for reading without acquiring the write lock. Useful for dashboards, backup processes, or multiple readers alongside a single writer.
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import type { LockHandle, Manifest, Operation, StorageBackend } from "./types.js";
|
|
2
|
+
/** Filesystem-backed storage backend. Default backend for opslog. */
export declare class FsBackend implements StorageBackend {
    private dir;
    /** Record the store directory; unless read-only, ensure subdirectories exist. */
    initialize(dir: string, opts: {
        readOnly: boolean;
    }): Promise<void>;
    /** Release backend resources. No-op for the filesystem backend. */
    shutdown(): Promise<void>;
    /** Read manifest.json; resolves null when no store exists yet. */
    readManifest(): Promise<Manifest | null>;
    /** Persist the manifest to manifest.json. */
    writeManifest(manifest: Manifest): Promise<void>;
    /** Write all records as a new immutable snapshot; resolves its relative path. */
    writeSnapshot(records: Map<string, unknown>, version: number): Promise<string>;
    /** Load a snapshot (records plus the schema version it was written with). */
    loadSnapshot(relativePath: string): Promise<{
        records: Map<string, unknown>;
        version: number;
    }>;
    /** Append operations (one JSON line each) to the given ops file. */
    appendOps(relativePath: string, ops: Operation[]): Promise<void>;
    /** Read and parse all operations from an ops file. */
    readOps(relativePath: string): Promise<Operation[]>;
    // NOTE(review): boolean presumably reports whether an op was removed — confirm in wal.js.
    truncateLastOp(relativePath: string): Promise<boolean>;
    /** Create a new empty ops-<timestamp>.jsonl file; resolves its relative path. */
    createOpsFile(): Promise<string>;
    /** Write records into an archive segment for the period; resolves its relative path. */
    writeArchiveSegment(period: string, records: Map<string, unknown>): Promise<string>;
    /** Load the records stored in one archive segment. */
    loadArchiveSegment(relativePath: string): Promise<Map<string, unknown>>;
    /** List the relative paths of all archive segments. */
    listArchiveSegments(): Promise<string[]>;
    /** Acquire the single-writer lock; rejects if another writer holds it. */
    acquireLock(): Promise<LockHandle>;
    /** Release a lock previously returned by acquireLock(). */
    releaseLock(handle: LockHandle): Promise<void>;
    /** Create a per-agent WAL file (multi-writer mode); resolves its relative path. */
    createAgentOpsFile(agentId: string): Promise<string>;
    /** List all .jsonl ops files (both single-writer and per-agent WALs). */
    listOpsFiles(): Promise<string[]>;
    /** Acquire the exclusive compaction lock; rejects if another agent holds it. */
    acquireCompactionLock(): Promise<LockHandle>;
    /** Release the compaction lock and remove its lock file. */
    releaseCompactionLock(handle: LockHandle): Promise<void>;
    /** Opaque change token for the manifest (file mtime); null if it does not exist. */
    getManifestVersion(): Promise<string | null>;
}
|
package/dist/backend.js
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
import { mkdir, open, readdir, stat, unlink, writeFile } from "node:fs/promises";
|
|
2
|
+
import { join } from "node:path";
|
|
3
|
+
import { appendOp, appendOps, readOps, truncateLastOp } from "./wal.js";
|
|
4
|
+
import { loadSnapshot, writeSnapshot } from "./snapshot.js";
|
|
5
|
+
import { readManifest, writeManifest } from "./manifest.js";
|
|
6
|
+
import { loadArchiveSegment, writeArchiveSegment, listArchiveSegments as fsListArchiveSegments, } from "./archive.js";
|
|
7
|
+
import { acquireLock as fsAcquireLock, releaseLock as fsReleaseLock, } from "./lock.js";
|
|
8
|
+
/** Pairs an open lock file handle with the directory it protects. */
class FsLockHandle {
    constructor(fh, dir) {
        this.fh = fh;
        this.dir = dir;
    }
}
|
|
16
|
+
/** Filesystem-backed storage backend. Default backend for opslog. */
export class FsBackend {
    dir = "";
    /**
     * Remember the store directory and, unless opening read-only, make sure
     * the snapshots/, ops/ and archive/ subdirectories exist.
     */
    async initialize(dir, opts) {
        this.dir = dir;
        if (!opts.readOnly) {
            // The three directories are independent — create them in parallel.
            await Promise.all([
                mkdir(join(dir, "snapshots"), { recursive: true }),
                mkdir(join(dir, "ops"), { recursive: true }),
                mkdir(join(dir, "archive"), { recursive: true }),
            ]);
        }
    }
    async shutdown() {
        // No-op for filesystem backend
    }
    // -- Manifest --
    async readManifest() {
        return readManifest(this.dir);
    }
    async writeManifest(manifest) {
        return writeManifest(this.dir, manifest);
    }
    // -- Snapshots --
    async writeSnapshot(records, version) {
        return writeSnapshot(this.dir, records, version);
    }
    async loadSnapshot(relativePath) {
        return loadSnapshot(this.dir, relativePath);
    }
    // -- WAL --
    async appendOps(relativePath, ops) {
        const fullPath = join(this.dir, relativePath);
        // Single op takes the lighter single-line append path.
        if (ops.length === 1) {
            return appendOp(fullPath, ops[0]);
        }
        return appendOps(fullPath, ops);
    }
    async readOps(relativePath) {
        return readOps(join(this.dir, relativePath));
    }
    async truncateLastOp(relativePath) {
        return truncateLastOp(join(this.dir, relativePath));
    }
    /** Create a new empty ops-<timestamp>.jsonl file and return its relative path. */
    async createOpsFile() {
        const filename = `ops-${Date.now()}.jsonl`;
        // BUG FIX: the generated filename must be interpolated into the path
        // (previously the literal path "ops/$(unknown)" was created).
        const relativePath = `ops/${filename}`;
        await writeFile(join(this.dir, relativePath), "", "utf-8");
        return relativePath;
    }
    // -- Archive --
    async writeArchiveSegment(period, records) {
        return writeArchiveSegment(this.dir, period, records);
    }
    async loadArchiveSegment(relativePath) {
        return loadArchiveSegment(this.dir, relativePath);
    }
    async listArchiveSegments() {
        return fsListArchiveSegments(this.dir);
    }
    // -- Locking (single-writer) --
    async acquireLock() {
        const fh = await fsAcquireLock(this.dir);
        return new FsLockHandle(fh, this.dir);
    }
    async releaseLock(handle) {
        const fsHandle = handle;
        return fsReleaseLock(fsHandle.dir, fsHandle.fh);
    }
    // -- Multi-writer extensions --
    /** Create an empty per-agent WAL file and return its relative path. */
    async createAgentOpsFile(agentId) {
        const filename = `agent-${agentId}-${Date.now()}.jsonl`;
        // BUG FIX: interpolate the generated filename (was "ops/$(unknown)").
        const relativePath = `ops/${filename}`;
        await writeFile(join(this.dir, relativePath), "", "utf-8");
        return relativePath;
    }
    /** List every .jsonl file under ops/ (relative paths); [] if the dir is missing. */
    async listOpsFiles() {
        const opsDir = join(this.dir, "ops");
        try {
            const files = await readdir(opsDir);
            return files.filter((f) => f.endsWith(".jsonl")).map((f) => `ops/${f}`);
        }
        catch {
            return [];
        }
    }
    /**
     * Acquire the exclusive compaction lock by creating .compact-lock with the
     * "wx" flag (fails atomically if the file already exists).
     */
    async acquireCompactionLock() {
        const lockPath = join(this.dir, ".compact-lock");
        let fh;
        try {
            fh = await open(lockPath, "wx");
        }
        catch (err) {
            if (err.code === "EEXIST") {
                throw new Error("Compaction lock held by another agent", { cause: err });
            }
            throw err;
        }
        try {
            await fh.writeFile(String(process.pid), "utf-8");
        }
        catch (err) {
            // Don't leak the handle or leave a stuck lock file if the pid write fails.
            await fh.close().catch(() => { });
            await unlink(lockPath).catch(() => { });
            throw err;
        }
        return new FsLockHandle(fh, this.dir);
    }
    /** Close the compaction lock handle and remove its lock file. */
    async releaseCompactionLock(handle) {
        const fsHandle = handle;
        await fsHandle.fh.close();
        try {
            await unlink(join(fsHandle.dir, ".compact-lock"));
        }
        catch {
            // Already cleaned up
        }
    }
    /** Manifest change token: its mtime in ms as a string, or null if absent. */
    async getManifestVersion() {
        try {
            const s = await stat(join(this.dir, "manifest.json"));
            return s.mtimeMs.toString();
        }
        catch {
            return null;
        }
    }
}
|
package/dist/clock.d.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Lamport logical clock for multi-writer operation ordering.
|
|
3
|
+
* Each agent maintains its own clock. On local events, tick().
|
|
4
|
+
* On receiving remote events, merge(received) to stay ahead.
|
|
5
|
+
* Ties are broken by agent ID (lexicographic) for deterministic total order.
|
|
6
|
+
*/
|
|
7
|
+
export declare class LamportClock {
|
|
8
|
+
private counter;
|
|
9
|
+
constructor(initial?: number);
|
|
10
|
+
/** Increment and return the new value (for local events). */
|
|
11
|
+
tick(): number;
|
|
12
|
+
/** Merge with a received clock value and increment. */
|
|
13
|
+
merge(received: number): number;
|
|
14
|
+
/** Current clock value without incrementing. */
|
|
15
|
+
get current(): number;
|
|
16
|
+
}
|
package/dist/clock.js
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Lamport logical clock for multi-writer operation ordering.
 * Each agent maintains its own clock. On local events, tick().
 * On receiving remote events, merge(received) to stay ahead.
 * Ties are broken by agent ID (lexicographic) for deterministic total order.
 */
export class LamportClock {
    counter;
    constructor(initial = 0) {
        this.counter = initial;
    }
    /** Advance the clock for a local event and return the new value. */
    tick() {
        this.counter += 1;
        return this.counter;
    }
    /** Fold in a clock value observed from another agent, then advance. */
    merge(received) {
        const highest = received > this.counter ? received : this.counter;
        this.counter = highest + 1;
        return this.counter;
    }
    /** Read the current value without advancing the clock. */
    get current() {
        return this.counter;
    }
}
|
package/dist/delta.d.ts
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Delta encoding for operations.
|
|
3
|
+
* Instead of storing the full previous record, store only the diff.
|
|
4
|
+
*
|
|
5
|
+
* Uses a simplified JSON Patch-like format:
|
|
6
|
+
* - Only tracks changed/added/removed top-level keys
|
|
7
|
+
* - prev becomes a patch object: { $set: {...}, $unset: [...] }
|
|
8
|
+
*/
|
|
9
|
+
export interface DeltaPatch {
|
|
10
|
+
/** Fields that were changed or added (old values). */
|
|
11
|
+
$set?: Record<string, unknown>;
|
|
12
|
+
/** Fields that were removed. */
|
|
13
|
+
$unset?: string[];
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Create a delta patch from an old record to a new record.
|
|
17
|
+
* The patch, when applied to the new record, produces the old record.
|
|
18
|
+
* This is the "reverse patch" — stored as `prev` so undo can apply it.
|
|
19
|
+
*/
|
|
20
|
+
export declare function createDelta(oldRecord: Record<string, unknown> | null, newRecord: Record<string, unknown>): DeltaPatch | null;
|
|
21
|
+
/**
|
|
22
|
+
* Apply a delta patch to a record to produce the previous version.
|
|
23
|
+
* Used during undo: apply the reverse patch to current record → get old record.
|
|
24
|
+
*/
|
|
25
|
+
export declare function applyDelta(record: Record<string, unknown>, patch: DeltaPatch): Record<string, unknown>;
|
|
26
|
+
/**
|
|
27
|
+
* Check if a delta patch is smaller than the full record.
|
|
28
|
+
* Used to decide whether to use delta or full encoding.
|
|
29
|
+
*/
|
|
30
|
+
export declare function isDeltaSmaller(patch: DeltaPatch | null, fullRecord: Record<string, unknown> | null): boolean;
|
package/dist/delta.js
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Delta encoding for operations.
|
|
3
|
+
* Instead of storing the full previous record, store only the diff.
|
|
4
|
+
*
|
|
5
|
+
* Uses a simplified JSON Patch-like format:
|
|
6
|
+
* - Only tracks changed/added/removed top-level keys
|
|
7
|
+
* - prev becomes a patch object: { $set: {...}, $unset: [...] }
|
|
8
|
+
*/
|
|
9
|
+
/**
 * Create a delta patch from an old record to a new record.
 * The patch, when applied to the new record, produces the old record.
 * This is the "reverse patch" — stored as `prev` so undo can apply it.
 * Returns null for a create (no previous record) or when nothing changed.
 */
export function createDelta(oldRecord, newRecord) {
    // Create operation: there is no previous state to encode.
    if (oldRecord === null) {
        return null;
    }
    // Old fields that were changed or dropped: restoring means $set-ing them back.
    const toRestore = {};
    for (const [key, previousValue] of Object.entries(oldRecord)) {
        const removed = !(key in newRecord);
        if (removed || JSON.stringify(previousValue) !== JSON.stringify(newRecord[key])) {
            toRestore[key] = previousValue;
        }
    }
    // New fields absent from the old record: restoring means $unset-ing them.
    const toRemove = Object.keys(newRecord).filter((key) => !(key in oldRecord));
    const patch = {};
    if (Object.keys(toRestore).length > 0) {
        patch.$set = toRestore;
    }
    if (toRemove.length > 0) {
        patch.$unset = toRemove;
    }
    // No differences at all → no patch.
    return patch.$set || patch.$unset ? patch : null;
}
|
|
47
|
+
/**
 * Apply a delta patch to a record to produce the previous version.
 * Used during undo: apply the reverse patch to the current record → old record.
 * The input record is not mutated; a shallow-copied result is returned.
 */
export function applyDelta(record, patch) {
    const restored = { ...record };
    for (const [key, value] of Object.entries(patch.$set ?? {})) {
        restored[key] = value;
    }
    for (const key of patch.$unset ?? []) {
        delete restored[key];
    }
    return restored;
}
|
|
65
|
+
/**
 * Check if a delta patch is smaller than the full record.
 * Used to decide whether to use delta or full encoding.
 * Sizes are compared by serialized JSON length; a missing patch or
 * missing record always yields false.
 */
export function isDeltaSmaller(patch, fullRecord) {
    if (patch === null || fullRecord === null) {
        return false;
    }
    return JSON.stringify(patch).length < JSON.stringify(fullRecord).length;
}
|
package/dist/index.d.ts
CHANGED
|
@@ -1,3 +1,8 @@
|
|
|
1
1
|
export { Store } from "./store.js";
|
|
2
|
+
export { FsBackend } from "./backend.js";
|
|
3
|
+
export { LamportClock } from "./clock.js";
|
|
4
|
+
export { createDelta, applyDelta, isDeltaSmaller } from "./delta.js";
|
|
5
|
+
export type { DeltaPatch } from "./delta.js";
|
|
2
6
|
export { acquireLock, releaseLock } from "./lock.js";
|
|
3
|
-
export
|
|
7
|
+
export { validateOp, validateManifest, validateSnapshot, validateArchiveSegment, } from "./validate.js";
|
|
8
|
+
export type { Operation, Snapshot, Manifest, ManifestStats, ArchiveSegment, StoreOptions, StoreStats, StorageBackend, LockHandle, } from "./types.js";
|
package/dist/index.js
CHANGED
|
@@ -1,2 +1,6 @@
|
|
|
1
1
|
export { Store } from "./store.js";
|
|
2
|
+
export { FsBackend } from "./backend.js";
|
|
3
|
+
export { LamportClock } from "./clock.js";
|
|
4
|
+
export { createDelta, applyDelta, isDeltaSmaller } from "./delta.js";
|
|
2
5
|
export { acquireLock, releaseLock } from "./lock.js";
|
|
6
|
+
export { validateOp, validateManifest, validateSnapshot, validateArchiveSegment, } from "./validate.js";
|
package/dist/store.d.ts
CHANGED
|
@@ -8,20 +8,26 @@ export declare class Store<T = Record<string, unknown>> {
|
|
|
8
8
|
private version;
|
|
9
9
|
private activeOpsPath;
|
|
10
10
|
private created;
|
|
11
|
-
private
|
|
11
|
+
private coreOpts;
|
|
12
12
|
private archivedRecordCount;
|
|
13
13
|
private batching;
|
|
14
14
|
private batchOps;
|
|
15
15
|
private _lock;
|
|
16
|
-
private
|
|
16
|
+
private lockHandle;
|
|
17
|
+
private backend;
|
|
18
|
+
private agentId?;
|
|
19
|
+
private clock;
|
|
20
|
+
private manifestVersion;
|
|
17
21
|
/**
|
|
18
22
|
* Serialize all state-mutating operations through a promise chain.
|
|
19
|
-
*
|
|
20
|
-
* undo + set) which could corrupt the WAL or in-memory state.
|
|
21
|
-
* Read operations remain synchronous and lock-free.
|
|
23
|
+
* Prevents interleaving of async mutations. Reads remain synchronous and lock-free.
|
|
22
24
|
*/
|
|
23
25
|
private serialize;
|
|
26
|
+
private isMultiWriter;
|
|
24
27
|
open(dir: string, options?: StoreOptions): Promise<void>;
|
|
28
|
+
private initFreshStore;
|
|
29
|
+
private loadExistingStore;
|
|
30
|
+
private loadMultiWriterOps;
|
|
25
31
|
close(): Promise<void>;
|
|
26
32
|
get(id: string): T | undefined;
|
|
27
33
|
set(id: string, value: T): Promise<void> | void;
|
|
@@ -40,14 +46,42 @@ export declare class Store<T = Record<string, unknown>> {
|
|
|
40
46
|
listArchiveSegments(): string[];
|
|
41
47
|
loadArchive(segment: string): Promise<Map<string, T>>;
|
|
42
48
|
stats(): StoreStats;
|
|
49
|
+
/**
|
|
50
|
+
* Reload state from the backend (multi-writer mode).
|
|
51
|
+
* Re-reads the manifest, snapshot, and all agent WAL files.
|
|
52
|
+
* Use this to pick up writes from other agents.
|
|
53
|
+
*/
|
|
54
|
+
refresh(): Promise<void>;
|
|
55
|
+
private watchTimer;
|
|
56
|
+
private watchCallback;
|
|
57
|
+
/**
|
|
58
|
+
* Tail the WAL for new operations. Re-reads the active ops file
|
|
59
|
+
* and replays any new operations since the last known count.
|
|
60
|
+
* Returns the newly applied operations.
|
|
61
|
+
* Works in any mode (single-writer readOnly, multi-writer, etc).
|
|
62
|
+
*/
|
|
63
|
+
tail(): Promise<Operation<T>[]>;
|
|
64
|
+
/**
|
|
65
|
+
* Watch for new operations on an interval.
|
|
66
|
+
* Calls the callback with new operations whenever they appear.
|
|
67
|
+
* @param callback Called with new operations
|
|
68
|
+
* @param intervalMs Polling interval in milliseconds (default: 1000)
|
|
69
|
+
*/
|
|
70
|
+
watch(callback: (ops: Operation<T>[]) => void, intervalMs?: number): void;
|
|
71
|
+
/** Stop watching for new operations. */
|
|
72
|
+
unwatch(): void;
|
|
73
|
+
private makeOp;
|
|
43
74
|
private _set;
|
|
44
75
|
private _setSync;
|
|
45
76
|
private _delete;
|
|
46
77
|
private _deleteSync;
|
|
47
78
|
private _batch;
|
|
48
79
|
private _undo;
|
|
80
|
+
private _undoMultiWriter;
|
|
49
81
|
private _compact;
|
|
82
|
+
private _compactMultiWriter;
|
|
50
83
|
private _archive;
|
|
84
|
+
private _refresh;
|
|
51
85
|
private ensureOpen;
|
|
52
86
|
private ensureWritable;
|
|
53
87
|
private applyOp;
|
package/dist/store.js
CHANGED
|
@@ -1,10 +1,7 @@
|
|
|
1
|
-
import {
|
|
2
|
-
import {
|
|
3
|
-
import {
|
|
4
|
-
import {
|
|
5
|
-
import { createDefaultManifest, readManifest, writeManifest, } from "./manifest.js";
|
|
6
|
-
import { loadArchiveSegment, writeArchiveSegment, } from "./archive.js";
|
|
7
|
-
import { acquireLock, releaseLock } from "./lock.js";
|
|
1
|
+
import { createDefaultManifest } from "./manifest.js";
|
|
2
|
+
import { FsBackend } from "./backend.js";
|
|
3
|
+
import { LamportClock } from "./clock.js";
|
|
4
|
+
import { createDelta, applyDelta, isDeltaSmaller } from "./delta.js";
|
|
8
5
|
export class Store {
|
|
9
6
|
dir = "";
|
|
10
7
|
records = new Map();
|
|
@@ -14,7 +11,7 @@ export class Store {
|
|
|
14
11
|
version = 1;
|
|
15
12
|
activeOpsPath = "";
|
|
16
13
|
created = "";
|
|
17
|
-
|
|
14
|
+
coreOpts = {
|
|
18
15
|
checkpointThreshold: 100,
|
|
19
16
|
checkpointOnClose: true,
|
|
20
17
|
version: 1,
|
|
@@ -25,12 +22,15 @@ export class Store {
|
|
|
25
22
|
batching = false;
|
|
26
23
|
batchOps = [];
|
|
27
24
|
_lock = Promise.resolve();
|
|
28
|
-
|
|
25
|
+
lockHandle = null;
|
|
26
|
+
backend;
|
|
27
|
+
// Multi-writer state
|
|
28
|
+
agentId;
|
|
29
|
+
clock = null;
|
|
30
|
+
manifestVersion = null;
|
|
29
31
|
/**
|
|
30
32
|
* Serialize all state-mutating operations through a promise chain.
|
|
31
|
-
*
|
|
32
|
-
* undo + set) which could corrupt the WAL or in-memory state.
|
|
33
|
-
* Read operations remain synchronous and lock-free.
|
|
33
|
+
* Prevents interleaving of async mutations. Reads remain synchronous and lock-free.
|
|
34
34
|
*/
|
|
35
35
|
serialize(fn) {
|
|
36
36
|
const prev = this._lock;
|
|
@@ -40,79 +40,159 @@ export class Store {
|
|
|
40
40
|
});
|
|
41
41
|
return prev.then(fn).finally(() => resolve());
|
|
42
42
|
}
|
|
43
|
+
isMultiWriter() {
|
|
44
|
+
return this.agentId !== undefined;
|
|
45
|
+
}
|
|
43
46
|
async open(dir, options) {
|
|
44
47
|
this.dir = dir;
|
|
45
48
|
if (options) {
|
|
46
|
-
|
|
49
|
+
const { backend, agentId, ...rest } = options;
|
|
50
|
+
this.coreOpts = { ...this.coreOpts, ...rest };
|
|
51
|
+
if (backend)
|
|
52
|
+
this.backend = backend;
|
|
53
|
+
if (agentId)
|
|
54
|
+
this.agentId = agentId;
|
|
47
55
|
}
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
this.
|
|
56
|
+
this.backend ??= new FsBackend();
|
|
57
|
+
await this.backend.initialize(dir, { readOnly: this.coreOpts.readOnly });
|
|
58
|
+
// Acquire write lock (single-writer only, not readOnly)
|
|
59
|
+
if (!this.coreOpts.readOnly && !this.isMultiWriter()) {
|
|
60
|
+
this.lockHandle = await this.backend.acquireLock();
|
|
53
61
|
}
|
|
54
|
-
const manifest = await readManifest(
|
|
62
|
+
const manifest = await this.backend.readManifest();
|
|
55
63
|
if (!manifest) {
|
|
56
|
-
if (this.
|
|
64
|
+
if (this.coreOpts.readOnly) {
|
|
57
65
|
throw new Error("Cannot open in readOnly mode: no existing store found");
|
|
58
66
|
}
|
|
59
|
-
|
|
60
|
-
const snapshotPath = await writeSnapshot(dir, new Map(), this.options.version);
|
|
61
|
-
const opsFilename = `ops-${Date.now()}.jsonl`;
|
|
62
|
-
const opsPath = `ops/${opsFilename}`;
|
|
63
|
-
await writeFile(join(dir, opsPath), "", "utf-8");
|
|
64
|
-
const newManifest = createDefaultManifest(snapshotPath, opsPath);
|
|
65
|
-
await writeManifest(dir, newManifest);
|
|
66
|
-
this.version = this.options.version;
|
|
67
|
-
this.activeOpsPath = opsPath;
|
|
68
|
-
this.created = newManifest.stats.created;
|
|
69
|
-
this.archiveSegments = [];
|
|
67
|
+
await this.initFreshStore();
|
|
70
68
|
}
|
|
71
69
|
else {
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
70
|
+
await this.loadExistingStore(manifest);
|
|
71
|
+
}
|
|
72
|
+
this.manifestVersion = await this.backend.getManifestVersion();
|
|
73
|
+
this.opened = true;
|
|
74
|
+
}
|
|
75
|
+
async initFreshStore() {
|
|
76
|
+
const snapshotPath = await this.backend.writeSnapshot(new Map(), this.coreOpts.version);
|
|
77
|
+
let opsPath;
|
|
78
|
+
if (this.isMultiWriter()) {
|
|
79
|
+
opsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
80
|
+
}
|
|
81
|
+
else {
|
|
82
|
+
opsPath = await this.backend.createOpsFile();
|
|
83
|
+
}
|
|
84
|
+
const newManifest = createDefaultManifest(snapshotPath, opsPath);
|
|
85
|
+
if (this.isMultiWriter()) {
|
|
86
|
+
newManifest.activeAgentOps = { [this.agentId]: opsPath };
|
|
87
|
+
}
|
|
88
|
+
await this.backend.writeManifest(newManifest);
|
|
89
|
+
this.version = this.coreOpts.version;
|
|
90
|
+
this.activeOpsPath = opsPath;
|
|
91
|
+
this.created = newManifest.stats.created;
|
|
92
|
+
this.archiveSegments = [];
|
|
93
|
+
if (this.isMultiWriter()) {
|
|
94
|
+
this.clock = new LamportClock(0);
|
|
95
|
+
}
|
|
96
|
+
}
|
|
97
|
+
async loadExistingStore(manifest) {
|
|
98
|
+
// Load snapshot
|
|
99
|
+
let snapshotData;
|
|
100
|
+
try {
|
|
101
|
+
snapshotData = await this.backend.loadSnapshot(manifest.currentSnapshot);
|
|
102
|
+
}
|
|
103
|
+
catch (err) {
|
|
104
|
+
const isNotFound = err instanceof Error &&
|
|
105
|
+
"code" in err &&
|
|
106
|
+
err.code === "ENOENT";
|
|
107
|
+
if (isNotFound) {
|
|
108
|
+
throw new Error(`Snapshot file not found: ${manifest.currentSnapshot}. The data directory may be corrupted.`, { cause: err });
|
|
83
109
|
}
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
this.version = this.options.version;
|
|
110
|
+
throw err;
|
|
111
|
+
}
|
|
112
|
+
const { records, version: storedVersion } = snapshotData;
|
|
113
|
+
this.records = records;
|
|
114
|
+
this.version = storedVersion;
|
|
115
|
+
this.created = manifest.stats.created;
|
|
116
|
+
this.archiveSegments = manifest.archiveSegments;
|
|
117
|
+
this.archivedRecordCount = manifest.stats.archivedRecords;
|
|
118
|
+
// Migrate if needed
|
|
119
|
+
if (storedVersion < this.coreOpts.version) {
|
|
120
|
+
for (const [id, record] of this.records) {
|
|
121
|
+
this.records.set(id, this.coreOpts.migrate(record, storedVersion));
|
|
97
122
|
}
|
|
98
|
-
|
|
99
|
-
|
|
123
|
+
this.version = this.coreOpts.version;
|
|
124
|
+
}
|
|
125
|
+
if (this.isMultiWriter()) {
|
|
126
|
+
await this.loadMultiWriterOps(manifest);
|
|
127
|
+
}
|
|
128
|
+
else {
|
|
129
|
+
// Single-writer: replay ops from active ops file
|
|
130
|
+
const ops = (await this.backend.readOps(manifest.activeOps));
|
|
100
131
|
for (const op of ops) {
|
|
101
132
|
this.applyOp(op);
|
|
102
133
|
}
|
|
103
134
|
this.ops = ops;
|
|
135
|
+
this.activeOpsPath = manifest.activeOps;
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
async loadMultiWriterOps(manifest) {
|
|
139
|
+
const allOps = [];
|
|
140
|
+
// Read all agent ops files
|
|
141
|
+
if (manifest.activeAgentOps) {
|
|
142
|
+
for (const opsPath of Object.values(manifest.activeAgentOps)) {
|
|
143
|
+
const ops = (await this.backend.readOps(opsPath));
|
|
144
|
+
allOps.push(...ops);
|
|
145
|
+
}
|
|
146
|
+
}
|
|
147
|
+
// Also read legacy single-writer ops for backward compat
|
|
148
|
+
if (manifest.activeOps && !manifest.activeAgentOps) {
|
|
149
|
+
const ops = (await this.backend.readOps(manifest.activeOps));
|
|
150
|
+
allOps.push(...ops);
|
|
151
|
+
}
|
|
152
|
+
// Merge-sort by (clock, agent) for deterministic total order
|
|
153
|
+
allOps.sort((a, b) => {
|
|
154
|
+
const clockDiff = (a.clock ?? 0) - (b.clock ?? 0);
|
|
155
|
+
if (clockDiff !== 0)
|
|
156
|
+
return clockDiff;
|
|
157
|
+
return (a.agent ?? "").localeCompare(b.agent ?? "");
|
|
158
|
+
});
|
|
159
|
+
for (const op of allOps) {
|
|
160
|
+
this.applyOp(op);
|
|
161
|
+
}
|
|
162
|
+
this.ops = allOps;
|
|
163
|
+
// Initialize Lamport clock from max seen value
|
|
164
|
+
const maxClock = allOps.reduce((max, op) => Math.max(max, op.clock ?? 0), 0);
|
|
165
|
+
this.clock = new LamportClock(maxClock);
|
|
166
|
+
// Find or create our agent's ops file
|
|
167
|
+
if (manifest.activeAgentOps?.[this.agentId]) {
|
|
168
|
+
this.activeOpsPath = manifest.activeAgentOps[this.agentId];
|
|
169
|
+
}
|
|
170
|
+
else {
|
|
171
|
+
// Register this agent in the manifest
|
|
172
|
+
this.activeOpsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
173
|
+
const updatedManifest = {
|
|
174
|
+
...manifest,
|
|
175
|
+
activeAgentOps: {
|
|
176
|
+
...(manifest.activeAgentOps ?? {}),
|
|
177
|
+
[this.agentId]: this.activeOpsPath,
|
|
178
|
+
},
|
|
179
|
+
};
|
|
180
|
+
await this.backend.writeManifest(updatedManifest);
|
|
104
181
|
}
|
|
105
|
-
this.opened = true;
|
|
106
182
|
}
|
|
107
183
|
async close() {
|
|
108
184
|
this.ensureOpen();
|
|
109
|
-
|
|
185
|
+
this.unwatch();
|
|
186
|
+
if (!this.coreOpts.readOnly &&
|
|
187
|
+
this.coreOpts.checkpointOnClose &&
|
|
188
|
+
this.ops.length > 0) {
|
|
110
189
|
await this.serialize(() => this._compact());
|
|
111
190
|
}
|
|
112
|
-
if (this.
|
|
113
|
-
await releaseLock(this.
|
|
114
|
-
this.
|
|
191
|
+
if (this.lockHandle) {
|
|
192
|
+
await this.backend.releaseLock(this.lockHandle);
|
|
193
|
+
this.lockHandle = null;
|
|
115
194
|
}
|
|
195
|
+
await this.backend.shutdown();
|
|
116
196
|
this.opened = false;
|
|
117
197
|
}
|
|
118
198
|
get(id) {
|
|
@@ -208,7 +288,7 @@ export class Store {
|
|
|
208
288
|
const segmentPath = this.archiveSegments.find((s) => s === `archive/archive-${segment}.json`) || this.archiveSegments.find((s) => s.includes(segment));
|
|
209
289
|
if (!segmentPath)
|
|
210
290
|
throw new Error(`Archive segment '${segment}' not found`);
|
|
211
|
-
return loadArchiveSegment(
|
|
291
|
+
return this.backend.loadArchiveSegment(segmentPath);
|
|
212
292
|
}
|
|
213
293
|
stats() {
|
|
214
294
|
this.ensureOpen();
|
|
@@ -218,28 +298,105 @@ export class Store {
|
|
|
218
298
|
archiveSegments: this.archiveSegments.length,
|
|
219
299
|
};
|
|
220
300
|
}
|
|
301
|
+
/**
|
|
302
|
+
* Reload state from the backend (multi-writer mode).
|
|
303
|
+
* Re-reads the manifest, snapshot, and all agent WAL files.
|
|
304
|
+
* Use this to pick up writes from other agents.
|
|
305
|
+
*/
|
|
306
|
+
async refresh() {
|
|
307
|
+
this.ensureOpen();
|
|
308
|
+
if (!this.isMultiWriter()) {
|
|
309
|
+
throw new Error("refresh() is only available in multi-writer mode");
|
|
310
|
+
}
|
|
311
|
+
return this.serialize(() => this._refresh());
|
|
312
|
+
}
|
|
313
|
+
// --- WAL tailing ---
|
|
314
|
+
watchTimer = null;
|
|
315
|
+
watchCallback = null;
|
|
316
|
+
/**
|
|
317
|
+
* Tail the WAL for new operations. Re-reads the active ops file
|
|
318
|
+
* and replays any new operations since the last known count.
|
|
319
|
+
* Returns the newly applied operations.
|
|
320
|
+
* Works in any mode (single-writer readOnly, multi-writer, etc).
|
|
321
|
+
*/
|
|
322
|
+
async tail() {
|
|
323
|
+
this.ensureOpen();
|
|
324
|
+
const prevCount = this.ops.length;
|
|
325
|
+
// Re-read the ops file for new entries
|
|
326
|
+
const allOps = (await this.backend.readOps(this.activeOpsPath));
|
|
327
|
+
if (allOps.length <= prevCount)
|
|
328
|
+
return [];
|
|
329
|
+
const newOps = allOps.slice(prevCount);
|
|
330
|
+
for (const op of newOps) {
|
|
331
|
+
this.applyOp(op);
|
|
332
|
+
}
|
|
333
|
+
this.ops.push(...newOps);
|
|
334
|
+
return newOps;
|
|
335
|
+
}
|
|
336
|
+
/**
|
|
337
|
+
* Watch for new operations on an interval.
|
|
338
|
+
* Calls the callback with new operations whenever they appear.
|
|
339
|
+
* @param callback Called with new operations
|
|
340
|
+
* @param intervalMs Polling interval in milliseconds (default: 1000)
|
|
341
|
+
*/
|
|
342
|
+
watch(callback, intervalMs = 1000) {
|
|
343
|
+
this.ensureOpen();
|
|
344
|
+
if (this.watchTimer)
|
|
345
|
+
this.unwatch();
|
|
346
|
+
this.watchCallback = callback;
|
|
347
|
+
this.watchTimer = setInterval(async () => {
|
|
348
|
+
try {
|
|
349
|
+
const newOps = await this.tail();
|
|
350
|
+
if (newOps.length > 0 && this.watchCallback) {
|
|
351
|
+
this.watchCallback(newOps);
|
|
352
|
+
}
|
|
353
|
+
}
|
|
354
|
+
catch {
|
|
355
|
+
// Silently ignore tail errors during watch
|
|
356
|
+
}
|
|
357
|
+
}, intervalMs);
|
|
358
|
+
}
|
|
359
|
+
/** Stop watching for new operations. */
|
|
360
|
+
unwatch() {
|
|
361
|
+
if (this.watchTimer) {
|
|
362
|
+
clearInterval(this.watchTimer);
|
|
363
|
+
this.watchTimer = null;
|
|
364
|
+
}
|
|
365
|
+
this.watchCallback = null;
|
|
366
|
+
}
|
|
221
367
|
// --- Private mutation implementations ---
|
|
222
|
-
|
|
223
|
-
const prev = this.records.get(id) ?? null;
|
|
368
|
+
makeOp(type, id, data, prev) {
|
|
224
369
|
const op = {
|
|
225
370
|
ts: new Date().toISOString(),
|
|
226
|
-
op:
|
|
371
|
+
op: type,
|
|
227
372
|
id,
|
|
228
|
-
data: value,
|
|
229
373
|
prev,
|
|
230
374
|
};
|
|
375
|
+
if (type === "set")
|
|
376
|
+
op.data = data;
|
|
377
|
+
if (this.agentId) {
|
|
378
|
+
op.agent = this.agentId;
|
|
379
|
+
op.clock = this.clock.tick();
|
|
380
|
+
}
|
|
381
|
+
// Try delta encoding for updates (not creates or deletes)
|
|
382
|
+
if (type === "set" && prev !== null && data !== undefined) {
|
|
383
|
+
const delta = createDelta(prev, data);
|
|
384
|
+
if (delta && isDeltaSmaller(delta, prev)) {
|
|
385
|
+
op.prev = delta;
|
|
386
|
+
op.encoding = "delta";
|
|
387
|
+
}
|
|
388
|
+
}
|
|
389
|
+
return op;
|
|
390
|
+
}
|
|
391
|
+
async _set(id, value) {
|
|
392
|
+
const prev = this.records.get(id) ?? null;
|
|
393
|
+
const op = this.makeOp("set", id, value, prev);
|
|
231
394
|
this.records.set(id, value);
|
|
232
395
|
await this.persistOp(op);
|
|
233
396
|
}
|
|
234
397
|
_setSync(id, value) {
|
|
235
398
|
const prev = this.records.get(id) ?? null;
|
|
236
|
-
const op =
|
|
237
|
-
ts: new Date().toISOString(),
|
|
238
|
-
op: "set",
|
|
239
|
-
id,
|
|
240
|
-
data: value,
|
|
241
|
-
prev,
|
|
242
|
-
};
|
|
399
|
+
const op = this.makeOp("set", id, value, prev);
|
|
243
400
|
this.records.set(id, value);
|
|
244
401
|
this.batchOps.push(op);
|
|
245
402
|
}
|
|
@@ -248,12 +405,7 @@ export class Store {
|
|
|
248
405
|
if (prev === undefined) {
|
|
249
406
|
throw new Error(`Record '${id}' not found`);
|
|
250
407
|
}
|
|
251
|
-
const op =
|
|
252
|
-
ts: new Date().toISOString(),
|
|
253
|
-
op: "delete",
|
|
254
|
-
id,
|
|
255
|
-
prev,
|
|
256
|
-
};
|
|
408
|
+
const op = this.makeOp("delete", id, undefined, prev);
|
|
257
409
|
this.records.delete(id);
|
|
258
410
|
await this.persistOp(op);
|
|
259
411
|
}
|
|
@@ -262,12 +414,7 @@ export class Store {
|
|
|
262
414
|
if (prev === undefined) {
|
|
263
415
|
throw new Error(`Record '${id}' not found`);
|
|
264
416
|
}
|
|
265
|
-
const op =
|
|
266
|
-
ts: new Date().toISOString(),
|
|
267
|
-
op: "delete",
|
|
268
|
-
id,
|
|
269
|
-
prev,
|
|
270
|
-
};
|
|
417
|
+
const op = this.makeOp("delete", id, undefined, prev);
|
|
271
418
|
this.records.delete(id);
|
|
272
419
|
this.batchOps.push(op);
|
|
273
420
|
}
|
|
@@ -276,17 +423,15 @@ export class Store {
|
|
|
276
423
|
this.batchOps = [];
|
|
277
424
|
try {
|
|
278
425
|
fn();
|
|
279
|
-
// Empty batches are no-ops — no I/O if fn() didn't call set/delete
|
|
280
426
|
if (this.batchOps.length > 0) {
|
|
281
|
-
await appendOps(
|
|
427
|
+
await this.backend.appendOps(this.activeOpsPath, this.batchOps);
|
|
282
428
|
this.ops.push(...this.batchOps);
|
|
283
|
-
if (this.ops.length >= this.
|
|
429
|
+
if (this.ops.length >= this.coreOpts.checkpointThreshold) {
|
|
284
430
|
await this._compact();
|
|
285
431
|
}
|
|
286
432
|
}
|
|
287
433
|
}
|
|
288
434
|
catch (err) {
|
|
289
|
-
// Rollback in-memory changes on failure
|
|
290
435
|
for (const op of this.batchOps.reverse()) {
|
|
291
436
|
try {
|
|
292
437
|
this.reverseOp(op);
|
|
@@ -303,19 +448,37 @@ export class Store {
|
|
|
303
448
|
}
|
|
304
449
|
}
|
|
305
450
|
async _undo() {
|
|
451
|
+
if (this.isMultiWriter()) {
|
|
452
|
+
return this._undoMultiWriter();
|
|
453
|
+
}
|
|
454
|
+
// Single-writer: O(1) undo
|
|
306
455
|
if (this.ops.length === 0)
|
|
307
456
|
return false;
|
|
308
457
|
const lastOp = this.ops[this.ops.length - 1];
|
|
309
458
|
this.reverseOp(lastOp);
|
|
310
459
|
this.ops.pop();
|
|
311
|
-
await truncateLastOp(
|
|
460
|
+
await this.backend.truncateLastOp(this.activeOpsPath);
|
|
461
|
+
return true;
|
|
462
|
+
}
|
|
463
|
+
async _undoMultiWriter() {
|
|
464
|
+
// Find last op from this agent
|
|
465
|
+
const myOps = this.ops.filter((op) => op.agent === this.agentId);
|
|
466
|
+
if (myOps.length === 0)
|
|
467
|
+
return false;
|
|
468
|
+
// Truncate our WAL file
|
|
469
|
+
await this.backend.truncateLastOp(this.activeOpsPath);
|
|
470
|
+
// Re-derive state from scratch (correct but O(n))
|
|
471
|
+
await this._refresh();
|
|
312
472
|
return true;
|
|
313
473
|
}
|
|
314
474
|
async _compact() {
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
475
|
+
if (this.isMultiWriter()) {
|
|
476
|
+
await this._compactMultiWriter();
|
|
477
|
+
return;
|
|
478
|
+
}
|
|
479
|
+
// Single-writer compaction
|
|
480
|
+
const snapshotPath = await this.backend.writeSnapshot(this.records, this.version);
|
|
481
|
+
const opsPath = await this.backend.createOpsFile();
|
|
319
482
|
const updatedManifest = {
|
|
320
483
|
version: this.version,
|
|
321
484
|
currentSnapshot: snapshotPath,
|
|
@@ -329,10 +492,45 @@ export class Store {
|
|
|
329
492
|
lastCheckpoint: new Date().toISOString(),
|
|
330
493
|
},
|
|
331
494
|
};
|
|
332
|
-
await writeManifest(
|
|
495
|
+
await this.backend.writeManifest(updatedManifest);
|
|
333
496
|
this.activeOpsPath = opsPath;
|
|
334
497
|
this.ops = [];
|
|
335
498
|
}
|
|
499
|
+
async _compactMultiWriter() {
|
|
500
|
+
let compactLock;
|
|
501
|
+
try {
|
|
502
|
+
compactLock = await this.backend.acquireCompactionLock();
|
|
503
|
+
}
|
|
504
|
+
catch {
|
|
505
|
+
// Another agent is compacting — skip
|
|
506
|
+
return;
|
|
507
|
+
}
|
|
508
|
+
try {
|
|
509
|
+
const snapshotPath = await this.backend.writeSnapshot(this.records, this.version);
|
|
510
|
+
const opsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
511
|
+
const updatedManifest = {
|
|
512
|
+
version: this.version,
|
|
513
|
+
currentSnapshot: snapshotPath,
|
|
514
|
+
activeOps: opsPath,
|
|
515
|
+
activeAgentOps: { [this.agentId]: opsPath },
|
|
516
|
+
archiveSegments: this.archiveSegments,
|
|
517
|
+
stats: {
|
|
518
|
+
activeRecords: this.records.size,
|
|
519
|
+
archivedRecords: this.archivedRecordCount,
|
|
520
|
+
opsCount: 0,
|
|
521
|
+
created: this.created,
|
|
522
|
+
lastCheckpoint: new Date().toISOString(),
|
|
523
|
+
},
|
|
524
|
+
};
|
|
525
|
+
await this.backend.writeManifest(updatedManifest);
|
|
526
|
+
this.activeOpsPath = opsPath;
|
|
527
|
+
this.ops = [];
|
|
528
|
+
this.manifestVersion = await this.backend.getManifestVersion();
|
|
529
|
+
}
|
|
530
|
+
finally {
|
|
531
|
+
await this.backend.releaseCompactionLock(compactLock);
|
|
532
|
+
}
|
|
533
|
+
}
|
|
336
534
|
async _archive(predicate, segment) {
|
|
337
535
|
const toArchive = new Map();
|
|
338
536
|
for (const [id, value] of this.records) {
|
|
@@ -342,7 +540,7 @@ export class Store {
|
|
|
342
540
|
if (toArchive.size === 0)
|
|
343
541
|
return 0;
|
|
344
542
|
const period = segment ?? this.defaultPeriod();
|
|
345
|
-
const segmentPath = await writeArchiveSegment(
|
|
543
|
+
const segmentPath = await this.backend.writeArchiveSegment(period, toArchive);
|
|
346
544
|
if (!this.archiveSegments.includes(segmentPath)) {
|
|
347
545
|
this.archiveSegments.push(segmentPath);
|
|
348
546
|
}
|
|
@@ -353,13 +551,67 @@ export class Store {
|
|
|
353
551
|
await this._compact();
|
|
354
552
|
return toArchive.size;
|
|
355
553
|
}
|
|
554
|
+
async _refresh() {
|
|
555
|
+
const manifest = await this.backend.readManifest();
|
|
556
|
+
if (!manifest)
|
|
557
|
+
throw new Error("Manifest not found during refresh");
|
|
558
|
+
const { records, version } = await this.backend.loadSnapshot(manifest.currentSnapshot);
|
|
559
|
+
this.records = records;
|
|
560
|
+
this.version = version;
|
|
561
|
+
this.archiveSegments = manifest.archiveSegments;
|
|
562
|
+
this.archivedRecordCount = manifest.stats.archivedRecords;
|
|
563
|
+
this.created = manifest.stats.created;
|
|
564
|
+
// Read all agent ops
|
|
565
|
+
const allOps = [];
|
|
566
|
+
if (manifest.activeAgentOps) {
|
|
567
|
+
for (const opsPath of Object.values(manifest.activeAgentOps)) {
|
|
568
|
+
const ops = await this.backend.readOps(opsPath);
|
|
569
|
+
allOps.push(...ops);
|
|
570
|
+
}
|
|
571
|
+
}
|
|
572
|
+
// Legacy single-writer ops
|
|
573
|
+
if (manifest.activeOps && !manifest.activeAgentOps) {
|
|
574
|
+
const ops = await this.backend.readOps(manifest.activeOps);
|
|
575
|
+
allOps.push(...ops);
|
|
576
|
+
}
|
|
577
|
+
// Merge-sort
|
|
578
|
+
allOps.sort((a, b) => {
|
|
579
|
+
const clockDiff = (a.clock ?? 0) - (b.clock ?? 0);
|
|
580
|
+
if (clockDiff !== 0)
|
|
581
|
+
return clockDiff;
|
|
582
|
+
return (a.agent ?? "").localeCompare(b.agent ?? "");
|
|
583
|
+
});
|
|
584
|
+
for (const op of allOps)
|
|
585
|
+
this.applyOp(op);
|
|
586
|
+
this.ops = allOps;
|
|
587
|
+
// Update clock
|
|
588
|
+
const maxClock = allOps.reduce((max, op) => Math.max(max, op.clock ?? 0), 0);
|
|
589
|
+
this.clock = new LamportClock(maxClock);
|
|
590
|
+
// Update our ops path if manifest changed
|
|
591
|
+
if (manifest.activeAgentOps?.[this.agentId]) {
|
|
592
|
+
this.activeOpsPath = manifest.activeAgentOps[this.agentId];
|
|
593
|
+
}
|
|
594
|
+
else {
|
|
595
|
+
// Our ops file is not in the manifest (compaction happened)
|
|
596
|
+
this.activeOpsPath = await this.backend.createAgentOpsFile(this.agentId);
|
|
597
|
+
const updatedManifest = {
|
|
598
|
+
...manifest,
|
|
599
|
+
activeAgentOps: {
|
|
600
|
+
...(manifest.activeAgentOps ?? {}),
|
|
601
|
+
[this.agentId]: this.activeOpsPath,
|
|
602
|
+
},
|
|
603
|
+
};
|
|
604
|
+
await this.backend.writeManifest(updatedManifest);
|
|
605
|
+
}
|
|
606
|
+
this.manifestVersion = await this.backend.getManifestVersion();
|
|
607
|
+
}
|
|
356
608
|
// --- Helpers ---
|
|
357
609
|
ensureOpen() {
|
|
358
610
|
if (!this.opened)
|
|
359
611
|
throw new Error("Store is not open. Call open() first.");
|
|
360
612
|
}
|
|
361
613
|
ensureWritable() {
|
|
362
|
-
if (this.
|
|
614
|
+
if (this.coreOpts.readOnly)
|
|
363
615
|
throw new Error("Store is read-only. Cannot perform mutations.");
|
|
364
616
|
}
|
|
365
617
|
applyOp(op) {
|
|
@@ -375,19 +627,27 @@ export class Store {
|
|
|
375
627
|
// Was a create — reverse by deleting
|
|
376
628
|
this.records.delete(op.id);
|
|
377
629
|
}
|
|
630
|
+
else if (op.encoding === "delta") {
|
|
631
|
+
// Delta-encoded: apply the reverse patch to the current record
|
|
632
|
+
const current = this.records.get(op.id);
|
|
633
|
+
if (current) {
|
|
634
|
+
const restored = applyDelta(current, op.prev);
|
|
635
|
+
this.records.set(op.id, restored);
|
|
636
|
+
}
|
|
637
|
+
}
|
|
378
638
|
else if (op.op === "delete") {
|
|
379
|
-
// Was a delete — reverse by restoring
|
|
639
|
+
// Was a delete — reverse by restoring full prev
|
|
380
640
|
this.records.set(op.id, op.prev);
|
|
381
641
|
}
|
|
382
642
|
else {
|
|
383
|
-
// Was an update — reverse by restoring prev
|
|
643
|
+
// Was an update — reverse by restoring full prev
|
|
384
644
|
this.records.set(op.id, op.prev);
|
|
385
645
|
}
|
|
386
646
|
}
|
|
387
647
|
async persistOp(op) {
|
|
388
|
-
await
|
|
648
|
+
await this.backend.appendOps(this.activeOpsPath, [op]);
|
|
389
649
|
this.ops.push(op);
|
|
390
|
-
if (this.ops.length >= this.
|
|
650
|
+
if (this.ops.length >= this.coreOpts.checkpointThreshold) {
|
|
391
651
|
await this._compact();
|
|
392
652
|
}
|
|
393
653
|
}
|
package/dist/types.d.ts
CHANGED
|
@@ -11,6 +11,10 @@ export interface Operation<T = Record<string, unknown>> {
|
|
|
11
11
|
prev: T | null;
|
|
12
12
|
/** Encoding format for prev field. Omitted or "full" = full record. "delta" = JSON Patch (future). */
|
|
13
13
|
encoding?: "full" | "delta";
|
|
14
|
+
/** Agent ID (present in multi-writer mode) */
|
|
15
|
+
agent?: string;
|
|
16
|
+
/** Lamport clock value (present in multi-writer mode) */
|
|
17
|
+
clock?: number;
|
|
14
18
|
}
|
|
15
19
|
export interface Snapshot<T = Record<string, unknown>> {
|
|
16
20
|
version: number;
|
|
@@ -21,6 +25,8 @@ export interface Manifest {
|
|
|
21
25
|
version: number;
|
|
22
26
|
currentSnapshot: string;
|
|
23
27
|
activeOps: string;
|
|
28
|
+
/** Per-agent ops file paths (multi-writer mode). Keys are agent IDs. */
|
|
29
|
+
activeAgentOps?: Record<string, string>;
|
|
24
30
|
archiveSegments: string[];
|
|
25
31
|
stats: ManifestStats;
|
|
26
32
|
}
|
|
@@ -48,9 +54,46 @@ export interface StoreOptions {
|
|
|
48
54
|
migrate?: (record: unknown, fromVersion: number) => unknown;
|
|
49
55
|
/** Open in read-only mode: skips directory lock, rejects all mutations. */
|
|
50
56
|
readOnly?: boolean;
|
|
57
|
+
/** Storage backend implementation (default: FsBackend). */
|
|
58
|
+
backend?: StorageBackend;
|
|
59
|
+
/** Agent ID for multi-writer mode. Enables per-agent WAL streams and LWW conflict resolution. */
|
|
60
|
+
agentId?: string;
|
|
51
61
|
}
|
|
52
62
|
export interface StoreStats {
|
|
53
63
|
activeRecords: number;
|
|
54
64
|
opsCount: number;
|
|
55
65
|
archiveSegments: number;
|
|
56
66
|
}
|
|
67
|
+
/** Opaque lock handle returned by StorageBackend locking methods. */
|
|
68
|
+
export interface LockHandle {
|
|
69
|
+
}
|
|
70
|
+
/** Pluggable storage backend for opslog. */
|
|
71
|
+
export interface StorageBackend {
|
|
72
|
+
/** Initialize the backend (create directories, etc.). Called once during store.open(). */
|
|
73
|
+
initialize(dir: string, opts: {
|
|
74
|
+
readOnly: boolean;
|
|
75
|
+
}): Promise<void>;
|
|
76
|
+
/** Shut down the backend. Called during store.close(). */
|
|
77
|
+
shutdown(): Promise<void>;
|
|
78
|
+
readManifest(): Promise<Manifest | null>;
|
|
79
|
+
writeManifest(manifest: Manifest): Promise<void>;
|
|
80
|
+
writeSnapshot(records: Map<string, unknown>, version: number): Promise<string>;
|
|
81
|
+
loadSnapshot(relativePath: string): Promise<{
|
|
82
|
+
records: Map<string, unknown>;
|
|
83
|
+
version: number;
|
|
84
|
+
}>;
|
|
85
|
+
appendOps(relativePath: string, ops: Operation[]): Promise<void>;
|
|
86
|
+
readOps(relativePath: string): Promise<Operation[]>;
|
|
87
|
+
truncateLastOp(relativePath: string): Promise<boolean>;
|
|
88
|
+
createOpsFile(): Promise<string>;
|
|
89
|
+
writeArchiveSegment(period: string, records: Map<string, unknown>): Promise<string>;
|
|
90
|
+
loadArchiveSegment(relativePath: string): Promise<Map<string, unknown>>;
|
|
91
|
+
listArchiveSegments(): Promise<string[]>;
|
|
92
|
+
acquireLock(): Promise<LockHandle>;
|
|
93
|
+
releaseLock(handle: LockHandle): Promise<void>;
|
|
94
|
+
createAgentOpsFile(agentId: string): Promise<string>;
|
|
95
|
+
listOpsFiles(): Promise<string[]>;
|
|
96
|
+
acquireCompactionLock(): Promise<LockHandle>;
|
|
97
|
+
releaseCompactionLock(handle: LockHandle): Promise<void>;
|
|
98
|
+
getManifestVersion(): Promise<string | null>;
|
|
99
|
+
}
|
package/dist/validate.js
CHANGED
|
@@ -24,6 +24,14 @@ export function validateOp(raw) {
|
|
|
24
24
|
if ("encoding" in obj && obj.encoding !== "full" && obj.encoding !== "delta") {
|
|
25
25
|
throw new Error(`Invalid operation: encoding must be "full" or "delta", got "${obj.encoding}"`);
|
|
26
26
|
}
|
|
27
|
+
if ("agent" in obj && (typeof obj.agent !== "string" || obj.agent.length === 0)) {
|
|
28
|
+
throw new Error("Invalid operation: agent must be a non-empty string");
|
|
29
|
+
}
|
|
30
|
+
if ("clock" in obj) {
|
|
31
|
+
if (typeof obj.clock !== "number" || !Number.isFinite(obj.clock) || !Number.isInteger(obj.clock) || obj.clock < 0) {
|
|
32
|
+
throw new Error("Invalid operation: clock must be a non-negative integer");
|
|
33
|
+
}
|
|
34
|
+
}
|
|
27
35
|
return raw;
|
|
28
36
|
}
|
|
29
37
|
export function validateManifest(raw) {
|
|
@@ -58,6 +66,16 @@ export function validateManifest(raw) {
|
|
|
58
66
|
throw new Error("Invalid manifest: stats.created must be a non-empty string");
|
|
59
67
|
if (typeof stats.lastCheckpoint !== "string" || stats.lastCheckpoint.length === 0)
|
|
60
68
|
throw new Error("Invalid manifest: stats.lastCheckpoint must be a non-empty string");
|
|
69
|
+
if ("activeAgentOps" in obj && obj.activeAgentOps !== undefined) {
|
|
70
|
+
if (typeof obj.activeAgentOps !== "object" || obj.activeAgentOps === null || Array.isArray(obj.activeAgentOps)) {
|
|
71
|
+
throw new Error("Invalid manifest: activeAgentOps must be an object");
|
|
72
|
+
}
|
|
73
|
+
for (const [, val] of Object.entries(obj.activeAgentOps)) {
|
|
74
|
+
if (typeof val !== "string" || val.length === 0) {
|
|
75
|
+
throw new Error("Invalid manifest: activeAgentOps values must be non-empty strings");
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
}
|
|
61
79
|
return raw;
|
|
62
80
|
}
|
|
63
81
|
export function validateSnapshot(raw) {
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@backloghq/opslog",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.4.0",
|
|
4
4
|
"description": "Embedded event-sourced document store. Append-only operation log with immutable snapshots, zero native dependencies.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"types": "dist/index.d.ts",
|