@git-stunts/git-warp 10.8.0 → 11.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -32
- package/SECURITY.md +64 -0
- package/bin/cli/commands/check.js +168 -0
- package/bin/cli/commands/doctor/checks.js +422 -0
- package/bin/cli/commands/doctor/codes.js +46 -0
- package/bin/cli/commands/doctor/index.js +239 -0
- package/bin/cli/commands/doctor/types.js +89 -0
- package/bin/cli/commands/history.js +80 -0
- package/bin/cli/commands/info.js +139 -0
- package/bin/cli/commands/install-hooks.js +128 -0
- package/bin/cli/commands/materialize.js +99 -0
- package/bin/cli/commands/patch.js +142 -0
- package/bin/cli/commands/path.js +88 -0
- package/bin/cli/commands/query.js +235 -0
- package/bin/cli/commands/registry.js +32 -0
- package/bin/cli/commands/seek.js +598 -0
- package/bin/cli/commands/tree.js +230 -0
- package/bin/cli/commands/trust.js +154 -0
- package/bin/cli/commands/verify-audit.js +114 -0
- package/bin/cli/commands/view.js +46 -0
- package/bin/cli/infrastructure.js +350 -0
- package/bin/cli/schemas.js +177 -0
- package/bin/cli/shared.js +244 -0
- package/bin/cli/types.js +96 -0
- package/bin/presenters/index.js +41 -9
- package/bin/presenters/json.js +14 -12
- package/bin/presenters/text.js +286 -28
- package/bin/warp-graph.js +5 -2346
- package/index.d.ts +111 -21
- package/index.js +2 -0
- package/package.json +10 -8
- package/src/domain/WarpGraph.js +109 -3252
- package/src/domain/crdt/ORSet.js +8 -8
- package/src/domain/errors/EmptyMessageError.js +2 -2
- package/src/domain/errors/ForkError.js +1 -1
- package/src/domain/errors/IndexError.js +1 -1
- package/src/domain/errors/OperationAbortedError.js +1 -1
- package/src/domain/errors/QueryError.js +3 -3
- package/src/domain/errors/SchemaUnsupportedError.js +1 -1
- package/src/domain/errors/ShardCorruptionError.js +2 -2
- package/src/domain/errors/ShardLoadError.js +2 -2
- package/src/domain/errors/ShardValidationError.js +4 -4
- package/src/domain/errors/StorageError.js +2 -2
- package/src/domain/errors/SyncError.js +1 -1
- package/src/domain/errors/TraversalError.js +1 -1
- package/src/domain/errors/TrustError.js +29 -0
- package/src/domain/errors/WarpError.js +2 -2
- package/src/domain/errors/WormholeError.js +1 -1
- package/src/domain/errors/index.js +1 -0
- package/src/domain/services/AuditMessageCodec.js +137 -0
- package/src/domain/services/AuditReceiptService.js +471 -0
- package/src/domain/services/AuditVerifierService.js +707 -0
- package/src/domain/services/BitmapIndexBuilder.js +3 -3
- package/src/domain/services/BitmapIndexReader.js +28 -19
- package/src/domain/services/BoundaryTransitionRecord.js +18 -17
- package/src/domain/services/CheckpointSerializerV5.js +17 -16
- package/src/domain/services/CheckpointService.js +2 -2
- package/src/domain/services/CommitDagTraversalService.js +13 -13
- package/src/domain/services/DagPathFinding.js +7 -7
- package/src/domain/services/DagTopology.js +1 -1
- package/src/domain/services/DagTraversal.js +1 -1
- package/src/domain/services/HealthCheckService.js +1 -1
- package/src/domain/services/HookInstaller.js +1 -1
- package/src/domain/services/HttpSyncServer.js +120 -55
- package/src/domain/services/IndexRebuildService.js +7 -7
- package/src/domain/services/IndexStalenessChecker.js +4 -3
- package/src/domain/services/JoinReducer.js +11 -11
- package/src/domain/services/LogicalTraversal.js +1 -1
- package/src/domain/services/MessageCodecInternal.js +4 -1
- package/src/domain/services/MessageSchemaDetector.js +2 -2
- package/src/domain/services/MigrationService.js +1 -1
- package/src/domain/services/ObserverView.js +8 -8
- package/src/domain/services/PatchBuilderV2.js +42 -26
- package/src/domain/services/ProvenanceIndex.js +1 -1
- package/src/domain/services/ProvenancePayload.js +1 -1
- package/src/domain/services/QueryBuilder.js +3 -3
- package/src/domain/services/StateDiff.js +14 -11
- package/src/domain/services/StateSerializerV5.js +2 -2
- package/src/domain/services/StreamingBitmapIndexBuilder.js +26 -24
- package/src/domain/services/SyncAuthService.js +71 -4
- package/src/domain/services/SyncProtocol.js +25 -11
- package/src/domain/services/TemporalQuery.js +9 -6
- package/src/domain/services/TranslationCost.js +7 -5
- package/src/domain/services/WarpMessageCodec.js +4 -1
- package/src/domain/services/WormholeService.js +16 -7
- package/src/domain/trust/TrustCanonical.js +42 -0
- package/src/domain/trust/TrustCrypto.js +111 -0
- package/src/domain/trust/TrustEvaluator.js +195 -0
- package/src/domain/trust/TrustRecordService.js +281 -0
- package/src/domain/trust/TrustStateBuilder.js +222 -0
- package/src/domain/trust/canonical.js +68 -0
- package/src/domain/trust/reasonCodes.js +64 -0
- package/src/domain/trust/schemas.js +160 -0
- package/src/domain/trust/verdict.js +42 -0
- package/src/domain/types/TickReceipt.js +1 -1
- package/src/domain/types/WarpErrors.js +45 -0
- package/src/domain/types/WarpOptions.js +29 -0
- package/src/domain/types/WarpPersistence.js +41 -0
- package/src/domain/types/WarpTypes.js +2 -2
- package/src/domain/types/WarpTypesV2.js +2 -2
- package/src/domain/types/git-cas.d.ts +20 -0
- package/src/domain/utils/MinHeap.js +6 -5
- package/src/domain/utils/RefLayout.js +59 -0
- package/src/domain/utils/canonicalStringify.js +5 -4
- package/src/domain/utils/roaring.js +31 -5
- package/src/domain/warp/PatchSession.js +26 -17
- package/src/domain/warp/Writer.js +18 -3
- package/src/domain/warp/_internal.js +26 -0
- package/src/domain/warp/_wire.js +58 -0
- package/src/domain/warp/_wiredMethods.d.ts +254 -0
- package/src/domain/warp/checkpoint.methods.js +401 -0
- package/src/domain/warp/fork.methods.js +323 -0
- package/src/domain/warp/materialize.methods.js +238 -0
- package/src/domain/warp/materializeAdvanced.methods.js +350 -0
- package/src/domain/warp/patch.methods.js +554 -0
- package/src/domain/warp/provenance.methods.js +286 -0
- package/src/domain/warp/query.methods.js +280 -0
- package/src/domain/warp/subscribe.methods.js +272 -0
- package/src/domain/warp/sync.methods.js +554 -0
- package/src/globals.d.ts +64 -0
- package/src/infrastructure/adapters/BunHttpAdapter.js +14 -9
- package/src/infrastructure/adapters/CasSeekCacheAdapter.js +9 -4
- package/src/infrastructure/adapters/DenoHttpAdapter.js +5 -6
- package/src/infrastructure/adapters/GitGraphAdapter.js +79 -11
- package/src/infrastructure/adapters/InMemoryGraphAdapter.js +36 -0
- package/src/infrastructure/adapters/NodeHttpAdapter.js +2 -2
- package/src/infrastructure/adapters/WebCryptoAdapter.js +2 -2
- package/src/ports/CommitPort.js +10 -0
- package/src/ports/RefPort.js +17 -0
- package/src/visualization/layouts/converters.js +2 -2
- package/src/visualization/layouts/elkAdapter.js +1 -1
- package/src/visualization/layouts/elkLayout.js +10 -7
- package/src/visualization/layouts/index.js +1 -1
- package/src/visualization/renderers/ascii/seek.js +16 -6
- package/src/visualization/renderers/svg/index.js +1 -1
- package/src/hooks/post-merge.sh +0 -60
package/src/domain/WarpGraph.js
CHANGED
|
@@ -8,105 +8,31 @@
|
|
|
8
8
|
* @see WARP Spec Section 11
|
|
9
9
|
*/
|
|
10
10
|
|
|
11
|
-
import { validateGraphName, validateWriterId
|
|
12
|
-
import {
|
|
13
|
-
import {
|
|
14
|
-
import {
|
|
15
|
-
import {
|
|
16
|
-
import { ProvenancePayload } from './services/ProvenancePayload.js';
|
|
17
|
-
import { diffStates, isEmptyDiff } from './services/StateDiff.js';
|
|
18
|
-
import { orsetContains, orsetElements } from './crdt/ORSet.js';
|
|
11
|
+
import { validateGraphName, validateWriterId } from './utils/RefLayout.js';
|
|
12
|
+
import { createVersionVector } from './crdt/VersionVector.js';
|
|
13
|
+
import { DEFAULT_GC_POLICY } from './services/GCPolicy.js';
|
|
14
|
+
import { AuditReceiptService } from './services/AuditReceiptService.js';
|
|
15
|
+
import { TemporalQuery } from './services/TemporalQuery.js';
|
|
19
16
|
import defaultCodec from './utils/defaultCodec.js';
|
|
20
17
|
import defaultCrypto from './utils/defaultCrypto.js';
|
|
21
|
-
import
|
|
22
|
-
import { loadCheckpoint, materializeIncremental, create as createCheckpointCommit } from './services/CheckpointService.js';
|
|
23
|
-
import { createFrontier, updateFrontier } from './services/Frontier.js';
|
|
24
|
-
import { createVersionVector, vvClone, vvIncrement } from './crdt/VersionVector.js';
|
|
25
|
-
import { DEFAULT_GC_POLICY, shouldRunGC, executeGC } from './services/GCPolicy.js';
|
|
26
|
-
import { collectGCMetrics } from './services/GCMetrics.js';
|
|
27
|
-
import { computeAppliedVV, serializeFullStateV5, deserializeFullStateV5 } from './services/CheckpointSerializerV5.js';
|
|
28
|
-
import { computeStateHashV5 } from './services/StateSerializerV5.js';
|
|
29
|
-
import {
|
|
30
|
-
createSyncRequest,
|
|
31
|
-
processSyncRequest,
|
|
32
|
-
applySyncResponse,
|
|
33
|
-
syncNeeded,
|
|
34
|
-
} from './services/SyncProtocol.js';
|
|
35
|
-
import { retry, timeout, RetryExhaustedError, TimeoutError } from '@git-stunts/alfred';
|
|
36
|
-
import { Writer } from './warp/Writer.js';
|
|
37
|
-
import { generateWriterId, resolveWriterId } from './utils/WriterId.js';
|
|
38
|
-
import QueryBuilder from './services/QueryBuilder.js';
|
|
18
|
+
import defaultClock from './utils/defaultClock.js';
|
|
39
19
|
import LogicalTraversal from './services/LogicalTraversal.js';
|
|
40
|
-
import ObserverView from './services/ObserverView.js';
|
|
41
|
-
import { computeTranslationCost } from './services/TranslationCost.js';
|
|
42
20
|
import LRUCache from './utils/LRUCache.js';
|
|
43
|
-
import
|
|
44
|
-
import
|
|
45
|
-
import
|
|
46
|
-
import
|
|
47
|
-
import
|
|
48
|
-
import
|
|
49
|
-
import
|
|
50
|
-
import
|
|
51
|
-
import
|
|
52
|
-
import
|
|
53
|
-
import { buildSeekCacheKey } from './utils/seekCacheKey.js';
|
|
54
|
-
import defaultClock from './utils/defaultClock.js';
|
|
21
|
+
import { wireWarpMethods } from './warp/_wire.js';
|
|
22
|
+
import * as queryMethods from './warp/query.methods.js';
|
|
23
|
+
import * as subscribeMethods from './warp/subscribe.methods.js';
|
|
24
|
+
import * as provenanceMethods from './warp/provenance.methods.js';
|
|
25
|
+
import * as forkMethods from './warp/fork.methods.js';
|
|
26
|
+
import * as syncMethods from './warp/sync.methods.js';
|
|
27
|
+
import * as checkpointMethods from './warp/checkpoint.methods.js';
|
|
28
|
+
import * as patchMethods from './warp/patch.methods.js';
|
|
29
|
+
import * as materializeMethods from './warp/materialize.methods.js';
|
|
30
|
+
import * as materializeAdvancedMethods from './warp/materializeAdvanced.methods.js';
|
|
55
31
|
|
|
56
32
|
/**
|
|
57
33
|
* @typedef {import('../ports/GraphPersistencePort.js').default & import('../ports/RefPort.js').default & import('../ports/CommitPort.js').default & import('../ports/BlobPort.js').default & import('../ports/TreePort.js').default & import('../ports/ConfigPort.js').default} FullPersistence
|
|
58
34
|
*/
|
|
59
35
|
|
|
60
|
-
const DEFAULT_SYNC_SERVER_MAX_BYTES = 4 * 1024 * 1024;
|
|
61
|
-
const DEFAULT_SYNC_WITH_RETRIES = 3;
|
|
62
|
-
const DEFAULT_SYNC_WITH_BASE_DELAY_MS = 250;
|
|
63
|
-
const DEFAULT_SYNC_WITH_MAX_DELAY_MS = 2000;
|
|
64
|
-
const DEFAULT_SYNC_WITH_TIMEOUT_MS = 10_000;
|
|
65
|
-
|
|
66
|
-
/**
|
|
67
|
-
* Normalizes a sync endpoint path to ensure it starts with '/'.
|
|
68
|
-
* Returns '/sync' if no path is provided.
|
|
69
|
-
*
|
|
70
|
-
* @param {string|undefined|null} path - The sync path to normalize
|
|
71
|
-
* @returns {string} Normalized path starting with '/'
|
|
72
|
-
* @private
|
|
73
|
-
*/
|
|
74
|
-
function normalizeSyncPath(path) {
|
|
75
|
-
if (!path) {
|
|
76
|
-
return '/sync';
|
|
77
|
-
}
|
|
78
|
-
return path.startsWith('/') ? path : `/${path}`;
|
|
79
|
-
}
|
|
80
|
-
|
|
81
|
-
/**
|
|
82
|
-
* Builds auth headers for an outgoing sync request if auth is configured.
|
|
83
|
-
*
|
|
84
|
-
* @param {Object} params
|
|
85
|
-
* @param {{ secret: string, keyId?: string }|undefined} params.auth
|
|
86
|
-
* @param {string} params.bodyStr - Serialized request body
|
|
87
|
-
* @param {URL} params.targetUrl
|
|
88
|
-
* @param {import('../ports/CryptoPort.js').default} params.crypto
|
|
89
|
-
* @returns {Promise<Record<string, string>>}
|
|
90
|
-
* @private
|
|
91
|
-
*/
|
|
92
|
-
async function buildSyncAuthHeaders({ auth, bodyStr, targetUrl, crypto }) {
|
|
93
|
-
if (!auth || !auth.secret) {
|
|
94
|
-
return {};
|
|
95
|
-
}
|
|
96
|
-
const bodyBuf = new TextEncoder().encode(bodyStr);
|
|
97
|
-
return await signSyncRequest(
|
|
98
|
-
{
|
|
99
|
-
method: 'POST',
|
|
100
|
-
path: canonicalizePath(targetUrl.pathname + (targetUrl.search || '')),
|
|
101
|
-
contentType: 'application/json',
|
|
102
|
-
body: bodyBuf,
|
|
103
|
-
secret: auth.secret,
|
|
104
|
-
keyId: auth.keyId || 'default',
|
|
105
|
-
},
|
|
106
|
-
{ crypto },
|
|
107
|
-
);
|
|
108
|
-
}
|
|
109
|
-
|
|
110
36
|
const DEFAULT_ADJACENCY_CACHE_SIZE = 3;
|
|
111
37
|
|
|
112
38
|
/**
|
|
@@ -129,15 +55,16 @@ export default class WarpGraph {
|
|
|
129
55
|
* @param {Object} [options.gcPolicy] - GC policy configuration (overrides defaults)
|
|
130
56
|
* @param {number} [options.adjacencyCacheSize] - Max materialized adjacency cache entries
|
|
131
57
|
* @param {{every: number}} [options.checkpointPolicy] - Auto-checkpoint policy; creates a checkpoint every N patches
|
|
132
|
-
* @param {boolean} [options.autoMaterialize=
|
|
58
|
+
* @param {boolean} [options.autoMaterialize=true] - If true, query methods auto-materialize instead of throwing
|
|
133
59
|
* @param {'reject'|'cascade'|'warn'} [options.onDeleteWithData='warn'] - Policy when deleting a node that still has edges or properties
|
|
134
60
|
* @param {import('../ports/LoggerPort.js').default} [options.logger] - Logger for structured logging
|
|
135
61
|
* @param {import('../ports/ClockPort.js').default} [options.clock] - Clock for timing instrumentation (defaults to performance-based clock)
|
|
136
62
|
* @param {import('../ports/CryptoPort.js').default} [options.crypto] - Crypto adapter for hashing
|
|
137
63
|
* @param {import('../ports/CodecPort.js').default} [options.codec] - Codec for CBOR serialization (defaults to domain-local codec)
|
|
138
64
|
* @param {import('../ports/SeekCachePort.js').default} [options.seekCache] - Persistent cache for seek materialization (optional)
|
|
65
|
+
* @param {boolean} [options.audit=false] - If true, creates audit receipts for each data commit
|
|
139
66
|
*/
|
|
140
|
-
constructor({ persistence, graphName, writerId, gcPolicy = {}, adjacencyCacheSize = DEFAULT_ADJACENCY_CACHE_SIZE, checkpointPolicy, autoMaterialize =
|
|
67
|
+
constructor({ persistence, graphName, writerId, gcPolicy = {}, adjacencyCacheSize = DEFAULT_ADJACENCY_CACHE_SIZE, checkpointPolicy, autoMaterialize = true, onDeleteWithData = 'warn', logger, clock, crypto, codec, seekCache, audit = false }) {
|
|
141
68
|
/** @type {FullPersistence} */
|
|
142
69
|
this._persistence = /** @type {FullPersistence} */ (persistence);
|
|
143
70
|
|
|
@@ -168,6 +95,9 @@ export default class WarpGraph {
|
|
|
168
95
|
/** @type {number} */
|
|
169
96
|
this._patchesSinceCheckpoint = 0;
|
|
170
97
|
|
|
98
|
+
/** @type {number} */
|
|
99
|
+
this._maxObservedLamport = 0;
|
|
100
|
+
|
|
171
101
|
/** @type {{every: number}|null} */
|
|
172
102
|
this._checkpointPolicy = checkpointPolicy || null;
|
|
173
103
|
|
|
@@ -228,8 +158,20 @@ export default class WarpGraph {
|
|
|
228
158
|
/** @type {import('../ports/SeekCachePort.js').default|null} */
|
|
229
159
|
this._seekCache = seekCache || null;
|
|
230
160
|
|
|
161
|
+
/** @type {boolean} */
|
|
162
|
+
this._patchInProgress = false;
|
|
163
|
+
|
|
231
164
|
/** @type {boolean} */
|
|
232
165
|
this._provenanceDegraded = false;
|
|
166
|
+
|
|
167
|
+
/** @type {boolean} */
|
|
168
|
+
this._audit = !!audit;
|
|
169
|
+
|
|
170
|
+
/** @type {AuditReceiptService|null} */
|
|
171
|
+
this._auditService = null;
|
|
172
|
+
|
|
173
|
+
/** @type {number} */
|
|
174
|
+
this._auditSkipCount = 0;
|
|
233
175
|
}
|
|
234
176
|
|
|
235
177
|
/**
|
|
@@ -259,7 +201,6 @@ export default class WarpGraph {
|
|
|
259
201
|
* @param {Object} [opts] - Options
|
|
260
202
|
* @param {string} [opts.metrics] - Extra metrics string to append in parentheses
|
|
261
203
|
* @param {Error} [opts.error] - If set, logs a failure message instead
|
|
262
|
-
* @private
|
|
263
204
|
*/
|
|
264
205
|
_logTiming(op, t0, { metrics, error } = {}) {
|
|
265
206
|
if (!this._logger) {
|
|
@@ -291,6 +232,7 @@ export default class WarpGraph {
|
|
|
291
232
|
* @param {import('../ports/CryptoPort.js').default} [options.crypto] - Crypto adapter for hashing
|
|
292
233
|
* @param {import('../ports/CodecPort.js').default} [options.codec] - Codec for CBOR serialization (defaults to domain-local codec)
|
|
293
234
|
* @param {import('../ports/SeekCachePort.js').default} [options.seekCache] - Persistent cache for seek materialization (optional)
|
|
235
|
+
* @param {boolean} [options.audit=false] - If true, creates audit receipts for each data commit
|
|
294
236
|
* @returns {Promise<WarpGraph>} The opened graph instance
|
|
295
237
|
* @throws {Error} If graphName, writerId, checkpointPolicy, or onDeleteWithData is invalid
|
|
296
238
|
*
|
|
@@ -301,7 +243,7 @@ export default class WarpGraph {
|
|
|
301
243
|
* writerId: 'node-1'
|
|
302
244
|
* });
|
|
303
245
|
*/
|
|
304
|
-
static async open({ persistence, graphName, writerId, gcPolicy = {}, adjacencyCacheSize, checkpointPolicy, autoMaterialize, onDeleteWithData, logger, clock, crypto, codec, seekCache }) {
|
|
246
|
+
static async open({ persistence, graphName, writerId, gcPolicy = {}, adjacencyCacheSize, checkpointPolicy, autoMaterialize, onDeleteWithData, logger, clock, crypto, codec, seekCache, audit }) {
|
|
305
247
|
// Validate inputs
|
|
306
248
|
validateGraphName(graphName);
|
|
307
249
|
validateWriterId(writerId);
|
|
@@ -325,6 +267,11 @@ export default class WarpGraph {
|
|
|
325
267
|
throw new Error('autoMaterialize must be a boolean');
|
|
326
268
|
}
|
|
327
269
|
|
|
270
|
+
// Validate audit
|
|
271
|
+
if (audit !== undefined && typeof audit !== 'boolean') {
|
|
272
|
+
throw new Error('audit must be a boolean');
|
|
273
|
+
}
|
|
274
|
+
|
|
328
275
|
// Validate onDeleteWithData
|
|
329
276
|
if (onDeleteWithData !== undefined) {
|
|
330
277
|
const valid = ['reject', 'cascade', 'warn'];
|
|
@@ -333,11 +280,24 @@ export default class WarpGraph {
|
|
|
333
280
|
}
|
|
334
281
|
}
|
|
335
282
|
|
|
336
|
-
const graph = new WarpGraph({ persistence, graphName, writerId, gcPolicy, adjacencyCacheSize, checkpointPolicy, autoMaterialize, onDeleteWithData, logger, clock, crypto, codec, seekCache });
|
|
283
|
+
const graph = new WarpGraph({ persistence, graphName, writerId, gcPolicy, adjacencyCacheSize, checkpointPolicy, autoMaterialize, onDeleteWithData, logger, clock, crypto, codec, seekCache, audit });
|
|
337
284
|
|
|
338
285
|
// Validate migration boundary
|
|
339
286
|
await graph._validateMigrationBoundary();
|
|
340
287
|
|
|
288
|
+
// Initialize audit service if enabled
|
|
289
|
+
if (graph._audit) {
|
|
290
|
+
graph._auditService = new AuditReceiptService({
|
|
291
|
+
persistence: /** @type {import('./types/WarpPersistence.js').CorePersistence} */ (persistence),
|
|
292
|
+
graphName,
|
|
293
|
+
writerId,
|
|
294
|
+
codec: graph._codec,
|
|
295
|
+
crypto: graph._crypto,
|
|
296
|
+
logger: graph._logger || undefined,
|
|
297
|
+
});
|
|
298
|
+
await graph._auditService.init();
|
|
299
|
+
}
|
|
300
|
+
|
|
341
301
|
return graph;
|
|
342
302
|
}
|
|
343
303
|
|
|
@@ -374,3188 +334,85 @@ export default class WarpGraph {
|
|
|
374
334
|
}
|
|
375
335
|
|
|
376
336
|
/**
|
|
377
|
-
*
|
|
378
|
-
*
|
|
379
|
-
* On successful commit, the internal `onCommitSuccess` callback receives
|
|
380
|
-
* `{ patch, sha }` where `patch` is the committed patch object and `sha`
|
|
381
|
-
* is the Git commit SHA. This updates the version vector and applies the
|
|
382
|
-
* patch to cached state for eager re-materialization.
|
|
383
|
-
*
|
|
384
|
-
* @returns {Promise<PatchBuilderV2>} A fluent patch builder
|
|
385
|
-
*
|
|
386
|
-
* @example
|
|
387
|
-
* const commitSha = await (await graph.createPatch())
|
|
388
|
-
* .addNode('user:alice')
|
|
389
|
-
* .setProperty('user:alice', 'name', 'Alice')
|
|
390
|
-
* .addEdge('user:alice', 'user:bob', 'follows')
|
|
391
|
-
* .commit();
|
|
392
|
-
*/
|
|
393
|
-
async createPatch() {
|
|
394
|
-
const { lamport, parentSha } = await this._nextLamport();
|
|
395
|
-
return new PatchBuilderV2({
|
|
396
|
-
persistence: this._persistence,
|
|
397
|
-
graphName: this._graphName,
|
|
398
|
-
writerId: this._writerId,
|
|
399
|
-
lamport,
|
|
400
|
-
versionVector: this._versionVector,
|
|
401
|
-
getCurrentState: () => this._cachedState,
|
|
402
|
-
expectedParentSha: parentSha,
|
|
403
|
-
onDeleteWithData: this._onDeleteWithData,
|
|
404
|
-
onCommitSuccess: (/** @type {{patch?: import('./types/WarpTypesV2.js').PatchV2, sha?: string}} */ opts) => this._onPatchCommitted(this._writerId, opts),
|
|
405
|
-
codec: this._codec,
|
|
406
|
-
logger: this._logger || undefined,
|
|
407
|
-
});
|
|
408
|
-
}
|
|
409
|
-
|
|
410
|
-
/**
|
|
411
|
-
* Returns patches from a writer's ref chain.
|
|
412
|
-
*
|
|
413
|
-
* @param {string} writerId - The writer ID to load patches for
|
|
414
|
-
* @param {string|null} [stopAtSha=null] - Stop walking when reaching this SHA (exclusive)
|
|
415
|
-
* @returns {Promise<Array<{patch: import('./types/WarpTypesV2.js').PatchV2, sha: string}>>} Array of patches
|
|
416
|
-
*/
|
|
417
|
-
async getWriterPatches(writerId, stopAtSha = null) {
|
|
418
|
-
return await this._loadWriterPatches(writerId, stopAtSha);
|
|
419
|
-
}
|
|
420
|
-
|
|
421
|
-
/**
|
|
422
|
-
* Gets the next lamport timestamp and current parent SHA for this writer.
|
|
423
|
-
* Reads from the current ref chain to determine values.
|
|
424
|
-
*
|
|
425
|
-
* @returns {Promise<{lamport: number, parentSha: string|null}>} The next lamport and current parent
|
|
426
|
-
* @private
|
|
427
|
-
*/
|
|
428
|
-
async _nextLamport() {
|
|
429
|
-
const writerRef = buildWriterRef(this._graphName, this._writerId);
|
|
430
|
-
const currentRefSha = await this._persistence.readRef(writerRef);
|
|
431
|
-
|
|
432
|
-
if (!currentRefSha) {
|
|
433
|
-
// First commit for this writer
|
|
434
|
-
return { lamport: 1, parentSha: null };
|
|
435
|
-
}
|
|
436
|
-
|
|
437
|
-
// Read the current patch commit to get its lamport timestamp
|
|
438
|
-
const commitMessage = await this._persistence.showNode(currentRefSha);
|
|
439
|
-
const kind = detectMessageKind(commitMessage);
|
|
440
|
-
|
|
441
|
-
if (kind !== 'patch') {
|
|
442
|
-
// Writer ref doesn't point to a patch commit - treat as first commit
|
|
443
|
-
return { lamport: 1, parentSha: currentRefSha };
|
|
444
|
-
}
|
|
445
|
-
|
|
446
|
-
try {
|
|
447
|
-
const patchInfo = decodePatchMessage(commitMessage);
|
|
448
|
-
return { lamport: patchInfo.lamport + 1, parentSha: currentRefSha };
|
|
449
|
-
} catch {
|
|
450
|
-
// Malformed message - error with actionable message
|
|
451
|
-
throw new Error(
|
|
452
|
-
`Failed to parse lamport from writer ref ${writerRef}: ` +
|
|
453
|
-
`commit ${currentRefSha} has invalid patch message format`
|
|
454
|
-
);
|
|
455
|
-
}
|
|
456
|
-
}
|
|
457
|
-
|
|
458
|
-
/**
|
|
459
|
-
* Loads all patches from a writer's ref chain.
|
|
460
|
-
*
|
|
461
|
-
* Walks commits from the tip SHA back to the first patch commit,
|
|
462
|
-
* collecting all patches along the way.
|
|
463
|
-
*
|
|
464
|
-
* @param {string} writerId - The writer ID to load patches for
|
|
465
|
-
* @param {string|null} [stopAtSha=null] - Stop walking when reaching this SHA (exclusive)
|
|
466
|
-
* @returns {Promise<Array<{patch: import('./types/WarpTypesV2.js').PatchV2, sha: string}>>} Array of patches
|
|
467
|
-
* @private
|
|
468
|
-
*/
|
|
469
|
-
async _loadWriterPatches(writerId, stopAtSha = null) {
|
|
470
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
471
|
-
const tipSha = await this._persistence.readRef(writerRef);
|
|
472
|
-
|
|
473
|
-
if (!tipSha) {
|
|
474
|
-
return [];
|
|
475
|
-
}
|
|
476
|
-
|
|
477
|
-
const patches = [];
|
|
478
|
-
let currentSha = tipSha;
|
|
479
|
-
|
|
480
|
-
while (currentSha && currentSha !== stopAtSha) {
|
|
481
|
-
// Get commit info and message
|
|
482
|
-
const nodeInfo = await this._persistence.getNodeInfo(currentSha);
|
|
483
|
-
const {message} = nodeInfo;
|
|
484
|
-
|
|
485
|
-
// Check if this is a patch commit
|
|
486
|
-
const kind = detectMessageKind(message);
|
|
487
|
-
if (kind !== 'patch') {
|
|
488
|
-
// Not a patch commit, stop walking
|
|
489
|
-
break;
|
|
490
|
-
}
|
|
491
|
-
|
|
492
|
-
// Decode the patch message to get patchOid
|
|
493
|
-
const patchMeta = decodePatchMessage(message);
|
|
494
|
-
|
|
495
|
-
// Read the patch blob
|
|
496
|
-
const patchBuffer = await this._persistence.readBlob(patchMeta.patchOid);
|
|
497
|
-
const patch = /** @type {import('./types/WarpTypesV2.js').PatchV2} */ (this._codec.decode(patchBuffer));
|
|
498
|
-
|
|
499
|
-
patches.push({ patch, sha: currentSha });
|
|
500
|
-
|
|
501
|
-
// Move to parent commit
|
|
502
|
-
if (nodeInfo.parents && nodeInfo.parents.length > 0) {
|
|
503
|
-
currentSha = nodeInfo.parents[0];
|
|
504
|
-
} else {
|
|
505
|
-
break;
|
|
506
|
-
}
|
|
507
|
-
}
|
|
508
|
-
|
|
509
|
-
// Patches are collected in reverse order (newest first), reverse them
|
|
510
|
-
return patches.reverse();
|
|
511
|
-
}
|
|
512
|
-
|
|
513
|
-
/**
|
|
514
|
-
* Builds a deterministic adjacency map for the logical graph.
|
|
515
|
-
* @param {import('./services/JoinReducer.js').WarpStateV5} state
|
|
516
|
-
* @returns {{outgoing: Map<string, Array<{neighborId: string, label: string}>>, incoming: Map<string, Array<{neighborId: string, label: string}>>}}
|
|
517
|
-
* @private
|
|
518
|
-
*/
|
|
519
|
-
_buildAdjacency(state) {
|
|
520
|
-
const outgoing = new Map();
|
|
521
|
-
const incoming = new Map();
|
|
522
|
-
|
|
523
|
-
for (const edgeKey of orsetElements(state.edgeAlive)) {
|
|
524
|
-
const { from, to, label } = decodeEdgeKey(edgeKey);
|
|
525
|
-
|
|
526
|
-
if (!orsetContains(state.nodeAlive, from) || !orsetContains(state.nodeAlive, to)) {
|
|
527
|
-
continue;
|
|
528
|
-
}
|
|
529
|
-
|
|
530
|
-
if (!outgoing.has(from)) {
|
|
531
|
-
outgoing.set(from, []);
|
|
532
|
-
}
|
|
533
|
-
if (!incoming.has(to)) {
|
|
534
|
-
incoming.set(to, []);
|
|
535
|
-
}
|
|
536
|
-
|
|
537
|
-
outgoing.get(from).push({ neighborId: to, label });
|
|
538
|
-
incoming.get(to).push({ neighborId: from, label });
|
|
539
|
-
}
|
|
540
|
-
|
|
541
|
-
const sortNeighbors = (/** @type {Array<{neighborId: string, label: string}>} */ list) => {
|
|
542
|
-
list.sort((/** @type {{neighborId: string, label: string}} */ a, /** @type {{neighborId: string, label: string}} */ b) => {
|
|
543
|
-
if (a.neighborId !== b.neighborId) {
|
|
544
|
-
return a.neighborId < b.neighborId ? -1 : 1;
|
|
545
|
-
}
|
|
546
|
-
return a.label < b.label ? -1 : a.label > b.label ? 1 : 0;
|
|
547
|
-
});
|
|
548
|
-
};
|
|
549
|
-
|
|
550
|
-
for (const list of outgoing.values()) {
|
|
551
|
-
sortNeighbors(list);
|
|
552
|
-
}
|
|
553
|
-
|
|
554
|
-
for (const list of incoming.values()) {
|
|
555
|
-
sortNeighbors(list);
|
|
556
|
-
}
|
|
557
|
-
|
|
558
|
-
return { outgoing, incoming };
|
|
559
|
-
}
|
|
560
|
-
|
|
561
|
-
/**
|
|
562
|
-
* Sets the cached state and materialized graph details.
|
|
563
|
-
* @param {import('./services/JoinReducer.js').WarpStateV5} state
|
|
564
|
-
* @returns {Promise<MaterializedGraph>}
|
|
565
|
-
* @private
|
|
566
|
-
*/
|
|
567
|
-
async _setMaterializedState(state) {
|
|
568
|
-
this._cachedState = state;
|
|
569
|
-
this._stateDirty = false;
|
|
570
|
-
this._versionVector = vvClone(state.observedFrontier);
|
|
571
|
-
|
|
572
|
-
const stateHash = await computeStateHashV5(state, { crypto: this._crypto, codec: this._codec });
|
|
573
|
-
let adjacency;
|
|
574
|
-
|
|
575
|
-
if (this._adjacencyCache) {
|
|
576
|
-
adjacency = this._adjacencyCache.get(stateHash);
|
|
577
|
-
if (!adjacency) {
|
|
578
|
-
adjacency = this._buildAdjacency(state);
|
|
579
|
-
this._adjacencyCache.set(stateHash, adjacency);
|
|
580
|
-
}
|
|
581
|
-
} else {
|
|
582
|
-
adjacency = this._buildAdjacency(state);
|
|
583
|
-
}
|
|
584
|
-
|
|
585
|
-
this._materializedGraph = { state, stateHash, adjacency };
|
|
586
|
-
return this._materializedGraph;
|
|
587
|
-
}
|
|
588
|
-
|
|
589
|
-
/**
|
|
590
|
-
* Callback invoked after a patch is successfully committed.
|
|
591
|
-
*
|
|
592
|
-
* Updates version vector, patch count, cached state (if clean),
|
|
593
|
-
* provenance index, and frontier tracking.
|
|
337
|
+
* Gets the current GC policy.
|
|
594
338
|
*
|
|
595
|
-
* @
|
|
596
|
-
* @param {{patch?: import('./types/WarpTypesV2.js').PatchV2, sha?: string}} [opts] - Commit details
|
|
597
|
-
* @private
|
|
598
|
-
*/
|
|
599
|
-
async _onPatchCommitted(writerId, { patch, sha } = {}) {
|
|
600
|
-
vvIncrement(this._versionVector, writerId);
|
|
601
|
-
this._patchesSinceCheckpoint++;
|
|
602
|
-
// Eager re-materialize: apply the just-committed patch to cached state
|
|
603
|
-
// Only when the cache is clean — applying a patch to stale state would be incorrect
|
|
604
|
-
if (this._cachedState && !this._stateDirty && patch && sha) {
|
|
605
|
-
joinPatch(this._cachedState, /** @type {any} */ (patch), sha); // TODO(ts-cleanup): type patch array
|
|
606
|
-
await this._setMaterializedState(this._cachedState);
|
|
607
|
-
// Update provenance index with new patch
|
|
608
|
-
if (this._provenanceIndex) {
|
|
609
|
-
this._provenanceIndex.addPatch(sha, /** @type {string[]|undefined} */ (patch.reads), /** @type {string[]|undefined} */ (patch.writes));
|
|
610
|
-
}
|
|
611
|
-
// Keep _lastFrontier in sync so hasFrontierChanged() won't misreport stale
|
|
612
|
-
if (this._lastFrontier) {
|
|
613
|
-
this._lastFrontier.set(writerId, sha);
|
|
614
|
-
}
|
|
615
|
-
} else {
|
|
616
|
-
this._stateDirty = true;
|
|
617
|
-
}
|
|
618
|
-
}
|
|
619
|
-
|
|
620
|
-
/**
|
|
621
|
-
* Materializes the graph and returns the materialized graph details.
|
|
622
|
-
* @returns {Promise<MaterializedGraph>}
|
|
623
|
-
* @private
|
|
339
|
+
* @returns {Object} The GC policy configuration
|
|
624
340
|
*/
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
if (!this._materializedGraph || this._materializedGraph.state !== state) {
|
|
628
|
-
await this._setMaterializedState(/** @type {import('./services/JoinReducer.js').WarpStateV5} */ (state));
|
|
629
|
-
}
|
|
630
|
-
return /** @type {MaterializedGraph} */ (this._materializedGraph);
|
|
341
|
+
get gcPolicy() {
|
|
342
|
+
return { ...this._gcPolicy };
|
|
631
343
|
}
|
|
632
344
|
|
|
633
345
|
/**
|
|
634
|
-
*
|
|
635
|
-
*
|
|
636
|
-
* Discovers all writers, collects all patches from each writer's ref chain,
|
|
637
|
-
* and reduces them to produce the current state.
|
|
346
|
+
* Gets the temporal query interface for CTL*-style temporal operators.
|
|
638
347
|
*
|
|
639
|
-
*
|
|
348
|
+
* Returns a TemporalQuery instance that provides `always` and `eventually`
|
|
349
|
+
* operators for evaluating predicates across the graph's history.
|
|
640
350
|
*
|
|
641
|
-
*
|
|
642
|
-
* receipts is an array of TickReceipt objects (one per applied patch).
|
|
643
|
-
* When false or omitted (default), returns just the state for backward
|
|
644
|
-
* compatibility with zero receipt overhead.
|
|
351
|
+
* The instance is lazily created on first access and reused thereafter.
|
|
645
352
|
*
|
|
646
|
-
*
|
|
647
|
-
* instance-level `_seekCeiling`), delegates to a ceiling-aware path
|
|
648
|
-
* that replays only patches with `lamport <= ceiling`, bypassing
|
|
649
|
-
* checkpoints, auto-checkpoint, and GC.
|
|
353
|
+
* @returns {import('./services/TemporalQuery.js').TemporalQuery} Temporal query interface
|
|
650
354
|
*
|
|
651
|
-
*
|
|
652
|
-
*
|
|
653
|
-
*
|
|
355
|
+
* @example
|
|
356
|
+
* const alwaysActive = await graph.temporal.always(
|
|
357
|
+
* 'user:alice',
|
|
358
|
+
* n => n.props.status === 'active',
|
|
359
|
+
* { since: 0 }
|
|
360
|
+
* );
|
|
654
361
|
*
|
|
655
|
-
* @
|
|
656
|
-
*
|
|
657
|
-
*
|
|
658
|
-
*
|
|
362
|
+
* @example
|
|
363
|
+
* const eventuallyMerged = await graph.temporal.eventually(
|
|
364
|
+
* 'user:alice',
|
|
365
|
+
* n => n.props.status === 'merged'
|
|
366
|
+
* );
|
|
659
367
|
*/
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
const ceiling = this._resolveCeiling(options);
|
|
666
|
-
|
|
667
|
-
// When ceiling is active, delegate to ceiling-aware path (with its own cache)
|
|
668
|
-
if (ceiling !== null) {
|
|
669
|
-
return await this._materializeWithCeiling(ceiling, !!collectReceipts, t0);
|
|
670
|
-
}
|
|
671
|
-
|
|
672
|
-
try {
|
|
673
|
-
// Check for checkpoint
|
|
674
|
-
const checkpoint = await this._loadLatestCheckpoint();
|
|
675
|
-
|
|
676
|
-
/** @type {import('./services/JoinReducer.js').WarpStateV5|undefined} */
|
|
677
|
-
let state;
|
|
678
|
-
/** @type {import('./types/TickReceipt.js').TickReceipt[]|undefined} */
|
|
679
|
-
let receipts;
|
|
680
|
-
let patchCount = 0;
|
|
681
|
-
|
|
682
|
-
// If checkpoint exists, use incremental materialization
|
|
683
|
-
if (checkpoint?.schema === 2 || checkpoint?.schema === 3) {
|
|
684
|
-
const patches = await this._loadPatchesSince(checkpoint);
|
|
685
|
-
if (collectReceipts) {
|
|
686
|
-
const result = /** @type {{state: import('./services/JoinReducer.js').WarpStateV5, receipts: import('./types/TickReceipt.js').TickReceipt[]}} */ (reduceV5(/** @type {any} */ (patches), /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (checkpoint.state), { receipts: true })); // TODO(ts-cleanup): type patch array
|
|
687
|
-
state = result.state;
|
|
688
|
-
receipts = result.receipts;
|
|
689
|
-
} else {
|
|
690
|
-
state = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (reduceV5(/** @type {any} */ (patches), /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (checkpoint.state))); // TODO(ts-cleanup): type patch array
|
|
691
|
-
}
|
|
692
|
-
patchCount = patches.length;
|
|
693
|
-
|
|
694
|
-
// Build provenance index: start from checkpoint index if present, then add new patches
|
|
695
|
-
const ckPI = /** @type {any} */ (checkpoint).provenanceIndex; // TODO(ts-cleanup): type checkpoint cast
|
|
696
|
-
this._provenanceIndex = ckPI
|
|
697
|
-
? ckPI.clone()
|
|
698
|
-
: new ProvenanceIndex();
|
|
699
|
-
for (const { patch, sha } of patches) {
|
|
700
|
-
/** @type {import('./services/ProvenanceIndex.js').ProvenanceIndex} */ (this._provenanceIndex).addPatch(sha, patch.reads, patch.writes);
|
|
701
|
-
}
|
|
702
|
-
} else {
|
|
703
|
-
// 1. Discover all writers
|
|
704
|
-
const writerIds = await this.discoverWriters();
|
|
705
|
-
|
|
706
|
-
// 2. If no writers, return empty state
|
|
707
|
-
if (writerIds.length === 0) {
|
|
708
|
-
state = createEmptyStateV5();
|
|
709
|
-
this._provenanceIndex = new ProvenanceIndex();
|
|
710
|
-
if (collectReceipts) {
|
|
711
|
-
receipts = [];
|
|
712
|
-
}
|
|
713
|
-
} else {
|
|
714
|
-
// 3. For each writer, collect all patches
|
|
368
|
+
get temporal() {
|
|
369
|
+
if (!this._temporalQuery) {
|
|
370
|
+
this._temporalQuery = new TemporalQuery({
|
|
371
|
+
loadAllPatches: async () => {
|
|
372
|
+
const writerIds = await this.discoverWriters();
|
|
715
373
|
const allPatches = [];
|
|
716
374
|
for (const writerId of writerIds) {
|
|
717
375
|
const writerPatches = await this._loadWriterPatches(writerId);
|
|
718
376
|
allPatches.push(...writerPatches);
|
|
719
377
|
}
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
state = createEmptyStateV5();
|
|
724
|
-
this._provenanceIndex = new ProvenanceIndex();
|
|
725
|
-
if (collectReceipts) {
|
|
726
|
-
receipts = [];
|
|
727
|
-
}
|
|
728
|
-
} else {
|
|
729
|
-
// 5. Reduce all patches to state
|
|
730
|
-
if (collectReceipts) {
|
|
731
|
-
const result = /** @type {{state: import('./services/JoinReducer.js').WarpStateV5, receipts: import('./types/TickReceipt.js').TickReceipt[]}} */ (reduceV5(/** @type {any} */ (allPatches), undefined, { receipts: true })); // TODO(ts-cleanup): type patch array
|
|
732
|
-
state = result.state;
|
|
733
|
-
receipts = result.receipts;
|
|
734
|
-
} else {
|
|
735
|
-
state = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (reduceV5(/** @type {any} */ (allPatches))); // TODO(ts-cleanup): type patch array
|
|
736
|
-
}
|
|
737
|
-
patchCount = allPatches.length;
|
|
738
|
-
|
|
739
|
-
// Build provenance index from all patches
|
|
740
|
-
this._provenanceIndex = new ProvenanceIndex();
|
|
741
|
-
for (const { patch, sha } of allPatches) {
|
|
742
|
-
this._provenanceIndex.addPatch(sha, patch.reads, patch.writes);
|
|
743
|
-
}
|
|
744
|
-
}
|
|
745
|
-
}
|
|
746
|
-
}
|
|
747
|
-
|
|
748
|
-
await this._setMaterializedState(state);
|
|
749
|
-
this._provenanceDegraded = false;
|
|
750
|
-
this._cachedCeiling = null;
|
|
751
|
-
this._cachedFrontier = null;
|
|
752
|
-
this._lastFrontier = await this.getFrontier();
|
|
753
|
-
this._patchesSinceCheckpoint = patchCount;
|
|
754
|
-
|
|
755
|
-
// Auto-checkpoint if policy is set and threshold exceeded.
|
|
756
|
-
// Guard prevents recursion: createCheckpoint() calls materialize() internally.
|
|
757
|
-
if (this._checkpointPolicy && !this._checkpointing && patchCount >= this._checkpointPolicy.every) {
|
|
758
|
-
try {
|
|
759
|
-
await this.createCheckpoint();
|
|
760
|
-
this._patchesSinceCheckpoint = 0;
|
|
761
|
-
} catch {
|
|
762
|
-
// Checkpoint failure does not break materialize — continue silently
|
|
763
|
-
}
|
|
764
|
-
}
|
|
765
|
-
|
|
766
|
-
this._maybeRunGC(state);
|
|
767
|
-
|
|
768
|
-
// Notify subscribers if state changed since last notification
|
|
769
|
-
// Also handles deferred replay for subscribers added with replay: true before cached state
|
|
770
|
-
if (this._subscribers.length > 0) {
|
|
771
|
-
const hasPendingReplay = this._subscribers.some(s => s.pendingReplay);
|
|
772
|
-
const diff = diffStates(this._lastNotifiedState, state);
|
|
773
|
-
if (!isEmptyDiff(diff) || hasPendingReplay) {
|
|
774
|
-
this._notifySubscribers(diff, state);
|
|
775
|
-
}
|
|
776
|
-
}
|
|
777
|
-
// Clone state to prevent eager path mutations from affecting the baseline
|
|
778
|
-
this._lastNotifiedState = cloneStateV5(state);
|
|
779
|
-
|
|
780
|
-
this._logTiming('materialize', t0, { metrics: `${patchCount} patches` });
|
|
781
|
-
|
|
782
|
-
if (collectReceipts) {
|
|
783
|
-
return { state, receipts: /** @type {import('./types/TickReceipt.js').TickReceipt[]} */ (receipts) };
|
|
784
|
-
}
|
|
785
|
-
return state;
|
|
786
|
-
} catch (err) {
|
|
787
|
-
this._logTiming('materialize', t0, { error: /** @type {Error} */ (err) });
|
|
788
|
-
throw err;
|
|
789
|
-
}
|
|
790
|
-
}
|
|
791
|
-
|
|
792
|
-
/**
|
|
793
|
-
* Resolves the effective ceiling from options and instance state.
|
|
794
|
-
*
|
|
795
|
-
* Precedence: explicit `ceiling` in options overrides the instance-level
|
|
796
|
-
* `_seekCeiling`. Uses the `'ceiling' in options` check, so passing
|
|
797
|
-
* `{ ceiling: null }` explicitly clears the seek ceiling for that call
|
|
798
|
-
* (returns `null`), while omitting the key falls through to `_seekCeiling`.
|
|
799
|
-
*
|
|
800
|
-
* @param {{ceiling?: number|null}} [options] - Options object; when the
|
|
801
|
-
* `ceiling` key is present (even if `null`), its value takes precedence
|
|
802
|
-
* @returns {number|null} Lamport ceiling to apply, or `null` for latest
|
|
803
|
-
* @private
|
|
804
|
-
*/
|
|
805
|
-
_resolveCeiling(options) {
|
|
806
|
-
if (options && options.ceiling !== undefined) {
|
|
807
|
-
return options.ceiling;
|
|
378
|
+
return this._sortPatchesCausally(allPatches);
|
|
379
|
+
},
|
|
380
|
+
});
|
|
808
381
|
}
|
|
809
|
-
return this.
|
|
382
|
+
return this._temporalQuery;
|
|
810
383
|
}
|
|
811
384
|
|
|
812
385
|
/**
|
|
813
|
-
*
|
|
814
|
-
*
|
|
815
|
-
* Bypasses checkpoints entirely — replays all patches from all writers,
|
|
816
|
-
* filtering to only those with `lamport <= ceiling`. Skips auto-checkpoint
|
|
817
|
-
* and GC since this is an exploratory read.
|
|
818
|
-
*
|
|
819
|
-
* Uses a dedicated cache keyed on `ceiling` + frontier snapshot. Cache
|
|
820
|
-
* is bypassed when the writer frontier has advanced (new writers or
|
|
821
|
-
* updated tips) or when `collectReceipts` is `true` because the cached
|
|
822
|
-
* path does not retain receipt data.
|
|
386
|
+
* Gets the current provenance index for this graph.
|
|
823
387
|
*
|
|
824
|
-
*
|
|
825
|
-
*
|
|
826
|
-
* @param {boolean} collectReceipts - When `true`, return receipts alongside
|
|
827
|
-
* state and skip the ceiling cache
|
|
828
|
-
* @param {number} t0 - Start timestamp for performance logging
|
|
829
|
-
* @returns {Promise<import('./services/JoinReducer.js').WarpStateV5 |
|
|
830
|
-
* {state: import('./services/JoinReducer.js').WarpStateV5,
|
|
831
|
-
* receipts: import('./types/TickReceipt.js').TickReceipt[]}>}
|
|
832
|
-
* Plain state when `collectReceipts` is falsy; `{ state, receipts }`
|
|
833
|
-
* when truthy
|
|
834
|
-
* @private
|
|
835
|
-
*/
|
|
836
|
-
async _materializeWithCeiling(ceiling, collectReceipts, t0) {
|
|
837
|
-
const frontier = await this.getFrontier();
|
|
838
|
-
|
|
839
|
-
// Cache hit: same ceiling, clean state, AND frontier unchanged.
|
|
840
|
-
// Bypass cache when collectReceipts is true — cached path has no receipts.
|
|
841
|
-
const cf = this._cachedFrontier;
|
|
842
|
-
if (
|
|
843
|
-
this._cachedState && !this._stateDirty &&
|
|
844
|
-
ceiling === this._cachedCeiling && !collectReceipts &&
|
|
845
|
-
cf !== null &&
|
|
846
|
-
cf.size === frontier.size &&
|
|
847
|
-
[...frontier].every(([w, sha]) => cf.get(w) === sha)
|
|
848
|
-
) {
|
|
849
|
-
return this._cachedState;
|
|
850
|
-
}
|
|
851
|
-
|
|
852
|
-
const writerIds = [...frontier.keys()];
|
|
853
|
-
|
|
854
|
-
if (writerIds.length === 0 || ceiling <= 0) {
|
|
855
|
-
const state = createEmptyStateV5();
|
|
856
|
-
this._provenanceIndex = new ProvenanceIndex();
|
|
857
|
-
this._provenanceDegraded = false;
|
|
858
|
-
await this._setMaterializedState(state);
|
|
859
|
-
this._cachedCeiling = ceiling;
|
|
860
|
-
this._cachedFrontier = frontier;
|
|
861
|
-
this._logTiming('materialize', t0, { metrics: '0 patches (ceiling)' });
|
|
862
|
-
if (collectReceipts) {
|
|
863
|
-
return { state, receipts: [] };
|
|
864
|
-
}
|
|
865
|
-
return state;
|
|
866
|
-
}
|
|
867
|
-
|
|
868
|
-
// Persistent cache check — skip when collectReceipts is requested
|
|
869
|
-
let cacheKey;
|
|
870
|
-
if (this._seekCache && !collectReceipts) {
|
|
871
|
-
cacheKey = buildSeekCacheKey(ceiling, frontier);
|
|
872
|
-
try {
|
|
873
|
-
const cached = await this._seekCache.get(cacheKey);
|
|
874
|
-
if (cached) {
|
|
875
|
-
try {
|
|
876
|
-
const state = deserializeFullStateV5(cached, { codec: this._codec });
|
|
877
|
-
this._provenanceIndex = new ProvenanceIndex();
|
|
878
|
-
this._provenanceDegraded = true;
|
|
879
|
-
await this._setMaterializedState(state);
|
|
880
|
-
this._cachedCeiling = ceiling;
|
|
881
|
-
this._cachedFrontier = frontier;
|
|
882
|
-
this._logTiming('materialize', t0, { metrics: `cache hit (ceiling=${ceiling})` });
|
|
883
|
-
return state;
|
|
884
|
-
} catch {
|
|
885
|
-
// Corrupted payload — self-heal by removing the bad entry
|
|
886
|
-
try { await this._seekCache.delete(cacheKey); } catch { /* best-effort */ }
|
|
887
|
-
}
|
|
888
|
-
}
|
|
889
|
-
} catch {
|
|
890
|
-
// Cache read failed — fall through to full materialization
|
|
891
|
-
}
|
|
892
|
-
}
|
|
893
|
-
|
|
894
|
-
const allPatches = [];
|
|
895
|
-
for (const writerId of writerIds) {
|
|
896
|
-
const writerPatches = await this._loadWriterPatches(writerId);
|
|
897
|
-
for (const entry of writerPatches) {
|
|
898
|
-
if (entry.patch.lamport <= ceiling) {
|
|
899
|
-
allPatches.push(entry);
|
|
900
|
-
}
|
|
901
|
-
}
|
|
902
|
-
}
|
|
903
|
-
|
|
904
|
-
/** @type {import('./services/JoinReducer.js').WarpStateV5|undefined} */
|
|
905
|
-
let state;
|
|
906
|
-
/** @type {import('./types/TickReceipt.js').TickReceipt[]|undefined} */
|
|
907
|
-
let receipts;
|
|
908
|
-
|
|
909
|
-
if (allPatches.length === 0) {
|
|
910
|
-
state = createEmptyStateV5();
|
|
911
|
-
if (collectReceipts) {
|
|
912
|
-
receipts = [];
|
|
913
|
-
}
|
|
914
|
-
} else if (collectReceipts) {
|
|
915
|
-
const result = /** @type {{state: import('./services/JoinReducer.js').WarpStateV5, receipts: import('./types/TickReceipt.js').TickReceipt[]}} */ (reduceV5(/** @type {any} */ (allPatches), undefined, { receipts: true })); // TODO(ts-cleanup): type patch array
|
|
916
|
-
state = result.state;
|
|
917
|
-
receipts = result.receipts;
|
|
918
|
-
} else {
|
|
919
|
-
state = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (reduceV5(/** @type {any} */ (allPatches))); // TODO(ts-cleanup): type patch array
|
|
920
|
-
}
|
|
921
|
-
|
|
922
|
-
this._provenanceIndex = new ProvenanceIndex();
|
|
923
|
-
for (const { patch, sha } of allPatches) {
|
|
924
|
-
this._provenanceIndex.addPatch(sha, /** @type {string[]|undefined} */ (patch.reads), /** @type {string[]|undefined} */ (patch.writes));
|
|
925
|
-
}
|
|
926
|
-
this._provenanceDegraded = false;
|
|
927
|
-
|
|
928
|
-
await this._setMaterializedState(state);
|
|
929
|
-
this._cachedCeiling = ceiling;
|
|
930
|
-
this._cachedFrontier = frontier;
|
|
931
|
-
|
|
932
|
-
// Store to persistent cache (fire-and-forget — failure is non-fatal)
|
|
933
|
-
if (this._seekCache && !collectReceipts && allPatches.length > 0) {
|
|
934
|
-
if (!cacheKey) {
|
|
935
|
-
cacheKey = buildSeekCacheKey(ceiling, frontier);
|
|
936
|
-
}
|
|
937
|
-
const buf = serializeFullStateV5(state, { codec: this._codec });
|
|
938
|
-
this._seekCache.set(cacheKey, /** @type {Buffer} */ (buf)).catch(() => {});
|
|
939
|
-
}
|
|
940
|
-
|
|
941
|
-
// Skip auto-checkpoint and GC — this is an exploratory read
|
|
942
|
-
this._logTiming('materialize', t0, { metrics: `${allPatches.length} patches (ceiling=${ceiling})` });
|
|
943
|
-
|
|
944
|
-
if (collectReceipts) {
|
|
945
|
-
return { state, receipts: /** @type {import('./types/TickReceipt.js').TickReceipt[]} */ (receipts) };
|
|
946
|
-
}
|
|
947
|
-
return state;
|
|
948
|
-
}
|
|
949
|
-
|
|
950
|
-
/**
|
|
951
|
-
* Joins (merges) another state into the current cached state.
|
|
388
|
+
* The provenance index maps node/edge IDs to the patch SHAs that affected them.
|
|
389
|
+
* It is built during materialization from the patches' I/O declarations.
|
|
952
390
|
*
|
|
953
|
-
*
|
|
954
|
-
* CRDT join semantics defined in JoinReducer. The merge is deterministic
|
|
955
|
-
* and commutative - joining A with B produces the same result as B with A.
|
|
391
|
+
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
956
392
|
*
|
|
957
|
-
* @
|
|
958
|
-
* @returns {{
|
|
959
|
-
* state: import('./services/JoinReducer.js').WarpStateV5,
|
|
960
|
-
* receipt: {
|
|
961
|
-
* nodesAdded: number,
|
|
962
|
-
* nodesRemoved: number,
|
|
963
|
-
* edgesAdded: number,
|
|
964
|
-
* edgesRemoved: number,
|
|
965
|
-
* propsChanged: number,
|
|
966
|
-
* frontierMerged: boolean
|
|
967
|
-
* }
|
|
968
|
-
* }} The merged state and a receipt describing the merge
|
|
969
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
393
|
+
* @returns {import('./services/ProvenanceIndex.js').ProvenanceIndex|null} The provenance index, or null if not materialized
|
|
970
394
|
*
|
|
971
395
|
* @example
|
|
972
|
-
*
|
|
973
|
-
*
|
|
974
|
-
*
|
|
975
|
-
*
|
|
976
|
-
*
|
|
977
|
-
*
|
|
978
|
-
* // Merge the states
|
|
979
|
-
* const { state, receipt } = graph.join(remoteState);
|
|
980
|
-
* console.log(`Merged: ${receipt.nodesAdded} nodes added, ${receipt.propsChanged} props changed`);
|
|
981
|
-
*/
|
|
982
|
-
join(otherState) {
|
|
983
|
-
if (!this._cachedState) {
|
|
984
|
-
throw new QueryError('No cached state. Call materialize() first.', {
|
|
985
|
-
code: 'E_NO_STATE',
|
|
986
|
-
});
|
|
987
|
-
}
|
|
988
|
-
|
|
989
|
-
if (!otherState || !otherState.nodeAlive || !otherState.edgeAlive) {
|
|
990
|
-
throw new Error('Invalid state: must be a valid WarpStateV5 object');
|
|
991
|
-
}
|
|
992
|
-
|
|
993
|
-
// Capture pre-merge counts for receipt
|
|
994
|
-
const beforeNodes = orsetElements(this._cachedState.nodeAlive).length;
|
|
995
|
-
const beforeEdges = orsetElements(this._cachedState.edgeAlive).length;
|
|
996
|
-
const beforeFrontierSize = this._cachedState.observedFrontier.size;
|
|
997
|
-
|
|
998
|
-
// Perform the join
|
|
999
|
-
const mergedState = joinStates(this._cachedState, otherState);
|
|
1000
|
-
|
|
1001
|
-
// Calculate receipt
|
|
1002
|
-
const afterNodes = orsetElements(mergedState.nodeAlive).length;
|
|
1003
|
-
const afterEdges = orsetElements(mergedState.edgeAlive).length;
|
|
1004
|
-
const afterFrontierSize = mergedState.observedFrontier.size;
|
|
1005
|
-
|
|
1006
|
-
// Count property changes (keys that existed in both but have different values)
|
|
1007
|
-
let propsChanged = 0;
|
|
1008
|
-
for (const [key, reg] of mergedState.prop) {
|
|
1009
|
-
const oldReg = this._cachedState.prop.get(key);
|
|
1010
|
-
if (!oldReg || oldReg.value !== reg.value) {
|
|
1011
|
-
propsChanged++;
|
|
1012
|
-
}
|
|
1013
|
-
}
|
|
1014
|
-
|
|
1015
|
-
const receipt = {
|
|
1016
|
-
nodesAdded: Math.max(0, afterNodes - beforeNodes),
|
|
1017
|
-
nodesRemoved: Math.max(0, beforeNodes - afterNodes),
|
|
1018
|
-
edgesAdded: Math.max(0, afterEdges - beforeEdges),
|
|
1019
|
-
edgesRemoved: Math.max(0, beforeEdges - afterEdges),
|
|
1020
|
-
propsChanged,
|
|
1021
|
-
frontierMerged: afterFrontierSize !== beforeFrontierSize ||
|
|
1022
|
-
!this._frontierEquals(this._cachedState.observedFrontier, mergedState.observedFrontier),
|
|
1023
|
-
};
|
|
1024
|
-
|
|
1025
|
-
// Update cached state
|
|
1026
|
-
this._cachedState = mergedState;
|
|
1027
|
-
|
|
1028
|
-
return { state: mergedState, receipt };
|
|
1029
|
-
}
|
|
1030
|
-
|
|
1031
|
-
/**
|
|
1032
|
-
* Compares two version vectors for equality.
|
|
1033
|
-
* @param {import('./crdt/VersionVector.js').VersionVector} a
|
|
1034
|
-
* @param {import('./crdt/VersionVector.js').VersionVector} b
|
|
1035
|
-
* @returns {boolean}
|
|
1036
|
-
* @private
|
|
396
|
+
* await graph.materialize();
|
|
397
|
+
* const index = graph.provenanceIndex;
|
|
398
|
+
* if (index) {
|
|
399
|
+
* console.log(`Index contains ${index.size} entities`);
|
|
400
|
+
* }
|
|
1037
401
|
*/
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
return false;
|
|
1041
|
-
}
|
|
1042
|
-
for (const [key, val] of a) {
|
|
1043
|
-
if (b.get(key) !== val) {
|
|
1044
|
-
return false;
|
|
1045
|
-
}
|
|
1046
|
-
}
|
|
1047
|
-
return true;
|
|
402
|
+
get provenanceIndex() {
|
|
403
|
+
return this._provenanceIndex;
|
|
1048
404
|
}
|
|
405
|
+
}
|
|
1049
406
|
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
* @example
|
|
1063
|
-
* // Time-travel to a previous checkpoint
|
|
1064
|
-
* const oldState = await graph.materializeAt('abc123');
|
|
1065
|
-
* console.log('Nodes at checkpoint:', orsetElements(oldState.nodeAlive));
|
|
1066
|
-
*/
|
|
1067
|
-
async materializeAt(checkpointSha) {
|
|
1068
|
-
// 1. Discover current writers to build target frontier
|
|
1069
|
-
const writerIds = await this.discoverWriters();
|
|
1070
|
-
|
|
1071
|
-
// 2. Build target frontier (current tips for all writers)
|
|
1072
|
-
const targetFrontier = createFrontier();
|
|
1073
|
-
for (const writerId of writerIds) {
|
|
1074
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
1075
|
-
const tipSha = await this._persistence.readRef(writerRef);
|
|
1076
|
-
if (tipSha) {
|
|
1077
|
-
updateFrontier(targetFrontier, writerId, tipSha);
|
|
1078
|
-
}
|
|
1079
|
-
}
|
|
1080
|
-
|
|
1081
|
-
// 3. Create a patch loader function for incremental materialization
|
|
1082
|
-
const patchLoader = async (/** @type {string} */ writerId, /** @type {string|null} */ fromSha, /** @type {string} */ toSha) => {
|
|
1083
|
-
// Load patches from fromSha (exclusive) to toSha (inclusive)
|
|
1084
|
-
// Walk from toSha back to fromSha
|
|
1085
|
-
const patches = [];
|
|
1086
|
-
let currentSha = toSha;
|
|
1087
|
-
|
|
1088
|
-
while (currentSha && currentSha !== fromSha) {
|
|
1089
|
-
const nodeInfo = await this._persistence.getNodeInfo(currentSha);
|
|
1090
|
-
const {message} = nodeInfo;
|
|
1091
|
-
|
|
1092
|
-
const kind = detectMessageKind(message);
|
|
1093
|
-
if (kind !== 'patch') {
|
|
1094
|
-
break;
|
|
1095
|
-
}
|
|
1096
|
-
|
|
1097
|
-
const patchMeta = decodePatchMessage(message);
|
|
1098
|
-
const patchBuffer = await this._persistence.readBlob(patchMeta.patchOid);
|
|
1099
|
-
const patch = this._codec.decode(patchBuffer);
|
|
1100
|
-
|
|
1101
|
-
patches.push({ patch, sha: currentSha });
|
|
1102
|
-
|
|
1103
|
-
if (nodeInfo.parents && nodeInfo.parents.length > 0) {
|
|
1104
|
-
currentSha = nodeInfo.parents[0];
|
|
1105
|
-
} else {
|
|
1106
|
-
break;
|
|
1107
|
-
}
|
|
1108
|
-
}
|
|
1109
|
-
|
|
1110
|
-
return patches.reverse();
|
|
1111
|
-
};
|
|
1112
|
-
|
|
1113
|
-
// 4. Call materializeIncremental with the checkpoint and target frontier
|
|
1114
|
-
const state = await materializeIncremental({
|
|
1115
|
-
persistence: /** @type {any} */ (this._persistence), // TODO(ts-cleanup): narrow port type
|
|
1116
|
-
graphName: this._graphName,
|
|
1117
|
-
checkpointSha,
|
|
1118
|
-
targetFrontier,
|
|
1119
|
-
patchLoader,
|
|
1120
|
-
codec: this._codec,
|
|
1121
|
-
});
|
|
1122
|
-
await this._setMaterializedState(state);
|
|
1123
|
-
return state;
|
|
1124
|
-
}
|
|
1125
|
-
|
|
1126
|
-
/**
|
|
1127
|
-
* Creates a new checkpoint of the current graph state.
|
|
1128
|
-
*
|
|
1129
|
-
* Materializes the current state, creates a checkpoint commit with
|
|
1130
|
-
* frontier information, and updates the checkpoint ref.
|
|
1131
|
-
*
|
|
1132
|
-
* @returns {Promise<string>} The checkpoint commit SHA
|
|
1133
|
-
* @throws {Error} If materialization fails
|
|
1134
|
-
* @throws {Error} If checkpoint commit creation fails
|
|
1135
|
-
* @throws {Error} If ref update fails
|
|
1136
|
-
*/
|
|
1137
|
-
async createCheckpoint() {
|
|
1138
|
-
const t0 = this._clock.now();
|
|
1139
|
-
try {
|
|
1140
|
-
// 1. Discover all writers
|
|
1141
|
-
const writers = await this.discoverWriters();
|
|
1142
|
-
|
|
1143
|
-
// 2. Build frontier (map of writerId → tip SHA)
|
|
1144
|
-
const frontier = createFrontier();
|
|
1145
|
-
const parents = [];
|
|
1146
|
-
|
|
1147
|
-
for (const writerId of writers) {
|
|
1148
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
1149
|
-
const sha = await this._persistence.readRef(writerRef);
|
|
1150
|
-
if (sha) {
|
|
1151
|
-
updateFrontier(frontier, writerId, sha);
|
|
1152
|
-
parents.push(sha);
|
|
1153
|
-
}
|
|
1154
|
-
}
|
|
1155
|
-
|
|
1156
|
-
// 3. Materialize current state (reuse cached if fresh, guard against recursion)
|
|
1157
|
-
const prevCheckpointing = this._checkpointing;
|
|
1158
|
-
this._checkpointing = true;
|
|
1159
|
-
/** @type {import('./services/JoinReducer.js').WarpStateV5} */
|
|
1160
|
-
let state;
|
|
1161
|
-
try {
|
|
1162
|
-
state = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ ((this._cachedState && !this._stateDirty)
|
|
1163
|
-
? this._cachedState
|
|
1164
|
-
: await this.materialize());
|
|
1165
|
-
} finally {
|
|
1166
|
-
this._checkpointing = prevCheckpointing;
|
|
1167
|
-
}
|
|
1168
|
-
|
|
1169
|
-
// 4. Call CheckpointService.create() with provenance index if available
|
|
1170
|
-
const checkpointSha = await createCheckpointCommit({
|
|
1171
|
-
persistence: /** @type {any} */ (this._persistence), // TODO(ts-cleanup): narrow port type
|
|
1172
|
-
graphName: this._graphName,
|
|
1173
|
-
state,
|
|
1174
|
-
frontier,
|
|
1175
|
-
parents,
|
|
1176
|
-
provenanceIndex: this._provenanceIndex || undefined,
|
|
1177
|
-
crypto: this._crypto,
|
|
1178
|
-
codec: this._codec,
|
|
1179
|
-
});
|
|
1180
|
-
|
|
1181
|
-
// 5. Update checkpoint ref
|
|
1182
|
-
const checkpointRef = buildCheckpointRef(this._graphName);
|
|
1183
|
-
await this._persistence.updateRef(checkpointRef, checkpointSha);
|
|
1184
|
-
|
|
1185
|
-
this._logTiming('createCheckpoint', t0);
|
|
1186
|
-
|
|
1187
|
-
// 6. Return checkpoint SHA
|
|
1188
|
-
return checkpointSha;
|
|
1189
|
-
} catch (err) {
|
|
1190
|
-
this._logTiming('createCheckpoint', t0, { error: /** @type {Error} */ (err) });
|
|
1191
|
-
throw err;
|
|
1192
|
-
}
|
|
1193
|
-
}
|
|
1194
|
-
|
|
1195
|
-
/**
|
|
1196
|
-
* Syncs coverage information across writers.
|
|
1197
|
-
*
|
|
1198
|
-
* Creates an octopus anchor commit with all writer tips as parents,
|
|
1199
|
-
* then updates the coverage ref to point to this anchor. The "octopus anchor"
|
|
1200
|
-
* is a merge commit that records which writer tips have been observed,
|
|
1201
|
-
* enabling efficient replication and consistency checks.
|
|
1202
|
-
*
|
|
1203
|
-
* @returns {Promise<void>}
|
|
1204
|
-
* @throws {Error} If ref access or commit creation fails
|
|
1205
|
-
*/
|
|
1206
|
-
async syncCoverage() {
|
|
1207
|
-
// 1. Discover all writers
|
|
1208
|
-
const writers = await this.discoverWriters();
|
|
1209
|
-
|
|
1210
|
-
// If no writers exist, do nothing
|
|
1211
|
-
if (writers.length === 0) {
|
|
1212
|
-
return;
|
|
1213
|
-
}
|
|
1214
|
-
|
|
1215
|
-
// 2. Get tip SHA for each writer's ref
|
|
1216
|
-
const parents = [];
|
|
1217
|
-
for (const writerId of writers) {
|
|
1218
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
1219
|
-
const sha = await this._persistence.readRef(writerRef);
|
|
1220
|
-
if (sha) {
|
|
1221
|
-
parents.push(sha);
|
|
1222
|
-
}
|
|
1223
|
-
}
|
|
1224
|
-
|
|
1225
|
-
// If no refs have SHAs, do nothing
|
|
1226
|
-
if (parents.length === 0) {
|
|
1227
|
-
return;
|
|
1228
|
-
}
|
|
1229
|
-
|
|
1230
|
-
// 3. Create octopus anchor commit with all tips as parents
|
|
1231
|
-
const message = encodeAnchorMessage({ graph: this._graphName });
|
|
1232
|
-
const anchorSha = await this._persistence.commitNode({ message, parents });
|
|
1233
|
-
|
|
1234
|
-
// 4. Update coverage ref
|
|
1235
|
-
const coverageRef = buildCoverageRef(this._graphName);
|
|
1236
|
-
await this._persistence.updateRef(coverageRef, anchorSha);
|
|
1237
|
-
}
|
|
1238
|
-
|
|
1239
|
-
/**
|
|
1240
|
-
* Discovers all writers that have contributed to this graph.
|
|
1241
|
-
*
|
|
1242
|
-
* Lists all refs under refs/warp/<graphName>/writers/ and
|
|
1243
|
-
* extracts writer IDs from the ref paths.
|
|
1244
|
-
*
|
|
1245
|
-
* @returns {Promise<string[]>} Sorted array of writer IDs
|
|
1246
|
-
* @throws {Error} If listing refs fails
|
|
1247
|
-
*/
|
|
1248
|
-
async discoverWriters() {
|
|
1249
|
-
const prefix = buildWritersPrefix(this._graphName);
|
|
1250
|
-
const refs = await this._persistence.listRefs(prefix);
|
|
1251
|
-
|
|
1252
|
-
const writerIds = [];
|
|
1253
|
-
for (const refPath of refs) {
|
|
1254
|
-
const writerId = parseWriterIdFromRef(refPath);
|
|
1255
|
-
if (writerId) {
|
|
1256
|
-
writerIds.push(writerId);
|
|
1257
|
-
}
|
|
1258
|
-
}
|
|
1259
|
-
|
|
1260
|
-
return writerIds.sort();
|
|
1261
|
-
}
|
|
1262
|
-
|
|
1263
|
-
/**
|
|
1264
|
-
* Discovers all distinct Lamport ticks across all writers.
|
|
1265
|
-
*
|
|
1266
|
-
* Walks each writer's patch chain from tip to root, reading commit
|
|
1267
|
-
* messages (no CBOR blob deserialization) to extract Lamport timestamps.
|
|
1268
|
-
* Stops when a non-patch commit (e.g. checkpoint) is encountered.
|
|
1269
|
-
* Logs a warning for any non-monotonic lamport sequence within a single
|
|
1270
|
-
* writer's chain.
|
|
1271
|
-
*
|
|
1272
|
-
* @returns {Promise<{
|
|
1273
|
-
* ticks: number[],
|
|
1274
|
-
* maxTick: number,
|
|
1275
|
-
* perWriter: Map<string, {ticks: number[], tipSha: string|null}>
|
|
1276
|
-
* }>} `ticks` is the sorted (ascending) deduplicated union of all
|
|
1277
|
-
* Lamport values; `maxTick` is the largest value (0 if none);
|
|
1278
|
-
* `perWriter` maps each writer ID to its ticks in ascending order
|
|
1279
|
-
* and its current tip SHA (or `null` if the writer ref is missing)
|
|
1280
|
-
* @throws {Error} If reading refs or commit metadata fails
|
|
1281
|
-
*/
|
|
1282
|
-
async discoverTicks() {
|
|
1283
|
-
const writerIds = await this.discoverWriters();
|
|
1284
|
-
/** @type {Set<number>} */
|
|
1285
|
-
const globalTickSet = new Set();
|
|
1286
|
-
const perWriter = new Map();
|
|
1287
|
-
|
|
1288
|
-
for (const writerId of writerIds) {
|
|
1289
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
1290
|
-
const tipSha = await this._persistence.readRef(writerRef);
|
|
1291
|
-
const writerTicks = [];
|
|
1292
|
-
/** @type {Record<number, string>} */
|
|
1293
|
-
const tickShas = {};
|
|
1294
|
-
|
|
1295
|
-
if (tipSha) {
|
|
1296
|
-
let currentSha = tipSha;
|
|
1297
|
-
let lastLamport = Infinity;
|
|
1298
|
-
|
|
1299
|
-
while (currentSha) {
|
|
1300
|
-
const nodeInfo = await this._persistence.getNodeInfo(currentSha);
|
|
1301
|
-
const kind = detectMessageKind(nodeInfo.message);
|
|
1302
|
-
if (kind !== 'patch') {
|
|
1303
|
-
break;
|
|
1304
|
-
}
|
|
1305
|
-
|
|
1306
|
-
const patchMeta = decodePatchMessage(nodeInfo.message);
|
|
1307
|
-
globalTickSet.add(patchMeta.lamport);
|
|
1308
|
-
writerTicks.push(patchMeta.lamport);
|
|
1309
|
-
tickShas[patchMeta.lamport] = currentSha;
|
|
1310
|
-
|
|
1311
|
-
// Check monotonic invariant (walking newest→oldest, lamport should decrease)
|
|
1312
|
-
if (patchMeta.lamport > lastLamport && this._logger) {
|
|
1313
|
-
this._logger.warn(`[warp] non-monotonic lamport for writer ${writerId}: ${patchMeta.lamport} > ${lastLamport}`);
|
|
1314
|
-
}
|
|
1315
|
-
lastLamport = patchMeta.lamport;
|
|
1316
|
-
|
|
1317
|
-
if (nodeInfo.parents && nodeInfo.parents.length > 0) {
|
|
1318
|
-
currentSha = nodeInfo.parents[0];
|
|
1319
|
-
} else {
|
|
1320
|
-
break;
|
|
1321
|
-
}
|
|
1322
|
-
}
|
|
1323
|
-
}
|
|
1324
|
-
|
|
1325
|
-
perWriter.set(writerId, {
|
|
1326
|
-
ticks: writerTicks.reverse(),
|
|
1327
|
-
tipSha: tipSha || null,
|
|
1328
|
-
tickShas,
|
|
1329
|
-
});
|
|
1330
|
-
}
|
|
1331
|
-
|
|
1332
|
-
const ticks = [...globalTickSet].sort((a, b) => a - b);
|
|
1333
|
-
const maxTick = ticks.length > 0 ? ticks[ticks.length - 1] : 0;
|
|
1334
|
-
|
|
1335
|
-
return { ticks, maxTick, perWriter };
|
|
1336
|
-
}
|
|
1337
|
-
|
|
1338
|
-
// ============================================================================
|
|
1339
|
-
// Schema Migration Support
|
|
1340
|
-
// ============================================================================
|
|
1341
|
-
|
|
1342
|
-
/**
|
|
1343
|
-
* Validates migration boundary for graphs.
|
|
1344
|
-
*
|
|
1345
|
-
* Graphs cannot be opened if there is schema:1 history without
|
|
1346
|
-
* a migration checkpoint. This ensures data consistency during migration.
|
|
1347
|
-
*
|
|
1348
|
-
* @returns {Promise<void>}
|
|
1349
|
-
* @throws {Error} If v1 history exists without migration checkpoint
|
|
1350
|
-
* @private
|
|
1351
|
-
*/
|
|
1352
|
-
async _validateMigrationBoundary() {
|
|
1353
|
-
const checkpoint = await this._loadLatestCheckpoint();
|
|
1354
|
-
if (checkpoint?.schema === 2 || checkpoint?.schema === 3) {
|
|
1355
|
-
return; // Already migrated
|
|
1356
|
-
}
|
|
1357
|
-
|
|
1358
|
-
const hasSchema1History = await this._hasSchema1Patches();
|
|
1359
|
-
if (hasSchema1History) {
|
|
1360
|
-
throw new Error(
|
|
1361
|
-
'Cannot open graph with v1 history. ' +
|
|
1362
|
-
'Run MigrationService.migrate() first to create migration checkpoint.'
|
|
1363
|
-
);
|
|
1364
|
-
}
|
|
1365
|
-
}
|
|
1366
|
-
|
|
1367
|
-
/**
|
|
1368
|
-
* Loads the latest checkpoint for this graph.
|
|
1369
|
-
*
|
|
1370
|
-
* @returns {Promise<{state: import('./services/JoinReducer.js').WarpStateV5, frontier: Map<string, string>, stateHash: string, schema: number, provenanceIndex?: import('./services/ProvenanceIndex.js').ProvenanceIndex}|null>} The checkpoint or null
|
|
1371
|
-
* @private
|
|
1372
|
-
*/
|
|
1373
|
-
async _loadLatestCheckpoint() {
|
|
1374
|
-
const checkpointRef = buildCheckpointRef(this._graphName);
|
|
1375
|
-
const checkpointSha = await this._persistence.readRef(checkpointRef);
|
|
1376
|
-
|
|
1377
|
-
if (!checkpointSha) {
|
|
1378
|
-
return null;
|
|
1379
|
-
}
|
|
1380
|
-
|
|
1381
|
-
try {
|
|
1382
|
-
return await loadCheckpoint(this._persistence, checkpointSha, { codec: this._codec });
|
|
1383
|
-
} catch {
|
|
1384
|
-
return null;
|
|
1385
|
-
}
|
|
1386
|
-
}
|
|
1387
|
-
|
|
1388
|
-
/**
|
|
1389
|
-
* Checks if there are any schema:1 patches in the graph.
|
|
1390
|
-
*
|
|
1391
|
-
* @returns {Promise<boolean>} True if schema:1 patches exist
|
|
1392
|
-
* @private
|
|
1393
|
-
*/
|
|
1394
|
-
async _hasSchema1Patches() {
|
|
1395
|
-
const writerIds = await this.discoverWriters();
|
|
1396
|
-
|
|
1397
|
-
for (const writerId of writerIds) {
|
|
1398
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
1399
|
-
const tipSha = await this._persistence.readRef(writerRef);
|
|
1400
|
-
|
|
1401
|
-
if (!tipSha) {
|
|
1402
|
-
continue;
|
|
1403
|
-
}
|
|
1404
|
-
|
|
1405
|
-
// Check the first (most recent) patch from this writer
|
|
1406
|
-
const nodeInfo = await this._persistence.getNodeInfo(tipSha);
|
|
1407
|
-
const kind = detectMessageKind(nodeInfo.message);
|
|
1408
|
-
|
|
1409
|
-
if (kind === 'patch') {
|
|
1410
|
-
const patchMeta = decodePatchMessage(nodeInfo.message);
|
|
1411
|
-
const patchBuffer = await this._persistence.readBlob(patchMeta.patchOid);
|
|
1412
|
-
const patch = /** @type {{schema?: number}} */ (this._codec.decode(patchBuffer));
|
|
1413
|
-
|
|
1414
|
-
// If any patch has schema:1, we have v1 history
|
|
1415
|
-
if (patch.schema === 1 || patch.schema === undefined) {
|
|
1416
|
-
return true;
|
|
1417
|
-
}
|
|
1418
|
-
}
|
|
1419
|
-
}
|
|
1420
|
-
|
|
1421
|
-
return false;
|
|
1422
|
-
}
|
|
1423
|
-
|
|
1424
|
-
/**
|
|
1425
|
-
* Loads patches since a checkpoint for incremental materialization.
|
|
1426
|
-
*
|
|
1427
|
-
* @param {{state: import('./services/JoinReducer.js').WarpStateV5, frontier: Map<string, string>, stateHash: string, schema: number}} checkpoint - The checkpoint to start from
|
|
1428
|
-
* @returns {Promise<Array<{patch: import('./types/WarpTypesV2.js').PatchV2, sha: string}>>} Patches since checkpoint
|
|
1429
|
-
* @private
|
|
1430
|
-
*/
|
|
1431
|
-
async _loadPatchesSince(checkpoint) {
|
|
1432
|
-
const writerIds = await this.discoverWriters();
|
|
1433
|
-
const allPatches = [];
|
|
1434
|
-
|
|
1435
|
-
for (const writerId of writerIds) {
|
|
1436
|
-
const checkpointSha = checkpoint.frontier?.get(writerId) || null;
|
|
1437
|
-
const patches = await this._loadWriterPatches(writerId, checkpointSha);
|
|
1438
|
-
|
|
1439
|
-
// Validate each patch against checkpoint frontier
|
|
1440
|
-
for (const { sha } of patches) {
|
|
1441
|
-
await this._validatePatchAgainstCheckpoint(writerId, sha, checkpoint);
|
|
1442
|
-
}
|
|
1443
|
-
|
|
1444
|
-
allPatches.push(...patches);
|
|
1445
|
-
}
|
|
1446
|
-
|
|
1447
|
-
return allPatches;
|
|
1448
|
-
}
|
|
1449
|
-
|
|
1450
|
-
// ============================================================================
|
|
1451
|
-
// Backfill Rejection and Divergence Detection
|
|
1452
|
-
// ============================================================================
|
|
1453
|
-
|
|
1454
|
-
/**
|
|
1455
|
-
* Checks if ancestorSha is an ancestor of descendantSha.
|
|
1456
|
-
* Walks the commit graph (linear per-writer chain assumption).
|
|
1457
|
-
*
|
|
1458
|
-
* @param {string} ancestorSha - The potential ancestor commit SHA
|
|
1459
|
-
* @param {string} descendantSha - The potential descendant commit SHA
|
|
1460
|
-
* @returns {Promise<boolean>} True if ancestorSha is an ancestor of descendantSha
|
|
1461
|
-
* @private
|
|
1462
|
-
*/
|
|
1463
|
-
async _isAncestor(ancestorSha, descendantSha) {
|
|
1464
|
-
if (!ancestorSha || !descendantSha) {
|
|
1465
|
-
return false;
|
|
1466
|
-
}
|
|
1467
|
-
if (ancestorSha === descendantSha) {
|
|
1468
|
-
return true;
|
|
1469
|
-
}
|
|
1470
|
-
|
|
1471
|
-
let cur = descendantSha;
|
|
1472
|
-
while (cur) {
|
|
1473
|
-
const nodeInfo = await this._persistence.getNodeInfo(cur);
|
|
1474
|
-
const parent = nodeInfo.parents?.[0] ?? null;
|
|
1475
|
-
if (parent === ancestorSha) {
|
|
1476
|
-
return true;
|
|
1477
|
-
}
|
|
1478
|
-
cur = parent;
|
|
1479
|
-
}
|
|
1480
|
-
return false;
|
|
1481
|
-
}
|
|
1482
|
-
|
|
1483
|
-
/**
|
|
1484
|
-
* Determines relationship between incoming patch and checkpoint head.
|
|
1485
|
-
*
|
|
1486
|
-
* @param {string} ckHead - The checkpoint head SHA for this writer
|
|
1487
|
-
* @param {string} incomingSha - The incoming patch commit SHA
|
|
1488
|
-
* @returns {Promise<'same' | 'ahead' | 'behind' | 'diverged'>} The relationship
|
|
1489
|
-
* @private
|
|
1490
|
-
*/
|
|
1491
|
-
async _relationToCheckpointHead(ckHead, incomingSha) {
|
|
1492
|
-
if (incomingSha === ckHead) {
|
|
1493
|
-
return 'same';
|
|
1494
|
-
}
|
|
1495
|
-
if (await this._isAncestor(ckHead, incomingSha)) {
|
|
1496
|
-
return 'ahead';
|
|
1497
|
-
}
|
|
1498
|
-
if (await this._isAncestor(incomingSha, ckHead)) {
|
|
1499
|
-
return 'behind';
|
|
1500
|
-
}
|
|
1501
|
-
return 'diverged';
|
|
1502
|
-
}
|
|
1503
|
-
|
|
1504
|
-
/**
|
|
1505
|
-
* Validates an incoming patch against checkpoint frontier.
|
|
1506
|
-
* Uses graph reachability, NOT lamport timestamps.
|
|
1507
|
-
*
|
|
1508
|
-
* @param {string} writerId - The writer ID for this patch
|
|
1509
|
-
* @param {string} incomingSha - The incoming patch commit SHA
|
|
1510
|
-
* @param {{state: import('./services/JoinReducer.js').WarpStateV5, frontier: Map<string, string>, stateHash: string, schema: number}} checkpoint - The checkpoint to validate against
|
|
1511
|
-
* @returns {Promise<void>}
|
|
1512
|
-
* @throws {Error} If patch is behind/same as checkpoint frontier (backfill rejected)
|
|
1513
|
-
* @throws {Error} If patch does not extend checkpoint head (writer fork detected)
|
|
1514
|
-
* @private
|
|
1515
|
-
*/
|
|
1516
|
-
async _validatePatchAgainstCheckpoint(writerId, incomingSha, checkpoint) {
|
|
1517
|
-
if (!checkpoint || (checkpoint.schema !== 2 && checkpoint.schema !== 3)) {
|
|
1518
|
-
return;
|
|
1519
|
-
}
|
|
1520
|
-
|
|
1521
|
-
const ckHead = checkpoint.frontier?.get(writerId);
|
|
1522
|
-
if (!ckHead) {
|
|
1523
|
-
return; // Checkpoint didn't include this writer
|
|
1524
|
-
}
|
|
1525
|
-
|
|
1526
|
-
const relation = await this._relationToCheckpointHead(ckHead, incomingSha);
|
|
1527
|
-
|
|
1528
|
-
if (relation === 'same' || relation === 'behind') {
|
|
1529
|
-
throw new Error(
|
|
1530
|
-
`Backfill rejected for writer ${writerId}: ` +
|
|
1531
|
-
`incoming patch is ${relation} checkpoint frontier`
|
|
1532
|
-
);
|
|
1533
|
-
}
|
|
1534
|
-
|
|
1535
|
-
if (relation === 'diverged') {
|
|
1536
|
-
throw new Error(
|
|
1537
|
-
`Writer fork detected for ${writerId}: ` +
|
|
1538
|
-
`incoming patch does not extend checkpoint head`
|
|
1539
|
-
);
|
|
1540
|
-
}
|
|
1541
|
-
// relation === 'ahead' => OK
|
|
1542
|
-
}
|
|
1543
|
-
|
|
1544
|
-
// ============================================================================
|
|
1545
|
-
// Garbage Collection
|
|
1546
|
-
// ============================================================================
|
|
1547
|
-
|
|
1548
|
-
/**
|
|
1549
|
-
* Post-materialize GC check. Warn by default; execute only when enabled.
|
|
1550
|
-
* GC failure never breaks materialize.
|
|
1551
|
-
*
|
|
1552
|
-
* @param {import('./services/JoinReducer.js').WarpStateV5} state
|
|
1553
|
-
* @private
|
|
1554
|
-
*/
|
|
1555
|
-
_maybeRunGC(state) {
|
|
1556
|
-
try {
|
|
1557
|
-
const metrics = collectGCMetrics(state);
|
|
1558
|
-
/** @type {import('./services/GCPolicy.js').GCInputMetrics} */
|
|
1559
|
-
const inputMetrics = {
|
|
1560
|
-
...metrics,
|
|
1561
|
-
patchesSinceCompaction: this._patchesSinceGC,
|
|
1562
|
-
timeSinceCompaction: Date.now() - this._lastGCTime,
|
|
1563
|
-
};
|
|
1564
|
-
const { shouldRun, reasons } = shouldRunGC(inputMetrics, /** @type {import('./services/GCPolicy.js').GCPolicy} */ (this._gcPolicy));
|
|
1565
|
-
|
|
1566
|
-
if (!shouldRun) {
|
|
1567
|
-
return;
|
|
1568
|
-
}
|
|
1569
|
-
|
|
1570
|
-
if (/** @type {import('./services/GCPolicy.js').GCPolicy} */ (this._gcPolicy).enabled) {
|
|
1571
|
-
const appliedVV = computeAppliedVV(state);
|
|
1572
|
-
const result = executeGC(state, appliedVV);
|
|
1573
|
-
this._lastGCTime = Date.now();
|
|
1574
|
-
this._patchesSinceGC = 0;
|
|
1575
|
-
if (this._logger) {
|
|
1576
|
-
this._logger.info('Auto-GC completed', { ...result, reasons });
|
|
1577
|
-
}
|
|
1578
|
-
} else if (this._logger) {
|
|
1579
|
-
this._logger.warn(
|
|
1580
|
-
'GC thresholds exceeded but auto-GC is disabled. Set gcPolicy: { enabled: true } to auto-compact.',
|
|
1581
|
-
{ reasons },
|
|
1582
|
-
);
|
|
1583
|
-
}
|
|
1584
|
-
} catch {
|
|
1585
|
-
// GC failure never breaks materialize
|
|
1586
|
-
}
|
|
1587
|
-
}
|
|
1588
|
-
|
|
1589
|
-
/**
|
|
1590
|
-
* Checks if GC should run based on current metrics and policy.
|
|
1591
|
-
* If thresholds are exceeded, runs GC on the cached state.
|
|
1592
|
-
*
|
|
1593
|
-
* **Requires a cached state.**
|
|
1594
|
-
*
|
|
1595
|
-
* @returns {{ran: boolean, result: Object|null, reasons: string[]}} GC result
|
|
1596
|
-
*
|
|
1597
|
-
* @example
|
|
1598
|
-
* await graph.materialize();
|
|
1599
|
-
* const { ran, result, reasons } = graph.maybeRunGC();
|
|
1600
|
-
* if (ran) {
|
|
1601
|
-
* console.log(`GC ran: ${result.tombstonesRemoved} tombstones removed`);
|
|
1602
|
-
* }
|
|
1603
|
-
*/
|
|
1604
|
-
maybeRunGC() {
|
|
1605
|
-
if (!this._cachedState) {
|
|
1606
|
-
return { ran: false, result: null, reasons: [] };
|
|
1607
|
-
}
|
|
1608
|
-
|
|
1609
|
-
const rawMetrics = collectGCMetrics(this._cachedState);
|
|
1610
|
-
/** @type {import('./services/GCPolicy.js').GCInputMetrics} */
|
|
1611
|
-
const metrics = {
|
|
1612
|
-
...rawMetrics,
|
|
1613
|
-
patchesSinceCompaction: this._patchesSinceGC,
|
|
1614
|
-
timeSinceCompaction: this._lastGCTime > 0 ? Date.now() - this._lastGCTime : 0,
|
|
1615
|
-
};
|
|
1616
|
-
|
|
1617
|
-
const { shouldRun, reasons } = shouldRunGC(metrics, /** @type {import('./services/GCPolicy.js').GCPolicy} */ (this._gcPolicy));
|
|
1618
|
-
|
|
1619
|
-
if (!shouldRun) {
|
|
1620
|
-
return { ran: false, result: null, reasons: [] };
|
|
1621
|
-
}
|
|
1622
|
-
|
|
1623
|
-
const result = this.runGC();
|
|
1624
|
-
return { ran: true, result, reasons };
|
|
1625
|
-
}
|
|
1626
|
-
|
|
1627
|
-
/**
|
|
1628
|
-
* Explicitly runs GC on the cached state.
|
|
1629
|
-
* Compacts tombstoned dots that are covered by the appliedVV.
|
|
1630
|
-
*
|
|
1631
|
-
* **Requires a cached state.**
|
|
1632
|
-
*
|
|
1633
|
-
* @returns {{nodesCompacted: number, edgesCompacted: number, tombstonesRemoved: number, durationMs: number}}
|
|
1634
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
1635
|
-
*
|
|
1636
|
-
* @example
|
|
1637
|
-
* await graph.materialize();
|
|
1638
|
-
* const result = graph.runGC();
|
|
1639
|
-
* console.log(`Removed ${result.tombstonesRemoved} tombstones in ${result.durationMs}ms`);
|
|
1640
|
-
*/
|
|
1641
|
-
runGC() {
|
|
1642
|
-
const t0 = this._clock.now();
|
|
1643
|
-
try {
|
|
1644
|
-
if (!this._cachedState) {
|
|
1645
|
-
throw new QueryError('No cached state. Call materialize() first.', {
|
|
1646
|
-
code: 'E_NO_STATE',
|
|
1647
|
-
});
|
|
1648
|
-
}
|
|
1649
|
-
|
|
1650
|
-
// Compute appliedVV from current state
|
|
1651
|
-
const appliedVV = computeAppliedVV(this._cachedState);
|
|
1652
|
-
|
|
1653
|
-
// Execute GC (mutates cached state)
|
|
1654
|
-
const result = executeGC(this._cachedState, appliedVV);
|
|
1655
|
-
|
|
1656
|
-
// Update GC tracking
|
|
1657
|
-
this._lastGCTime = Date.now();
|
|
1658
|
-
this._patchesSinceGC = 0;
|
|
1659
|
-
|
|
1660
|
-
this._logTiming('runGC', t0, { metrics: `${result.tombstonesRemoved} tombstones removed` });
|
|
1661
|
-
|
|
1662
|
-
return result;
|
|
1663
|
-
} catch (err) {
|
|
1664
|
-
this._logTiming('runGC', t0, { error: /** @type {Error} */ (err) });
|
|
1665
|
-
throw err;
|
|
1666
|
-
}
|
|
1667
|
-
}
|
|
1668
|
-
|
|
1669
|
-
/**
|
|
1670
|
-
* Gets current GC metrics for the cached state.
|
|
1671
|
-
*
|
|
1672
|
-
* @returns {{
|
|
1673
|
-
* nodeCount: number,
|
|
1674
|
-
* edgeCount: number,
|
|
1675
|
-
* tombstoneCount: number,
|
|
1676
|
-
* tombstoneRatio: number,
|
|
1677
|
-
* patchesSinceCompaction: number,
|
|
1678
|
-
* lastCompactionTime: number
|
|
1679
|
-
* }|null} GC metrics or null if no cached state
|
|
1680
|
-
*/
|
|
1681
|
-
getGCMetrics() {
|
|
1682
|
-
if (!this._cachedState) {
|
|
1683
|
-
return null;
|
|
1684
|
-
}
|
|
1685
|
-
|
|
1686
|
-
const rawMetrics = collectGCMetrics(this._cachedState);
|
|
1687
|
-
return {
|
|
1688
|
-
...rawMetrics,
|
|
1689
|
-
nodeCount: rawMetrics.nodeLiveDots,
|
|
1690
|
-
edgeCount: rawMetrics.edgeLiveDots,
|
|
1691
|
-
tombstoneCount: rawMetrics.totalTombstones,
|
|
1692
|
-
patchesSinceCompaction: this._patchesSinceGC,
|
|
1693
|
-
lastCompactionTime: this._lastGCTime,
|
|
1694
|
-
};
|
|
1695
|
-
}
|
|
1696
|
-
|
|
1697
|
-
/**
|
|
1698
|
-
* Gets the current GC policy.
|
|
1699
|
-
*
|
|
1700
|
-
* @returns {Object} The GC policy configuration
|
|
1701
|
-
*/
|
|
1702
|
-
get gcPolicy() {
|
|
1703
|
-
return { ...this._gcPolicy };
|
|
1704
|
-
}
|
|
1705
|
-
|
|
1706
|
-
// ============================================================================
|
|
1707
|
-
// Network Sync API
|
|
1708
|
-
// ============================================================================
|
|
1709
|
-
|
|
1710
|
-
/**
|
|
1711
|
-
* Gets the current frontier for this graph.
|
|
1712
|
-
* The frontier maps each writer ID to their current tip SHA.
|
|
1713
|
-
*
|
|
1714
|
-
* @returns {Promise<Map<string, string>>} Map of writerId to tip SHA
|
|
1715
|
-
* @throws {Error} If listing refs fails
|
|
1716
|
-
*/
|
|
1717
|
-
async getFrontier() {
|
|
1718
|
-
const writerIds = await this.discoverWriters();
|
|
1719
|
-
const frontier = createFrontier();
|
|
1720
|
-
|
|
1721
|
-
for (const writerId of writerIds) {
|
|
1722
|
-
const writerRef = buildWriterRef(this._graphName, writerId);
|
|
1723
|
-
const tipSha = await this._persistence.readRef(writerRef);
|
|
1724
|
-
if (tipSha) {
|
|
1725
|
-
updateFrontier(frontier, writerId, tipSha);
|
|
1726
|
-
}
|
|
1727
|
-
}
|
|
1728
|
-
|
|
1729
|
-
return frontier;
|
|
1730
|
-
}
|
|
1731
|
-
|
|
1732
|
-
/**
|
|
1733
|
-
* Checks whether any writer tip has changed since the last materialize.
|
|
1734
|
-
*
|
|
1735
|
-
* O(writers) comparison of stored writer tip SHAs against current refs.
|
|
1736
|
-
* Cheap "has anything changed?" check without materialization.
|
|
1737
|
-
*
|
|
1738
|
-
* @returns {Promise<boolean>} True if frontier has changed (or never materialized)
|
|
1739
|
-
* @throws {Error} If listing refs fails
|
|
1740
|
-
*/
|
|
1741
|
-
async hasFrontierChanged() {
|
|
1742
|
-
if (this._lastFrontier === null) {
|
|
1743
|
-
return true;
|
|
1744
|
-
}
|
|
1745
|
-
|
|
1746
|
-
const current = await this.getFrontier();
|
|
1747
|
-
|
|
1748
|
-
if (current.size !== this._lastFrontier.size) {
|
|
1749
|
-
return true;
|
|
1750
|
-
}
|
|
1751
|
-
|
|
1752
|
-
for (const [writerId, tipSha] of current) {
|
|
1753
|
-
if (this._lastFrontier.get(writerId) !== tipSha) {
|
|
1754
|
-
return true;
|
|
1755
|
-
}
|
|
1756
|
-
}
|
|
1757
|
-
|
|
1758
|
-
return false;
|
|
1759
|
-
}
|
|
1760
|
-
|
|
1761
|
-
/**
|
|
1762
|
-
* Returns a lightweight status snapshot of the graph's operational state.
|
|
1763
|
-
*
|
|
1764
|
-
* This method is O(writers) and does NOT trigger materialization.
|
|
1765
|
-
*
|
|
1766
|
-
* @returns {Promise<{
|
|
1767
|
-
* cachedState: 'fresh' | 'stale' | 'none',
|
|
1768
|
-
* patchesSinceCheckpoint: number,
|
|
1769
|
-
* tombstoneRatio: number,
|
|
1770
|
-
* writers: number,
|
|
1771
|
-
* frontier: Record<string, string>,
|
|
1772
|
-
* }>} The graph status
|
|
1773
|
-
* @throws {Error} If listing refs fails
|
|
1774
|
-
*/
|
|
1775
|
-
async status() {
|
|
1776
|
-
// Determine cachedState
|
|
1777
|
-
/** @type {'fresh' | 'stale' | 'none'} */
|
|
1778
|
-
let cachedState;
|
|
1779
|
-
if (this._cachedState === null) {
|
|
1780
|
-
cachedState = 'none';
|
|
1781
|
-
} else if (this._stateDirty || await this.hasFrontierChanged()) {
|
|
1782
|
-
cachedState = 'stale';
|
|
1783
|
-
} else {
|
|
1784
|
-
cachedState = 'fresh';
|
|
1785
|
-
}
|
|
1786
|
-
|
|
1787
|
-
// patchesSinceCheckpoint
|
|
1788
|
-
const patchesSinceCheckpoint = this._patchesSinceCheckpoint;
|
|
1789
|
-
|
|
1790
|
-
// tombstoneRatio
|
|
1791
|
-
let tombstoneRatio = 0;
|
|
1792
|
-
if (this._cachedState) {
|
|
1793
|
-
const metrics = collectGCMetrics(this._cachedState);
|
|
1794
|
-
tombstoneRatio = metrics.tombstoneRatio;
|
|
1795
|
-
}
|
|
1796
|
-
|
|
1797
|
-
// writers and frontier
|
|
1798
|
-
const frontier = await this.getFrontier();
|
|
1799
|
-
const writers = frontier.size;
|
|
1800
|
-
|
|
1801
|
-
// Convert frontier Map to plain object
|
|
1802
|
-
const frontierObj = Object.fromEntries(frontier);
|
|
1803
|
-
|
|
1804
|
-
return {
|
|
1805
|
-
cachedState,
|
|
1806
|
-
patchesSinceCheckpoint,
|
|
1807
|
-
tombstoneRatio,
|
|
1808
|
-
writers,
|
|
1809
|
-
frontier: frontierObj,
|
|
1810
|
-
};
|
|
1811
|
-
}
|
|
1812
|
-
|
|
1813
|
-
/**
|
|
1814
|
-
* Subscribes to graph changes.
|
|
1815
|
-
*
|
|
1816
|
-
* The `onChange` handler is called after each `materialize()` that results in
|
|
1817
|
-
* state changes. The handler receives a diff object describing what changed.
|
|
1818
|
-
*
|
|
1819
|
-
* When `replay: true` is set and `_cachedState` is available, immediately
|
|
1820
|
-
* fires `onChange` with a diff from empty state to current state. If
|
|
1821
|
-
* `_cachedState` is null, replay is deferred until the first materialize.
|
|
1822
|
-
*
|
|
1823
|
-
* Errors thrown by handlers are caught and forwarded to `onError` if provided.
|
|
1824
|
-
* One handler's error does not prevent other handlers from being called.
|
|
1825
|
-
*
|
|
1826
|
-
* @param {Object} options - Subscription options
|
|
1827
|
-
* @param {(diff: import('./services/StateDiff.js').StateDiffResult) => void} options.onChange - Called with diff when graph changes
|
|
1828
|
-
* @param {(error: Error) => void} [options.onError] - Called if onChange throws an error
|
|
1829
|
-
* @param {boolean} [options.replay=false] - If true, immediately fires onChange with initial state diff
|
|
1830
|
-
* @returns {{unsubscribe: () => void}} Subscription handle
|
|
1831
|
-
* @throws {Error} If onChange is not a function
|
|
1832
|
-
*
|
|
1833
|
-
* @example
|
|
1834
|
-
* const { unsubscribe } = graph.subscribe({
|
|
1835
|
-
* onChange: (diff) => {
|
|
1836
|
-
* console.log('Nodes added:', diff.nodes.added);
|
|
1837
|
-
* console.log('Nodes removed:', diff.nodes.removed);
|
|
1838
|
-
* },
|
|
1839
|
-
* onError: (err) => console.error('Handler error:', err),
|
|
1840
|
-
* });
|
|
1841
|
-
*
|
|
1842
|
-
* // Later, to stop receiving updates:
|
|
1843
|
-
* unsubscribe();
|
|
1844
|
-
*
|
|
1845
|
-
* @example
|
|
1846
|
-
* // With replay: get initial state immediately
|
|
1847
|
-
* await graph.materialize();
|
|
1848
|
-
* graph.subscribe({
|
|
1849
|
-
* onChange: (diff) => console.log('Initial or changed:', diff),
|
|
1850
|
-
* replay: true, // Immediately fires with current state as additions
|
|
1851
|
-
* });
|
|
1852
|
-
*/
|
|
1853
|
-
subscribe({ onChange, onError, replay = false }) {
|
|
1854
|
-
if (typeof onChange !== 'function') {
|
|
1855
|
-
throw new Error('onChange must be a function');
|
|
1856
|
-
}
|
|
1857
|
-
|
|
1858
|
-
const subscriber = { onChange, onError, pendingReplay: replay && !this._cachedState };
|
|
1859
|
-
this._subscribers.push(subscriber);
|
|
1860
|
-
|
|
1861
|
-
// Immediate replay if requested and cached state is available
|
|
1862
|
-
if (replay && this._cachedState) {
|
|
1863
|
-
const diff = diffStates(null, this._cachedState);
|
|
1864
|
-
if (!isEmptyDiff(diff)) {
|
|
1865
|
-
try {
|
|
1866
|
-
onChange(diff);
|
|
1867
|
-
} catch (err) {
|
|
1868
|
-
if (onError) {
|
|
1869
|
-
try {
|
|
1870
|
-
onError(/** @type {Error} */ (err));
|
|
1871
|
-
} catch {
|
|
1872
|
-
// onError itself threw — swallow to prevent cascade
|
|
1873
|
-
}
|
|
1874
|
-
}
|
|
1875
|
-
}
|
|
1876
|
-
}
|
|
1877
|
-
}
|
|
1878
|
-
|
|
1879
|
-
return {
|
|
1880
|
-
unsubscribe: () => {
|
|
1881
|
-
const index = this._subscribers.indexOf(subscriber);
|
|
1882
|
-
if (index !== -1) {
|
|
1883
|
-
this._subscribers.splice(index, 1);
|
|
1884
|
-
}
|
|
1885
|
-
},
|
|
1886
|
-
};
|
|
1887
|
-
}
|
|
1888
|
-
|
|
1889
|
-
/**
|
|
1890
|
-
* Watches for graph changes matching a pattern.
|
|
1891
|
-
*
|
|
1892
|
-
* Like `subscribe()`, but only fires for changes where node IDs match the
|
|
1893
|
-
* provided glob pattern. Uses the same pattern syntax as `query().match()`.
|
|
1894
|
-
*
|
|
1895
|
-
* - Nodes: filters `added` and `removed` to matching IDs
|
|
1896
|
-
* - Edges: filters to edges where `from` or `to` matches the pattern
|
|
1897
|
-
* - Props: filters to properties where `nodeId` matches the pattern
|
|
1898
|
-
*
|
|
1899
|
-
* If all changes are filtered out, the handler is not called.
|
|
1900
|
-
*
|
|
1901
|
-
* When `poll` is set, periodically checks `hasFrontierChanged()` and auto-materializes
|
|
1902
|
-
* if the frontier has changed (e.g., remote writes detected). The poll interval must
|
|
1903
|
-
* be at least 1000ms.
|
|
1904
|
-
*
|
|
1905
|
-
* @param {string} pattern - Glob pattern (e.g., 'user:*', 'order:123', '*')
|
|
1906
|
-
* @param {Object} options - Watch options
|
|
1907
|
-
* @param {(diff: import('./services/StateDiff.js').StateDiffResult) => void} options.onChange - Called with filtered diff when matching changes occur
|
|
1908
|
-
* @param {(error: Error) => void} [options.onError] - Called if onChange throws an error
|
|
1909
|
-
* @param {number} [options.poll] - Poll interval in ms (min 1000); checks frontier and auto-materializes
|
|
1910
|
-
* @returns {{unsubscribe: () => void}} Subscription handle
|
|
1911
|
-
* @throws {Error} If pattern is not a string
|
|
1912
|
-
* @throws {Error} If onChange is not a function
|
|
1913
|
-
* @throws {Error} If poll is provided but less than 1000
|
|
1914
|
-
*
|
|
1915
|
-
* @example
|
|
1916
|
-
* const { unsubscribe } = graph.watch('user:*', {
|
|
1917
|
-
* onChange: (diff) => {
|
|
1918
|
-
* // Only user node changes arrive here
|
|
1919
|
-
* console.log('User nodes added:', diff.nodes.added);
|
|
1920
|
-
* },
|
|
1921
|
-
* });
|
|
1922
|
-
*
|
|
1923
|
-
* @example
|
|
1924
|
-
* // With polling: checks every 5s for remote changes
|
|
1925
|
-
* const { unsubscribe } = graph.watch('user:*', {
|
|
1926
|
-
* onChange: (diff) => console.log('User changed:', diff),
|
|
1927
|
-
* poll: 5000,
|
|
1928
|
-
* });
|
|
1929
|
-
*
|
|
1930
|
-
* // Later, to stop receiving updates:
|
|
1931
|
-
* unsubscribe();
|
|
1932
|
-
*/
|
|
1933
|
-
watch(pattern, { onChange, onError, poll }) {
|
|
1934
|
-
if (typeof pattern !== 'string') {
|
|
1935
|
-
throw new Error('pattern must be a string');
|
|
1936
|
-
}
|
|
1937
|
-
if (typeof onChange !== 'function') {
|
|
1938
|
-
throw new Error('onChange must be a function');
|
|
1939
|
-
}
|
|
1940
|
-
if (poll !== undefined) {
|
|
1941
|
-
if (typeof poll !== 'number' || poll < 1000) {
|
|
1942
|
-
throw new Error('poll must be a number >= 1000');
|
|
1943
|
-
}
|
|
1944
|
-
}
|
|
1945
|
-
|
|
1946
|
-
// Pattern matching: same logic as QueryBuilder.match()
|
|
1947
|
-
// Pre-compile pattern matcher once for performance
|
|
1948
|
-
/** @type {(nodeId: string) => boolean} */
|
|
1949
|
-
let matchesPattern;
|
|
1950
|
-
if (pattern === '*') {
|
|
1951
|
-
matchesPattern = () => true;
|
|
1952
|
-
} else if (pattern.includes('*')) {
|
|
1953
|
-
const escaped = pattern.replace(/[.+?^${}()|[\]\\]/g, '\\$&');
|
|
1954
|
-
const regex = new RegExp(`^${escaped.replace(/\*/g, '.*')}$`);
|
|
1955
|
-
matchesPattern = (/** @type {string} */ nodeId) => regex.test(nodeId);
|
|
1956
|
-
} else {
|
|
1957
|
-
matchesPattern = (/** @type {string} */ nodeId) => nodeId === pattern;
|
|
1958
|
-
}
|
|
1959
|
-
|
|
1960
|
-
// Filtered onChange that only passes matching changes
|
|
1961
|
-
const filteredOnChange = (/** @type {import('./services/StateDiff.js').StateDiffResult} */ diff) => {
|
|
1962
|
-
const filteredDiff = {
|
|
1963
|
-
nodes: {
|
|
1964
|
-
added: diff.nodes.added.filter(matchesPattern),
|
|
1965
|
-
removed: diff.nodes.removed.filter(matchesPattern),
|
|
1966
|
-
},
|
|
1967
|
-
edges: {
|
|
1968
|
-
added: diff.edges.added.filter((/** @type {import('./services/StateDiff.js').EdgeChange} */ e) => matchesPattern(e.from) || matchesPattern(e.to)),
|
|
1969
|
-
removed: diff.edges.removed.filter((/** @type {import('./services/StateDiff.js').EdgeChange} */ e) => matchesPattern(e.from) || matchesPattern(e.to)),
|
|
1970
|
-
},
|
|
1971
|
-
props: {
|
|
1972
|
-
set: diff.props.set.filter((/** @type {import('./services/StateDiff.js').PropSet} */ p) => matchesPattern(p.nodeId)),
|
|
1973
|
-
removed: diff.props.removed.filter((/** @type {import('./services/StateDiff.js').PropRemoved} */ p) => matchesPattern(p.nodeId)),
|
|
1974
|
-
},
|
|
1975
|
-
};
|
|
1976
|
-
|
|
1977
|
-
// Only call handler if there are matching changes
|
|
1978
|
-
const hasChanges =
|
|
1979
|
-
filteredDiff.nodes.added.length > 0 ||
|
|
1980
|
-
filteredDiff.nodes.removed.length > 0 ||
|
|
1981
|
-
filteredDiff.edges.added.length > 0 ||
|
|
1982
|
-
filteredDiff.edges.removed.length > 0 ||
|
|
1983
|
-
filteredDiff.props.set.length > 0 ||
|
|
1984
|
-
filteredDiff.props.removed.length > 0;
|
|
1985
|
-
|
|
1986
|
-
if (hasChanges) {
|
|
1987
|
-
onChange(filteredDiff);
|
|
1988
|
-
}
|
|
1989
|
-
};
|
|
1990
|
-
|
|
1991
|
-
// Reuse subscription infrastructure
|
|
1992
|
-
const subscription = this.subscribe({ onChange: filteredOnChange, onError });
|
|
1993
|
-
|
|
1994
|
-
// Polling: periodically check frontier and auto-materialize if changed
|
|
1995
|
-
/** @type {ReturnType<typeof setInterval>|null} */
|
|
1996
|
-
let pollIntervalId = null;
|
|
1997
|
-
let pollInFlight = false;
|
|
1998
|
-
if (poll) {
|
|
1999
|
-
pollIntervalId = setInterval(() => {
|
|
2000
|
-
if (pollInFlight) {
|
|
2001
|
-
return;
|
|
2002
|
-
}
|
|
2003
|
-
pollInFlight = true;
|
|
2004
|
-
this.hasFrontierChanged()
|
|
2005
|
-
.then(async (changed) => {
|
|
2006
|
-
if (changed) {
|
|
2007
|
-
await this.materialize();
|
|
2008
|
-
}
|
|
2009
|
-
})
|
|
2010
|
-
.catch((err) => {
|
|
2011
|
-
if (onError) {
|
|
2012
|
-
try {
|
|
2013
|
-
onError(err);
|
|
2014
|
-
} catch {
|
|
2015
|
-
// onError itself threw — swallow to prevent cascade
|
|
2016
|
-
}
|
|
2017
|
-
}
|
|
2018
|
-
})
|
|
2019
|
-
.finally(() => {
|
|
2020
|
-
pollInFlight = false;
|
|
2021
|
-
});
|
|
2022
|
-
}, poll);
|
|
2023
|
-
}
|
|
2024
|
-
|
|
2025
|
-
return {
|
|
2026
|
-
unsubscribe: () => {
|
|
2027
|
-
if (pollIntervalId !== null) {
|
|
2028
|
-
clearInterval(pollIntervalId);
|
|
2029
|
-
pollIntervalId = null;
|
|
2030
|
-
}
|
|
2031
|
-
subscription.unsubscribe();
|
|
2032
|
-
},
|
|
2033
|
-
};
|
|
2034
|
-
}
|
|
2035
|
-
|
|
2036
|
-
/**
|
|
2037
|
-
* Notifies all subscribers of state changes.
|
|
2038
|
-
* Handles deferred replay for subscribers added with `replay: true` before
|
|
2039
|
-
* cached state was available.
|
|
2040
|
-
* @param {import('./services/StateDiff.js').StateDiffResult} diff
|
|
2041
|
-
* @param {import('./services/JoinReducer.js').WarpStateV5} currentState - The current state for deferred replay
|
|
2042
|
-
* @private
|
|
2043
|
-
*/
|
|
2044
|
-
_notifySubscribers(diff, currentState) {
|
|
2045
|
-
for (const subscriber of this._subscribers) {
|
|
2046
|
-
try {
|
|
2047
|
-
// Handle deferred replay: on first notification, send full state diff instead
|
|
2048
|
-
if (subscriber.pendingReplay) {
|
|
2049
|
-
subscriber.pendingReplay = false;
|
|
2050
|
-
const replayDiff = diffStates(null, currentState);
|
|
2051
|
-
if (!isEmptyDiff(replayDiff)) {
|
|
2052
|
-
subscriber.onChange(replayDiff);
|
|
2053
|
-
}
|
|
2054
|
-
} else {
|
|
2055
|
-
// Skip non-replay subscribers when diff is empty
|
|
2056
|
-
if (isEmptyDiff(diff)) {
|
|
2057
|
-
continue;
|
|
2058
|
-
}
|
|
2059
|
-
subscriber.onChange(diff);
|
|
2060
|
-
}
|
|
2061
|
-
} catch (err) {
|
|
2062
|
-
if (subscriber.onError) {
|
|
2063
|
-
try {
|
|
2064
|
-
subscriber.onError(err);
|
|
2065
|
-
} catch {
|
|
2066
|
-
// onError itself threw — swallow to prevent cascade
|
|
2067
|
-
}
|
|
2068
|
-
}
|
|
2069
|
-
}
|
|
2070
|
-
}
|
|
2071
|
-
}
|
|
2072
|
-
|
|
2073
|
-
/**
|
|
2074
|
-
* Creates a sync request to send to a remote peer.
|
|
2075
|
-
* The request contains the local frontier for comparison.
|
|
2076
|
-
*
|
|
2077
|
-
* @returns {Promise<import('./services/SyncProtocol.js').SyncRequest>} The sync request
|
|
2078
|
-
* @throws {Error} If listing refs fails
|
|
2079
|
-
*
|
|
2080
|
-
* @example
|
|
2081
|
-
* const request = await graph.createSyncRequest();
|
|
2082
|
-
* // Send request to remote peer...
|
|
2083
|
-
*/
|
|
2084
|
-
async createSyncRequest() {
|
|
2085
|
-
const frontier = await this.getFrontier();
|
|
2086
|
-
return createSyncRequest(frontier);
|
|
2087
|
-
}
|
|
2088
|
-
|
|
2089
|
-
/**
|
|
2090
|
-
* Processes an incoming sync request and returns patches the requester needs.
|
|
2091
|
-
*
|
|
2092
|
-
* @param {import('./services/SyncProtocol.js').SyncRequest} request - The incoming sync request
|
|
2093
|
-
* @returns {Promise<import('./services/SyncProtocol.js').SyncResponse>} The sync response
|
|
2094
|
-
* @throws {Error} If listing refs or reading patches fails
|
|
2095
|
-
*
|
|
2096
|
-
* @example
|
|
2097
|
-
* // Receive request from remote peer
|
|
2098
|
-
* const response = await graph.processSyncRequest(request);
|
|
2099
|
-
* // Send response back to requester...
|
|
2100
|
-
*/
|
|
2101
|
-
async processSyncRequest(request) {
|
|
2102
|
-
const localFrontier = await this.getFrontier();
|
|
2103
|
-
return await processSyncRequest(
|
|
2104
|
-
request,
|
|
2105
|
-
localFrontier,
|
|
2106
|
-
/** @type {any} */ (this._persistence), // TODO(ts-cleanup): narrow port type
|
|
2107
|
-
this._graphName,
|
|
2108
|
-
{ codec: this._codec }
|
|
2109
|
-
);
|
|
2110
|
-
}
|
|
2111
|
-
|
|
2112
|
-
/**
|
|
2113
|
-
* Applies a sync response to the local graph state.
|
|
2114
|
-
* Updates the cached state with received patches.
|
|
2115
|
-
*
|
|
2116
|
-
* **Requires a cached state.**
|
|
2117
|
-
*
|
|
2118
|
-
* @param {import('./services/SyncProtocol.js').SyncResponse} response - The sync response
|
|
2119
|
-
* @returns {{state: import('./services/JoinReducer.js').WarpStateV5, applied: number}} Result with updated state
|
|
2120
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2121
|
-
*
|
|
2122
|
-
* @example
|
|
2123
|
-
* await graph.materialize(); // Cache state first
|
|
2124
|
-
* const result = graph.applySyncResponse(response);
|
|
2125
|
-
* console.log(`Applied ${result.applied} patches from remote`);
|
|
2126
|
-
*/
|
|
2127
|
-
applySyncResponse(response) {
|
|
2128
|
-
if (!this._cachedState) {
|
|
2129
|
-
throw new QueryError('No cached state. Call materialize() first.', {
|
|
2130
|
-
code: 'E_NO_STATE',
|
|
2131
|
-
});
|
|
2132
|
-
}
|
|
2133
|
-
|
|
2134
|
-
const currentFrontier = /** @type {any} */ (this._cachedState.observedFrontier); // TODO(ts-cleanup): narrow port type
|
|
2135
|
-
const result = /** @type {{state: import('./services/JoinReducer.js').WarpStateV5, frontier: Map<string, string>, applied: number}} */ (applySyncResponse(response, this._cachedState, currentFrontier));
|
|
2136
|
-
|
|
2137
|
-
// Update cached state
|
|
2138
|
-
this._cachedState = result.state;
|
|
2139
|
-
|
|
2140
|
-
// Keep _lastFrontier in sync so hasFrontierChanged() won't misreport stale.
|
|
2141
|
-
// Merge the response's per-writer tips into the stored frontier snapshot.
|
|
2142
|
-
if (this._lastFrontier && Array.isArray(response.patches)) {
|
|
2143
|
-
for (const { writerId, sha } of response.patches) {
|
|
2144
|
-
if (writerId && sha) {
|
|
2145
|
-
this._lastFrontier.set(writerId, sha);
|
|
2146
|
-
}
|
|
2147
|
-
}
|
|
2148
|
-
}
|
|
2149
|
-
|
|
2150
|
-
// Track patches for GC
|
|
2151
|
-
this._patchesSinceGC += result.applied;
|
|
2152
|
-
|
|
2153
|
-
return result;
|
|
2154
|
-
}
|
|
2155
|
-
|
|
2156
|
-
/**
|
|
2157
|
-
* Checks if sync is needed with a remote frontier.
|
|
2158
|
-
*
|
|
2159
|
-
* @param {Map<string, string>} remoteFrontier - The remote peer's frontier
|
|
2160
|
-
* @returns {Promise<boolean>} True if sync would transfer any patches
|
|
2161
|
-
* @throws {Error} If listing refs fails
|
|
2162
|
-
*/
|
|
2163
|
-
async syncNeeded(remoteFrontier) {
|
|
2164
|
-
const localFrontier = await this.getFrontier();
|
|
2165
|
-
return syncNeeded(localFrontier, remoteFrontier);
|
|
2166
|
-
}
|
|
2167
|
-
|
|
2168
|
-
  /**
   * Syncs with a remote peer, either over HTTP or against a direct
   * in-process graph instance.
   *
   * Direct-peer mode is detected by duck typing: any object exposing a
   * `processSyncRequest` function is treated as a peer graph (no retries
   * in that mode). Otherwise `remote` is parsed as an http(s) URL and the
   * request is POSTed with retry/backoff.
   *
   * @param {string|WarpGraph} remote - URL or peer graph instance
   * @param {Object} [options]
   * @param {string} [options.path='/sync'] - Sync path (HTTP mode); an explicit value overrides any path in the URL
   * @param {number} [options.retries=3] - Retry count
   * @param {number} [options.baseDelayMs=250] - Base backoff delay
   * @param {number} [options.maxDelayMs=2000] - Max backoff delay
   * @param {number} [options.timeoutMs=10000] - Per-request timeout
   * @param {AbortSignal} [options.signal] - Abort signal (checked before each attempt and combined with the timeout signal)
   * @param {(event: {type: string, attempt: number, durationMs?: number, status?: number, error?: Error}) => void} [options.onStatus] - Progress callback
   * @param {boolean} [options.materialize=false] - Auto-materialize after sync and include `state` in the result
   * @param {{ secret: string, keyId?: string }} [options.auth] - Client auth credentials
   * @returns {Promise<{applied: number, attempts: number, state?: import('./services/JoinReducer.js').WarpStateV5}>}
   * @throws {SyncError} On invalid/unsupported URL, timeout, network failure, remote/protocol errors
   * @throws {OperationAbortedError} When the abort signal fires
   */
  async syncWith(remote, options = {}) {
    const t0 = this._clock.now();
    const {
      path = '/sync',
      retries = DEFAULT_SYNC_WITH_RETRIES,
      baseDelayMs = DEFAULT_SYNC_WITH_BASE_DELAY_MS,
      maxDelayMs = DEFAULT_SYNC_WITH_MAX_DELAY_MS,
      timeoutMs = DEFAULT_SYNC_WITH_TIMEOUT_MS,
      signal,
      onStatus,
      materialize: materializeAfterSync = false,
      auth,
    } = options;

    // Distinguish "caller explicitly passed path" from the default value, so
    // an explicit path can override a path already present in the URL.
    const hasPathOverride = Object.prototype.hasOwnProperty.call(options, 'path');
    const isDirectPeer = remote && typeof remote === 'object' &&
      typeof remote.processSyncRequest === 'function';
    let targetUrl = null;
    if (!isDirectPeer) {
      try {
        targetUrl = remote instanceof URL ? new URL(remote.toString()) : new URL(/** @type {string} */ (remote));
      } catch {
        throw new SyncError('Invalid remote URL', {
          code: 'E_SYNC_REMOTE_URL',
          context: { remote },
        });
      }

      if (!['http:', 'https:'].includes(targetUrl.protocol)) {
        throw new SyncError('Unsupported remote URL protocol', {
          code: 'E_SYNC_REMOTE_URL',
          context: { protocol: targetUrl.protocol },
        });
      }

      // Resolve the request path: a bare URL gets the (normalized) sync path;
      // an explicit options.path wins over any path embedded in the URL.
      const normalizedPath = normalizeSyncPath(path);
      if (!targetUrl.pathname || targetUrl.pathname === '/') {
        targetUrl.pathname = normalizedPath;
      } else if (hasPathOverride) {
        targetUrl.pathname = normalizedPath;
      }
      targetUrl.hash = '';
    }
    let attempt = 0;
    // Forward progress events to onStatus (if provided), tagging each with the
    // current attempt number.
    const emit = (/** @type {string} */ type, /** @type {Record<string, any>} */ payload = {}) => {
      if (typeof onStatus === 'function') {
        onStatus(/** @type {any} */ ({ type, attempt, ...payload })); // TODO(ts-cleanup): type sync protocol
      }
    };
    // Only transient HTTP-mode failures are retryable; direct-peer calls fail fast.
    const shouldRetry = (/** @type {any} */ err) => { // TODO(ts-cleanup): type error
      if (isDirectPeer) { return false; }
      if (err instanceof SyncError) {
        return ['E_SYNC_REMOTE', 'E_SYNC_TIMEOUT', 'E_SYNC_NETWORK'].includes(err.code);
      }
      return err instanceof TimeoutError;
    };
    // One full sync round-trip: build request, send, validate, apply.
    const executeAttempt = async () => {
      checkAborted(signal, 'syncWith');
      attempt += 1;
      const attemptStart = Date.now();
      emit('connecting');
      const request = await this.createSyncRequest();
      emit('requestBuilt');
      let response;
      if (isDirectPeer) {
        emit('requestSent');
        response = await remote.processSyncRequest(request);
        emit('responseReceived');
      } else {
        emit('requestSent');
        const bodyStr = JSON.stringify(request);
        const authHeaders = await buildSyncAuthHeaders({
          auth, bodyStr, targetUrl: /** @type {URL} */ (targetUrl), crypto: this._crypto,
        });
        let res;
        try {
          // Race the fetch against the timeout; the caller's signal (if any)
          // is merged with the timeout signal so either can cancel.
          res = await timeout(timeoutMs, (timeoutSignal) => {
            const combinedSignal = signal
              ? AbortSignal.any([timeoutSignal, signal])
              : timeoutSignal;
            return fetch(/** @type {URL} */ (targetUrl).toString(), {
              method: 'POST',
              headers: {
                'content-type': 'application/json',
                'accept': 'application/json',
                ...authHeaders,
              },
              body: bodyStr,
              signal: combinedSignal,
            });
          });
        } catch (err) {
          // Map low-level failures onto domain errors: abort, timeout, network.
          if (/** @type {any} */ (err)?.name === 'AbortError') { // TODO(ts-cleanup): type error
            throw new OperationAbortedError('syncWith', { reason: 'Signal received' });
          }
          if (err instanceof TimeoutError) {
            throw new SyncError('Sync request timed out', {
              code: 'E_SYNC_TIMEOUT',
              context: { timeoutMs },
            });
          }
          throw new SyncError('Network error', {
            code: 'E_SYNC_NETWORK',
            context: { message: /** @type {any} */ (err)?.message }, // TODO(ts-cleanup): type error
          });
        }

        emit('responseReceived', { status: res.status });

        // 5xx is retryable (E_SYNC_REMOTE); 4xx is a protocol error (not retried).
        if (res.status >= 500) {
          throw new SyncError(`Remote error: ${res.status}`, {
            code: 'E_SYNC_REMOTE',
            context: { status: res.status },
          });
        }

        if (res.status >= 400) {
          throw new SyncError(`Protocol error: ${res.status}`, {
            code: 'E_SYNC_PROTOCOL',
            context: { status: res.status },
          });
        }

        try {
          response = await res.json();
        } catch {
          throw new SyncError('Invalid JSON response', {
            code: 'E_SYNC_PROTOCOL',
            context: { status: res.status },
          });
        }
      }

      // applySyncResponse() below requires a cached state.
      if (!this._cachedState) {
        await this.materialize();
        emit('materialized');
      }

      // Structural validation of the response envelope before applying it.
      if (!response || typeof response !== 'object' ||
        response.type !== 'sync-response' ||
        !response.frontier || typeof response.frontier !== 'object' || Array.isArray(response.frontier) ||
        !Array.isArray(response.patches)) {
        throw new SyncError('Invalid sync response', {
          code: 'E_SYNC_PROTOCOL',
        });
      }

      const result = this.applySyncResponse(response);
      emit('applied', { applied: result.applied });

      const durationMs = Date.now() - attemptStart;
      emit('complete', { durationMs, applied: result.applied });
      return { applied: result.applied, attempts: attempt };
    };

    try {
      const syncResult = await retry(executeAttempt, {
        retries,
        delay: baseDelayMs,
        maxDelay: maxDelayMs,
        backoff: 'exponential',
        jitter: 'decorrelated',
        signal,
        shouldRetry,
        onRetry: (/** @type {Error} */ error, /** @type {number} */ attemptNumber, /** @type {number} */ delayMs) => {
          if (typeof onStatus === 'function') {
            onStatus(/** @type {any} */ ({ type: 'retrying', attempt: attemptNumber, delayMs, error })); // TODO(ts-cleanup): type sync protocol
          }
        },
      });

      this._logTiming('syncWith', t0, { metrics: `${syncResult.applied} patches applied` });

      // Optionally hand back the (possibly freshly materialized) state.
      if (materializeAfterSync) {
        if (!this._cachedState) { await this.materialize(); }
        return { ...syncResult, state: /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState) };
      }
      return syncResult;
    } catch (err) {
      this._logTiming('syncWith', t0, { error: /** @type {Error} */ (err) });
      // Normalize abort into OperationAbortedError and notify onStatus.
      if (/** @type {any} */ (err)?.name === 'AbortError') { // TODO(ts-cleanup): type error
        const abortedError = new OperationAbortedError('syncWith', { reason: 'Signal received' });
        if (typeof onStatus === 'function') {
          onStatus({ type: 'failed', attempt, error: abortedError });
        }
        throw abortedError;
      }
      // When retries are exhausted, surface the underlying cause, not the wrapper.
      if (err instanceof RetryExhaustedError) {
        const cause = /** @type {Error} */ (err.cause || err);
        if (typeof onStatus === 'function') {
          onStatus({ type: 'failed', attempt: err.attempts, error: cause });
        }
        throw cause;
      }
      if (typeof onStatus === 'function') {
        onStatus({ type: 'failed', attempt, error: /** @type {Error} */ (err) });
      }
      throw err;
    }
  }
|
|
2384
|
-
|
|
2385
|
-
/**
|
|
2386
|
-
* Starts a built-in sync server for this graph.
|
|
2387
|
-
*
|
|
2388
|
-
* @param {Object} options
|
|
2389
|
-
* @param {number} options.port - Port to listen on
|
|
2390
|
-
* @param {string} [options.host='127.0.0.1'] - Host to bind
|
|
2391
|
-
* @param {string} [options.path='/sync'] - Path to handle sync requests
|
|
2392
|
-
* @param {number} [options.maxRequestBytes=4194304] - Max request size in bytes
|
|
2393
|
-
* @param {import('../ports/HttpServerPort.js').default} options.httpPort - HTTP server adapter (required)
|
|
2394
|
-
* @param {{ keys: Record<string, string>, mode?: 'enforce'|'log-only' }} [options.auth] - Auth configuration
|
|
2395
|
-
* @returns {Promise<{close: () => Promise<void>, url: string}>} Server handle
|
|
2396
|
-
* @throws {Error} If port is not a number
|
|
2397
|
-
* @throws {Error} If httpPort adapter is not provided
|
|
2398
|
-
*/
|
|
2399
|
-
async serve({ port, host = '127.0.0.1', path = '/sync', maxRequestBytes = DEFAULT_SYNC_SERVER_MAX_BYTES, httpPort, auth } = /** @type {any} */ ({})) { // TODO(ts-cleanup): needs options type
|
|
2400
|
-
if (typeof port !== 'number') {
|
|
2401
|
-
throw new Error('serve() requires a numeric port');
|
|
2402
|
-
}
|
|
2403
|
-
if (!httpPort) {
|
|
2404
|
-
throw new Error('serve() requires an httpPort adapter');
|
|
2405
|
-
}
|
|
2406
|
-
|
|
2407
|
-
const authConfig = auth
|
|
2408
|
-
? { ...auth, crypto: this._crypto, logger: this._logger || undefined }
|
|
2409
|
-
: undefined;
|
|
2410
|
-
|
|
2411
|
-
const httpServer = new HttpSyncServer({
|
|
2412
|
-
httpPort,
|
|
2413
|
-
graph: this,
|
|
2414
|
-
path,
|
|
2415
|
-
host,
|
|
2416
|
-
maxRequestBytes,
|
|
2417
|
-
auth: authConfig,
|
|
2418
|
-
});
|
|
2419
|
-
|
|
2420
|
-
return await httpServer.listen(port);
|
|
2421
|
-
}
|
|
2422
|
-
|
|
2423
|
-
// ============================================================================
|
|
2424
|
-
// Writer Factory Methods
|
|
2425
|
-
// ============================================================================
|
|
2426
|
-
|
|
2427
|
-
/**
|
|
2428
|
-
* Gets or creates a Writer for this graph.
|
|
2429
|
-
*
|
|
2430
|
-
* If an explicit writerId is provided, it is validated and used directly.
|
|
2431
|
-
* Otherwise, the writerId is resolved from git config using the key
|
|
2432
|
-
* `warp.writerId.<graphName>`. If no config exists, a new canonical ID
|
|
2433
|
-
* is generated and persisted.
|
|
2434
|
-
*
|
|
2435
|
-
* @param {string} [writerId] - Optional explicit writer ID. If not provided, resolves stable ID from git config.
|
|
2436
|
-
* @returns {Promise<Writer>} A Writer instance
|
|
2437
|
-
* @throws {Error} If writerId is invalid
|
|
2438
|
-
*
|
|
2439
|
-
* @example
|
|
2440
|
-
* // Use explicit writer ID
|
|
2441
|
-
* const writer = await graph.writer('alice');
|
|
2442
|
-
*
|
|
2443
|
-
* @example
|
|
2444
|
-
* // Resolve from git config (or generate new)
|
|
2445
|
-
* const writer = await graph.writer();
|
|
2446
|
-
*/
|
|
2447
|
-
async writer(writerId) {
|
|
2448
|
-
// Build config adapters for resolveWriterId
|
|
2449
|
-
const configGet = async (/** @type {string} */ key) => await this._persistence.configGet(key);
|
|
2450
|
-
const configSet = async (/** @type {string} */ key, /** @type {string} */ value) => await this._persistence.configSet(key, value);
|
|
2451
|
-
|
|
2452
|
-
// Resolve the writer ID
|
|
2453
|
-
const resolvedWriterId = await resolveWriterId({
|
|
2454
|
-
graphName: this._graphName,
|
|
2455
|
-
explicitWriterId: writerId,
|
|
2456
|
-
configGet,
|
|
2457
|
-
configSet,
|
|
2458
|
-
});
|
|
2459
|
-
|
|
2460
|
-
return new Writer({
|
|
2461
|
-
persistence: /** @type {any} */ (this._persistence), // TODO(ts-cleanup): narrow port type
|
|
2462
|
-
graphName: this._graphName,
|
|
2463
|
-
writerId: resolvedWriterId,
|
|
2464
|
-
versionVector: this._versionVector,
|
|
2465
|
-
getCurrentState: () => /** @type {any} */ (this._cachedState), // TODO(ts-cleanup): narrow port type
|
|
2466
|
-
onDeleteWithData: this._onDeleteWithData,
|
|
2467
|
-
onCommitSuccess: (/** @type {any} */ opts) => this._onPatchCommitted(resolvedWriterId, opts), // TODO(ts-cleanup): type sync protocol
|
|
2468
|
-
codec: this._codec,
|
|
2469
|
-
});
|
|
2470
|
-
}
|
|
2471
|
-
|
|
2472
|
-
/**
|
|
2473
|
-
* Creates a new Writer with a fresh canonical ID.
|
|
2474
|
-
*
|
|
2475
|
-
* This always generates a new unique writer ID, regardless of any
|
|
2476
|
-
* existing configuration. Use this when you need a guaranteed fresh
|
|
2477
|
-
* identity (e.g., spawning a new writer process).
|
|
2478
|
-
*
|
|
2479
|
-
* @deprecated Use `writer()` to resolve a stable ID from git config, or `writer(id)` with an explicit ID.
|
|
2480
|
-
* @param {Object} [opts]
|
|
2481
|
-
* @param {'config'|'none'} [opts.persist='none'] - Whether to persist the new ID to git config
|
|
2482
|
-
* @param {string} [opts.alias] - Optional alias for config key (used with persist:'config')
|
|
2483
|
-
* @returns {Promise<Writer>} A Writer instance with new canonical ID
|
|
2484
|
-
* @throws {Error} If config operations fail (when persist:'config')
|
|
2485
|
-
*
|
|
2486
|
-
* @example
|
|
2487
|
-
* // Create ephemeral writer (not persisted)
|
|
2488
|
-
* const writer = await graph.createWriter();
|
|
2489
|
-
*
|
|
2490
|
-
* @example
|
|
2491
|
-
* // Create and persist to git config
|
|
2492
|
-
* const writer = await graph.createWriter({ persist: 'config' });
|
|
2493
|
-
*/
|
|
2494
|
-
async createWriter(opts = {}) {
|
|
2495
|
-
if (this._logger) {
|
|
2496
|
-
this._logger.warn('[warp] createWriter() is deprecated. Use writer() or writer(id) instead.');
|
|
2497
|
-
}
|
|
2498
|
-
// eslint-disable-next-line no-console
|
|
2499
|
-
console.warn('[warp] createWriter() is deprecated. Use writer() or writer(id) instead.');
|
|
2500
|
-
|
|
2501
|
-
const { persist = 'none', alias } = opts;
|
|
2502
|
-
|
|
2503
|
-
// Generate new canonical writerId
|
|
2504
|
-
const freshWriterId = generateWriterId();
|
|
2505
|
-
|
|
2506
|
-
// Optionally persist to git config
|
|
2507
|
-
if (persist === 'config') {
|
|
2508
|
-
const configKey = alias
|
|
2509
|
-
? `warp.writerId.${alias}`
|
|
2510
|
-
: `warp.writerId.${this._graphName}`;
|
|
2511
|
-
await this._persistence.configSet(configKey, freshWriterId);
|
|
2512
|
-
}
|
|
2513
|
-
|
|
2514
|
-
return new Writer({
|
|
2515
|
-
persistence: /** @type {any} */ (this._persistence), // TODO(ts-cleanup): narrow port type
|
|
2516
|
-
graphName: this._graphName,
|
|
2517
|
-
writerId: freshWriterId,
|
|
2518
|
-
versionVector: this._versionVector,
|
|
2519
|
-
getCurrentState: () => /** @type {any} */ (this._cachedState), // TODO(ts-cleanup): narrow port type
|
|
2520
|
-
onDeleteWithData: this._onDeleteWithData,
|
|
2521
|
-
onCommitSuccess: (/** @type {any} */ commitOpts) => this._onPatchCommitted(freshWriterId, commitOpts), // TODO(ts-cleanup): type sync protocol
|
|
2522
|
-
codec: this._codec,
|
|
2523
|
-
});
|
|
2524
|
-
}
|
|
2525
|
-
|
|
2526
|
-
// ============================================================================
|
|
2527
|
-
// Auto-Materialize Guard
|
|
2528
|
-
// ============================================================================
|
|
2529
|
-
|
|
2530
|
-
/**
|
|
2531
|
-
* Ensures cached state is fresh. When autoMaterialize is enabled,
|
|
2532
|
-
* materializes if state is null or dirty. Otherwise throws.
|
|
2533
|
-
*
|
|
2534
|
-
* @returns {Promise<void>}
|
|
2535
|
-
* @throws {QueryError} If no cached state and autoMaterialize is off (code: `E_NO_STATE`)
|
|
2536
|
-
* @throws {QueryError} If cached state is dirty and autoMaterialize is off (code: `E_STALE_STATE`)
|
|
2537
|
-
* @private
|
|
2538
|
-
*/
|
|
2539
|
-
async _ensureFreshState() {
|
|
2540
|
-
if (this._autoMaterialize && (!this._cachedState || this._stateDirty)) {
|
|
2541
|
-
await this.materialize();
|
|
2542
|
-
return;
|
|
2543
|
-
}
|
|
2544
|
-
if (!this._cachedState) {
|
|
2545
|
-
throw new QueryError(
|
|
2546
|
-
'No cached state. Call materialize() to load initial state, or pass autoMaterialize: true to WarpGraph.open().',
|
|
2547
|
-
{ code: 'E_NO_STATE' },
|
|
2548
|
-
);
|
|
2549
|
-
}
|
|
2550
|
-
if (this._stateDirty) {
|
|
2551
|
-
throw new QueryError(
|
|
2552
|
-
'Cached state is stale. Call materialize() to refresh, or enable autoMaterialize.',
|
|
2553
|
-
{ code: 'E_STALE_STATE' },
|
|
2554
|
-
);
|
|
2555
|
-
}
|
|
2556
|
-
}
|
|
2557
|
-
|
|
2558
|
-
// ============================================================================
|
|
2559
|
-
// Query API (Task 7) - Queries on Materialized WARP State
|
|
2560
|
-
// ============================================================================
|
|
2561
|
-
|
|
2562
|
-
/**
|
|
2563
|
-
* Creates a fluent query builder for the logical graph.
|
|
2564
|
-
*
|
|
2565
|
-
* The query builder provides a chainable API for querying nodes, filtering
|
|
2566
|
-
* by patterns and properties, traversing edges, and selecting results.
|
|
2567
|
-
*
|
|
2568
|
-
* **Requires a cached state.** Call materialize() first if not already cached,
|
|
2569
|
-
* or use autoMaterialize option when opening the graph.
|
|
2570
|
-
*
|
|
2571
|
-
* @returns {import('./services/QueryBuilder.js').default} A fluent query builder
|
|
2572
|
-
*
|
|
2573
|
-
* @example
|
|
2574
|
-
* await graph.materialize();
|
|
2575
|
-
* const users = await graph.query()
|
|
2576
|
-
* .match('user:*')
|
|
2577
|
-
* .where('active', true)
|
|
2578
|
-
* .outgoing('follows')
|
|
2579
|
-
* .select('*');
|
|
2580
|
-
*/
|
|
2581
|
-
query() {
|
|
2582
|
-
return new QueryBuilder(this);
|
|
2583
|
-
}
|
|
2584
|
-
|
|
2585
|
-
/**
|
|
2586
|
-
* Creates a read-only observer view of the current materialized state.
|
|
2587
|
-
*
|
|
2588
|
-
* The observer sees only nodes matching the `match` glob pattern, with
|
|
2589
|
-
* property visibility controlled by `expose` and `redact` lists.
|
|
2590
|
-
* Edges are only visible when both endpoints pass the match filter.
|
|
2591
|
-
*
|
|
2592
|
-
* **Requires a cached state.** Call materialize() first if not already cached,
|
|
2593
|
-
* or use autoMaterialize option when opening the graph.
|
|
2594
|
-
*
|
|
2595
|
-
* @param {string} name - Observer name
|
|
2596
|
-
* @param {Object} config - Observer configuration
|
|
2597
|
-
* @param {string} config.match - Glob pattern for visible nodes (e.g. 'user:*')
|
|
2598
|
-
* @param {string[]} [config.expose] - Property keys to include (whitelist)
|
|
2599
|
-
* @param {string[]} [config.redact] - Property keys to exclude (blacklist, takes precedence over expose)
|
|
2600
|
-
* @returns {Promise<import('./services/ObserverView.js').default>} A read-only observer view
|
|
2601
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2602
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2603
|
-
*
|
|
2604
|
-
* @example
|
|
2605
|
-
* await graph.materialize();
|
|
2606
|
-
* const view = await graph.observer('userView', {
|
|
2607
|
-
* match: 'user:*',
|
|
2608
|
-
* redact: ['ssn', 'password'],
|
|
2609
|
-
* });
|
|
2610
|
-
* const users = await view.getNodes();
|
|
2611
|
-
* const result = await view.query().match('user:*').run();
|
|
2612
|
-
*/
|
|
2613
|
-
async observer(name, config) {
|
|
2614
|
-
if (!config || typeof config.match !== 'string') {
|
|
2615
|
-
throw new Error('observer config.match must be a string');
|
|
2616
|
-
}
|
|
2617
|
-
await this._ensureFreshState();
|
|
2618
|
-
return new ObserverView({ name, config, graph: this });
|
|
2619
|
-
}
|
|
2620
|
-
|
|
2621
|
-
/**
|
|
2622
|
-
* Computes the directed MDL translation cost from observer A to observer B.
|
|
2623
|
-
*
|
|
2624
|
-
* The cost measures how much information is lost when translating from
|
|
2625
|
-
* A's view to B's view. It is asymmetric: cost(A->B) != cost(B->A).
|
|
2626
|
-
*
|
|
2627
|
-
* **Requires a cached state.** Call materialize() first if not already cached,
|
|
2628
|
-
* or use autoMaterialize option when opening the graph.
|
|
2629
|
-
*
|
|
2630
|
-
* @param {Object} configA - Observer configuration for A
|
|
2631
|
-
* @param {string} configA.match - Glob pattern for visible nodes
|
|
2632
|
-
* @param {string[]} [configA.expose] - Property keys to include
|
|
2633
|
-
* @param {string[]} [configA.redact] - Property keys to exclude
|
|
2634
|
-
* @param {Object} configB - Observer configuration for B
|
|
2635
|
-
* @param {string} configB.match - Glob pattern for visible nodes
|
|
2636
|
-
* @param {string[]} [configB.expose] - Property keys to include
|
|
2637
|
-
* @param {string[]} [configB.redact] - Property keys to exclude
|
|
2638
|
-
* @returns {Promise<{cost: number, breakdown: {nodeLoss: number, edgeLoss: number, propLoss: number}}>}
|
|
2639
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2640
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2641
|
-
*
|
|
2642
|
-
* @see Paper IV, Section 4 -- Directed rulial cost
|
|
2643
|
-
*
|
|
2644
|
-
* @example
|
|
2645
|
-
* await graph.materialize();
|
|
2646
|
-
* const result = await graph.translationCost(
|
|
2647
|
-
* { match: 'user:*' },
|
|
2648
|
-
* { match: 'user:*', redact: ['ssn'] }
|
|
2649
|
-
* );
|
|
2650
|
-
* console.log(result.cost); // e.g. 0.04
|
|
2651
|
-
* console.log(result.breakdown); // { nodeLoss: 0, edgeLoss: 0, propLoss: 0.2 }
|
|
2652
|
-
*/
|
|
2653
|
-
async translationCost(configA, configB) {
|
|
2654
|
-
await this._ensureFreshState();
|
|
2655
|
-
return computeTranslationCost(configA, configB, this._cachedState);
|
|
2656
|
-
}
|
|
2657
|
-
|
|
2658
|
-
/**
|
|
2659
|
-
* Checks if a node exists in the materialized graph state.
|
|
2660
|
-
*
|
|
2661
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2662
|
-
*
|
|
2663
|
-
* @param {string} nodeId - The node ID to check
|
|
2664
|
-
* @returns {Promise<boolean>} True if the node exists in the materialized state
|
|
2665
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2666
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2667
|
-
*
|
|
2668
|
-
* @example
|
|
2669
|
-
* await graph.materialize();
|
|
2670
|
-
* if (await graph.hasNode('user:alice')) {
|
|
2671
|
-
* console.log('Alice exists in the graph');
|
|
2672
|
-
* }
|
|
2673
|
-
*/
|
|
2674
|
-
async hasNode(nodeId) {
|
|
2675
|
-
await this._ensureFreshState();
|
|
2676
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2677
|
-
return orsetContains(s.nodeAlive, nodeId);
|
|
2678
|
-
}
|
|
2679
|
-
|
|
2680
|
-
/**
|
|
2681
|
-
* Gets all properties for a node from the materialized state.
|
|
2682
|
-
*
|
|
2683
|
-
* Returns properties as a Map of key → value. Only returns properties
|
|
2684
|
-
* for nodes that exist in the materialized state.
|
|
2685
|
-
*
|
|
2686
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2687
|
-
*
|
|
2688
|
-
* @param {string} nodeId - The node ID to get properties for
|
|
2689
|
-
* @returns {Promise<Map<string, *>|null>} Map of property key → value, or null if node doesn't exist
|
|
2690
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2691
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2692
|
-
*
|
|
2693
|
-
* @example
|
|
2694
|
-
* await graph.materialize();
|
|
2695
|
-
* const props = await graph.getNodeProps('user:alice');
|
|
2696
|
-
* if (props) {
|
|
2697
|
-
* console.log('Name:', props.get('name'));
|
|
2698
|
-
* }
|
|
2699
|
-
*/
|
|
2700
|
-
async getNodeProps(nodeId) {
|
|
2701
|
-
await this._ensureFreshState();
|
|
2702
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2703
|
-
|
|
2704
|
-
// Check if node exists
|
|
2705
|
-
if (!orsetContains(s.nodeAlive, nodeId)) {
|
|
2706
|
-
return null;
|
|
2707
|
-
}
|
|
2708
|
-
|
|
2709
|
-
// Collect all properties for this node
|
|
2710
|
-
const props = new Map();
|
|
2711
|
-
for (const [propKey, register] of s.prop) {
|
|
2712
|
-
const decoded = decodePropKey(propKey);
|
|
2713
|
-
if (decoded.nodeId === nodeId) {
|
|
2714
|
-
props.set(decoded.propKey, register.value);
|
|
2715
|
-
}
|
|
2716
|
-
}
|
|
2717
|
-
|
|
2718
|
-
return props;
|
|
2719
|
-
}
|
|
2720
|
-
|
|
2721
|
-
/**
|
|
2722
|
-
* Gets all properties for an edge from the materialized state.
|
|
2723
|
-
*
|
|
2724
|
-
* Returns properties as a plain object of key → value. Only returns
|
|
2725
|
-
* properties for edges that exist in the materialized state.
|
|
2726
|
-
*
|
|
2727
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2728
|
-
*
|
|
2729
|
-
* @param {string} from - Source node ID
|
|
2730
|
-
* @param {string} to - Target node ID
|
|
2731
|
-
* @param {string} label - Edge label
|
|
2732
|
-
* @returns {Promise<Record<string, *>|null>} Object of property key → value, or null if edge doesn't exist
|
|
2733
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2734
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2735
|
-
*
|
|
2736
|
-
* @example
|
|
2737
|
-
* await graph.materialize();
|
|
2738
|
-
* const props = await graph.getEdgeProps('user:alice', 'user:bob', 'follows');
|
|
2739
|
-
* if (props) {
|
|
2740
|
-
* console.log('Weight:', props.weight);
|
|
2741
|
-
* }
|
|
2742
|
-
*/
|
|
2743
|
-
async getEdgeProps(from, to, label) {
|
|
2744
|
-
await this._ensureFreshState();
|
|
2745
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2746
|
-
|
|
2747
|
-
// Check if edge exists
|
|
2748
|
-
const edgeKey = encodeEdgeKey(from, to, label);
|
|
2749
|
-
if (!orsetContains(s.edgeAlive, edgeKey)) {
|
|
2750
|
-
return null;
|
|
2751
|
-
}
|
|
2752
|
-
|
|
2753
|
-
// Check node liveness for both endpoints
|
|
2754
|
-
if (!orsetContains(s.nodeAlive, from) ||
|
|
2755
|
-
!orsetContains(s.nodeAlive, to)) {
|
|
2756
|
-
return null;
|
|
2757
|
-
}
|
|
2758
|
-
|
|
2759
|
-
// Determine the birth EventId for clean-slate filtering
|
|
2760
|
-
const birthEvent = s.edgeBirthEvent?.get(edgeKey);
|
|
2761
|
-
|
|
2762
|
-
// Collect all properties for this edge, filtering out stale props
|
|
2763
|
-
// (props set before the edge's most recent re-add)
|
|
2764
|
-
/** @type {Record<string, any>} */
|
|
2765
|
-
const props = {};
|
|
2766
|
-
for (const [propKey, register] of s.prop) {
|
|
2767
|
-
if (!isEdgePropKey(propKey)) {
|
|
2768
|
-
continue;
|
|
2769
|
-
}
|
|
2770
|
-
const decoded = decodeEdgePropKey(propKey);
|
|
2771
|
-
if (decoded.from === from && decoded.to === to && decoded.label === label) {
|
|
2772
|
-
if (birthEvent && register.eventId && compareEventIds(register.eventId, birthEvent) < 0) {
|
|
2773
|
-
continue; // stale prop from before the edge's current incarnation
|
|
2774
|
-
}
|
|
2775
|
-
props[decoded.propKey] = register.value;
|
|
2776
|
-
}
|
|
2777
|
-
}
|
|
2778
|
-
|
|
2779
|
-
return props;
|
|
2780
|
-
}
|
|
2781
|
-
|
|
2782
|
-
/**
|
|
2783
|
-
* Gets neighbors of a node from the materialized state.
|
|
2784
|
-
*
|
|
2785
|
-
* Returns node IDs connected to the given node by edges in the specified direction.
|
|
2786
|
-
* Direction 'outgoing' returns nodes where the given node is the edge source.
|
|
2787
|
-
* Direction 'incoming' returns nodes where the given node is the edge target.
|
|
2788
|
-
* Direction 'both' returns all connected nodes.
|
|
2789
|
-
*
|
|
2790
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2791
|
-
*
|
|
2792
|
-
* @param {string} nodeId - The node ID to get neighbors for
|
|
2793
|
-
* @param {'outgoing' | 'incoming' | 'both'} [direction='both'] - Edge direction to follow
|
|
2794
|
-
* @param {string} [edgeLabel] - Optional edge label filter
|
|
2795
|
-
* @returns {Promise<Array<{nodeId: string, label: string, direction: 'outgoing' | 'incoming'}>>} Array of neighbor info
|
|
2796
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2797
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2798
|
-
*
|
|
2799
|
-
* @example
|
|
2800
|
-
* await graph.materialize();
|
|
2801
|
-
* // Get all outgoing neighbors
|
|
2802
|
-
* const outgoing = await graph.neighbors('user:alice', 'outgoing');
|
|
2803
|
-
* // Get neighbors connected by 'follows' edges
|
|
2804
|
-
* const follows = await graph.neighbors('user:alice', 'outgoing', 'follows');
|
|
2805
|
-
*/
|
|
2806
|
-
async neighbors(nodeId, direction = 'both', edgeLabel = undefined) {
|
|
2807
|
-
await this._ensureFreshState();
|
|
2808
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2809
|
-
|
|
2810
|
-
/** @type {Array<{nodeId: string, label: string, direction: 'outgoing' | 'incoming'}>} */
|
|
2811
|
-
const neighbors = [];
|
|
2812
|
-
|
|
2813
|
-
// Iterate over all visible edges
|
|
2814
|
-
for (const edgeKey of orsetElements(s.edgeAlive)) {
|
|
2815
|
-
const { from, to, label } = decodeEdgeKey(edgeKey);
|
|
2816
|
-
|
|
2817
|
-
// Filter by label if specified
|
|
2818
|
-
if (edgeLabel !== undefined && label !== edgeLabel) {
|
|
2819
|
-
continue;
|
|
2820
|
-
}
|
|
2821
|
-
|
|
2822
|
-
// Check edge direction and collect neighbors
|
|
2823
|
-
if ((direction === 'outgoing' || direction === 'both') && from === nodeId) {
|
|
2824
|
-
// Ensure target node is visible
|
|
2825
|
-
if (orsetContains(s.nodeAlive, to)) {
|
|
2826
|
-
neighbors.push({ nodeId: to, label, direction: /** @type {const} */ ('outgoing') });
|
|
2827
|
-
}
|
|
2828
|
-
}
|
|
2829
|
-
|
|
2830
|
-
if ((direction === 'incoming' || direction === 'both') && to === nodeId) {
|
|
2831
|
-
// Ensure source node is visible
|
|
2832
|
-
if (orsetContains(s.nodeAlive, from)) {
|
|
2833
|
-
neighbors.push({ nodeId: from, label, direction: /** @type {const} */ ('incoming') });
|
|
2834
|
-
}
|
|
2835
|
-
}
|
|
2836
|
-
}
|
|
2837
|
-
|
|
2838
|
-
return neighbors;
|
|
2839
|
-
}
|
|
2840
|
-
|
|
2841
|
-
/**
|
|
2842
|
-
* Returns a defensive copy of the current materialized state.
|
|
2843
|
-
*
|
|
2844
|
-
* The returned object is a shallow clone: top-level ORSet, LWW, and
|
|
2845
|
-
* VersionVector instances are copied so that mutations by the caller
|
|
2846
|
-
* cannot corrupt the internal cache.
|
|
2847
|
-
*
|
|
2848
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2849
|
-
*
|
|
2850
|
-
* @returns {Promise<import('./services/JoinReducer.js').WarpStateV5 | null>}
|
|
2851
|
-
* Cloned state, or null if no state has been materialized yet.
|
|
2852
|
-
*/
|
|
2853
|
-
async getStateSnapshot() {
|
|
2854
|
-
if (!this._cachedState && !this._autoMaterialize) {
|
|
2855
|
-
return null;
|
|
2856
|
-
}
|
|
2857
|
-
await this._ensureFreshState();
|
|
2858
|
-
if (!this._cachedState) {
|
|
2859
|
-
return null;
|
|
2860
|
-
}
|
|
2861
|
-
return cloneStateV5(/** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState));
|
|
2862
|
-
}
|
|
2863
|
-
|
|
2864
|
-
/**
|
|
2865
|
-
* Gets all visible nodes in the materialized state.
|
|
2866
|
-
*
|
|
2867
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2868
|
-
*
|
|
2869
|
-
* @returns {Promise<string[]>} Array of node IDs
|
|
2870
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2871
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2872
|
-
*
|
|
2873
|
-
* @example
|
|
2874
|
-
* await graph.materialize();
|
|
2875
|
-
* for (const nodeId of await graph.getNodes()) {
|
|
2876
|
-
* console.log(nodeId);
|
|
2877
|
-
* }
|
|
2878
|
-
*/
|
|
2879
|
-
async getNodes() {
|
|
2880
|
-
await this._ensureFreshState();
|
|
2881
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2882
|
-
return [...orsetElements(s.nodeAlive)];
|
|
2883
|
-
}
|
|
2884
|
-
|
|
2885
|
-
/**
|
|
2886
|
-
* Gets all visible edges in the materialized state.
|
|
2887
|
-
*
|
|
2888
|
-
* Each edge includes a `props` object containing any edge properties
|
|
2889
|
-
* from the materialized state.
|
|
2890
|
-
*
|
|
2891
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2892
|
-
*
|
|
2893
|
-
* @returns {Promise<Array<{from: string, to: string, label: string, props: Record<string, *>}>>} Array of edge info
|
|
2894
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2895
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2896
|
-
*
|
|
2897
|
-
* @example
|
|
2898
|
-
* await graph.materialize();
|
|
2899
|
-
* for (const edge of await graph.getEdges()) {
|
|
2900
|
-
* console.log(`${edge.from} --${edge.label}--> ${edge.to}`, edge.props);
|
|
2901
|
-
* }
|
|
2902
|
-
*/
|
|
2903
|
-
async getEdges() {
|
|
2904
|
-
await this._ensureFreshState();
|
|
2905
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2906
|
-
|
|
2907
|
-
// Pre-collect edge props into a lookup: "from\0to\0label" → {propKey: value}
|
|
2908
|
-
// Filters out stale props using full EventId ordering via compareEventIds
|
|
2909
|
-
// against the edge's birth EventId (clean-slate semantics on re-add)
|
|
2910
|
-
const edgePropsByKey = new Map();
|
|
2911
|
-
for (const [propKey, register] of s.prop) {
|
|
2912
|
-
if (!isEdgePropKey(propKey)) {
|
|
2913
|
-
continue;
|
|
2914
|
-
}
|
|
2915
|
-
const decoded = decodeEdgePropKey(propKey);
|
|
2916
|
-
const ek = encodeEdgeKey(decoded.from, decoded.to, decoded.label);
|
|
2917
|
-
|
|
2918
|
-
// Clean-slate filter: skip props from before the edge's current incarnation
|
|
2919
|
-
const birthEvent = s.edgeBirthEvent?.get(ek);
|
|
2920
|
-
if (birthEvent && register.eventId && compareEventIds(register.eventId, birthEvent) < 0) {
|
|
2921
|
-
continue;
|
|
2922
|
-
}
|
|
2923
|
-
|
|
2924
|
-
let bag = edgePropsByKey.get(ek);
|
|
2925
|
-
if (!bag) {
|
|
2926
|
-
bag = {};
|
|
2927
|
-
edgePropsByKey.set(ek, bag);
|
|
2928
|
-
}
|
|
2929
|
-
bag[decoded.propKey] = register.value;
|
|
2930
|
-
}
|
|
2931
|
-
|
|
2932
|
-
const edges = [];
|
|
2933
|
-
for (const edgeKey of orsetElements(s.edgeAlive)) {
|
|
2934
|
-
const { from, to, label } = decodeEdgeKey(edgeKey);
|
|
2935
|
-
// Only include edges where both endpoints are visible
|
|
2936
|
-
if (orsetContains(s.nodeAlive, from) &&
|
|
2937
|
-
orsetContains(s.nodeAlive, to)) {
|
|
2938
|
-
const props = edgePropsByKey.get(edgeKey) || {};
|
|
2939
|
-
edges.push({ from, to, label, props });
|
|
2940
|
-
}
|
|
2941
|
-
}
|
|
2942
|
-
return edges;
|
|
2943
|
-
}
|
|
2944
|
-
|
|
2945
|
-
/**
|
|
2946
|
-
* Returns the number of property entries in the materialized state.
|
|
2947
|
-
*
|
|
2948
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
2949
|
-
*
|
|
2950
|
-
* @returns {Promise<number>} Number of property entries
|
|
2951
|
-
* @throws {QueryError} If no cached state exists (code: `E_NO_STATE`)
|
|
2952
|
-
* @throws {QueryError} If cached state is dirty (code: `E_STALE_STATE`)
|
|
2953
|
-
*/
|
|
2954
|
-
async getPropertyCount() {
|
|
2955
|
-
await this._ensureFreshState();
|
|
2956
|
-
const s = /** @type {import('./services/JoinReducer.js').WarpStateV5} */ (this._cachedState);
|
|
2957
|
-
return s.prop.size;
|
|
2958
|
-
}
|
|
2959
|
-
|
|
2960
|
-
  // ============================================================================
  // Fork API (HOLOGRAM)
  // ============================================================================

  /**
   * Creates a fork of this graph at a specific point in a writer's history.
   *
   * A fork creates a new WarpGraph instance that shares history up to the
   * specified patch SHA. Due to Git's content-addressed storage, the shared
   * history is automatically deduplicated. The fork gets a new writer ID and
   * operates independently from the original graph.
   *
   * **Key Properties:**
   * - Fork materializes the same state as the original at the fork point
   * - Writes to the fork don't appear in the original
   * - Writes to the original after fork don't appear in the fork
   * - History up to the fork point is shared (content-addressed dedup)
   *
   * @param {Object} options - Fork configuration
   * @param {string} options.from - Writer ID whose chain to fork from
   * @param {string} options.at - Patch SHA to fork at (must be in the writer's chain)
   * @param {string} [options.forkName] - Name for the forked graph. Defaults to `<graphName>-fork-<timestamp>`
   * @param {string} [options.forkWriterId] - Writer ID for the fork. Defaults to a new canonical ID.
   * @returns {Promise<WarpGraph>} A new WarpGraph instance for the fork
   * @throws {ForkError} If `from` writer does not exist (code: `E_FORK_WRITER_NOT_FOUND`)
   * @throws {ForkError} If `at` SHA does not exist (code: `E_FORK_PATCH_NOT_FOUND`)
   * @throws {ForkError} If `at` SHA is not in the writer's chain (code: `E_FORK_PATCH_NOT_IN_CHAIN`)
   * @throws {ForkError} If fork graph name is invalid (code: `E_FORK_NAME_INVALID`)
   * @throws {ForkError} If a graph with the fork name already has refs (code: `E_FORK_ALREADY_EXISTS`)
   * @throws {ForkError} If required parameters are missing or invalid (code: `E_FORK_INVALID_ARGS`)
   * @throws {ForkError} If forkWriterId is invalid (code: `E_FORK_WRITER_ID_INVALID`)
   *
   * @example
   * // Fork from alice's chain at a specific commit
   * const fork = await graph.fork({
   *   from: 'alice',
   *   at: 'abc123def456',
   * });
   *
   * // Fork materializes same state as original at that point
   * const originalState = await graph.materializeAt('abc123def456');
   * const forkState = await fork.materialize();
   * // originalState and forkState are equivalent
   *
   * @example
   * // Fork with custom name and writer ID
   * const fork = await graph.fork({
   *   from: 'alice',
   *   at: 'abc123def456',
   *   forkName: 'events-experiment',
   *   forkWriterId: 'experiment-writer',
   * });
   */
  async fork({ from, at, forkName, forkWriterId }) {
    const t0 = this._clock.now();

    try {
      // Validate required parameters
      if (!from || typeof from !== 'string') {
        throw new ForkError("Required parameter 'from' is missing or not a string", {
          code: 'E_FORK_INVALID_ARGS',
          context: { from },
        });
      }

      if (!at || typeof at !== 'string') {
        throw new ForkError("Required parameter 'at' is missing or not a string", {
          code: 'E_FORK_INVALID_ARGS',
          context: { at },
        });
      }

      // 1. Validate that the `from` writer exists
      const writers = await this.discoverWriters();
      if (!writers.includes(from)) {
        throw new ForkError(`Writer '${from}' does not exist in graph '${this._graphName}'`, {
          code: 'E_FORK_WRITER_NOT_FOUND',
          context: { writerId: from, graphName: this._graphName, existingWriters: writers },
        });
      }

      // 2. Validate that `at` SHA exists in the repository
      const nodeExists = await this._persistence.nodeExists(at);
      if (!nodeExists) {
        throw new ForkError(`Patch SHA '${at}' does not exist`, {
          code: 'E_FORK_PATCH_NOT_FOUND',
          context: { patchSha: at, writerId: from },
        });
      }

      // 3. Validate that `at` SHA is in the writer's chain
      const writerRef = buildWriterRef(this._graphName, from);
      const tipSha = await this._persistence.readRef(writerRef);

      // A writer that exists but has no ref yet cannot contain `at`.
      if (!tipSha) {
        throw new ForkError(`Writer '${from}' has no commits`, {
          code: 'E_FORK_WRITER_NOT_FOUND',
          context: { writerId: from },
        });
      }

      // Walk the chain to verify `at` is reachable from the tip
      const isInChain = await this._isAncestor(at, tipSha);
      if (!isInChain) {
        throw new ForkError(`Patch SHA '${at}' is not in writer '${from}' chain`, {
          code: 'E_FORK_PATCH_NOT_IN_CHAIN',
          context: { patchSha: at, writerId: from, tipSha },
        });
      }

      // 4. Generate or validate fork name (add random suffix to prevent collisions)
      // NOTE(review): Math.random is fine for collision avoidance here; it is
      // not used for anything security-sensitive.
      const resolvedForkName =
        forkName ?? `${this._graphName}-fork-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`;
      try {
        validateGraphName(resolvedForkName);
      } catch (err) {
        throw new ForkError(`Invalid fork name: ${/** @type {Error} */ (err).message}`, {
          code: 'E_FORK_NAME_INVALID',
          context: { forkName: resolvedForkName, originalError: /** @type {Error} */ (err).message },
        });
      }

      // 5. Check that the fork graph doesn't already exist (has any refs)
      const forkWritersPrefix = buildWritersPrefix(resolvedForkName);
      const existingForkRefs = await this._persistence.listRefs(forkWritersPrefix);
      if (existingForkRefs.length > 0) {
        throw new ForkError(`Graph '${resolvedForkName}' already exists`, {
          code: 'E_FORK_ALREADY_EXISTS',
          context: { forkName: resolvedForkName, existingRefs: existingForkRefs },
        });
      }

      // 6. Generate or validate fork writer ID
      // NOTE(review): `||` (not `??`) means an empty-string forkWriterId falls
      // back to a generated ID — presumably intentional; confirm.
      const resolvedForkWriterId = forkWriterId || generateWriterId();
      try {
        validateWriterId(resolvedForkWriterId);
      } catch (err) {
        throw new ForkError(`Invalid fork writer ID: ${/** @type {Error} */ (err).message}`, {
          code: 'E_FORK_WRITER_ID_INVALID',
          context: { forkWriterId: resolvedForkWriterId, originalError: /** @type {Error} */ (err).message },
        });
      }

      // 7. Create the fork's writer ref pointing to the `at` commit
      const forkWriterRef = buildWriterRef(resolvedForkName, resolvedForkWriterId);
      await this._persistence.updateRef(forkWriterRef, at);

      // 8. Open and return a new WarpGraph instance for the fork
      // The fork inherits this graph's policies and injected dependencies.
      const forkGraph = await WarpGraph.open({
        persistence: this._persistence,
        graphName: resolvedForkName,
        writerId: resolvedForkWriterId,
        gcPolicy: this._gcPolicy,
        adjacencyCacheSize: this._adjacencyCache?.maxSize ?? DEFAULT_ADJACENCY_CACHE_SIZE,
        checkpointPolicy: this._checkpointPolicy || undefined,
        autoMaterialize: this._autoMaterialize,
        onDeleteWithData: this._onDeleteWithData,
        logger: this._logger || undefined,
        clock: this._clock,
        crypto: this._crypto,
        codec: this._codec,
      });

      this._logTiming('fork', t0, {
        metrics: `from=${from} at=${at.slice(0, 7)} name=${resolvedForkName}`,
      });

      return forkGraph;
    } catch (err) {
      // Record the failure in timing telemetry, then propagate unchanged.
      this._logTiming('fork', t0, { error: /** @type {Error} */ (err) });
      throw err;
    }
  }
// ============================================================================
|
|
3135
|
-
// Wormhole API (HOLOGRAM)
|
|
3136
|
-
// ============================================================================
|
|
3137
|
-
|
|
3138
|
-
/**
|
|
3139
|
-
* Creates a wormhole compressing a range of patches.
|
|
3140
|
-
*
|
|
3141
|
-
* A wormhole is a compressed representation of a contiguous range of patches
|
|
3142
|
-
* from a single writer. It preserves provenance by storing the original
|
|
3143
|
-
* patches as a ProvenancePayload that can be replayed during materialization.
|
|
3144
|
-
*
|
|
3145
|
-
* **Key Properties:**
|
|
3146
|
-
* - **Provenance Preservation**: The wormhole contains the full sub-payload,
|
|
3147
|
-
* allowing exact replay of the compressed segment.
|
|
3148
|
-
* - **Monoid Composition**: Two consecutive wormholes can be composed by
|
|
3149
|
-
* concatenating their sub-payloads (use `WormholeService.composeWormholes`).
|
|
3150
|
-
* - **Materialization Equivalence**: A wormhole + remaining patches produces
|
|
3151
|
-
* the same state as materializing all patches.
|
|
3152
|
-
*
|
|
3153
|
-
* @param {string} fromSha - SHA of the first (oldest) patch commit in the range
|
|
3154
|
-
* @param {string} toSha - SHA of the last (newest) patch commit in the range
|
|
3155
|
-
* @returns {Promise<{fromSha: string, toSha: string, writerId: string, payload: import('./services/ProvenancePayload.js').default, patchCount: number}>} The created wormhole edge
|
|
3156
|
-
* @throws {WormholeError} If fromSha or toSha doesn't exist (E_WORMHOLE_SHA_NOT_FOUND)
|
|
3157
|
-
* @throws {WormholeError} If fromSha is not an ancestor of toSha (E_WORMHOLE_INVALID_RANGE)
|
|
3158
|
-
* @throws {WormholeError} If commits span multiple writers (E_WORMHOLE_MULTI_WRITER)
|
|
3159
|
-
* @throws {WormholeError} If a commit is not a patch commit (E_WORMHOLE_NOT_PATCH)
|
|
3160
|
-
*
|
|
3161
|
-
* @example
|
|
3162
|
-
* // Compress a range of patches into a wormhole
|
|
3163
|
-
* const wormhole = await graph.createWormhole('abc123...', 'def456...');
|
|
3164
|
-
* console.log(`Compressed ${wormhole.patchCount} patches`);
|
|
3165
|
-
*
|
|
3166
|
-
* // The wormhole payload can be replayed to get the same state
|
|
3167
|
-
* const state = wormhole.payload.replay();
|
|
3168
|
-
*
|
|
3169
|
-
* @example
|
|
3170
|
-
* // Compress first 50 patches, then materialize with remaining
|
|
3171
|
-
* const patches = await graph.getWriterPatches('alice');
|
|
3172
|
-
* const wormhole = await graph.createWormhole(patches[0].sha, patches[49].sha);
|
|
3173
|
-
*
|
|
3174
|
-
* // Replay wormhole then remaining patches produces same state
|
|
3175
|
-
* const wormholeState = wormhole.payload.replay();
|
|
3176
|
-
* const remainingPayload = new ProvenancePayload(patches.slice(50));
|
|
3177
|
-
* const finalState = remainingPayload.replay(wormholeState);
|
|
3178
|
-
*/
|
|
3179
|
-
async createWormhole(fromSha, toSha) {
|
|
3180
|
-
const t0 = this._clock.now();
|
|
3181
|
-
|
|
3182
|
-
try {
|
|
3183
|
-
const wormhole = await createWormholeImpl(/** @type {any} */ ({ // TODO(ts-cleanup): needs options type
|
|
3184
|
-
persistence: this._persistence,
|
|
3185
|
-
graphName: this._graphName,
|
|
3186
|
-
fromSha,
|
|
3187
|
-
toSha,
|
|
3188
|
-
codec: this._codec,
|
|
3189
|
-
}));
|
|
3190
|
-
|
|
3191
|
-
this._logTiming('createWormhole', t0, {
|
|
3192
|
-
metrics: `${wormhole.patchCount} patches from=${fromSha.slice(0, 7)} to=${toSha.slice(0, 7)}`,
|
|
3193
|
-
});
|
|
3194
|
-
|
|
3195
|
-
return wormhole;
|
|
3196
|
-
} catch (err) {
|
|
3197
|
-
this._logTiming('createWormhole', t0, { error: /** @type {Error} */ (err) });
|
|
3198
|
-
throw err;
|
|
3199
|
-
}
|
|
3200
|
-
}
|
|
3201
|
-
|
|
3202
|
-
// ============================================================================
|
|
3203
|
-
// Provenance Index API (HG/IO/2)
|
|
3204
|
-
// ============================================================================
|
|
3205
|
-
|
|
3206
|
-
/**
|
|
3207
|
-
* Returns all patch SHAs that affected a given node or edge.
|
|
3208
|
-
*
|
|
3209
|
-
* "Affected" means the patch either read from or wrote to the entity
|
|
3210
|
-
* (based on the patch's I/O declarations from HG/IO/1).
|
|
3211
|
-
*
|
|
3212
|
-
* If `autoMaterialize` is enabled, this will automatically materialize
|
|
3213
|
-
* the state if dirty. Otherwise, call `materialize()` first.
|
|
3214
|
-
*
|
|
3215
|
-
* @param {string} entityId - The node ID or edge key to query
|
|
3216
|
-
* @returns {Promise<string[]>} Array of patch SHAs that affected the entity, sorted alphabetically
|
|
3217
|
-
* @throws {QueryError} If no cached state exists and autoMaterialize is off (code: `E_NO_STATE`)
|
|
3218
|
-
*
|
|
3219
|
-
* @example
|
|
3220
|
-
* const shas = await graph.patchesFor('user:alice');
|
|
3221
|
-
* console.log(`Node user:alice was affected by ${shas.length} patches:`, shas);
|
|
3222
|
-
*
|
|
3223
|
-
* @example
|
|
3224
|
-
* // Query which patches affected an edge
|
|
3225
|
-
* const edgeKey = encodeEdgeKey('user:alice', 'user:bob', 'follows');
|
|
3226
|
-
* const edgeShas = await graph.patchesFor(edgeKey);
|
|
3227
|
-
*/
|
|
3228
|
-
async patchesFor(entityId) {
|
|
3229
|
-
await this._ensureFreshState();
|
|
3230
|
-
|
|
3231
|
-
if (this._provenanceDegraded) {
|
|
3232
|
-
throw new QueryError('Provenance unavailable for cached seek. Re-seek with --no-persistent-cache or call materialize({ ceiling }) directly.', {
|
|
3233
|
-
code: 'E_PROVENANCE_DEGRADED',
|
|
3234
|
-
});
|
|
3235
|
-
}
|
|
3236
|
-
|
|
3237
|
-
if (!this._provenanceIndex) {
|
|
3238
|
-
throw new QueryError('No provenance index. Call materialize() first.', {
|
|
3239
|
-
code: 'E_NO_STATE',
|
|
3240
|
-
});
|
|
3241
|
-
}
|
|
3242
|
-
return this._provenanceIndex.patchesFor(entityId);
|
|
3243
|
-
}
|
|
3244
|
-
|
|
3245
|
-
  // ============================================================================
  // Slice Materialization (HG/SLICE/1)
  // ============================================================================

  /**
   * Materializes only the backward causal cone for a specific node.
   *
   * This implements the slicing theorem from Paper III (Computational Holography):
   * Given a target node v, compute its backward causal cone D(v) - the set of
   * all patches that contributed to v's current state - and replay only those.
   *
   * The algorithm:
   * 1. Start with patches that directly wrote to the target node
   * 2. For each patch in the cone, find patches it depends on (via reads)
   * 3. Recursively gather all dependencies
   * 4. Topologically sort by Lamport timestamp (causal order)
   * 5. Replay the sorted patches against empty state
   *
   * **Requires a cached state.** Call materialize() first to build the provenance index.
   *
   * @param {string} nodeId - The target node ID to materialize the cone for
   * @param {{receipts?: boolean}} [options] - Optional configuration
   * @returns {Promise<{state: import('./services/JoinReducer.js').WarpStateV5, patchCount: number, receipts?: import('./types/TickReceipt.js').TickReceipt[]}>}
   *   Returns the sliced state with the patch count (for comparison with full materialization)
   * @throws {QueryError} If provenance is degraded for a cached seek (code: `E_PROVENANCE_DEGRADED`)
   * @throws {QueryError} If no provenance index exists (code: `E_NO_STATE`)
   * @throws {Error} If patch loading fails
   *
   * @example
   * await graph.materialize();
   *
   * // Materialize only the causal cone for a specific node
   * const slice = await graph.materializeSlice('user:alice');
   * console.log(`Slice required ${slice.patchCount} patches`);
   *
   * // The sliced state contains only the target node and its dependencies
   * const props = slice.state.prop;
   *
   * @example
   * // Compare with full materialization
   * const fullState = await graph.materialize();
   * const slice = await graph.materializeSlice('node:target');
   *
   * // Slice should have fewer patches (unless the entire graph is connected)
   * console.log(`Full: all patches, Slice: ${slice.patchCount} patches`);
   */
  async materializeSlice(nodeId, options) {
    const t0 = this._clock.now();
    const collectReceipts = options && options.receipts;

    try {
      // Ensure fresh state before accessing provenance index
      await this._ensureFreshState();

      // A cached seek may have been restored without provenance data.
      if (this._provenanceDegraded) {
        throw new QueryError('Provenance unavailable for cached seek. Re-seek with --no-persistent-cache or call materialize({ ceiling }) directly.', {
          code: 'E_PROVENANCE_DEGRADED',
        });
      }

      if (!this._provenanceIndex) {
        throw new QueryError('No provenance index. Call materialize() first.', {
          code: 'E_NO_STATE',
        });
      }

      // 1. Compute backward causal cone using BFS over the provenance index
      // Returns Map<sha, patch> with patches already loaded (avoids double I/O)
      const conePatchMap = await this._computeBackwardCone(nodeId);

      // 2. If no patches in cone, return empty state
      if (conePatchMap.size === 0) {
        const emptyState = createEmptyStateV5();
        this._logTiming('materializeSlice', t0, { metrics: '0 patches (empty cone)' });
        return {
          state: emptyState,
          patchCount: 0,
          // Only attach a receipts key when the caller asked for receipts.
          ...(collectReceipts ? { receipts: [] } : {}),
        };
      }

      // 3. Convert cached patches to entry format (patches already loaded by _computeBackwardCone)
      const patchEntries = [];
      for (const [sha, patch] of conePatchMap) {
        patchEntries.push({ patch, sha });
      }

      // 4. Topologically sort by causal order (Lamport timestamp, then writer, then SHA)
      const sortedPatches = this._sortPatchesCausally(patchEntries);

      // 5. Replay: use reduceV5 directly when collecting receipts, otherwise use ProvenancePayload
      // NOTE(review): timing is logged before the replay below, so the metric
      // covers cone computation + sort only — confirm that is intended.
      this._logTiming('materializeSlice', t0, { metrics: `${sortedPatches.length} patches` });

      if (collectReceipts) {
        const result = /** @type {{state: import('./services/JoinReducer.js').WarpStateV5, receipts: import('./types/TickReceipt.js').TickReceipt[]}} */ (reduceV5(sortedPatches, undefined, { receipts: true }));
        return {
          state: result.state,
          patchCount: sortedPatches.length,
          receipts: result.receipts,
        };
      }

      const payload = new ProvenancePayload(sortedPatches);
      return {
        state: payload.replay(),
        patchCount: sortedPatches.length,
      };
    } catch (err) {
      // Record the failure in timing telemetry, then propagate unchanged.
      this._logTiming('materializeSlice', t0, { error: /** @type {Error} */ (err) });
      throw err;
    }
  }
/**
|
|
3358
|
-
* Computes the backward causal cone for a node.
|
|
3359
|
-
*
|
|
3360
|
-
* Uses BFS over the provenance index:
|
|
3361
|
-
* 1. Find all patches that wrote to the target node
|
|
3362
|
-
* 2. For each patch, find entities it read from
|
|
3363
|
-
* 3. Find all patches that wrote to those entities
|
|
3364
|
-
* 4. Repeat until no new patches are found
|
|
3365
|
-
*
|
|
3366
|
-
* Returns a Map of SHA → patch to avoid double-loading (the cone
|
|
3367
|
-
* computation needs to read patches for their read-dependencies,
|
|
3368
|
-
* so we cache them for later replay).
|
|
3369
|
-
*
|
|
3370
|
-
* @param {string} nodeId - The target node ID
|
|
3371
|
-
* @returns {Promise<Map<string, Object>>} Map of patch SHA to loaded patch object
|
|
3372
|
-
* @private
|
|
3373
|
-
*/
|
|
3374
|
-
async _computeBackwardCone(nodeId) {
|
|
3375
|
-
const cone = new Map(); // sha → patch (cache loaded patches)
|
|
3376
|
-
const visited = new Set(); // Visited entities
|
|
3377
|
-
const queue = [nodeId]; // BFS queue of entities to process
|
|
3378
|
-
let qi = 0;
|
|
3379
|
-
|
|
3380
|
-
while (qi < queue.length) {
|
|
3381
|
-
const entityId = queue[qi++];
|
|
3382
|
-
|
|
3383
|
-
if (visited.has(entityId)) {
|
|
3384
|
-
continue;
|
|
3385
|
-
}
|
|
3386
|
-
visited.add(entityId);
|
|
3387
|
-
|
|
3388
|
-
// Get all patches that affected this entity
|
|
3389
|
-
const patchShas = /** @type {import('./services/ProvenanceIndex.js').ProvenanceIndex} */ (this._provenanceIndex).patchesFor(entityId);
|
|
3390
|
-
|
|
3391
|
-
for (const sha of patchShas) {
|
|
3392
|
-
if (cone.has(sha)) {
|
|
3393
|
-
continue;
|
|
3394
|
-
}
|
|
3395
|
-
|
|
3396
|
-
// Load the patch and cache it
|
|
3397
|
-
const patch = await this._loadPatchBySha(sha);
|
|
3398
|
-
cone.set(sha, patch);
|
|
3399
|
-
|
|
3400
|
-
// Add read dependencies to the queue
|
|
3401
|
-
const patchReads = /** @type {any} */ (patch)?.reads; // TODO(ts-cleanup): type patch array
|
|
3402
|
-
if (patchReads) {
|
|
3403
|
-
for (const readEntity of patchReads) {
|
|
3404
|
-
if (!visited.has(readEntity)) {
|
|
3405
|
-
queue.push(readEntity);
|
|
3406
|
-
}
|
|
3407
|
-
}
|
|
3408
|
-
}
|
|
3409
|
-
}
|
|
3410
|
-
}
|
|
3411
|
-
|
|
3412
|
-
return cone;
|
|
3413
|
-
}
|
|
3414
|
-
|
|
3415
|
-
/**
|
|
3416
|
-
* Loads a single patch by its SHA.
|
|
3417
|
-
*
|
|
3418
|
-
* @param {string} sha - The patch commit SHA
|
|
3419
|
-
* @returns {Promise<Object>} The decoded patch object
|
|
3420
|
-
* @throws {Error} If the commit is not a patch or loading fails
|
|
3421
|
-
*
|
|
3422
|
-
* @public
|
|
3423
|
-
* @remarks
|
|
3424
|
-
* Thin wrapper around the internal `_loadPatchBySha` helper. Exposed for
|
|
3425
|
-
* CLI/debug tooling (e.g. seek tick receipts) that needs to inspect patch
|
|
3426
|
-
* operations without re-materializing intermediate states.
|
|
3427
|
-
*/
|
|
3428
|
-
async loadPatchBySha(sha) {
|
|
3429
|
-
return await this._loadPatchBySha(sha);
|
|
3430
|
-
}
|
|
3431
|
-
|
|
3432
|
-
/**
|
|
3433
|
-
* Loads a single patch by its SHA.
|
|
3434
|
-
*
|
|
3435
|
-
* @param {string} sha - The patch commit SHA
|
|
3436
|
-
* @returns {Promise<Object>} The decoded patch object
|
|
3437
|
-
* @throws {Error} If the commit is not a patch or loading fails
|
|
3438
|
-
* @private
|
|
3439
|
-
*/
|
|
3440
|
-
async _loadPatchBySha(sha) {
|
|
3441
|
-
const nodeInfo = await this._persistence.getNodeInfo(sha);
|
|
3442
|
-
const kind = detectMessageKind(nodeInfo.message);
|
|
3443
|
-
|
|
3444
|
-
if (kind !== 'patch') {
|
|
3445
|
-
throw new Error(`Commit ${sha} is not a patch`);
|
|
3446
|
-
}
|
|
3447
|
-
|
|
3448
|
-
const patchMeta = decodePatchMessage(nodeInfo.message);
|
|
3449
|
-
const patchBuffer = await this._persistence.readBlob(patchMeta.patchOid);
|
|
3450
|
-
return /** @type {Object} */ (this._codec.decode(patchBuffer));
|
|
3451
|
-
}
|
|
3452
|
-
|
|
3453
|
-
/**
|
|
3454
|
-
* Loads multiple patches by their SHAs.
|
|
3455
|
-
*
|
|
3456
|
-
* @param {string[]} shas - Array of patch commit SHAs
|
|
3457
|
-
* @returns {Promise<Array<{patch: Object, sha: string}>>} Array of patch entries
|
|
3458
|
-
* @throws {Error} If any SHA is not a patch or loading fails
|
|
3459
|
-
* @private
|
|
3460
|
-
*/
|
|
3461
|
-
async _loadPatchesBySha(shas) {
|
|
3462
|
-
const entries = [];
|
|
3463
|
-
|
|
3464
|
-
for (const sha of shas) {
|
|
3465
|
-
const patch = await this._loadPatchBySha(sha);
|
|
3466
|
-
entries.push({ patch, sha });
|
|
3467
|
-
}
|
|
3468
|
-
|
|
3469
|
-
return entries;
|
|
3470
|
-
}
|
|
3471
|
-
|
|
3472
|
-
/**
|
|
3473
|
-
* Sorts patches in causal order for deterministic replay.
|
|
3474
|
-
*
|
|
3475
|
-
* Sort order: Lamport timestamp (ascending), then writer ID, then SHA.
|
|
3476
|
-
* This ensures deterministic ordering regardless of discovery order.
|
|
3477
|
-
*
|
|
3478
|
-
* @param {Array<{patch: any, sha: string}>} patches - Unsorted patch entries
|
|
3479
|
-
* @returns {Array<{patch: any, sha: string}>} Sorted patch entries
|
|
3480
|
-
* @private
|
|
3481
|
-
*/
|
|
3482
|
-
_sortPatchesCausally(patches) {
|
|
3483
|
-
return [...patches].sort((a, b) => {
|
|
3484
|
-
// Primary: Lamport timestamp (ascending - earlier patches first)
|
|
3485
|
-
const lamportDiff = (a.patch.lamport || 0) - (b.patch.lamport || 0);
|
|
3486
|
-
if (lamportDiff !== 0) {
|
|
3487
|
-
return lamportDiff;
|
|
3488
|
-
}
|
|
3489
|
-
|
|
3490
|
-
// Secondary: Writer ID (lexicographic)
|
|
3491
|
-
const writerCmp = (a.patch.writer || '').localeCompare(b.patch.writer || '');
|
|
3492
|
-
if (writerCmp !== 0) {
|
|
3493
|
-
return writerCmp;
|
|
3494
|
-
}
|
|
3495
|
-
|
|
3496
|
-
// Tertiary: SHA (lexicographic) for total ordering
|
|
3497
|
-
return a.sha.localeCompare(b.sha);
|
|
3498
|
-
});
|
|
3499
|
-
}
|
|
3500
|
-
|
|
3501
|
-
/**
|
|
3502
|
-
* Gets the temporal query interface for CTL*-style temporal operators.
|
|
3503
|
-
*
|
|
3504
|
-
* Returns a TemporalQuery instance that provides `always` and `eventually`
|
|
3505
|
-
* operators for evaluating predicates across the graph's history.
|
|
3506
|
-
*
|
|
3507
|
-
* The instance is lazily created on first access and reused thereafter.
|
|
3508
|
-
*
|
|
3509
|
-
* @returns {import('./services/TemporalQuery.js').TemporalQuery} Temporal query interface
|
|
3510
|
-
*
|
|
3511
|
-
* @example
|
|
3512
|
-
* const alwaysActive = await graph.temporal.always(
|
|
3513
|
-
* 'user:alice',
|
|
3514
|
-
* n => n.props.status === 'active',
|
|
3515
|
-
* { since: 0 }
|
|
3516
|
-
* );
|
|
3517
|
-
*
|
|
3518
|
-
* @example
|
|
3519
|
-
* const eventuallyMerged = await graph.temporal.eventually(
|
|
3520
|
-
* 'user:alice',
|
|
3521
|
-
* n => n.props.status === 'merged'
|
|
3522
|
-
* );
|
|
3523
|
-
*/
|
|
3524
|
-
get temporal() {
|
|
3525
|
-
if (!this._temporalQuery) {
|
|
3526
|
-
this._temporalQuery = new TemporalQuery({
|
|
3527
|
-
loadAllPatches: async () => {
|
|
3528
|
-
const writerIds = await this.discoverWriters();
|
|
3529
|
-
const allPatches = [];
|
|
3530
|
-
for (const writerId of writerIds) {
|
|
3531
|
-
const writerPatches = await this._loadWriterPatches(writerId);
|
|
3532
|
-
allPatches.push(...writerPatches);
|
|
3533
|
-
}
|
|
3534
|
-
return this._sortPatchesCausally(allPatches);
|
|
3535
|
-
},
|
|
3536
|
-
});
|
|
3537
|
-
}
|
|
3538
|
-
return this._temporalQuery;
|
|
3539
|
-
}
|
|
3540
|
-
|
|
3541
|
-
/**
|
|
3542
|
-
* Gets the current provenance index for this graph.
|
|
3543
|
-
*
|
|
3544
|
-
* The provenance index maps node/edge IDs to the patch SHAs that affected them.
|
|
3545
|
-
* It is built during materialization from the patches' I/O declarations.
|
|
3546
|
-
*
|
|
3547
|
-
* **Requires a cached state.** Call materialize() first if not already cached.
|
|
3548
|
-
*
|
|
3549
|
-
* @returns {import('./services/ProvenanceIndex.js').ProvenanceIndex|null} The provenance index, or null if not materialized
|
|
3550
|
-
*
|
|
3551
|
-
* @example
|
|
3552
|
-
* await graph.materialize();
|
|
3553
|
-
* const index = graph.provenanceIndex;
|
|
3554
|
-
* if (index) {
|
|
3555
|
-
* console.log(`Index contains ${index.size} entities`);
|
|
3556
|
-
* }
|
|
3557
|
-
*/
|
|
3558
|
-
get provenanceIndex() {
|
|
3559
|
-
return this._provenanceIndex;
|
|
3560
|
-
}
|
|
3561
|
-
}
|
|
407
|
+
// ── Wire extracted method groups onto WarpGraph.prototype ───────────────────
// Each entry is a group of method implementations extracted into its own
// module; wireWarpMethods attaches them to WarpGraph.prototype.
// NOTE(review): groups are applied in list order — presumably later groups
// would win on a name collision; confirm before reordering.
wireWarpMethods(WarpGraph, [
  queryMethods,
  subscribeMethods,
  provenanceMethods,
  forkMethods,
  syncMethods,
  checkpointMethods,
  patchMethods,
  materializeMethods,
  materializeAdvancedMethods,
]);
|