@git-stunts/git-warp 11.3.3 → 11.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +36 -1
- package/index.d.ts +36 -0
- package/index.js +2 -0
- package/package.json +3 -2
- package/src/domain/WarpGraph.js +22 -2
- package/src/domain/services/BitmapIndexReader.js +32 -10
- package/src/domain/services/CheckpointService.js +20 -1
- package/src/domain/services/JoinReducer.js +93 -42
- package/src/domain/services/KeyCodec.js +7 -0
- package/src/domain/services/PatchBuilderV2.js +54 -4
- package/src/domain/services/SyncController.js +576 -0
- package/src/domain/utils/validateShardOid.js +13 -0
- package/src/domain/warp/PatchSession.js +31 -0
- package/src/domain/warp/_internal.js +0 -9
- package/src/domain/warp/_wiredMethods.d.ts +1 -1
- package/src/domain/warp/query.methods.js +83 -1
- package/src/infrastructure/adapters/GitGraphAdapter.js +4 -1
- package/src/domain/warp/sync.methods.js +0 -554
|
@@ -0,0 +1,576 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SyncController - Encapsulates all sync functionality for WarpGraph.
|
|
3
|
+
*
|
|
4
|
+
* Extracted from the original sync.methods.js free functions into a
|
|
5
|
+
* service class. WarpGraph.prototype delegates directly to this controller
|
|
6
|
+
* via defineProperty loops — no intermediate stub file.
|
|
7
|
+
*
|
|
8
|
+
* @module domain/services/SyncController
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import SyncError from '../errors/SyncError.js';
|
|
12
|
+
import OperationAbortedError from '../errors/OperationAbortedError.js';
|
|
13
|
+
import { QueryError, E_NO_STATE_MSG } from '../warp/_internal.js';
|
|
14
|
+
import {
|
|
15
|
+
createSyncRequest as createSyncRequestImpl,
|
|
16
|
+
processSyncRequest as processSyncRequestImpl,
|
|
17
|
+
applySyncResponse as applySyncResponseImpl,
|
|
18
|
+
syncNeeded as syncNeededImpl,
|
|
19
|
+
} from './SyncProtocol.js';
|
|
20
|
+
import { retry, timeout, RetryExhaustedError, TimeoutError } from '@git-stunts/alfred';
|
|
21
|
+
import { checkAborted } from '../utils/cancellation.js';
|
|
22
|
+
import { createFrontier, updateFrontier } from './Frontier.js';
|
|
23
|
+
import { buildWriterRef } from '../utils/RefLayout.js';
|
|
24
|
+
import { collectGCMetrics } from './GCMetrics.js';
|
|
25
|
+
import HttpSyncServer from './HttpSyncServer.js';
|
|
26
|
+
import { signSyncRequest, canonicalizePath } from './SyncAuthService.js';
|
|
27
|
+
import { isError } from '../types/WarpErrors.js';
|
|
28
|
+
|
|
29
|
+
/** @typedef {import('../types/WarpPersistence.js').CorePersistence} CorePersistence */
|
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* The host interface that SyncController depends on.
|
|
33
|
+
*
|
|
34
|
+
* Documents the exact WarpGraph surface the controller accesses,
|
|
35
|
+
* making the coupling explicit and enabling lightweight mock hosts
|
|
36
|
+
* in unit tests.
|
|
37
|
+
*
|
|
38
|
+
* @typedef {Object} SyncHost
|
|
39
|
+
* @property {import('../services/JoinReducer.js').WarpStateV5|null} _cachedState
|
|
40
|
+
* @property {Map<string, string>|null} _lastFrontier
|
|
41
|
+
* @property {boolean} _stateDirty
|
|
42
|
+
* @property {number} _patchesSinceGC
|
|
43
|
+
* @property {string} _graphName
|
|
44
|
+
* @property {CorePersistence} _persistence
|
|
45
|
+
* @property {import('../../ports/ClockPort.js').default} _clock
|
|
46
|
+
* @property {import('../../ports/CodecPort.js').default} _codec
|
|
47
|
+
* @property {import('../../ports/CryptoPort.js').default} _crypto
|
|
48
|
+
* @property {import('../../ports/LoggerPort.js').default|null} _logger
|
|
49
|
+
* @property {number} _patchesSinceCheckpoint
|
|
50
|
+
* @property {(op: string, t0: number, opts?: {metrics?: string, error?: Error}) => void} _logTiming
|
|
51
|
+
* @property {(options?: Record<string, unknown>) => Promise<unknown>} materialize
|
|
52
|
+
* @property {() => Promise<string[]>} discoverWriters
|
|
53
|
+
*/
|
|
54
|
+
|
|
55
|
+
// ── Constants ───────────────────────────────────────────────────────────────

// Maximum accepted request body for the built-in sync server: 4 MiB.
const DEFAULT_SYNC_SERVER_MAX_BYTES = 4 * 1024 * 1024;
// Defaults for syncWith() retry/backoff/timeout behaviour (milliseconds).
const DEFAULT_SYNC_WITH_RETRIES = 3;
const DEFAULT_SYNC_WITH_BASE_DELAY_MS = 250;
const DEFAULT_SYNC_WITH_MAX_DELAY_MS = 2000;
const DEFAULT_SYNC_WITH_TIMEOUT_MS = 10_000;
|
|
62
|
+
|
|
63
|
+
// ── Private helpers ─────────────────────────────────────────────────────────
|
|
64
|
+
|
|
65
|
+
/**
 * Allocation-free value-equality check over string→string Maps.
 *
 * Only verifies that every entry of `a` is mirrored in `b`; callers pair
 * this with a size comparison when full two-way equality is required.
 *
 * @param {Map<string, string>} a
 * @param {Map<string, string>} b
 * @returns {boolean} True if every key in `a` has the same value in `b`
 */
function mapsEqual(a, b) {
  for (const entry of a) {
    const key = entry[0];
    const value = entry[1];
    if (b.get(key) !== value) {
      return false;
    }
  }
  return true;
}
|
|
80
|
+
|
|
81
|
+
/**
 * Normalizes a sync endpoint path so it always begins with '/'.
 * Falls back to '/sync' when no path is provided.
 *
 * @param {string|undefined|null} path - The sync path to normalize
 * @returns {string} Normalized path starting with '/'
 */
function normalizeSyncPath(path) {
  if (!path) {
    return '/sync';
  }
  if (path.startsWith('/')) {
    return path;
  }
  return `/${path}`;
}
|
|
94
|
+
|
|
95
|
+
/**
 * Builds auth headers for an outgoing sync request if auth is configured.
 * Returns an empty object when no usable credentials are present.
 *
 * @param {Object} params
 * @param {{ secret: string, keyId?: string }|undefined} params.auth
 * @param {string} params.bodyStr - Serialized request body
 * @param {URL} params.targetUrl
 * @param {import('../../ports/CryptoPort.js').default} params.crypto
 * @returns {Promise<Record<string, string>>}
 */
async function buildSyncAuthHeaders({ auth, bodyStr, targetUrl, crypto }) {
  const secret = auth && auth.secret;
  if (!secret) {
    return {};
  }
  const body = new TextEncoder().encode(bodyStr);
  const signedPath = canonicalizePath(targetUrl.pathname + (targetUrl.search || ''));
  const signingInput = {
    method: 'POST',
    path: signedPath,
    contentType: 'application/json',
    body,
    secret,
    keyId: auth.keyId || 'default',
  };
  return await signSyncRequest(signingInput, { crypto });
}
|
|
122
|
+
|
|
123
|
+
// ── SyncController ──────────────────────────────────────────────────────────

/**
 * Encapsulates all sync-related operations for a WarpGraph instance.
 */
export default class SyncController {
  /**
   * @param {SyncHost} host - The WarpGraph instance (or any object satisfying SyncHost)
   */
  constructor(host) {
    /** @type {SyncHost} */
    this._host = host;
  }

  /**
   * Returns the current frontier -- a Map of writerId -> tip SHA.
   *
   * @returns {Promise<Map<string, string>>} Frontier map
   * @throws {Error} If listing refs fails
   */
  async getFrontier() {
    const writerIds = await this._host.discoverWriters();
    const frontier = createFrontier();

    // Sequential ref reads: one readRef per discovered writer; writers with
    // no tip ref are simply omitted from the frontier.
    for (const writerId of writerIds) {
      const writerRef = buildWriterRef(this._host._graphName, writerId);
      const tipSha = await this._host._persistence.readRef(writerRef);
      if (tipSha) {
        updateFrontier(frontier, writerId, tipSha);
      }
    }

    return frontier;
  }

  /**
   * Checks whether any writer tip has changed since the last materialize.
   *
   * O(writers) comparison of stored writer tip SHAs against current refs.
   * Cheap "has anything changed?" check without materialization.
   *
   * @returns {Promise<boolean>} True if frontier has changed (or never materialized)
   * @throws {Error} If listing refs fails
   */
  async hasFrontierChanged() {
    // Never materialized -> everything counts as changed.
    if (this._host._lastFrontier === null) {
      return true;
    }

    const current = await this.getFrontier();

    // Size mismatch catches added/removed writers cheaply.
    if (current.size !== this._host._lastFrontier.size) {
      return true;
    }

    for (const [writerId, tipSha] of current) {
      if (this._host._lastFrontier.get(writerId) !== tipSha) {
        return true;
      }
    }

    return false;
  }

  /**
   * Returns a lightweight status snapshot of the graph's operational state.
   *
   * This method is O(writers) and does NOT trigger materialization.
   *
   * @returns {Promise<{
   *   cachedState: 'fresh' | 'stale' | 'none',
   *   patchesSinceCheckpoint: number,
   *   tombstoneRatio: number,
   *   writers: number,
   *   frontier: Record<string, string>,
   * }>} The graph status
   * @throws {Error} If listing refs fails
   */
  async status() {
    // Fetch frontier once, reuse for both staleness check and return value
    const frontier = await this.getFrontier();

    // Determine cachedState
    /** @type {'fresh' | 'stale' | 'none'} */
    let cachedState;
    if (this._host._cachedState === null) {
      cachedState = 'none';
    } else if (this._host._stateDirty || !this._host._lastFrontier ||
               frontier.size !== this._host._lastFrontier.size ||
               !mapsEqual(frontier, this._host._lastFrontier)) {
      cachedState = 'stale';
    } else {
      cachedState = 'fresh';
    }

    // patchesSinceCheckpoint
    const patchesSinceCheckpoint = this._host._patchesSinceCheckpoint;

    // tombstoneRatio (0 when there is no cached state to inspect)
    let tombstoneRatio = 0;
    if (this._host._cachedState) {
      const metrics = collectGCMetrics(this._host._cachedState);
      tombstoneRatio = metrics.tombstoneRatio;
    }

    // writers
    const writers = frontier.size;

    // Convert frontier Map to plain object
    const frontierObj = Object.fromEntries(frontier);

    return {
      cachedState,
      patchesSinceCheckpoint,
      tombstoneRatio,
      writers,
      frontier: frontierObj,
    };
  }

  /**
   * Creates a sync request to send to a remote peer.
   * The request contains the local frontier for comparison.
   *
   * @returns {Promise<import('./SyncProtocol.js').SyncRequest>} The sync request
   * @throws {Error} If listing refs fails
   */
  async createSyncRequest() {
    const frontier = await this.getFrontier();
    return createSyncRequestImpl(frontier);
  }

  /**
   * Processes an incoming sync request and returns patches the requester needs.
   *
   * @param {import('./SyncProtocol.js').SyncRequest} request - The incoming sync request
   * @returns {Promise<import('./SyncProtocol.js').SyncResponse>} The sync response
   * @throws {Error} If listing refs or reading patches fails
   */
  async processSyncRequest(request) {
    const localFrontier = await this.getFrontier();
    /** @type {CorePersistence} */
    const persistence = this._host._persistence;
    return await processSyncRequestImpl(
      request,
      localFrontier,
      persistence,
      this._host._graphName,
      { codec: this._host._codec }
    );
  }

  /**
   * Applies a sync response to the local graph state.
   * Updates the cached state with received patches.
   *
   * **Requires a cached state.**
   *
   * @param {import('./SyncProtocol.js').SyncResponse} response - The sync response
   * @returns {{state: import('./JoinReducer.js').WarpStateV5, frontier: Map<string, string>, applied: number}} Result with updated state and frontier
   * @throws {import('../errors/QueryError.js').default} If no cached state exists (code: `E_NO_STATE`)
   */
  applySyncResponse(response) {
    if (!this._host._cachedState) {
      throw new QueryError(E_NO_STATE_MSG, {
        code: 'E_NO_STATE',
      });
    }

    const currentFrontier = this._host._lastFrontier || createFrontier();
    const result = /** @type {{state: import('./JoinReducer.js').WarpStateV5, frontier: Map<string, string>, applied: number}} */ (applySyncResponseImpl(response, this._host._cachedState, currentFrontier));

    // Update cached state
    this._host._cachedState = result.state;

    // Keep _lastFrontier in sync so hasFrontierChanged() won't misreport stale.
    this._host._lastFrontier = result.frontier;

    // Track patches for GC
    this._host._patchesSinceGC += result.applied;

    // State is now in sync with the frontier -- clear dirty flag
    this._host._stateDirty = false;

    return result;
  }

  /**
   * Checks if sync is needed with a remote frontier.
   *
   * @param {Map<string, string>} remoteFrontier - The remote peer's frontier
   * @returns {Promise<boolean>} True if sync would transfer any patches
   * @throws {Error} If listing refs fails
   */
  async syncNeeded(remoteFrontier) {
    const localFrontier = await this.getFrontier();
    return syncNeededImpl(localFrontier, remoteFrontier);
  }

  /**
   * Syncs with a remote peer (HTTP or direct graph instance).
   *
   * @param {string|import('../WarpGraph.js').default} remote - URL or peer graph instance
   * @param {Object} [options]
   * @param {string} [options.path='/sync'] - Sync path (HTTP mode)
   * @param {number} [options.retries=3] - Retry count
   * @param {number} [options.baseDelayMs=250] - Base backoff delay
   * @param {number} [options.maxDelayMs=2000] - Max backoff delay
   * @param {number} [options.timeoutMs=10000] - Request timeout
   * @param {AbortSignal} [options.signal] - Abort signal
   * @param {(event: {type: string, attempt: number, durationMs?: number, status?: number, error?: Error}) => void} [options.onStatus]
   * @param {boolean} [options.materialize=false] - Auto-materialize after sync
   * @param {{ secret: string, keyId?: string }} [options.auth] - Client auth credentials
   * @returns {Promise<{applied: number, attempts: number, state?: import('./JoinReducer.js').WarpStateV5}>}
   */
  async syncWith(remote, options = {}) {
    const t0 = this._host._clock.now();
    const {
      path = '/sync',
      retries = DEFAULT_SYNC_WITH_RETRIES,
      baseDelayMs = DEFAULT_SYNC_WITH_BASE_DELAY_MS,
      maxDelayMs = DEFAULT_SYNC_WITH_MAX_DELAY_MS,
      timeoutMs = DEFAULT_SYNC_WITH_TIMEOUT_MS,
      signal,
      onStatus,
      materialize: materializeAfterSync = false,
      auth,
    } = options;

    // hasOwnProperty (not truthiness) so an explicit path always overrides
    // whatever path the remote URL already carries.
    const hasPathOverride = Object.prototype.hasOwnProperty.call(options, 'path');
    const isDirectPeer = remote && typeof remote === 'object' &&
      typeof remote.processSyncRequest === 'function';
    let targetUrl = null;
    // Resolve and validate the target URL (HTTP mode only).
    if (!isDirectPeer) {
      try {
        targetUrl = remote instanceof URL ? new URL(remote.toString()) : new URL(/** @type {string} */ (remote));
      } catch {
        throw new SyncError('Invalid remote URL', {
          code: 'E_SYNC_REMOTE_URL',
          context: { remote },
        });
      }

      if (!['http:', 'https:'].includes(targetUrl.protocol)) {
        throw new SyncError('Unsupported remote URL protocol', {
          code: 'E_SYNC_REMOTE_URL',
          context: { protocol: targetUrl.protocol },
        });
      }

      const normalizedPath = normalizeSyncPath(path);
      if (!targetUrl.pathname || targetUrl.pathname === '/') {
        targetUrl.pathname = normalizedPath;
      } else if (hasPathOverride) {
        targetUrl.pathname = normalizedPath;
      }
      targetUrl.hash = '';
    }
    let attempt = 0;
    const emit = (/** @type {string} */ type, /** @type {Record<string, unknown>} */ payload = {}) => {
      if (typeof onStatus === 'function') {
        onStatus(/** @type {{type: string, attempt: number}} */ ({ type, attempt, ...payload }));
      }
    };
    // Only transient failures (remote 5xx, timeout, network) retry, and only
    // in HTTP mode; direct-peer errors surface immediately.
    const shouldRetry = (/** @type {unknown} */ err) => {
      if (isDirectPeer) { return false; }
      if (err instanceof SyncError) {
        return ['E_SYNC_REMOTE', 'E_SYNC_TIMEOUT', 'E_SYNC_NETWORK'].includes(err.code);
      }
      return err instanceof TimeoutError;
    };
    // One sync attempt: build request, exchange, validate response, apply.
    const executeAttempt = async () => {
      checkAborted(signal, 'syncWith');
      attempt += 1;
      const attemptStart = this._host._clock.now();
      emit('connecting');
      const request = await this.createSyncRequest();
      emit('requestBuilt');
      let response;
      if (isDirectPeer) {
        emit('requestSent');
        response = await remote.processSyncRequest(request);
        emit('responseReceived');
      } else {
        emit('requestSent');
        const bodyStr = JSON.stringify(request);
        const authHeaders = await buildSyncAuthHeaders({
          auth, bodyStr, targetUrl: /** @type {URL} */ (targetUrl), crypto: this._host._crypto,
        });
        let res;
        try {
          res = await timeout(timeoutMs, (timeoutSignal) => {
            // Caller abort and per-attempt timeout both cancel the fetch.
            const combinedSignal = signal
              ? AbortSignal.any([timeoutSignal, signal])
              : timeoutSignal;
            return fetch(/** @type {URL} */ (targetUrl).toString(), {
              method: 'POST',
              headers: {
                'content-type': 'application/json',
                'accept': 'application/json',
                ...authHeaders,
              },
              body: bodyStr,
              signal: combinedSignal,
            });
          });
        } catch (err) {
          if (isError(err) && err.name === 'AbortError') {
            throw new OperationAbortedError('syncWith', { reason: 'Signal received' });
          }
          if (err instanceof TimeoutError) {
            throw new SyncError('Sync request timed out', {
              code: 'E_SYNC_TIMEOUT',
              context: { timeoutMs },
            });
          }
          throw new SyncError('Network error', {
            code: 'E_SYNC_NETWORK',
            context: { message: isError(err) ? err.message : String(err) },
          });
        }

        emit('responseReceived', { status: res.status });

        // 5xx is retryable (E_SYNC_REMOTE); 4xx is not (E_SYNC_PROTOCOL).
        if (res.status >= 500) {
          throw new SyncError(`Remote error: ${res.status}`, {
            code: 'E_SYNC_REMOTE',
            context: { status: res.status },
          });
        }

        if (res.status >= 400) {
          throw new SyncError(`Protocol error: ${res.status}`, {
            code: 'E_SYNC_PROTOCOL',
            context: { status: res.status },
          });
        }

        try {
          response = await res.json();
        } catch {
          throw new SyncError('Invalid JSON response', {
            code: 'E_SYNC_PROTOCOL',
            context: { status: res.status },
          });
        }
      }

      // Structural validation of the response before touching local state.
      if (!response || typeof response !== 'object' ||
          response.type !== 'sync-response' ||
          !response.frontier || typeof response.frontier !== 'object' || Array.isArray(response.frontier) ||
          !Array.isArray(response.patches)) {
        throw new SyncError('Invalid sync response', {
          code: 'E_SYNC_PROTOCOL',
        });
      }

      // applySyncResponse() requires a cached state -- materialize on demand.
      if (!this._host._cachedState) {
        await this._host.materialize();
        emit('materialized');
      }

      const result = this.applySyncResponse(response);
      emit('applied', { applied: result.applied });

      const durationMs = this._host._clock.now() - attemptStart;
      emit('complete', { durationMs, applied: result.applied });
      return { applied: result.applied, attempts: attempt };
    };

    try {
      const syncResult = await retry(executeAttempt, {
        retries,
        delay: baseDelayMs,
        maxDelay: maxDelayMs,
        backoff: 'exponential',
        jitter: 'decorrelated',
        signal,
        shouldRetry,
        onRetry: (/** @type {Error} */ error, /** @type {number} */ attemptNumber, /** @type {number} */ delayMs) => {
          if (typeof onStatus === 'function') {
            onStatus(/** @type {{type: string, attempt: number, delayMs: number, error: Error}} */ ({ type: 'retrying', attempt: attemptNumber, delayMs, error }));
          }
        },
      });

      this._host._logTiming('syncWith', t0, { metrics: `${syncResult.applied} patches applied` });

      if (materializeAfterSync) {
        if (!this._host._cachedState) { await this._host.materialize(); }
        return { ...syncResult, state: /** @type {import('./JoinReducer.js').WarpStateV5} */ (this._host._cachedState) };
      }
      return syncResult;
    } catch (err) {
      this._host._logTiming('syncWith', t0, { error: /** @type {Error} */ (err) });
      // Abort wins over retry-exhaustion: normalize to OperationAbortedError.
      if (isError(err) && err.name === 'AbortError') {
        const abortedError = new OperationAbortedError('syncWith', { reason: 'Signal received' });
        if (typeof onStatus === 'function') {
          onStatus({ type: 'failed', attempt, error: abortedError });
        }
        throw abortedError;
      }
      // Unwrap RetryExhaustedError so callers see the underlying cause.
      if (err instanceof RetryExhaustedError) {
        const cause = /** @type {Error} */ (err.cause || err);
        if (typeof onStatus === 'function') {
          onStatus({ type: 'failed', attempt: err.attempts, error: cause });
        }
        throw cause;
      }
      if (typeof onStatus === 'function') {
        onStatus({ type: 'failed', attempt, error: /** @type {Error} */ (err) });
      }
      throw err;
    }
  }

  /**
   * Starts a built-in sync server for this graph.
   *
   * @param {Object} options
   * @param {number} options.port - Port to listen on
   * @param {string} [options.host='127.0.0.1'] - Host to bind
   * @param {string} [options.path='/sync'] - Path to handle sync requests
   * @param {number} [options.maxRequestBytes=4194304] - Max request size in bytes
   * @param {import('../../ports/HttpServerPort.js').default} options.httpPort - HTTP server adapter
   * @param {{ keys: Record<string, string>, mode?: 'enforce'|'log-only' }} [options.auth] - Auth configuration
   * @returns {Promise<{close: () => Promise<void>, url: string}>} Server handle
   * @throws {Error} If port is not a number
   * @throws {Error} If httpPort adapter is not provided
   */
  async serve({ port, host = '127.0.0.1', path = '/sync', maxRequestBytes = DEFAULT_SYNC_SERVER_MAX_BYTES, httpPort, auth } = /** @type {{ port: number, httpPort: import('../../ports/HttpServerPort.js').default }} */ ({})) {
    if (typeof port !== 'number') {
      throw new Error('serve() requires a numeric port');
    }
    if (!httpPort) {
      throw new Error('serve() requires an httpPort adapter');
    }

    // Wire the host's crypto/logger into the server's auth verification.
    const authConfig = auth
      ? { ...auth, crypto: this._host._crypto, logger: this._host._logger || undefined }
      : undefined;

    const httpServer = new HttpSyncServer({
      httpPort,
      graph: /** @type {{ processSyncRequest: Function }} */ (/** @type {unknown} */ (this._host)),
      path,
      host,
      maxRequestBytes,
      auth: authConfig,
    });

    return await httpServer.listen(port);
  }
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/**
 * Validates a shard Object ID (hex string, 4-64 chars).
 *
 * The 4-character minimum accommodates abbreviated OIDs used in test
 * fixtures and short internal IDs. Full Git SHA-1 OIDs are 40 chars;
 * SHA-256 OIDs are 64 chars.
 *
 * @param {string} oid - The OID to validate
 * @returns {boolean} True if oid is a valid hex string of 4-64 characters
 */
export function isValidShardOid(oid) {
  if (typeof oid !== 'string') {
    return false;
  }
  return /^[0-9a-fA-F]{4,64}$/.test(oid);
}
|
|
@@ -148,6 +148,37 @@ export class PatchSession {
|
|
|
148
148
|
return this;
|
|
149
149
|
}
|
|
150
150
|
|
|
151
|
+
  /**
   * Attaches content to a node.
   *
   * Guards against use after commit, then delegates to the underlying
   * builder. Returns the session so calls can be chained fluently.
   *
   * @param {string} nodeId - The node ID to attach content to
   * @param {Buffer|string} content - The content to attach
   * @returns {Promise<this>} This session for chaining
   * @throws {Error} If this session has already been committed
   */
  async attachContent(nodeId, content) {
    this._ensureNotCommitted();
    await this._builder.attachContent(nodeId, content);
    return this;
  }
|
|
164
|
+
|
|
165
|
+
  /**
   * Attaches content to an edge.
   *
   * Guards against use after commit, then delegates to the underlying
   * builder. Returns the session so calls can be chained fluently.
   *
   * @param {string} from - Source node ID
   * @param {string} to - Target node ID
   * @param {string} label - Edge label/type
   * @param {Buffer|string} content - The content to attach
   * @returns {Promise<this>} This session for chaining
   * @throws {Error} If this session has already been committed
   */
  // eslint-disable-next-line max-params -- direct delegate matching PatchBuilderV2 signature
  async attachEdgeContent(from, to, label, content) {
    this._ensureNotCommitted();
    await this._builder.attachEdgeContent(from, to, label, content);
    return this;
  }
|
|
181
|
+
|
|
151
182
|
/**
|
|
152
183
|
* Builds the PatchV2 object without committing.
|
|
153
184
|
*
|
|
@@ -10,17 +10,8 @@
|
|
|
10
10
|
// ── Error constructors ──────────────────────────────────────────────────────
|
|
11
11
|
export { default as QueryError } from '../errors/QueryError.js';
|
|
12
12
|
export { default as ForkError } from '../errors/ForkError.js';
|
|
13
|
-
export { default as SyncError } from '../errors/SyncError.js';
|
|
14
|
-
export { default as OperationAbortedError } from '../errors/OperationAbortedError.js';
|
|
15
13
|
|
|
16
14
|
// ── Shared constants ────────────────────────────────────────────────────────
|
|
17
15
|
export const DEFAULT_ADJACENCY_CACHE_SIZE = 3;
|
|
18
16
|
export const E_NO_STATE_MSG = 'No materialized state. Call materialize() before querying, or use autoMaterialize: true (the default). See https://github.com/git-stunts/git-warp#materialization';
|
|
19
17
|
export const E_STALE_STATE_MSG = 'State is stale (patches written since last materialize). Call materialize() to refresh. See https://github.com/git-stunts/git-warp#materialization';
|
|
20
|
-
|
|
21
|
-
// ── Sync constants ──────────────────────────────────────────────────────────
|
|
22
|
-
export const DEFAULT_SYNC_SERVER_MAX_BYTES = 4 * 1024 * 1024;
|
|
23
|
-
export const DEFAULT_SYNC_WITH_RETRIES = 3;
|
|
24
|
-
export const DEFAULT_SYNC_WITH_BASE_DELAY_MS = 250;
|
|
25
|
-
export const DEFAULT_SYNC_WITH_MAX_DELAY_MS = 2000;
|
|
26
|
-
export const DEFAULT_SYNC_WITH_TIMEOUT_MS = 10_000;
|
|
@@ -193,7 +193,7 @@ declare module '../WarpGraph.js' {
|
|
|
193
193
|
_relationToCheckpointHead(ckHead: string, incomingSha: string): Promise<string>;
|
|
194
194
|
_validatePatchAgainstCheckpoint(writerId: string, incomingSha: string, checkpoint: unknown): Promise<void>;
|
|
195
195
|
|
|
196
|
-
// ──
|
|
196
|
+
// ── SyncController (direct delegation) ─────────────────────────────────
|
|
197
197
|
getFrontier(): Promise<Map<string, string>>;
|
|
198
198
|
hasFrontierChanged(): Promise<boolean>;
|
|
199
199
|
status(): Promise<WarpGraphStatus>;
|