trellis 2.0.13 → 2.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +1 -1
- package/dist/embeddings/index.js +1 -1
- package/dist/{index-7gvjxt27.js → index-2917tjd8.js} +1 -1
- package/package.json +2 -10
- package/dist/transformers.node-bx3q9d7k.js +0 -33130
- package/src/cli/index.ts +0 -3356
- package/src/core/agents/harness.ts +0 -380
- package/src/core/agents/index.ts +0 -18
- package/src/core/agents/types.ts +0 -90
- package/src/core/index.ts +0 -118
- package/src/core/kernel/middleware.ts +0 -44
- package/src/core/kernel/trellis-kernel.ts +0 -593
- package/src/core/ontology/builtins.ts +0 -248
- package/src/core/ontology/index.ts +0 -34
- package/src/core/ontology/registry.ts +0 -209
- package/src/core/ontology/types.ts +0 -124
- package/src/core/ontology/validator.ts +0 -382
- package/src/core/persist/backend.ts +0 -74
- package/src/core/persist/sqlite-backend.ts +0 -298
- package/src/core/plugins/index.ts +0 -17
- package/src/core/plugins/registry.ts +0 -322
- package/src/core/plugins/types.ts +0 -126
- package/src/core/query/datalog.ts +0 -188
- package/src/core/query/engine.ts +0 -370
- package/src/core/query/index.ts +0 -34
- package/src/core/query/parser.ts +0 -481
- package/src/core/query/types.ts +0 -200
- package/src/core/store/eav-store.ts +0 -467
- package/src/decisions/auto-capture.ts +0 -136
- package/src/decisions/hooks.ts +0 -163
- package/src/decisions/index.ts +0 -261
- package/src/decisions/types.ts +0 -103
- package/src/embeddings/auto-embed.ts +0 -248
- package/src/embeddings/chunker.ts +0 -327
- package/src/embeddings/index.ts +0 -48
- package/src/embeddings/model.ts +0 -112
- package/src/embeddings/search.ts +0 -305
- package/src/embeddings/store.ts +0 -313
- package/src/embeddings/types.ts +0 -92
- package/src/engine.ts +0 -1125
- package/src/garden/cluster.ts +0 -330
- package/src/garden/garden.ts +0 -306
- package/src/garden/index.ts +0 -29
- package/src/git/git-exporter.ts +0 -286
- package/src/git/git-importer.ts +0 -329
- package/src/git/git-reader.ts +0 -189
- package/src/git/index.ts +0 -22
- package/src/identity/governance.ts +0 -211
- package/src/identity/identity.ts +0 -224
- package/src/identity/index.ts +0 -30
- package/src/identity/signing-middleware.ts +0 -97
- package/src/index.ts +0 -29
- package/src/links/index.ts +0 -49
- package/src/links/lifecycle.ts +0 -400
- package/src/links/parser.ts +0 -484
- package/src/links/ref-index.ts +0 -186
- package/src/links/resolver.ts +0 -314
- package/src/links/types.ts +0 -108
- package/src/mcp/index.ts +0 -22
- package/src/mcp/server.ts +0 -1278
- package/src/semantic/csharp-parser.ts +0 -493
- package/src/semantic/go-parser.ts +0 -585
- package/src/semantic/index.ts +0 -34
- package/src/semantic/java-parser.ts +0 -456
- package/src/semantic/python-parser.ts +0 -659
- package/src/semantic/ruby-parser.ts +0 -446
- package/src/semantic/rust-parser.ts +0 -784
- package/src/semantic/semantic-merge.ts +0 -210
- package/src/semantic/ts-parser.ts +0 -681
- package/src/semantic/types.ts +0 -175
- package/src/sync/http-transport.ts +0 -144
- package/src/sync/index.ts +0 -43
- package/src/sync/memory-transport.ts +0 -66
- package/src/sync/multi-repo.ts +0 -200
- package/src/sync/reconciler.ts +0 -237
- package/src/sync/sync-engine.ts +0 -258
- package/src/sync/types.ts +0 -104
- package/src/sync/ws-transport.ts +0 -145
- package/src/ui/client.html +0 -695
- package/src/ui/server.ts +0 -419
- package/src/vcs/blob-store.ts +0 -124
- package/src/vcs/branch.ts +0 -150
- package/src/vcs/checkpoint.ts +0 -64
- package/src/vcs/decompose.ts +0 -469
- package/src/vcs/diff.ts +0 -409
- package/src/vcs/engine-context.ts +0 -26
- package/src/vcs/index.ts +0 -23
- package/src/vcs/issue.ts +0 -800
- package/src/vcs/merge.ts +0 -425
- package/src/vcs/milestone.ts +0 -124
- package/src/vcs/ops.ts +0 -59
- package/src/vcs/types.ts +0 -213
- package/src/vcs/vcs-middleware.ts +0 -81
- package/src/watcher/fs-watcher.ts +0 -255
- package/src/watcher/index.ts +0 -9
- package/src/watcher/ingestion.ts +0 -116
package/src/sync/reconciler.ts
DELETED
|
@@ -1,237 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* CRDT Reconciler
|
|
3
|
-
*
|
|
4
|
-
* DESIGN.md §10.5 — Merges divergent op streams using causal ordering.
|
|
5
|
-
* Each device maintains its own causal chain. The reconciler merges
|
|
6
|
-
* divergent chains by:
|
|
7
|
-
* 1. Finding the common ancestor (fork point)
|
|
8
|
-
* 2. Collecting ops unique to each side
|
|
9
|
-
* 3. Topologically sorting the combined set by causal dependencies
|
|
10
|
-
* 4. Detecting conflicts using patch commutativity (§4.4)
|
|
11
|
-
*/
|
|
12
|
-
|
|
13
|
-
import type { VcsOp } from '../vcs/types.js';
|
|
14
|
-
|
|
15
|
-
// ---------------------------------------------------------------------------
|
|
16
|
-
// Types
|
|
17
|
-
// ---------------------------------------------------------------------------
|
|
18
|
-
|
|
19
|
-
/** Outcome of merging two divergent op streams — see reconcile(). */
export interface ReconcileResult {
  /** The merged op stream in causal order: shared prefix, then the unique suffixes. */
  merged: VcsOp[];
  /** Ops that were only on side A. */
  uniqueToA: VcsOp[];
  /** Ops that were only on side B. */
  uniqueToB: VcsOp[];
  /** Common ancestor op hash (fork point), or null when the streams share no op. */
  forkPoint: string | null;
  /** Whether the merge was clean (no causal conflicts). */
  clean: boolean;
  /** Conflicting op pairs (both modify same file without commutativity). */
  conflicts: ReconcileConflict[];
}
|
|
33
|
-
|
|
34
|
-
/** A pair of ops, one from each side of the fork, that mutate the same file. */
export interface ReconcileConflict {
  /** The conflicting op from side A. */
  opA: VcsOp;
  /** The conflicting op from side B. */
  opB: VcsOp;
  /** Path of the file both ops touch. */
  filePath: string;
  /** Human-readable description of why the pair conflicts. */
  reason: string;
}
|
|
40
|
-
|
|
41
|
-
// ---------------------------------------------------------------------------
|
|
42
|
-
// Core reconciliation
|
|
43
|
-
// ---------------------------------------------------------------------------
|
|
44
|
-
|
|
45
|
-
/**
|
|
46
|
-
* Find the common ancestor (fork point) of two op streams.
|
|
47
|
-
* Returns the hash of the last op that appears in both streams.
|
|
48
|
-
*/
|
|
49
|
-
export function findForkPoint(opsA: VcsOp[], opsB: VcsOp[]): string | null {
|
|
50
|
-
const hashesB = new Set(opsB.map((o) => o.hash));
|
|
51
|
-
let forkPoint: string | null = null;
|
|
52
|
-
|
|
53
|
-
for (const op of opsA) {
|
|
54
|
-
if (hashesB.has(op.hash)) {
|
|
55
|
-
forkPoint = op.hash;
|
|
56
|
-
}
|
|
57
|
-
}
|
|
58
|
-
|
|
59
|
-
return forkPoint;
|
|
60
|
-
}
|
|
61
|
-
|
|
62
|
-
/**
|
|
63
|
-
* Reconcile two divergent op streams into a single merged stream.
|
|
64
|
-
*
|
|
65
|
-
* Algorithm:
|
|
66
|
-
* 1. Find the fork point (last common op)
|
|
67
|
-
* 2. Split each stream into shared prefix + unique suffix
|
|
68
|
-
* 3. Check for conflicts in the unique portions
|
|
69
|
-
* 4. Interleave unique ops in causal (timestamp) order
|
|
70
|
-
*/
|
|
71
|
-
export function reconcile(opsA: VcsOp[], opsB: VcsOp[]): ReconcileResult {
|
|
72
|
-
const forkPoint = findForkPoint(opsA, opsB);
|
|
73
|
-
|
|
74
|
-
// Split into shared prefix and unique suffixes
|
|
75
|
-
const hashesA = new Set(opsA.map((o) => o.hash));
|
|
76
|
-
const hashesB = new Set(opsB.map((o) => o.hash));
|
|
77
|
-
|
|
78
|
-
const shared: VcsOp[] = [];
|
|
79
|
-
const uniqueToA: VcsOp[] = [];
|
|
80
|
-
const uniqueToB: VcsOp[] = [];
|
|
81
|
-
|
|
82
|
-
for (const op of opsA) {
|
|
83
|
-
if (hashesB.has(op.hash)) {
|
|
84
|
-
shared.push(op);
|
|
85
|
-
} else {
|
|
86
|
-
uniqueToA.push(op);
|
|
87
|
-
}
|
|
88
|
-
}
|
|
89
|
-
|
|
90
|
-
for (const op of opsB) {
|
|
91
|
-
if (!hashesA.has(op.hash)) {
|
|
92
|
-
uniqueToB.push(op);
|
|
93
|
-
}
|
|
94
|
-
}
|
|
95
|
-
|
|
96
|
-
// If one side has no unique ops, it's a fast-forward
|
|
97
|
-
if (uniqueToA.length === 0) {
|
|
98
|
-
return {
|
|
99
|
-
merged: [...shared, ...uniqueToB],
|
|
100
|
-
uniqueToA: [],
|
|
101
|
-
uniqueToB,
|
|
102
|
-
forkPoint,
|
|
103
|
-
clean: true,
|
|
104
|
-
conflicts: [],
|
|
105
|
-
};
|
|
106
|
-
}
|
|
107
|
-
|
|
108
|
-
if (uniqueToB.length === 0) {
|
|
109
|
-
return {
|
|
110
|
-
merged: [...shared, ...uniqueToA],
|
|
111
|
-
uniqueToA,
|
|
112
|
-
uniqueToB: [],
|
|
113
|
-
forkPoint,
|
|
114
|
-
clean: true,
|
|
115
|
-
conflicts: [],
|
|
116
|
-
};
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
// Both sides diverged — detect conflicts
|
|
120
|
-
const conflicts = detectConflicts(uniqueToA, uniqueToB);
|
|
121
|
-
|
|
122
|
-
// Merge unique ops by timestamp (causal ordering)
|
|
123
|
-
const interleaved = interleaveByTimestamp(uniqueToA, uniqueToB);
|
|
124
|
-
|
|
125
|
-
return {
|
|
126
|
-
merged: [...shared, ...interleaved],
|
|
127
|
-
uniqueToA,
|
|
128
|
-
uniqueToB,
|
|
129
|
-
forkPoint,
|
|
130
|
-
clean: conflicts.length === 0,
|
|
131
|
-
conflicts,
|
|
132
|
-
};
|
|
133
|
-
}
|
|
134
|
-
|
|
135
|
-
// ---------------------------------------------------------------------------
|
|
136
|
-
// Conflict detection
|
|
137
|
-
// ---------------------------------------------------------------------------
|
|
138
|
-
|
|
139
|
-
/** VcsOp kinds that represent file-level mutations. */
|
|
140
|
-
const FILE_MUTATION_KINDS = new Set([
|
|
141
|
-
'vcs:fileAdd',
|
|
142
|
-
'vcs:fileModify',
|
|
143
|
-
'vcs:fileDelete',
|
|
144
|
-
'vcs:fileRename',
|
|
145
|
-
]);
|
|
146
|
-
|
|
147
|
-
/**
|
|
148
|
-
* Detect conflicts between two sets of unique ops.
|
|
149
|
-
* Two ops conflict when they both mutate the same file.
|
|
150
|
-
*/
|
|
151
|
-
function detectConflicts(
|
|
152
|
-
uniqueA: VcsOp[],
|
|
153
|
-
uniqueB: VcsOp[],
|
|
154
|
-
): ReconcileConflict[] {
|
|
155
|
-
const conflicts: ReconcileConflict[] = [];
|
|
156
|
-
|
|
157
|
-
// Index A's file mutations
|
|
158
|
-
const aMutations = new Map<string, VcsOp[]>();
|
|
159
|
-
for (const op of uniqueA) {
|
|
160
|
-
if (!FILE_MUTATION_KINDS.has(op.kind) || !op.vcs?.filePath) continue;
|
|
161
|
-
const path = op.vcs.filePath;
|
|
162
|
-
if (!aMutations.has(path)) aMutations.set(path, []);
|
|
163
|
-
aMutations.get(path)!.push(op);
|
|
164
|
-
}
|
|
165
|
-
|
|
166
|
-
// Check B's file mutations against A's
|
|
167
|
-
for (const op of uniqueB) {
|
|
168
|
-
if (!FILE_MUTATION_KINDS.has(op.kind) || !op.vcs?.filePath) continue;
|
|
169
|
-
const path = op.vcs.filePath;
|
|
170
|
-
const aOps = aMutations.get(path);
|
|
171
|
-
if (!aOps) continue;
|
|
172
|
-
|
|
173
|
-
for (const aOp of aOps) {
|
|
174
|
-
// Same file modified by both sides
|
|
175
|
-
if (aOp.kind === 'vcs:fileModify' && op.kind === 'vcs:fileModify') {
|
|
176
|
-
conflicts.push({
|
|
177
|
-
opA: aOp,
|
|
178
|
-
opB: op,
|
|
179
|
-
filePath: path,
|
|
180
|
-
reason: `Both sides modified ${path}`,
|
|
181
|
-
});
|
|
182
|
-
} else if (
|
|
183
|
-
(aOp.kind === 'vcs:fileDelete' && op.kind === 'vcs:fileModify') ||
|
|
184
|
-
(aOp.kind === 'vcs:fileModify' && op.kind === 'vcs:fileDelete')
|
|
185
|
-
) {
|
|
186
|
-
conflicts.push({
|
|
187
|
-
opA: aOp,
|
|
188
|
-
opB: op,
|
|
189
|
-
filePath: path,
|
|
190
|
-
reason: `Delete/modify conflict on ${path}`,
|
|
191
|
-
});
|
|
192
|
-
} else if (aOp.kind === 'vcs:fileAdd' && op.kind === 'vcs:fileAdd') {
|
|
193
|
-
// Both added same file — conflict if different content
|
|
194
|
-
if (aOp.vcs?.contentHash !== op.vcs?.contentHash) {
|
|
195
|
-
conflicts.push({
|
|
196
|
-
opA: aOp,
|
|
197
|
-
opB: op,
|
|
198
|
-
filePath: path,
|
|
199
|
-
reason: `Both sides added ${path} with different content`,
|
|
200
|
-
});
|
|
201
|
-
}
|
|
202
|
-
}
|
|
203
|
-
}
|
|
204
|
-
}
|
|
205
|
-
|
|
206
|
-
return conflicts;
|
|
207
|
-
}
|
|
208
|
-
|
|
209
|
-
// ---------------------------------------------------------------------------
|
|
210
|
-
// Interleaving
|
|
211
|
-
// ---------------------------------------------------------------------------
|
|
212
|
-
|
|
213
|
-
/**
|
|
214
|
-
* Interleave two op arrays by timestamp, preserving causal ordering
|
|
215
|
-
* within each array.
|
|
216
|
-
*/
|
|
217
|
-
function interleaveByTimestamp(a: VcsOp[], b: VcsOp[]): VcsOp[] {
|
|
218
|
-
const result: VcsOp[] = [];
|
|
219
|
-
let ai = 0;
|
|
220
|
-
let bi = 0;
|
|
221
|
-
|
|
222
|
-
while (ai < a.length && bi < b.length) {
|
|
223
|
-
const tA = new Date(a[ai].timestamp).getTime();
|
|
224
|
-
const tB = new Date(b[bi].timestamp).getTime();
|
|
225
|
-
|
|
226
|
-
if (tA <= tB) {
|
|
227
|
-
result.push(a[ai++]);
|
|
228
|
-
} else {
|
|
229
|
-
result.push(b[bi++]);
|
|
230
|
-
}
|
|
231
|
-
}
|
|
232
|
-
|
|
233
|
-
while (ai < a.length) result.push(a[ai++]);
|
|
234
|
-
while (bi < b.length) result.push(b[bi++]);
|
|
235
|
-
|
|
236
|
-
return result;
|
|
237
|
-
}
|
package/src/sync/sync-engine.ts
DELETED
|
@@ -1,258 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Sync Engine
|
|
3
|
-
*
|
|
4
|
-
* DESIGN.md §10.5 — Peer sync protocol.
|
|
5
|
-
* Coordinates push/pull of ops between peers using a transport layer.
|
|
6
|
-
* Supports both linear (fast-forward only) and CRDT (concurrent append)
|
|
7
|
-
* branch modes.
|
|
8
|
-
*/
|
|
9
|
-
|
|
10
|
-
import type { VcsOp } from '../vcs/types.js';
|
|
11
|
-
import type {
|
|
12
|
-
SyncTransport,
|
|
13
|
-
SyncMessage,
|
|
14
|
-
SyncState,
|
|
15
|
-
PeerId,
|
|
16
|
-
BranchPolicy,
|
|
17
|
-
} from './types.js';
|
|
18
|
-
import { reconcile, findForkPoint, type ReconcileResult } from './reconciler.js';
|
|
19
|
-
|
|
20
|
-
// ---------------------------------------------------------------------------
|
|
21
|
-
// Sync Engine
|
|
22
|
-
// ---------------------------------------------------------------------------
|
|
23
|
-
|
|
24
|
-
/**
 * Peer sync engine (DESIGN.md §10.5).
 *
 * Coordinates push/pull of ops between peers over a pluggable transport.
 * Op storage lives with the caller: `getLocalOps` reads the local op
 * stream, `onOpsReceived` integrates new remote ops. Supports linear
 * (fast-forward only) and CRDT (reconciling) branch modes.
 */
export class SyncEngine {
  /** Identifier stamped on every outgoing message as `peerId`. */
  private localPeerId: string;
  /** Bookkeeping: known peer heads, un-acked hashes, last-sync timestamps. */
  private state: SyncState;
  /** Message delivery layer. */
  private transport: SyncTransport;
  /** Caller-supplied accessor for the full local op stream, in order. */
  private getLocalOps: () => VcsOp[];
  /** Caller-supplied sink that integrates newly received remote ops. */
  private onOpsReceived: (ops: VcsOp[]) => void;
  /** Linear (fast-forward only) vs CRDT reconciliation behavior. */
  private branchPolicy: BranchPolicy;

  constructor(opts: {
    localPeerId: string;
    transport: SyncTransport;
    getLocalOps: () => VcsOp[];
    onOpsReceived: (ops: VcsOp[]) => void;
    /** Defaults to linear (single-writer fast-forward) when omitted. */
    branchPolicy?: BranchPolicy;
  }) {
    this.localPeerId = opts.localPeerId;
    this.transport = opts.transport;
    this.getLocalOps = opts.getLocalOps;
    this.onOpsReceived = opts.onOpsReceived;
    this.branchPolicy = opts.branchPolicy ?? { linear: true };

    this.state = {
      localPeerId: opts.localPeerId,
      peerHeads: new Map(),
      pendingAcks: new Set(),
      lastSync: new Map(),
    };

    // Register message handler
    this.transport.onMessage((msg) => this.handleMessage(msg));
  }

  // -------------------------------------------------------------------------
  // Public API
  // -------------------------------------------------------------------------

  /**
   * Initiate a sync with a specific peer.
   * Sends a 'have' message advertising our heads.
   */
  async pushTo(peerId: string): Promise<void> {
    const ops = this.getLocalOps();
    const heads: Record<string, string> = {};
    if (ops.length > 0) {
      // Only a single 'main' head is advertised: the hash of our newest op.
      heads['main'] = ops[ops.length - 1].hash;
    }

    await this.transport.send(peerId, {
      type: 'have',
      peerId: this.localPeerId,
      heads,
      opCount: ops.length,
    });
  }

  /**
   * Request ops from a peer.
   * Asks for everything after our newest hash (everything, when we have none).
   */
  async pullFrom(peerId: string): Promise<void> {
    const ops = this.getLocalOps();
    const lastHash = ops.length > 0 ? ops[ops.length - 1].hash : undefined;

    await this.transport.send(peerId, {
      type: 'want',
      peerId: this.localPeerId,
      wantHashes: [],
      afterHash: lastHash,
    });
  }

  /**
   * Send all our ops to a peer (full push), or an explicit subset when
   * `ops` is provided.
   */
  async sendOps(peerId: string, ops?: VcsOp[]): Promise<void> {
    const opsToSend = ops ?? this.getLocalOps();
    await this.transport.send(peerId, {
      type: 'ops',
      peerId: this.localPeerId,
      ops: opsToSend,
    });
  }

  /**
   * Reconcile our ops with a remote peer's ops.
   * Pure computation — integrates nothing and sends nothing.
   */
  reconcileWith(remoteOps: VcsOp[]): ReconcileResult {
    const localOps = this.getLocalOps();
    return reconcile(localOps, remoteOps);
  }

  /**
   * Get current sync state.
   * NOTE(review): returns the live object, not a copy, so callers can
   * mutate internal state. Also, nothing in this class ever adds to
   * `state.pendingAcks`, so the deletes in handleAck are currently
   * no-ops — confirm this is intended.
   */
  getState(): SyncState {
    return this.state;
  }

  /**
   * Get branch policy.
   */
  getBranchPolicy(): BranchPolicy {
    return this.branchPolicy;
  }

  /**
   * Set branch policy.
   */
  setBranchPolicy(policy: BranchPolicy): void {
    this.branchPolicy = policy;
  }

  /**
   * List known peers (delegates to the transport).
   */
  listPeers(): PeerId[] {
    return this.transport.peers();
  }

  // -------------------------------------------------------------------------
  // Message handling
  // -------------------------------------------------------------------------

  // Dispatch on the message's `type` discriminant.
  // NOTE(review): transport.send calls made inside the handlers below are
  // fire-and-forget (promises not awaited); a rejecting transport would
  // surface as an unhandled rejection — confirm transports never reject.
  private handleMessage(msg: SyncMessage): void {
    switch (msg.type) {
      case 'have':
        this.handleHave(msg);
        break;
      case 'want':
        this.handleWant(msg);
        break;
      case 'ops':
        this.handleOps(msg);
        break;
      case 'ack':
        this.handleAck(msg);
        break;
    }
  }

  /** Peer advertised its heads: decide whether to pull from or push to it. */
  private handleHave(msg: Extract<SyncMessage, { type: 'have' }>): void {
    // Store peer heads
    this.state.peerHeads.set(msg.peerId, msg.heads);

    // Compare with our state — determine what we need
    const localOps = this.getLocalOps();
    const localHashes = new Set(localOps.map((o) => o.hash));

    // Check if peer has ops we don't
    for (const [, hash] of Object.entries(msg.heads)) {
      if (!localHashes.has(hash)) {
        // Peer is ahead — request their ops
        this.transport.send(msg.peerId, {
          type: 'want',
          peerId: this.localPeerId,
          wantHashes: [],
          afterHash: localOps.length > 0 ? localOps[localOps.length - 1].hash : undefined,
        });
        return;
      }
    }

    // Check if we have ops they don't — push them
    // NOTE(review): compares raw op counts as a proxy for "what they're
    // missing", which is only correct when the peer's stream is a prefix
    // of ours — confirm acceptable for linear mode.
    const peerOpCount = msg.opCount;
    if (localOps.length > peerOpCount) {
      // Send ops they might be missing
      const opsToSend = localOps.slice(peerOpCount);
      this.transport.send(msg.peerId, {
        type: 'ops',
        peerId: this.localPeerId,
        ops: opsToSend,
      });
    }
  }

  /** Peer requested ops: reply with the slice or subset it asked for. */
  private handleWant(msg: Extract<SyncMessage, { type: 'want' }>): void {
    const localOps = this.getLocalOps();

    let opsToSend: VcsOp[];
    if (msg.afterHash) {
      // Everything after the given hash; the full stream if we don't know it.
      const idx = localOps.findIndex((o) => o.hash === msg.afterHash);
      opsToSend = idx >= 0 ? localOps.slice(idx + 1) : localOps;
    } else if (msg.wantHashes.length > 0) {
      // Only the specific hashes requested.
      const wanted = new Set(msg.wantHashes);
      opsToSend = localOps.filter((o) => wanted.has(o.hash));
    } else {
      // No filter given — send everything.
      opsToSend = localOps;
    }

    if (opsToSend.length > 0) {
      this.transport.send(msg.peerId, {
        type: 'ops',
        peerId: this.localPeerId,
        ops: opsToSend,
      });
    }
  }

  /** Peer sent ops: integrate per branch policy, then acknowledge. */
  private handleOps(msg: Extract<SyncMessage, { type: 'ops' }>): void {
    if (msg.ops.length === 0) return;

    if (this.branchPolicy.linear) {
      // Linear mode: only accept fast-forward appends
      const localOps = this.getLocalOps();
      const localHashes = new Set(localOps.map((o) => o.hash));

      // Filter to only new ops
      const newOps = msg.ops.filter((o) => !localHashes.has(o.hash));
      if (newOps.length > 0) {
        this.onOpsReceived(newOps);
      }
    } else {
      // CRDT mode: reconcile divergent streams
      const result = this.reconcileWith(msg.ops);
      if (result.uniqueToB.length > 0) {
        this.onOpsReceived(result.uniqueToB);
      }
    }

    // Acknowledge — note every hash in the message is echoed back,
    // including ops we already had and did not re-integrate.
    this.transport.send(msg.peerId, {
      type: 'ack',
      peerId: this.localPeerId,
      integrated: msg.ops.map((o) => o.hash),
    });

    this.state.lastSync.set(msg.peerId, new Date().toISOString());
  }

  /** Peer acknowledged ops: clear them from the pending set. */
  private handleAck(msg: Extract<SyncMessage, { type: 'ack' }>): void {
    for (const hash of msg.integrated) {
      this.state.pendingAcks.delete(hash);
    }
    this.state.lastSync.set(msg.peerId, new Date().toISOString());
  }
}
|
package/src/sync/types.ts
DELETED
|
@@ -1,104 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Peer Sync — Type Definitions
|
|
3
|
-
*
|
|
4
|
-
* DESIGN.md §3.5, §10.5 — Peer sync + CRDTs.
|
|
5
|
-
* Types for peer identity, sync messages, causal DAG, and
|
|
6
|
-
* branch concurrency modes.
|
|
7
|
-
*/
|
|
8
|
-
|
|
9
|
-
import type { VcsOp } from '../vcs/types.js';
|
|
10
|
-
|
|
11
|
-
// ---------------------------------------------------------------------------
|
|
12
|
-
// Peer Identity
|
|
13
|
-
// ---------------------------------------------------------------------------
|
|
14
|
-
|
|
15
|
-
/** Identity of a remote peer as seen by the sync layer. */
export interface PeerId {
  /** Unique peer identifier (typically derived from identity DID). */
  id: string;
  /** Human-readable display name. */
  name: string;
  /** Last seen timestamp — presumably ISO 8601 like SyncState.lastSync; confirm against transports. */
  lastSeen?: string;
}
|
|
23
|
-
|
|
24
|
-
// ---------------------------------------------------------------------------
|
|
25
|
-
// Sync Messages
|
|
26
|
-
// ---------------------------------------------------------------------------
|
|
27
|
-
|
|
28
|
-
/** All sync protocol messages, discriminated on `type`. */
export type SyncMessage =
  | SyncHaveMessage
  | SyncWantMessage
  | SyncOpsMessage
  | SyncAckMessage;

/** Advertise which op hashes we have. */
export interface SyncHaveMessage {
  type: 'have';
  /** Sending peer's id. */
  peerId: string;
  /** Our head op hashes (one per branch). */
  heads: Record<string, string>;
  /** Total op count for quick comparison. */
  opCount: number;
}

/** Request ops we're missing. */
export interface SyncWantMessage {
  type: 'want';
  /** Sending peer's id. */
  peerId: string;
  /** Op hashes we need (those the remote has but we don't). */
  wantHashes: string[];
  /** Alternatively: request all ops after a given hash. */
  afterHash?: string;
}

/** Send a batch of ops. */
export interface SyncOpsMessage {
  type: 'ops';
  /** Sending peer's id. */
  peerId: string;
  /** The ops being transferred. */
  ops: VcsOp[];
}

/** Acknowledge receipt. */
export interface SyncAckMessage {
  type: 'ack';
  /** Sending peer's id. */
  peerId: string;
  /** Hashes of ops we've integrated. */
  integrated: string[];
}
|
|
68
|
-
|
|
69
|
-
// ---------------------------------------------------------------------------
|
|
70
|
-
// Sync State
|
|
71
|
-
// ---------------------------------------------------------------------------
|
|
72
|
-
|
|
73
|
-
/** Mutable bookkeeping maintained by the sync engine across messages. */
export interface SyncState {
  /** Our peer identity. */
  localPeerId: string;
  /** Known peers and their head hashes (peer id → branch name → head hash). */
  peerHeads: Map<string, Record<string, string>>;
  /** Ops we've sent but not yet acknowledged. */
  pendingAcks: Set<string>;
  /** Last sync timestamp per peer (ISO 8601). */
  lastSync: Map<string, string>;
}
|
|
83
|
-
|
|
84
|
-
// ---------------------------------------------------------------------------
|
|
85
|
-
// Branch Concurrency Policy
|
|
86
|
-
// ---------------------------------------------------------------------------
|
|
87
|
-
|
|
88
|
-
/** Controls how concurrent writes to a branch are handled. */
export interface BranchPolicy {
  /** If true, only fast-forward appends (one writer). Default. */
  linear: boolean;
}
|
|
92
|
-
|
|
93
|
-
// ---------------------------------------------------------------------------
|
|
94
|
-
// Sync Transport (abstract interface)
|
|
95
|
-
// ---------------------------------------------------------------------------
|
|
96
|
-
|
|
97
|
-
/** Abstract message delivery layer the sync engine runs on top of. */
export interface SyncTransport {
  /** Send a message to a specific peer. */
  send(peerId: string, message: SyncMessage): Promise<void>;
  /** Register a handler for incoming messages. */
  onMessage(handler: (message: SyncMessage) => void): void;
  /** List connected peers. */
  peers(): PeerId[];
}
|