@grafema/rfdb-client 0.2.12-beta → 0.3.1-beta
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/base-client.d.ts +156 -0
- package/dist/base-client.d.ts.map +1 -0
- package/dist/base-client.js +591 -0
- package/dist/base-client.js.map +1 -0
- package/dist/client.d.ts +14 -319
- package/dist/client.d.ts.map +1 -1
- package/dist/client.js +20 -740
- package/dist/client.js.map +1 -1
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -1
- package/dist/websocket-client.d.ts +41 -0
- package/dist/websocket-client.d.ts.map +1 -0
- package/dist/websocket-client.js +144 -0
- package/dist/websocket-client.js.map +1 -0
- package/package.json +2 -2
- package/ts/base-client.ts +737 -0
- package/ts/client.ts +28 -863
- package/ts/index.ts +2 -0
- package/ts/rfdb-client-locking.test.ts +897 -0
- package/ts/rfdb-websocket-client.test.ts +572 -0
- package/ts/websocket-client.ts +174 -0
package/ts/client.ts
CHANGED
|
@@ -7,34 +7,17 @@
|
|
|
7
7
|
|
|
8
8
|
import { createConnection, Socket } from 'net';
|
|
9
9
|
import { encode, decode } from '@msgpack/msgpack';
|
|
10
|
-
import { EventEmitter } from 'events';
|
|
11
10
|
import { StreamQueue } from './stream-queue.js';
|
|
11
|
+
import { BaseRFDBClient } from './base-client.js';
|
|
12
12
|
|
|
13
13
|
import type {
|
|
14
14
|
RFDBCommand,
|
|
15
15
|
WireNode,
|
|
16
|
-
WireEdge,
|
|
17
16
|
RFDBResponse,
|
|
18
|
-
IRFDBClient,
|
|
19
17
|
AttrQuery,
|
|
20
|
-
FieldDeclaration,
|
|
21
|
-
DatalogResult,
|
|
22
|
-
NodeType,
|
|
23
|
-
EdgeType,
|
|
24
18
|
HelloResponse,
|
|
25
|
-
CreateDatabaseResponse,
|
|
26
|
-
OpenDatabaseResponse,
|
|
27
|
-
ListDatabasesResponse,
|
|
28
|
-
CurrentDatabaseResponse,
|
|
29
|
-
SnapshotRef,
|
|
30
|
-
SnapshotDiff,
|
|
31
|
-
SnapshotInfo,
|
|
32
|
-
DiffSnapshotsResponse,
|
|
33
|
-
FindSnapshotResponse,
|
|
34
|
-
ListSnapshotsResponse,
|
|
35
|
-
CommitDelta,
|
|
36
|
-
CommitBatchResponse,
|
|
37
19
|
NodesChunkResponse,
|
|
20
|
+
CommitDelta,
|
|
38
21
|
} from '@grafema/types';
|
|
39
22
|
|
|
40
23
|
interface PendingRequest {
|
|
@@ -42,28 +25,24 @@ interface PendingRequest {
|
|
|
42
25
|
reject: (error: Error) => void;
|
|
43
26
|
}
|
|
44
27
|
|
|
45
|
-
export class RFDBClient extends
|
|
28
|
+
export class RFDBClient extends BaseRFDBClient {
|
|
46
29
|
readonly socketPath: string;
|
|
30
|
+
readonly clientName: string;
|
|
47
31
|
private socket: Socket | null;
|
|
48
32
|
connected: boolean;
|
|
49
33
|
private pending: Map<number, PendingRequest>;
|
|
50
34
|
private reqId: number;
|
|
51
35
|
private buffer: Buffer;
|
|
52
36
|
|
|
53
|
-
// Batch state
|
|
54
|
-
private _batching: boolean = false;
|
|
55
|
-
private _batchNodes: WireNode[] = [];
|
|
56
|
-
private _batchEdges: WireEdge[] = [];
|
|
57
|
-
private _batchFiles: Set<string> = new Set();
|
|
58
|
-
|
|
59
37
|
// Streaming state
|
|
60
38
|
private _supportsStreaming: boolean = false;
|
|
61
39
|
private _pendingStreams: Map<number, StreamQueue<WireNode>> = new Map();
|
|
62
40
|
private _streamTimers: Map<number, ReturnType<typeof setTimeout>> = new Map();
|
|
63
41
|
|
|
64
|
-
constructor(socketPath: string = '/tmp/rfdb.sock') {
|
|
42
|
+
constructor(socketPath: string = '/tmp/rfdb.sock', clientName: string = 'unknown') {
|
|
65
43
|
super();
|
|
66
44
|
this.socketPath = socketPath;
|
|
45
|
+
this.clientName = clientName;
|
|
67
46
|
this.socket = null;
|
|
68
47
|
this.connected = false;
|
|
69
48
|
this.pending = new Map();
|
|
@@ -75,7 +54,7 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
75
54
|
* Whether the connected server supports streaming responses.
|
|
76
55
|
* Set after calling hello(). Defaults to false.
|
|
77
56
|
*/
|
|
78
|
-
get supportsStreaming(): boolean {
|
|
57
|
+
override get supportsStreaming(): boolean {
|
|
79
58
|
return this._supportsStreaming;
|
|
80
59
|
}
|
|
81
60
|
|
|
@@ -192,7 +171,7 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
192
171
|
}
|
|
193
172
|
|
|
194
173
|
/**
|
|
195
|
-
* Handle decoded response
|
|
174
|
+
* Handle decoded response -- match by requestId, route streaming chunks
|
|
196
175
|
* to StreamQueue or resolve single-response Promise.
|
|
197
176
|
*/
|
|
198
177
|
private _handleResponse(response: RFDBResponse): void {
|
|
@@ -227,7 +206,7 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
227
206
|
return;
|
|
228
207
|
}
|
|
229
208
|
|
|
230
|
-
// Non-streaming response
|
|
209
|
+
// Non-streaming response -- existing behavior
|
|
231
210
|
if (!this.pending.has(id)) {
|
|
232
211
|
this.emit('error', new Error(`Received response for unknown requestId: ${response.requestId}`));
|
|
233
212
|
return;
|
|
@@ -245,22 +224,18 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
245
224
|
|
|
246
225
|
/**
|
|
247
226
|
* Handle a response for a streaming request.
|
|
248
|
-
* Routes chunk data to StreamQueue and manages stream lifecycle.
|
|
249
|
-
* Resets per-chunk timeout on each successful chunk arrival.
|
|
250
227
|
*/
|
|
251
228
|
private _handleStreamingResponse(
|
|
252
229
|
id: number,
|
|
253
230
|
response: RFDBResponse,
|
|
254
231
|
streamQueue: StreamQueue<WireNode>,
|
|
255
232
|
): void {
|
|
256
|
-
// Error response — fail the stream
|
|
257
233
|
if (response.error) {
|
|
258
234
|
this._cleanupStream(id);
|
|
259
235
|
streamQueue.fail(new Error(response.error));
|
|
260
236
|
return;
|
|
261
237
|
}
|
|
262
238
|
|
|
263
|
-
// Streaming chunk (has `done` field)
|
|
264
239
|
if ('done' in response) {
|
|
265
240
|
const chunk = response as unknown as NodesChunkResponse;
|
|
266
241
|
const nodes = chunk.nodes || [];
|
|
@@ -272,14 +247,12 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
272
247
|
this._cleanupStream(id);
|
|
273
248
|
streamQueue.end();
|
|
274
249
|
} else {
|
|
275
|
-
// Reset per-chunk timeout
|
|
276
250
|
this._resetStreamTimer(id, streamQueue);
|
|
277
251
|
}
|
|
278
252
|
return;
|
|
279
253
|
}
|
|
280
254
|
|
|
281
255
|
// Auto-fallback: server sent a non-streaming Nodes response
|
|
282
|
-
// (server doesn't support streaming or result was below threshold)
|
|
283
256
|
const nodesResponse = response as unknown as { nodes?: WireNode[] };
|
|
284
257
|
const nodes = nodesResponse.nodes || [];
|
|
285
258
|
for (const node of nodes) {
|
|
@@ -289,9 +262,6 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
289
262
|
streamQueue.end();
|
|
290
263
|
}
|
|
291
264
|
|
|
292
|
-
/**
|
|
293
|
-
* Reset the per-chunk timeout for a streaming request.
|
|
294
|
-
*/
|
|
295
265
|
private _resetStreamTimer(id: number, streamQueue: StreamQueue<WireNode>): void {
|
|
296
266
|
const existing = this._streamTimers.get(id);
|
|
297
267
|
if (existing) clearTimeout(existing);
|
|
@@ -306,9 +276,6 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
306
276
|
this._streamTimers.set(id, timer);
|
|
307
277
|
}
|
|
308
278
|
|
|
309
|
-
/**
|
|
310
|
-
* Clean up all state for a completed/failed streaming request.
|
|
311
|
-
*/
|
|
312
279
|
private _cleanupStream(id: number): void {
|
|
313
280
|
this._pendingStreams.delete(id);
|
|
314
281
|
this.pending.delete(id);
|
|
@@ -325,19 +292,15 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
325
292
|
return Number.isNaN(num) ? null : num;
|
|
326
293
|
}
|
|
327
294
|
|
|
328
|
-
/**
|
|
329
|
-
* Default timeout for operations (60 seconds)
|
|
330
|
-
* Flush/compact may take time for large graphs, but should not hang indefinitely
|
|
331
|
-
*/
|
|
332
295
|
private static readonly DEFAULT_TIMEOUT_MS = 60_000;
|
|
333
296
|
|
|
334
297
|
/**
|
|
335
298
|
* Send a request and wait for response with timeout
|
|
336
299
|
*/
|
|
337
|
-
|
|
300
|
+
protected async _send(
|
|
338
301
|
cmd: RFDBCommand,
|
|
339
302
|
payload: Record<string, unknown> = {},
|
|
340
|
-
timeoutMs: number = RFDBClient.DEFAULT_TIMEOUT_MS
|
|
303
|
+
timeoutMs: number = RFDBClient.DEFAULT_TIMEOUT_MS,
|
|
341
304
|
): Promise<RFDBResponse> {
|
|
342
305
|
if (!this.connected || !this.socket) {
|
|
343
306
|
throw new Error('Not connected to RFDB server');
|
|
@@ -348,13 +311,11 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
348
311
|
const request = { requestId: `r${id}`, cmd, ...payload };
|
|
349
312
|
const msgBytes = encode(request);
|
|
350
313
|
|
|
351
|
-
// Setup timeout
|
|
352
314
|
const timer = setTimeout(() => {
|
|
353
315
|
this.pending.delete(id);
|
|
354
316
|
reject(new Error(`RFDB ${cmd} timed out after ${timeoutMs}ms. Server may be unresponsive or dbPath may be invalid.`));
|
|
355
317
|
}, timeoutMs);
|
|
356
318
|
|
|
357
|
-
// Handle socket errors during this request
|
|
358
319
|
const errorHandler = (err: NodeJS.ErrnoException) => {
|
|
359
320
|
this.pending.delete(id);
|
|
360
321
|
clearTimeout(timer);
|
|
@@ -372,329 +333,36 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
372
333
|
clearTimeout(timer);
|
|
373
334
|
this.socket?.removeListener('error', errorHandler);
|
|
374
335
|
reject(error);
|
|
375
|
-
}
|
|
336
|
+
},
|
|
376
337
|
});
|
|
377
338
|
|
|
378
339
|
// Write length prefix + message
|
|
379
340
|
const header = Buffer.alloc(4);
|
|
380
341
|
header.writeUInt32BE(msgBytes.length);
|
|
381
|
-
|
|
382
342
|
this.socket!.write(Buffer.concat([header, Buffer.from(msgBytes)]));
|
|
383
343
|
});
|
|
384
344
|
}
|
|
385
345
|
|
|
386
346
|
// ===========================================================================
|
|
387
|
-
//
|
|
388
|
-
// ===========================================================================
|
|
389
|
-
|
|
390
|
-
/**
|
|
391
|
-
* Add nodes to the graph
|
|
392
|
-
* Extra properties beyond id/type/name/file/exported/metadata are merged into metadata
|
|
393
|
-
*/
|
|
394
|
-
async addNodes(nodes: Array<Partial<WireNode> & { id: string; type?: string; node_type?: string; nodeType?: string }>): Promise<RFDBResponse> {
|
|
395
|
-
const wireNodes: WireNode[] = nodes.map(n => {
|
|
396
|
-
// Cast to Record to allow iteration over extra properties
|
|
397
|
-
const nodeRecord = n as Record<string, unknown>;
|
|
398
|
-
|
|
399
|
-
// Extract known wire format fields, rest goes to metadata
|
|
400
|
-
const { id, type, node_type, nodeType, name, file, exported, metadata, semanticId, semantic_id, ...rest } = nodeRecord;
|
|
401
|
-
|
|
402
|
-
// Merge explicit metadata with extra properties
|
|
403
|
-
const existingMeta = typeof metadata === 'string' ? JSON.parse(metadata as string) : (metadata || {});
|
|
404
|
-
const combinedMeta = { ...existingMeta, ...rest };
|
|
405
|
-
|
|
406
|
-
const wire: WireNode = {
|
|
407
|
-
id: String(id),
|
|
408
|
-
nodeType: (node_type || nodeType || type || 'UNKNOWN') as NodeType,
|
|
409
|
-
name: (name as string) || '',
|
|
410
|
-
file: (file as string) || '',
|
|
411
|
-
exported: (exported as boolean) || false,
|
|
412
|
-
metadata: JSON.stringify(combinedMeta),
|
|
413
|
-
};
|
|
414
|
-
|
|
415
|
-
// Preserve semanticId as top-level field for v3 protocol
|
|
416
|
-
const sid = semanticId || semantic_id;
|
|
417
|
-
if (sid) {
|
|
418
|
-
(wire as WireNode & { semanticId: string }).semanticId = String(sid);
|
|
419
|
-
}
|
|
420
|
-
|
|
421
|
-
return wire;
|
|
422
|
-
});
|
|
423
|
-
|
|
424
|
-
if (this._batching) {
|
|
425
|
-
this._batchNodes.push(...wireNodes);
|
|
426
|
-
for (const node of wireNodes) {
|
|
427
|
-
if (node.file) this._batchFiles.add(node.file);
|
|
428
|
-
}
|
|
429
|
-
return { ok: true } as RFDBResponse;
|
|
430
|
-
}
|
|
431
|
-
|
|
432
|
-
return this._send('addNodes', { nodes: wireNodes });
|
|
433
|
-
}
|
|
434
|
-
|
|
435
|
-
/**
|
|
436
|
-
* Add edges to the graph
|
|
437
|
-
* Extra properties beyond src/dst/type are merged into metadata
|
|
438
|
-
*/
|
|
439
|
-
async addEdges(
|
|
440
|
-
edges: WireEdge[],
|
|
441
|
-
skipValidation: boolean = false
|
|
442
|
-
): Promise<RFDBResponse> {
|
|
443
|
-
const wireEdges: WireEdge[] = edges.map(e => {
|
|
444
|
-
// Cast to unknown first then to Record to allow extra properties
|
|
445
|
-
const edge = e as unknown as Record<string, unknown>;
|
|
446
|
-
|
|
447
|
-
// Extract known fields, rest goes to metadata
|
|
448
|
-
const { src, dst, type, edge_type, edgeType, metadata, ...rest } = edge;
|
|
449
|
-
|
|
450
|
-
// Merge explicit metadata with extra properties
|
|
451
|
-
const existingMeta = typeof metadata === 'string' ? JSON.parse(metadata as string) : (metadata || {});
|
|
452
|
-
const combinedMeta = { ...existingMeta, ...rest };
|
|
453
|
-
|
|
454
|
-
return {
|
|
455
|
-
src: String(src),
|
|
456
|
-
dst: String(dst),
|
|
457
|
-
edgeType: (edge_type || edgeType || type || e.edgeType || 'UNKNOWN') as EdgeType,
|
|
458
|
-
metadata: JSON.stringify(combinedMeta),
|
|
459
|
-
};
|
|
460
|
-
});
|
|
461
|
-
|
|
462
|
-
if (this._batching) {
|
|
463
|
-
this._batchEdges.push(...wireEdges);
|
|
464
|
-
return { ok: true } as RFDBResponse;
|
|
465
|
-
}
|
|
466
|
-
|
|
467
|
-
return this._send('addEdges', { edges: wireEdges, skipValidation });
|
|
468
|
-
}
|
|
469
|
-
|
|
470
|
-
/**
|
|
471
|
-
* Delete a node
|
|
472
|
-
*/
|
|
473
|
-
async deleteNode(id: string): Promise<RFDBResponse> {
|
|
474
|
-
return this._send('deleteNode', { id: String(id) });
|
|
475
|
-
}
|
|
476
|
-
|
|
477
|
-
/**
|
|
478
|
-
* Delete an edge
|
|
479
|
-
*/
|
|
480
|
-
async deleteEdge(src: string, dst: string, edgeType: EdgeType): Promise<RFDBResponse> {
|
|
481
|
-
return this._send('deleteEdge', {
|
|
482
|
-
src: String(src),
|
|
483
|
-
dst: String(dst),
|
|
484
|
-
edgeType
|
|
485
|
-
});
|
|
486
|
-
}
|
|
487
|
-
|
|
488
|
-
// ===========================================================================
|
|
489
|
-
// Read Operations
|
|
347
|
+
// Streaming Overrides (Unix socket supports streaming)
|
|
490
348
|
// ===========================================================================
|
|
491
349
|
|
|
492
350
|
/**
|
|
493
|
-
*
|
|
351
|
+
* Negotiate protocol version with server.
|
|
352
|
+
* Overrides base to set streaming flag.
|
|
494
353
|
*/
|
|
495
|
-
async
|
|
496
|
-
const response = await this._send('
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
/**
|
|
501
|
-
* Check if node exists
|
|
502
|
-
*/
|
|
503
|
-
async nodeExists(id: string): Promise<boolean> {
|
|
504
|
-
const response = await this._send('nodeExists', { id: String(id) });
|
|
505
|
-
return (response as { value: boolean }).value;
|
|
506
|
-
}
|
|
507
|
-
|
|
508
|
-
/**
|
|
509
|
-
* Find nodes by type
|
|
510
|
-
*/
|
|
511
|
-
async findByType(nodeType: NodeType): Promise<string[]> {
|
|
512
|
-
const response = await this._send('findByType', { nodeType });
|
|
513
|
-
return (response as { ids?: string[] }).ids || [];
|
|
514
|
-
}
|
|
515
|
-
|
|
516
|
-
/**
|
|
517
|
-
* Find nodes by attributes
|
|
518
|
-
*/
|
|
519
|
-
async findByAttr(query: Record<string, unknown>): Promise<string[]> {
|
|
520
|
-
const response = await this._send('findByAttr', { query });
|
|
521
|
-
return (response as { ids?: string[] }).ids || [];
|
|
522
|
-
}
|
|
523
|
-
|
|
524
|
-
// ===========================================================================
|
|
525
|
-
// Graph Traversal
|
|
526
|
-
// ===========================================================================
|
|
527
|
-
|
|
528
|
-
/**
|
|
529
|
-
* Get neighbors of a node
|
|
530
|
-
*/
|
|
531
|
-
async neighbors(id: string, edgeTypes: EdgeType[] = []): Promise<string[]> {
|
|
532
|
-
const response = await this._send('neighbors', {
|
|
533
|
-
id: String(id),
|
|
534
|
-
edgeTypes
|
|
535
|
-
});
|
|
536
|
-
return (response as { ids?: string[] }).ids || [];
|
|
537
|
-
}
|
|
538
|
-
|
|
539
|
-
/**
|
|
540
|
-
* Breadth-first search
|
|
541
|
-
*/
|
|
542
|
-
async bfs(startIds: string[], maxDepth: number, edgeTypes: EdgeType[] = []): Promise<string[]> {
|
|
543
|
-
const response = await this._send('bfs', {
|
|
544
|
-
startIds: startIds.map(String),
|
|
545
|
-
maxDepth,
|
|
546
|
-
edgeTypes
|
|
547
|
-
});
|
|
548
|
-
return (response as { ids?: string[] }).ids || [];
|
|
549
|
-
}
|
|
550
|
-
|
|
551
|
-
/**
|
|
552
|
-
* Depth-first search
|
|
553
|
-
*/
|
|
554
|
-
async dfs(startIds: string[], maxDepth: number, edgeTypes: EdgeType[] = []): Promise<string[]> {
|
|
555
|
-
const response = await this._send('dfs', {
|
|
556
|
-
startIds: startIds.map(String),
|
|
557
|
-
maxDepth,
|
|
558
|
-
edgeTypes
|
|
559
|
-
});
|
|
560
|
-
return (response as { ids?: string[] }).ids || [];
|
|
561
|
-
}
|
|
562
|
-
|
|
563
|
-
/**
|
|
564
|
-
* Reachability query - find all nodes reachable from start nodes
|
|
565
|
-
*/
|
|
566
|
-
async reachability(
|
|
567
|
-
startIds: string[],
|
|
568
|
-
maxDepth: number,
|
|
569
|
-
edgeTypes: EdgeType[] = [],
|
|
570
|
-
backward: boolean = false
|
|
571
|
-
): Promise<string[]> {
|
|
572
|
-
const response = await this._send('reachability', {
|
|
573
|
-
startIds: startIds.map(String),
|
|
574
|
-
maxDepth,
|
|
575
|
-
edgeTypes,
|
|
576
|
-
backward
|
|
577
|
-
});
|
|
578
|
-
return (response as { ids?: string[] }).ids || [];
|
|
579
|
-
}
|
|
580
|
-
|
|
581
|
-
/**
|
|
582
|
-
* Get outgoing edges from a node
|
|
583
|
-
* Parses metadata JSON and spreads it onto the edge object for convenience
|
|
584
|
-
*/
|
|
585
|
-
async getOutgoingEdges(id: string, edgeTypes: EdgeType[] | null = null): Promise<(WireEdge & Record<string, unknown>)[]> {
|
|
586
|
-
const response = await this._send('getOutgoingEdges', {
|
|
587
|
-
id: String(id),
|
|
588
|
-
edgeTypes
|
|
589
|
-
});
|
|
590
|
-
const edges = (response as { edges?: WireEdge[] }).edges || [];
|
|
591
|
-
|
|
592
|
-
// Parse metadata and spread onto edge for convenience
|
|
593
|
-
return edges.map(e => {
|
|
594
|
-
let meta = {};
|
|
595
|
-
try {
|
|
596
|
-
meta = e.metadata ? JSON.parse(e.metadata) : {};
|
|
597
|
-
} catch {
|
|
598
|
-
// Keep empty metadata on parse error
|
|
599
|
-
}
|
|
600
|
-
return { ...e, type: e.edgeType, ...meta };
|
|
601
|
-
});
|
|
602
|
-
}
|
|
603
|
-
|
|
604
|
-
/**
|
|
605
|
-
* Get incoming edges to a node
|
|
606
|
-
* Parses metadata JSON and spreads it onto the edge object for convenience
|
|
607
|
-
*/
|
|
608
|
-
async getIncomingEdges(id: string, edgeTypes: EdgeType[] | null = null): Promise<(WireEdge & Record<string, unknown>)[]> {
|
|
609
|
-
const response = await this._send('getIncomingEdges', {
|
|
610
|
-
id: String(id),
|
|
611
|
-
edgeTypes
|
|
612
|
-
});
|
|
613
|
-
const edges = (response as { edges?: WireEdge[] }).edges || [];
|
|
614
|
-
|
|
615
|
-
// Parse metadata and spread onto edge for convenience
|
|
616
|
-
return edges.map(e => {
|
|
617
|
-
let meta = {};
|
|
618
|
-
try {
|
|
619
|
-
meta = e.metadata ? JSON.parse(e.metadata) : {};
|
|
620
|
-
} catch {
|
|
621
|
-
// Keep empty metadata on parse error
|
|
622
|
-
}
|
|
623
|
-
return { ...e, type: e.edgeType, ...meta };
|
|
624
|
-
});
|
|
625
|
-
}
|
|
626
|
-
|
|
627
|
-
// ===========================================================================
|
|
628
|
-
// Stats
|
|
629
|
-
// ===========================================================================
|
|
630
|
-
|
|
631
|
-
/**
|
|
632
|
-
* Get node count
|
|
633
|
-
*/
|
|
634
|
-
async nodeCount(): Promise<number> {
|
|
635
|
-
const response = await this._send('nodeCount');
|
|
636
|
-
return (response as { count: number }).count;
|
|
637
|
-
}
|
|
638
|
-
|
|
639
|
-
/**
|
|
640
|
-
* Get edge count
|
|
641
|
-
*/
|
|
642
|
-
async edgeCount(): Promise<number> {
|
|
643
|
-
const response = await this._send('edgeCount');
|
|
644
|
-
return (response as { count: number }).count;
|
|
645
|
-
}
|
|
646
|
-
|
|
647
|
-
/**
|
|
648
|
-
* Count nodes by type
|
|
649
|
-
*/
|
|
650
|
-
async countNodesByType(types: NodeType[] | null = null): Promise<Record<string, number>> {
|
|
651
|
-
const response = await this._send('countNodesByType', { types });
|
|
652
|
-
return (response as { counts?: Record<string, number> }).counts || {};
|
|
653
|
-
}
|
|
654
|
-
|
|
655
|
-
/**
|
|
656
|
-
* Count edges by type
|
|
657
|
-
*/
|
|
658
|
-
async countEdgesByType(edgeTypes: EdgeType[] | null = null): Promise<Record<string, number>> {
|
|
659
|
-
const response = await this._send('countEdgesByType', { edgeTypes });
|
|
660
|
-
return (response as { counts?: Record<string, number> }).counts || {};
|
|
661
|
-
}
|
|
662
|
-
|
|
663
|
-
// ===========================================================================
|
|
664
|
-
// Control
|
|
665
|
-
// ===========================================================================
|
|
666
|
-
|
|
667
|
-
/**
|
|
668
|
-
* Flush data to disk
|
|
669
|
-
*/
|
|
670
|
-
async flush(): Promise<RFDBResponse> {
|
|
671
|
-
return this._send('flush');
|
|
672
|
-
}
|
|
673
|
-
|
|
674
|
-
/**
|
|
675
|
-
* Compact the database
|
|
676
|
-
*/
|
|
677
|
-
async compact(): Promise<RFDBResponse> {
|
|
678
|
-
return this._send('compact');
|
|
679
|
-
}
|
|
680
|
-
|
|
681
|
-
/**
|
|
682
|
-
* Clear the database
|
|
683
|
-
*/
|
|
684
|
-
async clear(): Promise<RFDBResponse> {
|
|
685
|
-
return this._send('clear');
|
|
354
|
+
override async hello(protocolVersion: number = 3): Promise<HelloResponse> {
|
|
355
|
+
const response = await this._send('hello' as RFDBCommand, { protocolVersion });
|
|
356
|
+
const hello = response as HelloResponse;
|
|
357
|
+
this._supportsStreaming = hello.features?.includes('streaming') ?? false;
|
|
358
|
+
return hello;
|
|
686
359
|
}
|
|
687
360
|
|
|
688
|
-
// ===========================================================================
|
|
689
|
-
// Bulk Read Operations
|
|
690
|
-
// ===========================================================================
|
|
691
|
-
|
|
692
361
|
/**
|
|
693
|
-
* Query nodes (async generator)
|
|
362
|
+
* Query nodes (async generator).
|
|
363
|
+
* Overrides base to support streaming for protocol v3+.
|
|
694
364
|
*/
|
|
695
|
-
async *queryNodes(query: AttrQuery): AsyncGenerator<WireNode, void, unknown> {
|
|
696
|
-
// When server supports streaming (protocol v3+), delegate to streaming handler
|
|
697
|
-
// to correctly handle chunked NodesChunk responses for large result sets.
|
|
365
|
+
override async *queryNodes(query: AttrQuery): AsyncGenerator<WireNode, void, unknown> {
|
|
698
366
|
if (this._supportsStreaming) {
|
|
699
367
|
yield* this.queryNodesStream(query);
|
|
700
368
|
return;
|
|
@@ -709,33 +377,13 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
709
377
|
}
|
|
710
378
|
}
|
|
711
379
|
|
|
712
|
-
/**
|
|
713
|
-
* Build a server query object from an AttrQuery.
|
|
714
|
-
*/
|
|
715
|
-
private _buildServerQuery(query: AttrQuery): Record<string, unknown> {
|
|
716
|
-
const serverQuery: Record<string, unknown> = {};
|
|
717
|
-
if (query.nodeType) serverQuery.nodeType = query.nodeType;
|
|
718
|
-
if (query.type) serverQuery.nodeType = query.type;
|
|
719
|
-
if (query.name) serverQuery.name = query.name;
|
|
720
|
-
if (query.file) serverQuery.file = query.file;
|
|
721
|
-
if (query.exported !== undefined) serverQuery.exported = query.exported;
|
|
722
|
-
return serverQuery;
|
|
723
|
-
}
|
|
724
|
-
|
|
725
380
|
/**
|
|
726
381
|
* Stream nodes matching query with true streaming support.
|
|
727
|
-
*
|
|
728
|
-
* Behavior depends on server capabilities:
|
|
729
|
-
* - Server supports streaming (protocol v3): receives chunked NodesChunk
|
|
730
|
-
* responses via StreamQueue. Nodes are yielded as they arrive.
|
|
731
|
-
* - Server does NOT support streaming (fallback): delegates to queryNodes()
|
|
732
|
-
* which yields nodes one by one from bulk response.
|
|
733
|
-
*
|
|
734
|
-
* The generator can be aborted by breaking out of the loop or calling .return().
|
|
382
|
+
* Overrides base to use StreamQueue for protocol v3+.
|
|
735
383
|
*/
|
|
736
|
-
async *queryNodesStream(query: AttrQuery): AsyncGenerator<WireNode, void, unknown> {
|
|
384
|
+
override async *queryNodesStream(query: AttrQuery): AsyncGenerator<WireNode, void, unknown> {
|
|
737
385
|
if (!this._supportsStreaming) {
|
|
738
|
-
yield*
|
|
386
|
+
yield* super.queryNodes(query);
|
|
739
387
|
return;
|
|
740
388
|
}
|
|
741
389
|
|
|
@@ -748,13 +396,11 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
748
396
|
const streamQueue = new StreamQueue<WireNode>();
|
|
749
397
|
this._pendingStreams.set(id, streamQueue);
|
|
750
398
|
|
|
751
|
-
// Build and send request manually (can't use _send which expects single response)
|
|
752
399
|
const request = { requestId: `r${id}`, cmd: 'queryNodes', query: serverQuery };
|
|
753
400
|
const msgBytes = encode(request);
|
|
754
401
|
const header = Buffer.alloc(4);
|
|
755
402
|
header.writeUInt32BE(msgBytes.length);
|
|
756
403
|
|
|
757
|
-
// Register in pending map for error routing
|
|
758
404
|
this.pending.set(id, {
|
|
759
405
|
resolve: () => { this._cleanupStream(id); },
|
|
760
406
|
reject: (error) => {
|
|
@@ -763,9 +409,7 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
763
409
|
},
|
|
764
410
|
});
|
|
765
411
|
|
|
766
|
-
// Start per-chunk timeout (resets on each chunk in _handleStreamingResponse)
|
|
767
412
|
this._resetStreamTimer(id, streamQueue);
|
|
768
|
-
|
|
769
413
|
this.socket!.write(Buffer.concat([header, Buffer.from(msgBytes)]));
|
|
770
414
|
|
|
771
415
|
try {
|
|
@@ -777,477 +421,15 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
777
421
|
}
|
|
778
422
|
}
|
|
779
423
|
|
|
780
|
-
/**
|
|
781
|
-
* Get all nodes matching query
|
|
782
|
-
*/
|
|
783
|
-
async getAllNodes(query: AttrQuery = {}): Promise<WireNode[]> {
|
|
784
|
-
const nodes: WireNode[] = [];
|
|
785
|
-
for await (const node of this.queryNodes(query)) {
|
|
786
|
-
nodes.push(node);
|
|
787
|
-
}
|
|
788
|
-
return nodes;
|
|
789
|
-
}
|
|
790
|
-
|
|
791
|
-
/**
|
|
792
|
-
* Get all edges
|
|
793
|
-
* Parses metadata JSON and spreads it onto the edge object for convenience
|
|
794
|
-
*/
|
|
795
|
-
async getAllEdges(): Promise<(WireEdge & Record<string, unknown>)[]> {
|
|
796
|
-
const response = await this._send('getAllEdges');
|
|
797
|
-
const edges = (response as { edges?: WireEdge[] }).edges || [];
|
|
798
|
-
|
|
799
|
-
// Parse metadata and spread onto edge for convenience
|
|
800
|
-
return edges.map(e => {
|
|
801
|
-
let meta = {};
|
|
802
|
-
try {
|
|
803
|
-
meta = e.metadata ? JSON.parse(e.metadata) : {};
|
|
804
|
-
} catch {
|
|
805
|
-
// Keep empty metadata on parse error
|
|
806
|
-
}
|
|
807
|
-
return { ...e, type: e.edgeType, ...meta };
|
|
808
|
-
});
|
|
809
|
-
}
|
|
810
|
-
|
|
811
|
-
// ===========================================================================
|
|
812
|
-
// Node Utility Methods
|
|
813
|
-
// ===========================================================================
|
|
814
|
-
|
|
815
|
-
/**
|
|
816
|
-
* Check if node is an endpoint (has no outgoing edges)
|
|
817
|
-
*/
|
|
818
|
-
async isEndpoint(id: string): Promise<boolean> {
|
|
819
|
-
const response = await this._send('isEndpoint', { id: String(id) });
|
|
820
|
-
return (response as { value: boolean }).value;
|
|
821
|
-
}
|
|
822
|
-
|
|
823
|
-
/**
|
|
824
|
-
* Get node identifier string
|
|
825
|
-
*/
|
|
826
|
-
async getNodeIdentifier(id: string): Promise<string | null> {
|
|
827
|
-
const response = await this._send('getNodeIdentifier', { id: String(id) });
|
|
828
|
-
return (response as { identifier?: string | null }).identifier || null;
|
|
829
|
-
}
|
|
830
|
-
|
|
831
|
-
/**
|
|
832
|
-
* Update node version
|
|
833
|
-
*/
|
|
834
|
-
async updateNodeVersion(id: string, version: string): Promise<RFDBResponse> {
|
|
835
|
-
return this._send('updateNodeVersion', { id: String(id), version });
|
|
836
|
-
}
|
|
837
|
-
|
|
838
|
-
/**
|
|
839
|
-
* Declare metadata fields for server-side indexing.
|
|
840
|
-
* Call before adding nodes so the server builds indexes on flush.
|
|
841
|
-
* Returns the number of declared fields.
|
|
842
|
-
*/
|
|
843
|
-
async declareFields(fields: FieldDeclaration[]): Promise<number> {
|
|
844
|
-
const response = await this._send('declareFields', { fields });
|
|
845
|
-
return (response as { count?: number }).count || 0;
|
|
846
|
-
}
|
|
847
|
-
|
|
848
|
-
// ===========================================================================
|
|
849
|
-
// Datalog API
|
|
850
|
-
// ===========================================================================
|
|
851
|
-
|
|
852
|
-
/**
|
|
853
|
-
* Load Datalog rules
|
|
854
|
-
*/
|
|
855
|
-
async datalogLoadRules(source: string): Promise<number> {
|
|
856
|
-
const response = await this._send('datalogLoadRules', { source });
|
|
857
|
-
return (response as { count: number }).count;
|
|
858
|
-
}
|
|
859
|
-
|
|
860
|
-
/**
|
|
861
|
-
* Clear Datalog rules
|
|
862
|
-
*/
|
|
863
|
-
async datalogClearRules(): Promise<RFDBResponse> {
|
|
864
|
-
return this._send('datalogClearRules');
|
|
865
|
-
}
|
|
866
|
-
|
|
867
|
-
/**
|
|
868
|
-
* Execute Datalog query
|
|
869
|
-
*/
|
|
870
|
-
async datalogQuery(query: string): Promise<DatalogResult[]> {
|
|
871
|
-
const response = await this._send('datalogQuery', { query });
|
|
872
|
-
return (response as { results?: DatalogResult[] }).results || [];
|
|
873
|
-
}
|
|
874
|
-
|
|
875
|
-
/**
|
|
876
|
-
* Check a guarantee (Datalog rule) and return violations
|
|
877
|
-
*/
|
|
878
|
-
async checkGuarantee(ruleSource: string): Promise<DatalogResult[]> {
|
|
879
|
-
const response = await this._send('checkGuarantee', { ruleSource });
|
|
880
|
-
return (response as { violations?: DatalogResult[] }).violations || [];
|
|
881
|
-
}
|
|
882
|
-
|
|
883
|
-
/**
|
|
884
|
-
* Execute unified Datalog — handles both direct queries and rule-based programs.
|
|
885
|
-
* Auto-detects the head predicate instead of hardcoding violation(X).
|
|
886
|
-
*/
|
|
887
|
-
async executeDatalog(source: string): Promise<DatalogResult[]> {
|
|
888
|
-
const response = await this._send('executeDatalog', { source });
|
|
889
|
-
return (response as { results?: DatalogResult[] }).results || [];
|
|
890
|
-
}
|
|
891
|
-
|
|
892
|
-
/**
|
|
893
|
-
* Ping the server
|
|
894
|
-
*/
|
|
895
|
-
async ping(): Promise<string | false> {
|
|
896
|
-
const response = await this._send('ping') as { pong?: boolean; version?: string };
|
|
897
|
-
return response.pong && response.version ? response.version : false;
|
|
898
|
-
}
|
|
899
|
-
|
|
900
|
-
// ===========================================================================
|
|
901
|
-
// Protocol v2 - Multi-Database Commands
|
|
902
|
-
// ===========================================================================
|
|
903
|
-
|
|
904
|
-
/**
|
|
905
|
-
* Negotiate protocol version with server
|
|
906
|
-
* @param protocolVersion - Protocol version to negotiate (default: 2)
|
|
907
|
-
* @returns Server capabilities including protocolVersion, serverVersion, features
|
|
908
|
-
*/
|
|
909
|
-
async hello(protocolVersion: number = 3): Promise<HelloResponse> {
|
|
910
|
-
const response = await this._send('hello' as RFDBCommand, { protocolVersion });
|
|
911
|
-
const hello = response as HelloResponse;
|
|
912
|
-
this._supportsStreaming = hello.features?.includes('streaming') ?? false;
|
|
913
|
-
return hello;
|
|
914
|
-
}
|
|
915
|
-
|
|
916
|
-
/**
|
|
917
|
-
* Create a new database
|
|
918
|
-
* @param name - Database name (alphanumeric, _, -)
|
|
919
|
-
* @param ephemeral - If true, database is in-memory and auto-cleaned on disconnect
|
|
920
|
-
*/
|
|
921
|
-
async createDatabase(name: string, ephemeral: boolean = false): Promise<CreateDatabaseResponse> {
|
|
922
|
-
const response = await this._send('createDatabase' as RFDBCommand, { name, ephemeral });
|
|
923
|
-
return response as CreateDatabaseResponse;
|
|
924
|
-
}
|
|
925
|
-
|
|
926
|
-
/**
|
|
927
|
-
* Open a database and set as current for this session
|
|
928
|
-
* @param name - Database name
|
|
929
|
-
* @param mode - 'rw' (read-write) or 'ro' (read-only)
|
|
930
|
-
*/
|
|
931
|
-
async openDatabase(name: string, mode: 'rw' | 'ro' = 'rw'): Promise<OpenDatabaseResponse> {
|
|
932
|
-
const response = await this._send('openDatabase' as RFDBCommand, { name, mode });
|
|
933
|
-
return response as OpenDatabaseResponse;
|
|
934
|
-
}
|
|
935
|
-
|
|
936
|
-
/**
|
|
937
|
-
* Close current database
|
|
938
|
-
*/
|
|
939
|
-
async closeDatabase(): Promise<RFDBResponse> {
|
|
940
|
-
return this._send('closeDatabase' as RFDBCommand);
|
|
941
|
-
}
|
|
942
|
-
|
|
943
|
-
/**
|
|
944
|
-
* Drop (delete) a database - must not be in use
|
|
945
|
-
* @param name - Database name
|
|
946
|
-
*/
|
|
947
|
-
async dropDatabase(name: string): Promise<RFDBResponse> {
|
|
948
|
-
return this._send('dropDatabase' as RFDBCommand, { name });
|
|
949
|
-
}
|
|
950
|
-
|
|
951
|
-
/**
|
|
952
|
-
* List all databases
|
|
953
|
-
*/
|
|
954
|
-
async listDatabases(): Promise<ListDatabasesResponse> {
|
|
955
|
-
const response = await this._send('listDatabases' as RFDBCommand);
|
|
956
|
-
return response as ListDatabasesResponse;
|
|
957
|
-
}
|
|
958
|
-
|
|
959
|
-
/**
|
|
960
|
-
* Get current database for this session
|
|
961
|
-
*/
|
|
962
|
-
async currentDatabase(): Promise<CurrentDatabaseResponse> {
|
|
963
|
-
const response = await this._send('currentDatabase' as RFDBCommand);
|
|
964
|
-
return response as CurrentDatabaseResponse;
|
|
965
|
-
}
|
|
966
|
-
|
|
967
|
-
// ===========================================================================
|
|
968
|
-
// Snapshot Operations
|
|
969
|
-
// ===========================================================================
|
|
970
|
-
|
|
971
|
-
/**
|
|
972
|
-
* Convert a SnapshotRef to wire format payload fields.
|
|
973
|
-
*
|
|
974
|
-
* - number -> { version: N }
|
|
975
|
-
* - { tag, value } -> { tagKey, tagValue }
|
|
976
|
-
*/
|
|
977
|
-
private _resolveSnapshotRef(ref: SnapshotRef): Record<string, unknown> {
|
|
978
|
-
if (typeof ref === 'number') return { version: ref };
|
|
979
|
-
return { tagKey: ref.tag, tagValue: ref.value };
|
|
980
|
-
}
|
|
981
|
-
|
|
982
|
-
/**
|
|
983
|
-
* Compute diff between two snapshots.
|
|
984
|
-
* @param from - Source snapshot (version number or tag reference)
|
|
985
|
-
* @param to - Target snapshot (version number or tag reference)
|
|
986
|
-
* @returns SnapshotDiff with added/removed segments and stats
|
|
987
|
-
*/
|
|
988
|
-
async diffSnapshots(from: SnapshotRef, to: SnapshotRef): Promise<SnapshotDiff> {
|
|
989
|
-
const response = await this._send('diffSnapshots', {
|
|
990
|
-
from: this._resolveSnapshotRef(from),
|
|
991
|
-
to: this._resolveSnapshotRef(to),
|
|
992
|
-
});
|
|
993
|
-
return (response as DiffSnapshotsResponse).diff;
|
|
994
|
-
}
|
|
995
|
-
|
|
996
|
-
/**
|
|
997
|
-
* Tag a snapshot with key-value metadata.
|
|
998
|
-
* @param version - Snapshot version to tag
|
|
999
|
-
* @param tags - Key-value pairs to apply (e.g. { "release": "v1.0" })
|
|
1000
|
-
*/
|
|
1001
|
-
async tagSnapshot(version: number, tags: Record<string, string>): Promise<void> {
|
|
1002
|
-
await this._send('tagSnapshot', { version, tags });
|
|
1003
|
-
}
|
|
1004
|
-
|
|
1005
|
-
/**
|
|
1006
|
-
* Find a snapshot by tag key/value pair.
|
|
1007
|
-
* @param tagKey - Tag key to search for
|
|
1008
|
-
* @param tagValue - Tag value to match
|
|
1009
|
-
* @returns Snapshot version number, or null if not found
|
|
1010
|
-
*/
|
|
1011
|
-
async findSnapshot(tagKey: string, tagValue: string): Promise<number | null> {
|
|
1012
|
-
const response = await this._send('findSnapshot', { tagKey, tagValue });
|
|
1013
|
-
return (response as FindSnapshotResponse).version;
|
|
1014
|
-
}
|
|
1015
|
-
|
|
1016
|
-
/**
|
|
1017
|
-
* List snapshots, optionally filtered by tag key.
|
|
1018
|
-
* @param filterTag - Optional tag key to filter by (only snapshots with this tag)
|
|
1019
|
-
* @returns Array of SnapshotInfo objects
|
|
1020
|
-
*/
|
|
1021
|
-
async listSnapshots(filterTag?: string): Promise<SnapshotInfo[]> {
|
|
1022
|
-
const payload: Record<string, unknown> = {};
|
|
1023
|
-
if (filterTag !== undefined) payload.filterTag = filterTag;
|
|
1024
|
-
const response = await this._send('listSnapshots', payload);
|
|
1025
|
-
return (response as ListSnapshotsResponse).snapshots;
|
|
1026
|
-
}
|
|
1027
|
-
|
|
1028
|
-
// ===========================================================================
|
|
1029
|
-
// Batch Operations
|
|
1030
|
-
// ===========================================================================
|
|
1031
|
-
|
|
1032
|
-
/**
|
|
1033
|
-
* Begin a batch operation.
|
|
1034
|
-
* While batching, addNodes/addEdges buffer locally instead of sending to server.
|
|
1035
|
-
* Call commitBatch() to send all buffered data atomically.
|
|
1036
|
-
*/
|
|
1037
|
-
beginBatch(): void {
|
|
1038
|
-
if (this._batching) throw new Error('Batch already in progress');
|
|
1039
|
-
this._batching = true;
|
|
1040
|
-
this._batchNodes = [];
|
|
1041
|
-
this._batchEdges = [];
|
|
1042
|
-
this._batchFiles = new Set();
|
|
1043
|
-
}
|
|
1044
|
-
|
|
1045
|
-
/**
 * Synchronously batch a single node. Must be inside beginBatch/commitBatch.
 * Skips async wrapper — pushes directly to batch array.
 *
 * Accepts the node type under any of `node_type`, `nodeType` or `type`
 * (first non-empty wins, else 'UNKNOWN'); any extra properties not listed
 * in the destructuring below are folded into the JSON metadata blob.
 * @throws Error when no batch is in progress.
 */
batchNode(node: Partial<WireNode> & { id: string; type?: string; node_type?: string; nodeType?: string }): void {
  if (!this._batching) throw new Error('No batch in progress');
  const nodeRecord = node as Record<string, unknown>;
  const { id, type, node_type, nodeType, name, file, exported, metadata, semanticId, semantic_id, ...rest } = nodeRecord;
  // metadata may arrive pre-serialized; parse it so leftover props can merge in.
  // NOTE(review): JSON.parse throws on malformed metadata strings — confirm callers always pass valid JSON.
  const existingMeta = typeof metadata === 'string' ? JSON.parse(metadata as string) : (metadata || {});
  // `rest` (unrecognized props) overrides keys already present in metadata.
  const combinedMeta = { ...existingMeta, ...rest };
  const wire: WireNode = {
    id: String(id),
    nodeType: (node_type || nodeType || type || 'UNKNOWN') as NodeType,
    name: (name as string) || '',
    file: (file as string) || '',
    exported: (exported as boolean) || false,
    metadata: JSON.stringify(combinedMeta),
  };
  // Semantic id is optional and accepted in either naming convention.
  const sid = semanticId || semantic_id;
  if (sid) {
    (wire as WireNode & { semanticId: string }).semanticId = String(sid);
  }
  this._batchNodes.push(wire);
  // Track touched files so commitBatch can tell the server what to replace.
  if (wire.file) this._batchFiles.add(wire.file);
}
|
|
1070
|
-
|
|
1071
|
-
/**
|
|
1072
|
-
* Synchronously batch a single edge. Must be inside beginBatch/commitBatch.
|
|
1073
|
-
*/
|
|
1074
|
-
batchEdge(edge: WireEdge | Record<string, unknown>): void {
|
|
1075
|
-
if (!this._batching) throw new Error('No batch in progress');
|
|
1076
|
-
const edgeRecord = edge as Record<string, unknown>;
|
|
1077
|
-
const { src, dst, type, edge_type, edgeType, metadata, ...rest } = edgeRecord;
|
|
1078
|
-
const existingMeta = typeof metadata === 'string' ? JSON.parse(metadata as string) : (metadata || {});
|
|
1079
|
-
const combinedMeta = { ...existingMeta, ...rest };
|
|
1080
|
-
this._batchEdges.push({
|
|
1081
|
-
src: String(src),
|
|
1082
|
-
dst: String(dst),
|
|
1083
|
-
edgeType: (edge_type || edgeType || type || (edge as WireEdge).edgeType || 'UNKNOWN') as EdgeType,
|
|
1084
|
-
metadata: JSON.stringify(combinedMeta),
|
|
1085
|
-
});
|
|
1086
|
-
}
|
|
1087
|
-
|
|
1088
|
-
/**
|
|
1089
|
-
* Commit the current batch to the server.
|
|
1090
|
-
* Sends all buffered nodes/edges with the list of changed files.
|
|
1091
|
-
* Server atomically replaces old data for changed files with new data.
|
|
1092
|
-
*
|
|
1093
|
-
* @param tags - Optional tags for the commit (e.g., plugin name, phase)
|
|
1094
|
-
* @param deferIndex - When true, server writes data but skips index rebuild.
|
|
1095
|
-
* Caller must send rebuildIndexes() after all deferred commits complete.
|
|
1096
|
-
*/
|
|
1097
|
-
async commitBatch(tags?: string[], deferIndex?: boolean, protectedTypes?: string[]): Promise<CommitDelta> {
|
|
1098
|
-
if (!this._batching) throw new Error('No batch in progress');
|
|
1099
|
-
|
|
1100
|
-
const allNodes = this._batchNodes;
|
|
1101
|
-
const allEdges = this._batchEdges;
|
|
1102
|
-
const changedFiles = [...this._batchFiles];
|
|
1103
|
-
|
|
1104
|
-
this._batching = false;
|
|
1105
|
-
this._batchNodes = [];
|
|
1106
|
-
this._batchEdges = [];
|
|
1107
|
-
this._batchFiles = new Set();
|
|
1108
|
-
|
|
1109
|
-
return this._sendCommitBatch(changedFiles, allNodes, allEdges, tags, deferIndex, protectedTypes);
|
|
1110
|
-
}
|
|
1111
|
-
|
|
1112
|
-
/**
 * Internal helper: send a commitBatch with chunking for large payloads.
 * Used by both commitBatch() and BatchHandle.commit().
 * @internal
 * @param changedFiles - Files whose previous data the server should replace.
 * @param allNodes - Every node to commit (may exceed one chunk).
 * @param allEdges - Every edge to commit (may exceed one chunk).
 * @param tags - Optional commit tags; sent with every chunk.
 * @param deferIndex - When true, each chunk skips the index rebuild.
 * @param protectedTypes - Forwarded on the first chunk only.
 * @returns The server's CommitDelta, merged across chunks when chunked.
 */
async _sendCommitBatch(
  changedFiles: string[],
  allNodes: WireNode[],
  allEdges: WireEdge[],
  tags?: string[],
  deferIndex?: boolean,
  protectedTypes?: string[],
): Promise<CommitDelta> {
  // Chunk large batches to stay under server's 100MB message limit.
  // First chunk includes changedFiles (triggers old data deletion),
  // subsequent chunks use empty changedFiles (additive only).
  const CHUNK = 10_000;
  if (allNodes.length <= CHUNK && allEdges.length <= CHUNK) {
    // Fast path: the whole batch fits in a single message.
    const response = await this._send('commitBatch', {
      changedFiles, nodes: allNodes, edges: allEdges, tags,
      ...(deferIndex ? { deferIndex: true } : {}),
      ...(protectedTypes?.length ? { protectedTypes } : {}),
    });
    return (response as CommitBatchResponse).delta;
  }

  // Accumulator for per-chunk deltas returned by the server.
  const merged: CommitDelta = {
    changedFiles,
    nodesAdded: 0, nodesRemoved: 0,
    edgesAdded: 0, edgesRemoved: 0,
    changedNodeTypes: [], changedEdgeTypes: [],
  };
  const nodeTypes = new Set<string>();
  const edgeTypes = new Set<string>();

  // Round trips needed: enough to drain the longer of the two arrays.
  const maxI = Math.max(
    Math.ceil(allNodes.length / CHUNK),
    Math.ceil(allEdges.length / CHUNK),
    1,
  );

  for (let i = 0; i < maxI; i++) {
    const nodes = allNodes.slice(i * CHUNK, (i + 1) * CHUNK);
    const edges = allEdges.slice(i * CHUNK, (i + 1) * CHUNK);
    const response = await this._send('commitBatch', {
      // Only chunk 0 carries changedFiles/protectedTypes, so the server
      // performs the destructive replace exactly once; later chunks add data.
      changedFiles: i === 0 ? changedFiles : [],
      nodes, edges, tags,
      ...(deferIndex ? { deferIndex: true } : {}),
      ...(i === 0 && protectedTypes?.length ? { protectedTypes } : {}),
    });
    const d = (response as CommitBatchResponse).delta;
    merged.nodesAdded += d.nodesAdded;
    merged.nodesRemoved += d.nodesRemoved;
    merged.edgesAdded += d.edgesAdded;
    merged.edgesRemoved += d.edgesRemoved;
    for (const t of d.changedNodeTypes) nodeTypes.add(t);
    for (const t of d.changedEdgeTypes) edgeTypes.add(t);
  }

  // Deduplicated union of changed types across all chunks.
  merged.changedNodeTypes = [...nodeTypes];
  merged.changedEdgeTypes = [...edgeTypes];
  return merged;
}
|
|
1175
|
-
|
|
1176
|
-
/**
|
|
1177
|
-
* Rebuild all secondary indexes after a series of deferred-index commits.
|
|
1178
|
-
* Call this once after bulk loading data with commitBatch(tags, true).
|
|
1179
|
-
*/
|
|
1180
|
-
async rebuildIndexes(): Promise<void> {
|
|
1181
|
-
await this._send('rebuildIndexes', {});
|
|
1182
|
-
}
|
|
1183
|
-
|
|
1184
424
|
/**
 * Create an isolated batch handle for concurrent-safe batching.
 */
createBatch(): BatchHandle {
  const handle = new BatchHandle(this);
  return handle;
}
|
|
1192
430
|
|
|
1193
|
-
/**
 * Abort the current batch, discarding all buffered data.
 * Safe to call even when no batch is in progress (it simply resets state).
 */
abortBatch(): void {
  this._batching = false;
  // Drop all buffered nodes/edges and the touched-file set.
  this._batchNodes = [];
  this._batchEdges = [];
  this._batchFiles = new Set();
}
|
|
1202
|
-
|
|
1203
|
-
/**
 * Check if a batch is currently in progress.
 * @returns true between beginBatch() and commitBatch()/abortBatch().
 */
isBatching(): boolean {
  return this._batching;
}
|
|
1209
|
-
|
|
1210
|
-
/**
|
|
1211
|
-
* Find files that depend on the given changed files.
|
|
1212
|
-
* Uses backward reachability to find dependent modules.
|
|
1213
|
-
*
|
|
1214
|
-
* Note: For large result sets, each reachable node requires a separate
|
|
1215
|
-
* getNode RPC. A future server-side optimization could return file paths
|
|
1216
|
-
* directly from the reachability query.
|
|
1217
|
-
*/
|
|
1218
|
-
async findDependentFiles(changedFiles: string[]): Promise<string[]> {
|
|
1219
|
-
const nodeIds: string[] = [];
|
|
1220
|
-
for (const file of changedFiles) {
|
|
1221
|
-
const ids = await this.findByAttr({ file });
|
|
1222
|
-
nodeIds.push(...ids);
|
|
1223
|
-
}
|
|
1224
|
-
|
|
1225
|
-
if (nodeIds.length === 0) return [];
|
|
1226
|
-
|
|
1227
|
-
const reachable = await this.reachability(
|
|
1228
|
-
nodeIds,
|
|
1229
|
-
2,
|
|
1230
|
-
['IMPORTS_FROM', 'DEPENDS_ON', 'CALLS'] as EdgeType[],
|
|
1231
|
-
true,
|
|
1232
|
-
);
|
|
1233
|
-
|
|
1234
|
-
const changedSet = new Set(changedFiles);
|
|
1235
|
-
const files = new Set<string>();
|
|
1236
|
-
for (const id of reachable) {
|
|
1237
|
-
const node = await this.getNode(id);
|
|
1238
|
-
if (node?.file && !changedSet.has(node.file)) {
|
|
1239
|
-
files.add(node.file);
|
|
1240
|
-
}
|
|
1241
|
-
}
|
|
1242
|
-
|
|
1243
|
-
return [...files];
|
|
1244
|
-
}
|
|
1245
|
-
|
|
1246
431
|
/**
|
|
1247
432
|
* Unref the socket so it doesn't keep the process alive.
|
|
1248
|
-
*
|
|
1249
|
-
* Call this in test environments to allow process to exit
|
|
1250
|
-
* even if connections remain open.
|
|
1251
433
|
*/
|
|
1252
434
|
unref(): void {
|
|
1253
435
|
if (this.socket) {
|
|
@@ -1265,31 +447,14 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
|
|
|
1265
447
|
this.connected = false;
|
|
1266
448
|
}
|
|
1267
449
|
}
|
|
1268
|
-
|
|
1269
|
-
/**
 * Shutdown the server, then close this client's connection.
 * The shutdown RPC is expected to fail mid-flight because the server
 * drops the connection while processing it, so the error is swallowed.
 */
async shutdown(): Promise<void> {
  try {
    await this._send('shutdown');
  } catch {
    // Expected - server closes connection
  }
  await this.close();
}
|
|
1280
450
|
}
|
|
1281
451
|
|
|
1282
452
|
/**
|
|
1283
453
|
* Isolated batch handle for concurrent-safe batching (REG-487).
|
|
1284
|
-
*
|
|
1285
|
-
* Each BatchHandle maintains its own node/edge/file buffers, completely
|
|
1286
|
-
* independent of the RFDBClient's instance-level _batching state.
|
|
1287
|
-
* Multiple workers can each create their own BatchHandle and commit
|
|
1288
|
-
* independently without race conditions.
|
|
1289
454
|
*/
|
|
1290
455
|
export class BatchHandle {
|
|
1291
456
|
private _nodes: WireNode[] = [];
|
|
1292
|
-
private _edges: WireEdge[] = [];
|
|
457
|
+
private _edges: import('@grafema/types').WireEdge[] = [];
|
|
1293
458
|
private _files: Set<string> = new Set();
|
|
1294
459
|
|
|
1295
460
|
constructor(private client: RFDBClient) {}
|
|
@@ -1300,7 +465,7 @@ export class BatchHandle {
|
|
|
1300
465
|
else if (node.file) this._files.add(node.file);
|
|
1301
466
|
}
|
|
1302
467
|
|
|
1303
|
-
/** Buffer one edge into this handle's private batch (sent on commit). */
addEdge(edge: import('@grafema/types').WireEdge): void {
  this._edges.push(edge);
}
|
|
1306
471
|
|