@grafema/rfdb-client 0.2.5-beta → 0.2.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/ts/client.ts CHANGED
@@ -8,6 +8,7 @@
8
8
  import { createConnection, Socket } from 'net';
9
9
  import { encode, decode } from '@msgpack/msgpack';
10
10
  import { EventEmitter } from 'events';
11
+ import { StreamQueue } from './stream-queue.js';
11
12
 
12
13
  import type {
13
14
  RFDBCommand,
@@ -25,6 +26,15 @@ import type {
25
26
  OpenDatabaseResponse,
26
27
  ListDatabasesResponse,
27
28
  CurrentDatabaseResponse,
29
+ SnapshotRef,
30
+ SnapshotDiff,
31
+ SnapshotInfo,
32
+ DiffSnapshotsResponse,
33
+ FindSnapshotResponse,
34
+ ListSnapshotsResponse,
35
+ CommitDelta,
36
+ CommitBatchResponse,
37
+ NodesChunkResponse,
28
38
  } from '@grafema/types';
29
39
 
30
40
  interface PendingRequest {
@@ -40,6 +50,17 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
40
50
  private reqId: number;
41
51
  private buffer: Buffer;
42
52
 
53
+ // Batch state
54
+ private _batching: boolean = false;
55
+ private _batchNodes: WireNode[] = [];
56
+ private _batchEdges: WireEdge[] = [];
57
+ private _batchFiles: Set<string> = new Set();
58
+
59
+ // Streaming state
60
+ private _supportsStreaming: boolean = false;
61
+ private _pendingStreams: Map<number, StreamQueue<WireNode>> = new Map();
62
+ private _streamTimers: Map<number, ReturnType<typeof setTimeout>> = new Map();
63
+
43
64
  constructor(socketPath: string = '/tmp/rfdb.sock') {
44
65
  super();
45
66
  this.socketPath = socketPath;
@@ -50,6 +71,14 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
50
71
  this.buffer = Buffer.alloc(0);
51
72
  }
52
73
 
74
+ /**
75
+ * Whether the connected server supports streaming responses.
76
+ * Set after calling hello(). Defaults to false.
77
+ */
78
+ get supportsStreaming(): boolean {
79
+ return this._supportsStreaming;
80
+ }
81
+
53
82
  /**
54
83
  * Connect to RFDB server
55
84
  */
@@ -82,6 +111,15 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
82
111
  reject(new Error('Connection closed'));
83
112
  }
84
113
  this.pending.clear();
114
+ // Fail all pending streams
115
+ for (const [, stream] of this._pendingStreams) {
116
+ stream.fail(new Error('Connection closed'));
117
+ }
118
+ this._pendingStreams.clear();
119
+ for (const [, timer] of this._streamTimers) {
120
+ clearTimeout(timer);
121
+ }
122
+ this._streamTimers.clear();
85
123
  });
86
124
 
87
125
  this.socket.on('data', (chunk: Buffer) => {
@@ -154,16 +192,48 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
154
192
  }
155
193
 
156
194
  /**
157
- * Handle decoded response
195
+ * Handle decoded response — match by requestId, route streaming chunks
196
+ * to StreamQueue or resolve single-response Promise.
158
197
  */
159
198
  private _handleResponse(response: RFDBResponse): void {
160
- if (this.pending.size === 0) {
199
+ if (this.pending.size === 0 && this._pendingStreams.size === 0) {
161
200
  this.emit('error', new Error('Received response with no pending request'));
162
201
  return;
163
202
  }
164
203
 
165
- // Get the oldest pending request (FIFO)
166
- const [id, { resolve, reject }] = this.pending.entries().next().value as [number, PendingRequest];
204
+ let id: number;
205
+
206
+ if (response.requestId) {
207
+ const parsed = this._parseRequestId(response.requestId);
208
+ if (parsed === null) {
209
+ this.emit('error', new Error(`Received response for unknown requestId: ${response.requestId}`));
210
+ return;
211
+ }
212
+ id = parsed;
213
+ } else {
214
+ // FIFO fallback for servers that don't echo requestId
215
+ if (this.pending.size > 0) {
216
+ id = (this.pending.entries().next().value as [number, PendingRequest])[0];
217
+ } else {
218
+ this.emit('error', new Error('Received response with no pending request'));
219
+ return;
220
+ }
221
+ }
222
+
223
+ // Route to streaming handler if this requestId has a StreamQueue
224
+ const streamQueue = this._pendingStreams.get(id);
225
+ if (streamQueue) {
226
+ this._handleStreamingResponse(id, response, streamQueue);
227
+ return;
228
+ }
229
+
230
+ // Non-streaming response — existing behavior
231
+ if (!this.pending.has(id)) {
232
+ this.emit('error', new Error(`Received response for unknown requestId: ${response.requestId}`));
233
+ return;
234
+ }
235
+
236
+ const { resolve, reject } = this.pending.get(id)!;
167
237
  this.pending.delete(id);
168
238
 
169
239
  if (response.error) {
@@ -173,6 +243,88 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
173
243
  }
174
244
  }
175
245
 
246
+ /**
247
+ * Handle a response for a streaming request.
248
+ * Routes chunk data to StreamQueue and manages stream lifecycle.
249
+ * Resets per-chunk timeout on each successful chunk arrival.
250
+ */
251
+ private _handleStreamingResponse(
252
+ id: number,
253
+ response: RFDBResponse,
254
+ streamQueue: StreamQueue<WireNode>,
255
+ ): void {
256
+ // Error response — fail the stream
257
+ if (response.error) {
258
+ this._cleanupStream(id);
259
+ streamQueue.fail(new Error(response.error));
260
+ return;
261
+ }
262
+
263
+ // Streaming chunk (has `done` field)
264
+ if ('done' in response) {
265
+ const chunk = response as unknown as NodesChunkResponse;
266
+ const nodes = chunk.nodes || [];
267
+ for (const node of nodes) {
268
+ streamQueue.push(node);
269
+ }
270
+
271
+ if (chunk.done) {
272
+ this._cleanupStream(id);
273
+ streamQueue.end();
274
+ } else {
275
+ // Reset per-chunk timeout
276
+ this._resetStreamTimer(id, streamQueue);
277
+ }
278
+ return;
279
+ }
280
+
281
+ // Auto-fallback: server sent a non-streaming Nodes response
282
+ // (server doesn't support streaming or result was below threshold)
283
+ const nodesResponse = response as unknown as { nodes?: WireNode[] };
284
+ const nodes = nodesResponse.nodes || [];
285
+ for (const node of nodes) {
286
+ streamQueue.push(node);
287
+ }
288
+ this._cleanupStream(id);
289
+ streamQueue.end();
290
+ }
291
+
292
+ /**
293
+ * Reset the per-chunk timeout for a streaming request.
294
+ */
295
+ private _resetStreamTimer(id: number, streamQueue: StreamQueue<WireNode>): void {
296
+ const existing = this._streamTimers.get(id);
297
+ if (existing) clearTimeout(existing);
298
+
299
+ const timer = setTimeout(() => {
300
+ this._cleanupStream(id);
301
+ streamQueue.fail(new Error(
302
+ `RFDB queryNodesStream timed out after ${RFDBClient.DEFAULT_TIMEOUT_MS}ms (no chunk received)`
303
+ ));
304
+ }, RFDBClient.DEFAULT_TIMEOUT_MS);
305
+
306
+ this._streamTimers.set(id, timer);
307
+ }
308
+
309
+ /**
310
+ * Clean up all state for a completed/failed streaming request.
311
+ */
312
+ private _cleanupStream(id: number): void {
313
+ this._pendingStreams.delete(id);
314
+ this.pending.delete(id);
315
+ const timer = this._streamTimers.get(id);
316
+ if (timer) {
317
+ clearTimeout(timer);
318
+ this._streamTimers.delete(id);
319
+ }
320
+ }
321
+
322
+ private _parseRequestId(requestId: string): number | null {
323
+ if (!requestId.startsWith('r')) return null;
324
+ const num = parseInt(requestId.slice(1), 10);
325
+ return Number.isNaN(num) ? null : num;
326
+ }
327
+
176
328
  /**
177
329
  * Default timeout for operations (60 seconds)
178
330
  * Flush/compact may take time for large graphs, but should not hang indefinitely
@@ -191,11 +343,10 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
191
343
  throw new Error('Not connected to RFDB server');
192
344
  }
193
345
 
194
- const request = { cmd, ...payload };
195
- const msgBytes = encode(request);
196
-
197
346
  return new Promise((resolve, reject) => {
198
347
  const id = this.reqId++;
348
+ const request = { requestId: `r${id}`, cmd, ...payload };
349
+ const msgBytes = encode(request);
199
350
 
200
351
  // Setup timeout
201
352
  const timer = setTimeout(() => {
@@ -246,13 +397,13 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
246
397
  const nodeRecord = n as Record<string, unknown>;
247
398
 
248
399
  // Extract known wire format fields, rest goes to metadata
249
- const { id, type, node_type, nodeType, name, file, exported, metadata, ...rest } = nodeRecord;
400
+ const { id, type, node_type, nodeType, name, file, exported, metadata, semanticId, semantic_id, ...rest } = nodeRecord;
250
401
 
251
402
  // Merge explicit metadata with extra properties
252
403
  const existingMeta = typeof metadata === 'string' ? JSON.parse(metadata as string) : (metadata || {});
253
404
  const combinedMeta = { ...existingMeta, ...rest };
254
405
 
255
- return {
406
+ const wire: WireNode = {
256
407
  id: String(id),
257
408
  nodeType: (node_type || nodeType || type || 'UNKNOWN') as NodeType,
258
409
  name: (name as string) || '',
@@ -260,8 +411,24 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
260
411
  exported: (exported as boolean) || false,
261
412
  metadata: JSON.stringify(combinedMeta),
262
413
  };
414
+
415
+ // Preserve semanticId as top-level field for v3 protocol
416
+ const sid = semanticId || semantic_id;
417
+ if (sid) {
418
+ (wire as WireNode & { semanticId: string }).semanticId = String(sid);
419
+ }
420
+
421
+ return wire;
263
422
  });
264
423
 
424
+ if (this._batching) {
425
+ this._batchNodes.push(...wireNodes);
426
+ for (const node of wireNodes) {
427
+ if (node.file) this._batchFiles.add(node.file);
428
+ }
429
+ return { ok: true } as RFDBResponse;
430
+ }
431
+
265
432
  return this._send('addNodes', { nodes: wireNodes });
266
433
  }
267
434
 
@@ -292,6 +459,11 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
292
459
  };
293
460
  });
294
461
 
462
+ if (this._batching) {
463
+ this._batchEdges.push(...wireEdges);
464
+ return { ok: true } as RFDBResponse;
465
+ }
466
+
295
467
  return this._send('addEdges', { edges: wireEdges, skipValidation });
296
468
  }
297
469
 
@@ -521,18 +693,87 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
521
693
  * Query nodes (async generator)
522
694
  */
523
695
  async *queryNodes(query: AttrQuery): AsyncGenerator<WireNode, void, unknown> {
696
+ // When server supports streaming (protocol v3+), delegate to streaming handler
697
+ // to correctly handle chunked NodesChunk responses for large result sets.
698
+ if (this._supportsStreaming) {
699
+ yield* this.queryNodesStream(query);
700
+ return;
701
+ }
702
+
703
+ const serverQuery = this._buildServerQuery(query);
704
+ const response = await this._send('queryNodes', { query: serverQuery });
705
+ const nodes = (response as { nodes?: WireNode[] }).nodes || [];
706
+
707
+ for (const node of nodes) {
708
+ yield node;
709
+ }
710
+ }
711
+
712
+ /**
713
+ * Build a server query object from an AttrQuery.
714
+ */
715
+ private _buildServerQuery(query: AttrQuery): Record<string, unknown> {
524
716
  const serverQuery: Record<string, unknown> = {};
525
717
  if (query.nodeType) serverQuery.nodeType = query.nodeType;
526
718
  if (query.type) serverQuery.nodeType = query.type;
527
719
  if (query.name) serverQuery.name = query.name;
528
720
  if (query.file) serverQuery.file = query.file;
529
721
  if (query.exported !== undefined) serverQuery.exported = query.exported;
722
+ return serverQuery;
723
+ }
530
724
 
531
- const response = await this._send('queryNodes', { query: serverQuery });
532
- const nodes = (response as { nodes?: WireNode[] }).nodes || [];
725
  /**
   * Stream nodes matching query with true streaming support.
   *
   * Behavior depends on server capabilities:
   * - Server supports streaming (protocol v3): receives chunked NodesChunk
   *   responses via StreamQueue. Nodes are yielded as they arrive.
   * - Server does NOT support streaming (fallback): delegates to queryNodes()
   *   which yields nodes one by one from bulk response.
   *
   * The generator can be aborted by breaking out of the loop or calling
   * .return(); the finally block guarantees all stream state is released.
   *
   * NOTE(review): as an async generator, nothing below runs until the first
   * next() call — constructing the generator alone performs no I/O.
   */
  async *queryNodesStream(query: AttrQuery): AsyncGenerator<WireNode, void, unknown> {
    if (!this._supportsStreaming) {
      // Server negotiated via hello() lacks the 'streaming' feature —
      // fall back to the bulk single-response path.
      yield* this.queryNodes(query);
      return;
    }

    if (!this.connected || !this.socket) {
      throw new Error('Not connected to RFDB server');
    }

    const serverQuery = this._buildServerQuery(query);
    const id = this.reqId++;
    const streamQueue = new StreamQueue<WireNode>();
    // Registered before the write so _handleResponse can route the very
    // first chunk even if it arrives immediately.
    this._pendingStreams.set(id, streamQueue);

    // Build and send request manually (can't use _send which expects single response)
    const request = { requestId: `r${id}`, cmd: 'queryNodes', query: serverQuery };
    const msgBytes = encode(request);
    const header = Buffer.alloc(4);
    header.writeUInt32BE(msgBytes.length);

    // Register in pending map for error routing
    this.pending.set(id, {
      resolve: () => { this._cleanupStream(id); },
      reject: (error) => {
        this._cleanupStream(id);
        streamQueue.fail(error);
      },
    });

    // Start per-chunk timeout (resets on each chunk in _handleStreamingResponse)
    this._resetStreamTimer(id, streamQueue);

    // Wire format: 4-byte big-endian length prefix, then the msgpack body.
    this.socket!.write(Buffer.concat([header, Buffer.from(msgBytes)]));

    try {
      for await (const node of streamQueue) {
        yield node;
      }
    } finally {
      // Runs on normal completion, consumer break/return, and thrown errors.
      this._cleanupStream(id);
    }
  }
538
779
 
@@ -639,6 +880,15 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
639
880
  return (response as { violations?: DatalogResult[] }).violations || [];
640
881
  }
641
882
 
883
+ /**
884
+ * Execute unified Datalog — handles both direct queries and rule-based programs.
885
+ * Auto-detects the head predicate instead of hardcoding violation(X).
886
+ */
887
+ async executeDatalog(source: string): Promise<DatalogResult[]> {
888
+ const response = await this._send('executeDatalog', { source });
889
+ return (response as { results?: DatalogResult[] }).results || [];
890
+ }
891
+
642
892
  /**
643
893
  * Ping the server
644
894
  */
@@ -656,9 +906,11 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
656
906
  * @param protocolVersion - Protocol version to negotiate (default: 2)
657
907
  * @returns Server capabilities including protocolVersion, serverVersion, features
658
908
  */
659
- async hello(protocolVersion: number = 2): Promise<HelloResponse> {
909
+ async hello(protocolVersion: number = 3): Promise<HelloResponse> {
660
910
  const response = await this._send('hello' as RFDBCommand, { protocolVersion });
661
- return response as HelloResponse;
911
+ const hello = response as HelloResponse;
912
+ this._supportsStreaming = hello.features?.includes('streaming') ?? false;
913
+ return hello;
662
914
  }
663
915
 
664
916
  /**
@@ -712,6 +964,159 @@ export class RFDBClient extends EventEmitter implements IRFDBClient {
712
964
  return response as CurrentDatabaseResponse;
713
965
  }
714
966
 
967
+ // ===========================================================================
968
+ // Snapshot Operations
969
+ // ===========================================================================
970
+
971
+ /**
972
+ * Convert a SnapshotRef to wire format payload fields.
973
+ *
974
+ * - number -> { version: N }
975
+ * - { tag, value } -> { tagKey, tagValue }
976
+ */
977
+ private _resolveSnapshotRef(ref: SnapshotRef): Record<string, unknown> {
978
+ if (typeof ref === 'number') return { version: ref };
979
+ return { tagKey: ref.tag, tagValue: ref.value };
980
+ }
981
+
982
+ /**
983
+ * Compute diff between two snapshots.
984
+ * @param from - Source snapshot (version number or tag reference)
985
+ * @param to - Target snapshot (version number or tag reference)
986
+ * @returns SnapshotDiff with added/removed segments and stats
987
+ */
988
+ async diffSnapshots(from: SnapshotRef, to: SnapshotRef): Promise<SnapshotDiff> {
989
+ const response = await this._send('diffSnapshots', {
990
+ from: this._resolveSnapshotRef(from),
991
+ to: this._resolveSnapshotRef(to),
992
+ });
993
+ return (response as DiffSnapshotsResponse).diff;
994
+ }
995
+
996
+ /**
997
+ * Tag a snapshot with key-value metadata.
998
+ * @param version - Snapshot version to tag
999
+ * @param tags - Key-value pairs to apply (e.g. { "release": "v1.0" })
1000
+ */
1001
+ async tagSnapshot(version: number, tags: Record<string, string>): Promise<void> {
1002
+ await this._send('tagSnapshot', { version, tags });
1003
+ }
1004
+
1005
+ /**
1006
+ * Find a snapshot by tag key/value pair.
1007
+ * @param tagKey - Tag key to search for
1008
+ * @param tagValue - Tag value to match
1009
+ * @returns Snapshot version number, or null if not found
1010
+ */
1011
+ async findSnapshot(tagKey: string, tagValue: string): Promise<number | null> {
1012
+ const response = await this._send('findSnapshot', { tagKey, tagValue });
1013
+ return (response as FindSnapshotResponse).version;
1014
+ }
1015
+
1016
+ /**
1017
+ * List snapshots, optionally filtered by tag key.
1018
+ * @param filterTag - Optional tag key to filter by (only snapshots with this tag)
1019
+ * @returns Array of SnapshotInfo objects
1020
+ */
1021
+ async listSnapshots(filterTag?: string): Promise<SnapshotInfo[]> {
1022
+ const payload: Record<string, unknown> = {};
1023
+ if (filterTag !== undefined) payload.filterTag = filterTag;
1024
+ const response = await this._send('listSnapshots', payload);
1025
+ return (response as ListSnapshotsResponse).snapshots;
1026
+ }
1027
+
1028
+ // ===========================================================================
1029
+ // Batch Operations
1030
+ // ===========================================================================
1031
+
1032
+ /**
1033
+ * Begin a batch operation.
1034
+ * While batching, addNodes/addEdges buffer locally instead of sending to server.
1035
+ * Call commitBatch() to send all buffered data atomically.
1036
+ */
1037
+ beginBatch(): void {
1038
+ if (this._batching) throw new Error('Batch already in progress');
1039
+ this._batching = true;
1040
+ this._batchNodes = [];
1041
+ this._batchEdges = [];
1042
+ this._batchFiles = new Set();
1043
+ }
1044
+
1045
+ /**
1046
+ * Commit the current batch to the server.
1047
+ * Sends all buffered nodes/edges with the list of changed files.
1048
+ * Server atomically replaces old data for changed files with new data.
1049
+ */
1050
+ async commitBatch(tags?: string[]): Promise<CommitDelta> {
1051
+ if (!this._batching) throw new Error('No batch in progress');
1052
+ const response = await this._send('commitBatch', {
1053
+ changedFiles: [...this._batchFiles],
1054
+ nodes: this._batchNodes,
1055
+ edges: this._batchEdges,
1056
+ tags,
1057
+ });
1058
+
1059
+ this._batching = false;
1060
+ this._batchNodes = [];
1061
+ this._batchEdges = [];
1062
+ this._batchFiles = new Set();
1063
+
1064
+ return (response as CommitBatchResponse).delta;
1065
+ }
1066
+
1067
+ /**
1068
+ * Abort the current batch, discarding all buffered data.
1069
+ */
1070
+ abortBatch(): void {
1071
+ this._batching = false;
1072
+ this._batchNodes = [];
1073
+ this._batchEdges = [];
1074
+ this._batchFiles = new Set();
1075
+ }
1076
+
1077
+ /**
1078
+ * Check if a batch is currently in progress.
1079
+ */
1080
+ isBatching(): boolean {
1081
+ return this._batching;
1082
+ }
1083
+
1084
+ /**
1085
+ * Find files that depend on the given changed files.
1086
+ * Uses backward reachability to find dependent modules.
1087
+ *
1088
+ * Note: For large result sets, each reachable node requires a separate
1089
+ * getNode RPC. A future server-side optimization could return file paths
1090
+ * directly from the reachability query.
1091
+ */
1092
+ async findDependentFiles(changedFiles: string[]): Promise<string[]> {
1093
+ const nodeIds: string[] = [];
1094
+ for (const file of changedFiles) {
1095
+ const ids = await this.findByAttr({ file });
1096
+ nodeIds.push(...ids);
1097
+ }
1098
+
1099
+ if (nodeIds.length === 0) return [];
1100
+
1101
+ const reachable = await this.reachability(
1102
+ nodeIds,
1103
+ 2,
1104
+ ['IMPORTS_FROM', 'DEPENDS_ON', 'CALLS'] as EdgeType[],
1105
+ true,
1106
+ );
1107
+
1108
+ const changedSet = new Set(changedFiles);
1109
+ const files = new Set<string>();
1110
+ for (const id of reachable) {
1111
+ const node = await this.getNode(id);
1112
+ if (node?.file && !changedSet.has(node.file)) {
1113
+ files.add(node.file);
1114
+ }
1115
+ }
1116
+
1117
+ return [...files];
1118
+ }
1119
+
715
1120
  /**
716
1121
  * Unref the socket so it doesn't keep the process alive.
717
1122
  *
package/ts/index.ts CHANGED
@@ -10,6 +10,7 @@
10
10
 
11
11
  // Client
12
12
  export { RFDBClient } from './client.js';
13
+ export { StreamQueue } from './stream-queue.js';
13
14
 
14
15
  // Protocol types (re-exported from @grafema/types for convenience)
15
16
  export type {
@@ -21,4 +22,13 @@ export type {
21
22
  AttrQuery,
22
23
  DatalogResult,
23
24
  IRFDBClient,
25
+ // Snapshot types
26
+ SnapshotRef,
27
+ SnapshotStats,
28
+ SegmentInfo,
29
+ SnapshotDiff,
30
+ SnapshotInfo,
31
+ DiffSnapshotsResponse,
32
+ FindSnapshotResponse,
33
+ ListSnapshotsResponse,
24
34
  } from './protocol.js';
package/ts/protocol.ts CHANGED
@@ -43,12 +43,23 @@ export type {
43
43
  CountResponse,
44
44
  CountsByTypeResponse,
45
45
  PingResponse,
46
+ NodesChunkResponse,
46
47
 
47
48
  // Query types
48
49
  AttrQuery,
49
50
  DatalogBinding,
50
51
  DatalogResult,
51
52
 
53
+ // Snapshot types
54
+ SnapshotRef,
55
+ SnapshotStats,
56
+ SegmentInfo,
57
+ SnapshotDiff,
58
+ SnapshotInfo,
59
+ DiffSnapshotsResponse,
60
+ FindSnapshotResponse,
61
+ ListSnapshotsResponse,
62
+
52
63
  // Client interface
53
64
  IRFDBClient,
54
65
  } from '@grafema/types';