@query-farm/vgi-rpc 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92) hide show
  1. package/LICENSE.md +191 -0
  2. package/README.md +332 -0
  3. package/dist/client/connect.d.ts +10 -0
  4. package/dist/client/connect.d.ts.map +1 -0
  5. package/dist/client/index.d.ts +6 -0
  6. package/dist/client/index.d.ts.map +1 -0
  7. package/dist/client/introspect.d.ts +30 -0
  8. package/dist/client/introspect.d.ts.map +1 -0
  9. package/dist/client/ipc.d.ts +34 -0
  10. package/dist/client/ipc.d.ts.map +1 -0
  11. package/dist/client/pipe.d.ts +63 -0
  12. package/dist/client/pipe.d.ts.map +1 -0
  13. package/dist/client/stream.d.ts +52 -0
  14. package/dist/client/stream.d.ts.map +1 -0
  15. package/dist/client/types.d.ts +25 -0
  16. package/dist/client/types.d.ts.map +1 -0
  17. package/dist/constants.d.ts +15 -0
  18. package/dist/constants.d.ts.map +1 -0
  19. package/dist/dispatch/describe.d.ts +14 -0
  20. package/dist/dispatch/describe.d.ts.map +1 -0
  21. package/dist/dispatch/stream.d.ts +20 -0
  22. package/dist/dispatch/stream.d.ts.map +1 -0
  23. package/dist/dispatch/unary.d.ts +9 -0
  24. package/dist/dispatch/unary.d.ts.map +1 -0
  25. package/dist/errors.d.ts +12 -0
  26. package/dist/errors.d.ts.map +1 -0
  27. package/dist/http/common.d.ts +16 -0
  28. package/dist/http/common.d.ts.map +1 -0
  29. package/dist/http/dispatch.d.ts +18 -0
  30. package/dist/http/dispatch.d.ts.map +1 -0
  31. package/dist/http/handler.d.ts +16 -0
  32. package/dist/http/handler.d.ts.map +1 -0
  33. package/dist/http/index.d.ts +4 -0
  34. package/dist/http/index.d.ts.map +1 -0
  35. package/dist/http/token.d.ts +24 -0
  36. package/dist/http/token.d.ts.map +1 -0
  37. package/dist/http/types.d.ts +30 -0
  38. package/dist/http/types.d.ts.map +1 -0
  39. package/dist/index.d.ts +9 -0
  40. package/dist/index.d.ts.map +1 -0
  41. package/dist/index.js +2493 -0
  42. package/dist/index.js.map +34 -0
  43. package/dist/protocol.d.ts +62 -0
  44. package/dist/protocol.d.ts.map +1 -0
  45. package/dist/schema.d.ts +38 -0
  46. package/dist/schema.d.ts.map +1 -0
  47. package/dist/server.d.ts +19 -0
  48. package/dist/server.d.ts.map +1 -0
  49. package/dist/types.d.ts +71 -0
  50. package/dist/types.d.ts.map +1 -0
  51. package/dist/util/schema.d.ts +20 -0
  52. package/dist/util/schema.d.ts.map +1 -0
  53. package/dist/util/zstd.d.ts +5 -0
  54. package/dist/util/zstd.d.ts.map +1 -0
  55. package/dist/wire/reader.d.ts +40 -0
  56. package/dist/wire/reader.d.ts.map +1 -0
  57. package/dist/wire/request.d.ts +15 -0
  58. package/dist/wire/request.d.ts.map +1 -0
  59. package/dist/wire/response.d.ts +25 -0
  60. package/dist/wire/response.d.ts.map +1 -0
  61. package/dist/wire/writer.d.ts +59 -0
  62. package/dist/wire/writer.d.ts.map +1 -0
  63. package/package.json +32 -0
  64. package/src/client/connect.ts +310 -0
  65. package/src/client/index.ts +14 -0
  66. package/src/client/introspect.ts +138 -0
  67. package/src/client/ipc.ts +225 -0
  68. package/src/client/pipe.ts +661 -0
  69. package/src/client/stream.ts +297 -0
  70. package/src/client/types.ts +31 -0
  71. package/src/constants.ts +22 -0
  72. package/src/dispatch/describe.ts +155 -0
  73. package/src/dispatch/stream.ts +151 -0
  74. package/src/dispatch/unary.ts +35 -0
  75. package/src/errors.ts +22 -0
  76. package/src/http/common.ts +89 -0
  77. package/src/http/dispatch.ts +340 -0
  78. package/src/http/handler.ts +247 -0
  79. package/src/http/index.ts +6 -0
  80. package/src/http/token.ts +149 -0
  81. package/src/http/types.ts +49 -0
  82. package/src/index.ts +52 -0
  83. package/src/protocol.ts +144 -0
  84. package/src/schema.ts +114 -0
  85. package/src/server.ts +159 -0
  86. package/src/types.ts +162 -0
  87. package/src/util/schema.ts +31 -0
  88. package/src/util/zstd.ts +49 -0
  89. package/src/wire/reader.ts +113 -0
  90. package/src/wire/request.ts +98 -0
  91. package/src/wire/response.ts +181 -0
  92. package/src/wire/writer.ts +137 -0
@@ -0,0 +1,661 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ import {
5
+ RecordBatch,
6
+ RecordBatchStreamWriter,
7
+ Schema,
8
+ Field,
9
+ Struct,
10
+ makeData,
11
+ vectorFromArray,
12
+ } from "apache-arrow";
13
+ import { DESCRIBE_METHOD_NAME } from "../constants.js";
14
+ import { serializeIpcStream } from "../http/common.js";
15
+ import { IpcStreamReader } from "../wire/reader.js";
16
+ import {
17
+ inferArrowType,
18
+ buildRequestIpc,
19
+ dispatchLogOrError,
20
+ extractBatchRows,
21
+ } from "./ipc.js";
22
+ import {
23
+ parseDescribeResponse,
24
+ type MethodInfo,
25
+ type ServiceDescription,
26
+ } from "./introspect.js";
27
+ import type {
28
+ LogMessage,
29
+ PipeConnectOptions,
30
+ SubprocessConnectOptions,
31
+ StreamSession,
32
+ } from "./types.js";
33
+ import type { RpcClient } from "./connect.js";
34
+ import { RpcError } from "../errors.js";
35
+
36
+ // ---------------------------------------------------------------------------
37
+ // Writable abstraction
38
+ // ---------------------------------------------------------------------------
39
+
40
+ interface PipeWritable {
41
+ write(data: Uint8Array): void;
42
+ flush?(): void;
43
+ end(): void;
44
+ }
45
+
46
+ type WriteFn = (bytes: Uint8Array) => void;
47
+
48
+ // ---------------------------------------------------------------------------
49
+ // PipeIncrementalWriter — batch-by-batch IPC writing for lockstep streaming
50
+ // ---------------------------------------------------------------------------
51
+
52
+ class PipeIncrementalWriter {
53
+ private writer: RecordBatchStreamWriter;
54
+ private writeFn: WriteFn;
55
+ private closed = false;
56
+
57
+ constructor(writeFn: WriteFn, schema: Schema) {
58
+ this.writeFn = writeFn;
59
+ this.writer = new RecordBatchStreamWriter();
60
+ this.writer.reset(undefined, schema);
61
+ this.drain(); // flushes schema message
62
+ }
63
+
64
+ write(batch: RecordBatch): void {
65
+ if (this.closed) throw new Error("PipeIncrementalWriter already closed");
66
+ (this.writer as any)._writeRecordBatch(batch);
67
+ this.drain();
68
+ }
69
+
70
+ close(): void {
71
+ if (this.closed) return;
72
+ this.closed = true;
73
+ // EOS marker: continuation (0xFFFFFFFF) + metadata length (0x00000000)
74
+ const eos = new Uint8Array(new Int32Array([-1, 0]).buffer);
75
+ this.writeFn(eos);
76
+ }
77
+
78
+ private drain(): void {
79
+ const values = (this.writer as any)._sink._values as Uint8Array[];
80
+ for (const chunk of values) {
81
+ this.writeFn(chunk);
82
+ }
83
+ values.length = 0;
84
+ }
85
+ }
86
+
87
+ // ---------------------------------------------------------------------------
88
+ // PipeStreamSession — lockstep streaming over pipes
89
+ // ---------------------------------------------------------------------------
90
+
91
/**
 * Lockstep streaming session over a single bidirectional pipe.
 *
 * The pipe carries exactly one operation at a time, so this session owns the
 * transport's "busy" lock: it is handed the lock by `pipeConnect.stream()`
 * and releases it via `releaseBusy` when the session ends (possibly after an
 * asynchronous drain registered through `setDrainPromise`).
 *
 * Protocol ordering is critical throughout: input is always written BEFORE
 * the server's output stream is opened, because the server may not flush its
 * output schema until it has processed the first input batch.
 */
export class PipeStreamSession implements StreamSession {
  // Reader over the server's output byte stream.
  private _reader: IpcStreamReader;
  // Writes raw bytes into the server's input pipe.
  private _writeFn: WriteFn;
  // Optional sink for log messages the server embeds as zero-row batches.
  private _onLog?: (msg: LogMessage) => void;
  // Header row delivered by the server at stream start (null if none).
  private _header: Record<string, any> | null;
  // Incremental IPC writer for input batches; created lazily on first use.
  private _inputWriter: PipeIncrementalWriter | null = null;
  // Input schema locked in by the first non-empty exchange — all exchanges
  // on this session share one IPC stream, so the schema cannot change.
  private _inputSchema: Schema | null = null;
  // True once the server's output IPC stream has been opened for reading.
  private _outputStreamOpened = false;
  // True once the session has ended (close(), iteration end, or error).
  private _closed = false;
  // Output schema from describe; also the fallback input schema for
  // zero-row exchanges issued before any non-empty exchange.
  private _outputSchema: Schema;
  // Releases the transport's single-operation busy lock.
  private _releaseBusy: () => void;
  // Registers an async drain that the next transport operation must await.
  private _setDrainPromise: (p: Promise<void>) => void;

  constructor(opts: {
    reader: IpcStreamReader;
    writeFn: WriteFn;
    onLog?: (msg: LogMessage) => void;
    header: Record<string, any> | null;
    outputSchema: Schema;
    releaseBusy: () => void;
    setDrainPromise: (p: Promise<void>) => void;
  }) {
    this._reader = opts.reader;
    this._writeFn = opts.writeFn;
    this._onLog = opts.onLog;
    this._header = opts.header;
    this._outputSchema = opts.outputSchema;
    this._releaseBusy = opts.releaseBusy;
    this._setDrainPromise = opts.setDrainPromise;
  }

  /** Header row the server sent when the stream opened, or null. */
  get header(): Record<string, any> | null {
    return this._header;
  }

  /**
   * Read output batches from the server until a data batch is found.
   * Zero-row batches may be log/error envelopes — those are dispatched
   * (logs to `_onLog`, errors thrown by dispatchLogOrError) and skipped;
   * a zero-row batch that is NOT an envelope is real data and is returned.
   * Returns null when the server closes its output stream (EOS).
   */
  private async _readOutputBatch(): Promise<RecordBatch | null> {
    while (true) {
      const batch = await this._reader.readNextBatch();
      if (batch === null) return null; // Server closed output stream

      if (batch.numRows === 0) {
        // Check if it's a log/error batch. If so, dispatch and continue.
        // Otherwise it's a zero-row data batch — return it.
        if (dispatchLogOrError(batch, this._onLog)) {
          continue;
        }
      }

      return batch;
    }
  }

  /**
   * Ensure the server's output stream is opened for reading.
   * Must be called AFTER sending the first input batch, because
   * the server's output schema may not be flushed until it processes
   * the first input and writes the first output batch.
   * @throws RpcError("ProtocolError") if the pipe hits EOF instead of
   *         delivering an output schema.
   */
  private async _ensureOutputStream(): Promise<void> {
    if (this._outputStreamOpened) return;
    this._outputStreamOpened = true;
    const schema = await this._reader.openNextStream();
    if (!schema) {
      throw new RpcError("ProtocolError", "Expected output stream but got EOF", "");
    }
  }

  /**
   * Send one input batch to the server and return the resulting data rows.
   *
   * The input schema is inferred from the first row (all fields nullable)
   * and locked to the first non-empty exchange; later exchanges must use
   * the same field names. An empty `input` sends a zero-row batch using the
   * cached input schema (or the output schema as a fallback).
   *
   * @param input Rows to send; may be empty.
   * @returns Rows from the server's next data batch ([] on server EOS).
   * @throws RpcError if the session is closed or the schema changed;
   *         rethrows server-side errors after cleaning up the pipe.
   */
  async exchange(input: Record<string, any>[]): Promise<Record<string, any>[]> {
    if (this._closed) {
      throw new RpcError("ProtocolError", "Stream session is closed", "");
    }

    // Build input batch
    let inputSchema: Schema;
    let batch: RecordBatch;

    if (input.length === 0) {
      // Zero-row exchange: use cached input schema from a prior exchange,
      // then fall back to the output schema from describe. The cached
      // schema is preferred because input and output schemas may differ
      // (e.g. exchange_accumulate: input {value} → output {running_sum, exchange_count}).
      inputSchema = this._inputSchema ?? this._outputSchema;
      const children = inputSchema.fields.map((f) => {
        return makeData({ type: f.type, length: 0, nullCount: 0 });
      });
      const structType = new Struct(inputSchema.fields);
      const data = makeData({
        type: structType,
        length: 0,
        children,
        nullCount: 0,
      });
      batch = new RecordBatch(inputSchema, data);
    } else {
      // Infer schema from first row.
      // Always use nullable fields — the server validates input schemas
      // strictly and its schema typically uses nullable columns.
      const keys = Object.keys(input[0]);
      const fields = keys.map((key) => {
        // Use the first non-null value in the column as the type sample.
        let sample: any = undefined;
        for (const row of input) {
          if (row[key] != null) { sample = row[key]; break; }
        }
        const arrowType = inferArrowType(sample);
        return new Field(key, arrowType, /* nullable */ true);
      });
      inputSchema = new Schema(fields);

      // Validate schema consistency: all exchanges on the same pipe session
      // share a single IPC stream, so the schema is locked to the first call.
      if (this._inputSchema) {
        const cached = this._inputSchema;
        if (
          cached.fields.length !== inputSchema.fields.length ||
          cached.fields.some((f, i) => f.name !== inputSchema.fields[i].name)
        ) {
          throw new RpcError(
            "ProtocolError",
            `Exchange input schema changed: expected [${cached.fields.map((f) => f.name).join(", ")}] ` +
              `but got [${inputSchema.fields.map((f) => f.name).join(", ")}]`,
            "",
          );
        }
      } else {
        this._inputSchema = inputSchema;
      }

      // Build one column vector per field, then wrap them in a struct batch.
      const children = inputSchema.fields.map((f) => {
        const values = input.map((row) => row[f.name]);
        return vectorFromArray(values, f.type).data[0];
      });
      const structType = new Struct(inputSchema.fields);
      const data = makeData({
        type: structType,
        length: input.length,
        children,
        nullCount: 0,
      });
      batch = new RecordBatch(inputSchema, data);
    }

    // Lazy-open input writer on first exchange
    if (!this._inputWriter) {
      this._inputWriter = new PipeIncrementalWriter(this._writeFn, inputSchema);
    }

    // Write one input batch FIRST, then open output stream.
    // The server may not flush the output schema until it processes the
    // first input batch and writes the first output batch.
    this._inputWriter.write(batch);
    await this._ensureOutputStream();

    // Read output batch(es) from server
    try {
      const outputBatch = await this._readOutputBatch();
      if (outputBatch === null) {
        return [];
      }
      return extractBatchRows(outputBatch);
    } catch (e) {
      // On error, clean up the pipe so it's ready for the next request
      await this._cleanup();
      throw e;
    }
  }

  /**
   * Clean up after an error: close input (EOS), drain remaining output
   * batches, and release the transport's busy lock. Idempotent.
   */
  private async _cleanup(): Promise<void> {
    if (this._closed) return;
    this._closed = true;
    if (this._inputWriter) {
      this._inputWriter.close();
      this._inputWriter = null;
    }
    try {
      if (this._outputStreamOpened) {
        // Consume everything up to the server's EOS marker.
        while ((await this._reader.readNextBatch()) !== null) {}
      }
    } catch {
      // Suppress errors during drain
    }
    this._releaseBusy();
  }

  /**
   * Iterate over producer-stream batches in lockstep: each iteration sends
   * one zero-row "tick" batch, then yields the server's next data batch as
   * rows. Ends when the server closes its output stream. Always closes the
   * input stream, drains leftover output, and releases the busy lock.
   */
  async *[Symbol.asyncIterator](): AsyncIterableIterator<Record<string, any>[]> {
    if (this._closed) return;

    try {
      // Open input writer with empty schema for tick batches
      const tickSchema = new Schema([]);
      this._inputWriter = new PipeIncrementalWriter(this._writeFn, tickSchema);

      // Build a zero-row tick batch
      const structType = new Struct(tickSchema.fields);
      const tickData = makeData({
        type: structType,
        length: 0,
        children: [],
        nullCount: 0,
      });
      const tickBatch = new RecordBatch(tickSchema, tickData);

      while (true) {
        // Send one tick FIRST, then open output stream on first iteration.
        // The server may not flush the output schema until it processes the
        // first tick and writes the first output batch.
        this._inputWriter.write(tickBatch);
        await this._ensureOutputStream();

        // Read output batch(es)
        const outputBatch = await this._readOutputBatch();
        if (outputBatch === null) {
          // Server finished — EOS on output stream
          break;
        }

        yield extractBatchRows(outputBatch);
      }
    } finally {
      // Close input stream if still open
      if (this._inputWriter) {
        this._inputWriter.close();
        this._inputWriter = null;
      }
      // Drain any remaining output batches
      try {
        if (this._outputStreamOpened) {
          while ((await this._reader.readNextBatch()) !== null) {}
        }
      } catch {
        // Suppress errors during drain
      }
      this._closed = true;
      this._releaseBusy();
    }
  }

  /**
   * End the session. Closes the input stream (or, if the session was never
   * used, sends an empty schema-only stream so the server's input reader
   * unblocks), then drains the server's output asynchronously. The drain
   * promise is registered with the transport so the next operation waits
   * for it before reusing the pipe. Idempotent.
   */
  close(): void {
    if (this._closed) return;
    this._closed = true;

    if (this._inputWriter) {
      // Close the input stream (EOS)
      this._inputWriter.close();
      this._inputWriter = null;
    } else {
      // Never iterated/exchanged — send empty schema stream so server unblocks.
      // Server is blocked at reader.openNextStream() waiting for client's input.
      const emptySchema = new Schema([]);
      const ipc = serializeIpcStream(emptySchema, []);
      this._writeFn(ipc);
    }

    // Drain remaining output batches asynchronously. Register the drain
    // promise so that the next acquireBusy() waits for it to complete.
    const drainPromise = (async () => {
      try {
        if (!this._outputStreamOpened) {
          // Output stream was never opened — open it first, then drain.
          const schema = await this._reader.openNextStream();
          if (schema) {
            while ((await this._reader.readNextBatch()) !== null) {}
          }
        } else {
          while ((await this._reader.readNextBatch()) !== null) {}
        }
      } catch {
        // Suppress errors during drain
      } finally {
        this._releaseBusy();
      }
    })();
    this._setDrainPromise(drainPromise);
  }
}
378
+
379
+ // ---------------------------------------------------------------------------
380
+ // pipeConnect — create an RpcClient over raw readable/writable streams
381
+ // ---------------------------------------------------------------------------
382
+
383
/**
 * Create an RpcClient over a raw readable/writable byte-stream pair.
 *
 * The pipe is a single shared channel: only one call or stream may be in
 * flight at a time, enforced by a busy flag plus an optional drain promise
 * left behind by a closed stream session.
 *
 * Deadlock-avoidance invariant used throughout: a request is always WRITTEN
 * before the reader is opened or a response stream is read, because the
 * server writes nothing until it receives a request.
 *
 * @param readable Byte stream carrying the server's responses.
 * @param writable Sink for request bytes (write/flush/end).
 * @param options  Optional log-message callback.
 * @returns An RpcClient whose close() ends the writable side.
 */
export function pipeConnect(
  readable: ReadableStream<Uint8Array>,
  writable: PipeWritable,
  options?: PipeConnectOptions,
): RpcClient {
  const onLog = options?.onLog;

  // Lazily-created reader over `readable` (see ensureReader).
  let reader: IpcStreamReader | null = null;
  let readerPromise: Promise<IpcStreamReader> | null = null;
  // Method metadata from the one-time __describe__ call.
  let methodCache: Map<string, MethodInfo> | null = null;
  let protocolName = "";
  // Single-operation lock state for the shared pipe.
  let _busy = false;
  let _drainPromise: Promise<void> | null = null;
  let closed = false;

  // Write bytes and flush immediately so the server sees them right away.
  const writeFn: WriteFn = (bytes: Uint8Array) => {
    writable.write(bytes);
    writable.flush?.();
  };

  // The IpcStreamReader.create() blocks until the first IPC schema arrives
  // on the readable. To avoid deadlock, we must send our first request
  // (the __describe__ call) BEFORE opening the reader. After that, the
  // response bytes are in the pipe buffer and the reader can consume them.
  async function ensureReader(): Promise<IpcStreamReader> {
    if (reader) return reader;
    if (!readerPromise) {
      // Share one create() across concurrent callers.
      readerPromise = IpcStreamReader.create(readable);
    }
    reader = await readerPromise;
    return reader;
  }

  // Acquire the pipe's single-operation lock, first waiting out any
  // asynchronous drain left behind by a previously closed stream session.
  async function acquireBusy(): Promise<void> {
    // Wait for any pending drain from a previous close()
    if (_drainPromise) {
      await _drainPromise;
      _drainPromise = null;
    }
    if (_busy) {
      throw new Error(
        "Pipe transport is busy — another call or stream is in progress. " +
          "Pipe connections are single-threaded; wait for the current operation to complete.",
      );
    }
    _busy = true;
  }

  // Release the single-operation lock.
  function releaseBusy(): void {
    _busy = false;
  }

  // Record a drain promise for the next acquireBusy() to await.
  function setDrainPromise(p: Promise<void>): void {
    _drainPromise = p;
  }

  // Fetch and cache the service description (method names, schemas,
  // defaults) via the reserved __describe__ method. Runs at most once.
  async function ensureMethodCache(): Promise<Map<string, MethodInfo>> {
    if (methodCache) return methodCache;

    await acquireBusy();
    try {
      // Send __describe__ request BEFORE opening the reader.
      // IpcStreamReader.create() blocks on reader.open() which reads the
      // first schema message. The server won't write anything until it
      // receives a request. Sending first avoids deadlock.
      const emptySchema = new Schema([]);
      const body = buildRequestIpc(emptySchema, {}, DESCRIBE_METHOD_NAME);
      writeFn(body);

      const r = await ensureReader();

      // Read response (first IPC stream = describe response schema + batches)
      // ensureReader() consumed the schema via open(). Use readStream()
      // which — on the first call (initialized=false) — returns the current
      // stream without calling reset().
      const response = await r.readStream();
      if (!response) {
        throw new Error("EOF reading __describe__ response");
      }

      const desc = await parseDescribeResponse(response.batches, onLog);
      protocolName = desc.protocolName;
      methodCache = new Map(desc.methods.map((m) => [m.name, m]));
      return methodCache;
    } finally {
      releaseBusy();
    }
  }

  return {
    /**
     * Invoke a unary method: send params, read one response stream, and
     * return the first row of the last non-empty batch (or null when the
     * method declares an empty result schema or returns no rows).
     */
    async call(
      method: string,
      params?: Record<string, any>,
    ): Promise<Record<string, any> | null> {
      const methods = await ensureMethodCache();
      await acquireBusy();
      try {
        const info = methods.get(method);
        if (!info) {
          throw new Error(`Unknown method: '${method}'`);
        }

        const r = await ensureReader();

        // Apply defaults
        const fullParams = { ...(info.defaults ?? {}), ...(params ?? {}) };

        // Send request
        const body = buildRequestIpc(info.paramsSchema, fullParams, method);
        writeFn(body);

        // Read response
        const response = await r.readStream();
        if (!response) {
          throw new Error("EOF reading response");
        }

        // Process batches: dispatch logs, find result
        let resultBatch: RecordBatch | null = null;
        for (const batch of response.batches) {
          if (batch.numRows === 0) {
            // Zero-row batches carry logs/errors; errors throw here.
            dispatchLogOrError(batch, onLog);
            continue;
          }
          resultBatch = batch;
        }

        if (!resultBatch) {
          return null;
        }

        const rows = extractBatchRows(resultBatch);
        if (rows.length === 0) return null;

        // Methods with an empty result schema have no meaningful row.
        if (info.resultSchema.fields.length === 0) return null;

        return rows[0];
      } finally {
        releaseBusy();
      }
    },

    /**
     * Open a streaming session: send the init request, read the optional
     * header stream, and hand the busy lock to a PipeStreamSession. On init
     * failure the pipe is unblocked (empty input stream + output drain)
     * before rethrowing.
     */
    async stream(
      method: string,
      params?: Record<string, any>,
    ): Promise<StreamSession> {
      const methods = await ensureMethodCache();
      await acquireBusy();

      try {
        const info = methods.get(method);
        if (!info) {
          throw new Error(`Unknown method: '${method}'`);
        }

        const r = await ensureReader();

        // Apply defaults
        const fullParams = { ...(info.defaults ?? {}), ...(params ?? {}) };

        // Send init request (params as a complete IPC stream)
        const body = buildRequestIpc(info.paramsSchema, fullParams, method);
        writeFn(body);

        // Read header if method has headerSchema
        let header: Record<string, any> | null = null;
        if (info.headerSchema) {
          const headerStream = await r.readStream();
          if (headerStream) {
            for (const batch of headerStream.batches) {
              if (batch.numRows === 0) {
                // Zero-row batches carry logs/errors; errors throw here.
                dispatchLogOrError(batch, onLog);
                continue;
              }
              const rows = extractBatchRows(batch);
              if (rows.length > 0) {
                header = rows[0];
              }
            }
          }
        }

        const outputSchema = info.outputSchema ?? info.resultSchema;

        // Don't release busy here — PipeStreamSession owns the lock
        // and will release it when done
        return new PipeStreamSession({
          reader: r,
          writeFn,
          onLog,
          header,
          outputSchema,
          releaseBusy,
          setDrainPromise,
        });
      } catch (e) {
        // Init error (e.g., server raised exception during init).
        // Send empty input stream so server's drain unblocks, then
        // drain the server's output stream if needed.
        try {
          const r = await ensureReader();
          const emptySchema = new Schema([]);
          const ipc = serializeIpcStream(emptySchema, []);
          writeFn(ipc);
          // Drain server's output stream (error response + EOS)
          const outStream = await r.readStream();
          // outStream may be null or contain remaining batches — just consume
          void outStream;
        } catch {
          // Suppress errors during cleanup
        }
        releaseBusy();
        throw e;
      }
    },

    /** Return the cached service description (fetches it on first use). */
    async describe(): Promise<ServiceDescription> {
      const methods = await ensureMethodCache();
      return {
        protocolName,
        methods: [...methods.values()],
      };
    },

    /** End the writable side of the pipe. Idempotent. */
    close(): void {
      if (closed) return;
      closed = true;
      writable.end();
    },
  };
}
614
+
615
+ // ---------------------------------------------------------------------------
616
+ // subprocessConnect — spawn a process and wrap with pipeConnect
617
+ // ---------------------------------------------------------------------------
618
+
619
+ export function subprocessConnect(
620
+ cmd: string[],
621
+ options?: SubprocessConnectOptions,
622
+ ): RpcClient {
623
+ const proc = Bun.spawn(cmd, {
624
+ stdin: "pipe",
625
+ stdout: "pipe",
626
+ stderr: options?.stderr ?? "ignore",
627
+ cwd: options?.cwd,
628
+ env: options?.env ? { ...process.env, ...options.env } : undefined,
629
+ });
630
+
631
+ const stdout = proc.stdout as ReadableStream<Uint8Array>;
632
+
633
+ const writable: PipeWritable = {
634
+ write(data: Uint8Array) {
635
+ (proc.stdin as any).write(data);
636
+ },
637
+ flush() {
638
+ (proc.stdin as any).flush();
639
+ },
640
+ end() {
641
+ (proc.stdin as any).end();
642
+ },
643
+ };
644
+
645
+ const client = pipeConnect(stdout, writable, {
646
+ onLog: options?.onLog,
647
+ });
648
+
649
+ // Wrap close to also kill the subprocess
650
+ const originalClose = client.close;
651
+ client.close = () => {
652
+ originalClose.call(client);
653
+ try {
654
+ proc.kill();
655
+ } catch {
656
+ // Process may have already exited
657
+ }
658
+ };
659
+
660
+ return client;
661
+ }