@query-farm/vgi-rpc 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +191 -0
- package/README.md +332 -0
- package/dist/client/connect.d.ts +10 -0
- package/dist/client/connect.d.ts.map +1 -0
- package/dist/client/index.d.ts +6 -0
- package/dist/client/index.d.ts.map +1 -0
- package/dist/client/introspect.d.ts +30 -0
- package/dist/client/introspect.d.ts.map +1 -0
- package/dist/client/ipc.d.ts +34 -0
- package/dist/client/ipc.d.ts.map +1 -0
- package/dist/client/pipe.d.ts +63 -0
- package/dist/client/pipe.d.ts.map +1 -0
- package/dist/client/stream.d.ts +52 -0
- package/dist/client/stream.d.ts.map +1 -0
- package/dist/client/types.d.ts +25 -0
- package/dist/client/types.d.ts.map +1 -0
- package/dist/constants.d.ts +15 -0
- package/dist/constants.d.ts.map +1 -0
- package/dist/dispatch/describe.d.ts +14 -0
- package/dist/dispatch/describe.d.ts.map +1 -0
- package/dist/dispatch/stream.d.ts +20 -0
- package/dist/dispatch/stream.d.ts.map +1 -0
- package/dist/dispatch/unary.d.ts +9 -0
- package/dist/dispatch/unary.d.ts.map +1 -0
- package/dist/errors.d.ts +12 -0
- package/dist/errors.d.ts.map +1 -0
- package/dist/http/common.d.ts +16 -0
- package/dist/http/common.d.ts.map +1 -0
- package/dist/http/dispatch.d.ts +18 -0
- package/dist/http/dispatch.d.ts.map +1 -0
- package/dist/http/handler.d.ts +16 -0
- package/dist/http/handler.d.ts.map +1 -0
- package/dist/http/index.d.ts +4 -0
- package/dist/http/index.d.ts.map +1 -0
- package/dist/http/token.d.ts +24 -0
- package/dist/http/token.d.ts.map +1 -0
- package/dist/http/types.d.ts +30 -0
- package/dist/http/types.d.ts.map +1 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +2493 -0
- package/dist/index.js.map +34 -0
- package/dist/protocol.d.ts +62 -0
- package/dist/protocol.d.ts.map +1 -0
- package/dist/schema.d.ts +38 -0
- package/dist/schema.d.ts.map +1 -0
- package/dist/server.d.ts +19 -0
- package/dist/server.d.ts.map +1 -0
- package/dist/types.d.ts +71 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/util/schema.d.ts +20 -0
- package/dist/util/schema.d.ts.map +1 -0
- package/dist/util/zstd.d.ts +5 -0
- package/dist/util/zstd.d.ts.map +1 -0
- package/dist/wire/reader.d.ts +40 -0
- package/dist/wire/reader.d.ts.map +1 -0
- package/dist/wire/request.d.ts +15 -0
- package/dist/wire/request.d.ts.map +1 -0
- package/dist/wire/response.d.ts +25 -0
- package/dist/wire/response.d.ts.map +1 -0
- package/dist/wire/writer.d.ts +59 -0
- package/dist/wire/writer.d.ts.map +1 -0
- package/package.json +32 -0
- package/src/client/connect.ts +310 -0
- package/src/client/index.ts +14 -0
- package/src/client/introspect.ts +138 -0
- package/src/client/ipc.ts +225 -0
- package/src/client/pipe.ts +661 -0
- package/src/client/stream.ts +297 -0
- package/src/client/types.ts +31 -0
- package/src/constants.ts +22 -0
- package/src/dispatch/describe.ts +155 -0
- package/src/dispatch/stream.ts +151 -0
- package/src/dispatch/unary.ts +35 -0
- package/src/errors.ts +22 -0
- package/src/http/common.ts +89 -0
- package/src/http/dispatch.ts +340 -0
- package/src/http/handler.ts +247 -0
- package/src/http/index.ts +6 -0
- package/src/http/token.ts +149 -0
- package/src/http/types.ts +49 -0
- package/src/index.ts +52 -0
- package/src/protocol.ts +144 -0
- package/src/schema.ts +114 -0
- package/src/server.ts +159 -0
- package/src/types.ts +162 -0
- package/src/util/schema.ts +31 -0
- package/src/util/zstd.ts +49 -0
- package/src/wire/reader.ts +113 -0
- package/src/wire/request.ts +98 -0
- package/src/wire/response.ts +181 -0
- package/src/wire/writer.ts +137 -0
package/dist/server.d.ts
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { Protocol } from "./protocol.js";
|
|
2
|
+
/**
|
|
3
|
+
* RPC server that reads Arrow IPC requests from stdin and writes responses to stdout.
|
|
4
|
+
* Supports unary and streaming (producer/exchange) methods.
|
|
5
|
+
*/
|
|
6
|
+
export declare class VgiRpcServer {
|
|
7
|
+
private protocol;
|
|
8
|
+
private enableDescribe;
|
|
9
|
+
private serverId;
|
|
10
|
+
private describeBatch;
|
|
11
|
+
constructor(protocol: Protocol, options?: {
|
|
12
|
+
enableDescribe?: boolean;
|
|
13
|
+
serverId?: string;
|
|
14
|
+
});
|
|
15
|
+
/** Start the server loop. Reads requests until stdin closes. */
|
|
16
|
+
run(): Promise<void>;
|
|
17
|
+
private serveOne;
|
|
18
|
+
}
|
|
19
|
+
//# sourceMappingURL=server.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"server.d.ts","sourceRoot":"","sources":["../src/server.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AAczC;;;GAGG;AACH,qBAAa,YAAY;IACvB,OAAO,CAAC,QAAQ,CAAW;IAC3B,OAAO,CAAC,cAAc,CAAU;IAChC,OAAO,CAAC,QAAQ,CAAS;IACzB,OAAO,CAAC,aAAa,CAAmD;gBAGtE,QAAQ,EAAE,QAAQ,EAClB,OAAO,CAAC,EAAE;QAAE,cAAc,CAAC,EAAE,OAAO,CAAC;QAAC,QAAQ,CAAC,EAAE,MAAM,CAAA;KAAE;IAiB3D,gEAAgE;IAC1D,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;YAwCZ,QAAQ;CAsEvB"}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import { type Schema, RecordBatch } from "apache-arrow";
|
|
2
|
+
export declare enum MethodType {
|
|
3
|
+
UNARY = "unary",
|
|
4
|
+
STREAM = "stream"
|
|
5
|
+
}
|
|
6
|
+
/** Logging interface available to handlers. */
|
|
7
|
+
export interface LogContext {
|
|
8
|
+
clientLog(level: string, message: string, extra?: Record<string, string>): void;
|
|
9
|
+
}
|
|
10
|
+
/** Handler for unary (request-response) RPC methods. */
|
|
11
|
+
export type UnaryHandler = (params: Record<string, any>, ctx: LogContext) => Promise<Record<string, any>> | Record<string, any>;
|
|
12
|
+
/** Initialization function for producer streams. Returns the initial state object. */
|
|
13
|
+
export type ProducerInit<S = any> = (params: Record<string, any>) => Promise<S> | S;
|
|
14
|
+
/** Called repeatedly to produce output batches. Call `out.finish()` to end the stream. */
|
|
15
|
+
export type ProducerFn<S = any> = (state: S, out: OutputCollector) => Promise<void> | void;
|
|
16
|
+
/** Initialization function for exchange streams. Returns the initial state object. */
|
|
17
|
+
export type ExchangeInit<S = any> = (params: Record<string, any>) => Promise<S> | S;
|
|
18
|
+
/** Called once per input batch. Must emit exactly one output batch per call. */
|
|
19
|
+
export type ExchangeFn<S = any> = (state: S, input: RecordBatch, out: OutputCollector) => Promise<void> | void;
|
|
20
|
+
/** Produces a header batch sent before the first output batch in a stream. */
|
|
21
|
+
export type HeaderInit = (params: Record<string, any>, state: any, ctx: LogContext) => Record<string, any>;
|
|
22
|
+
export interface MethodDefinition {
|
|
23
|
+
name: string;
|
|
24
|
+
type: MethodType;
|
|
25
|
+
paramsSchema: Schema;
|
|
26
|
+
resultSchema: Schema;
|
|
27
|
+
outputSchema?: Schema;
|
|
28
|
+
inputSchema?: Schema;
|
|
29
|
+
handler?: UnaryHandler;
|
|
30
|
+
producerInit?: ProducerInit;
|
|
31
|
+
producerFn?: ProducerFn;
|
|
32
|
+
exchangeInit?: ExchangeInit;
|
|
33
|
+
exchangeFn?: ExchangeFn;
|
|
34
|
+
headerSchema?: Schema;
|
|
35
|
+
headerInit?: HeaderInit;
|
|
36
|
+
doc?: string;
|
|
37
|
+
defaults?: Record<string, any>;
|
|
38
|
+
paramTypes?: Record<string, string>;
|
|
39
|
+
}
|
|
40
|
+
export interface EmittedBatch {
|
|
41
|
+
batch: RecordBatch;
|
|
42
|
+
metadata?: Map<string, string>;
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Accumulates output batches during a produce/exchange call.
|
|
46
|
+
* Enforces that exactly one data batch is emitted per call (plus any number of log batches).
|
|
47
|
+
*/
|
|
48
|
+
export declare class OutputCollector implements LogContext {
|
|
49
|
+
private _batches;
|
|
50
|
+
private _dataBatchIdx;
|
|
51
|
+
private _finished;
|
|
52
|
+
private _producerMode;
|
|
53
|
+
private _outputSchema;
|
|
54
|
+
private _serverId;
|
|
55
|
+
private _requestId;
|
|
56
|
+
constructor(outputSchema: Schema, producerMode?: boolean, serverId?: string, requestId?: string | null);
|
|
57
|
+
get outputSchema(): Schema;
|
|
58
|
+
get finished(): boolean;
|
|
59
|
+
get batches(): EmittedBatch[];
|
|
60
|
+
/** Emit a pre-built RecordBatch as the data batch for this call. */
|
|
61
|
+
emit(batch: RecordBatch, metadata?: Map<string, string>): void;
|
|
62
|
+
/** Emit a data batch from column arrays keyed by field name. Int64 Number values are coerced to BigInt. */
|
|
63
|
+
emit(columns: Record<string, any[]>): void;
|
|
64
|
+
/** Single-row convenience. Wraps each value in `[value]` then calls `emit()`. */
|
|
65
|
+
emitRow(values: Record<string, any>): void;
|
|
66
|
+
/** Signal stream completion for producer streams. Throws if called on exchange streams. */
|
|
67
|
+
finish(): void;
|
|
68
|
+
/** Emit a zero-row client-directed log batch. */
|
|
69
|
+
clientLog(level: string, message: string, extra?: Record<string, string>): void;
|
|
70
|
+
}
|
|
71
|
+
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,KAAK,MAAM,EAAE,WAAW,EAAyB,MAAM,cAAc,CAAC;AAG/E,oBAAY,UAAU;IACpB,KAAK,UAAU;IACf,MAAM,WAAW;CAClB;AAED,+CAA+C;AAC/C,MAAM,WAAW,UAAU;IACzB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,IAAI,CAAC;CACjF;AAED,wDAAwD;AACxD,MAAM,MAAM,YAAY,GAAG,CACzB,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,EAC3B,GAAG,EAAE,UAAU,KACZ,OAAO,CAAC,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;AAExD,sFAAsF;AACtF,MAAM,MAAM,YAAY,CAAC,CAAC,GAAG,GAAG,IAAI,CAClC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,KACxB,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AACpB,0FAA0F;AAC1F,MAAM,MAAM,UAAU,CAAC,CAAC,GAAG,GAAG,IAAI,CAChC,KAAK,EAAE,CAAC,EACR,GAAG,EAAE,eAAe,KACjB,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;AAE1B,sFAAsF;AACtF,MAAM,MAAM,YAAY,CAAC,CAAC,GAAG,GAAG,IAAI,CAClC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,KACxB,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AACpB,gFAAgF;AAChF,MAAM,MAAM,UAAU,CAAC,CAAC,GAAG,GAAG,IAAI,CAChC,KAAK,EAAE,CAAC,EACR,KAAK,EAAE,WAAW,EAClB,GAAG,EAAE,eAAe,KACjB,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;AAE1B,8EAA8E;AAC9E,MAAM,MAAM,UAAU,GAAG,CACvB,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,EAC3B,KAAK,EAAE,GAAG,EACV,GAAG,EAAE,UAAU,KACZ,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;AAEzB,MAAM,WAAW,gBAAgB;IAC/B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,UAAU,CAAC;IACjB,YAAY,EAAE,MAAM,CAAC;IACrB,YAAY,EAAE,MAAM,CAAC;IACrB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,OAAO,CAAC,EAAE,YAAY,CAAC;IACvB,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B,UAAU,CAAC,EAAE,UAAU,CAAC;IACxB,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B,UAAU,CAAC,EAAE,UAAU,CAAC;IACxB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,UAAU,CAAC,EAAE,UAAU,CAAC;IACxB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC/B,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CACrC;AAED,MAAM,WAAW,YAAY;IAC3B,KAAK,EAAE,WAAW,CAAC;IACnB,QAAQ,CAAC,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAChC;AAED;;;GAGG;AACH
,qBAAa,eAAgB,YAAW,UAAU;IAChD,OAAO,CAAC,QAAQ,CAAsB;IACtC,OAAO,CAAC,aAAa,CAAuB;IAC5C,OAAO,CAAC,SAAS,CAAS;IAC1B,OAAO,CAAC,aAAa,CAAU;IAC/B,OAAO,CAAC,aAAa,CAAS;IAC9B,OAAO,CAAC,SAAS,CAAS;IAC1B,OAAO,CAAC,UAAU,CAAgB;gBAEtB,YAAY,EAAE,MAAM,EAAE,YAAY,UAAO,EAAE,QAAQ,SAAK,EAAE,SAAS,GAAE,MAAM,GAAG,IAAW;IAOrG,IAAI,YAAY,IAAI,MAAM,CAEzB;IAED,IAAI,QAAQ,IAAI,OAAO,CAEtB;IAED,IAAI,OAAO,IAAI,YAAY,EAAE,CAE5B;IAED,oEAAoE;IACpE,IAAI,CAAC,KAAK,EAAE,WAAW,EAAE,QAAQ,CAAC,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,IAAI;IAC9D,2GAA2G;IAC3G,IAAI,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,EAAE,CAAC,GAAG,IAAI;IAmB1C,iFAAiF;IACjF,OAAO,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,IAAI;IAQ1C,2FAA2F;IAC3F,MAAM,IAAI,IAAI;IAUd,iDAAiD;IACjD,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,KAAK,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,IAAI;CAYhF"}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { type Schema } from "apache-arrow";
|
|
2
|
+
/**
|
|
3
|
+
* Serialize a Schema to the Arrow IPC Schema message format.
|
|
4
|
+
* This produces bytes compatible with Python's `pa.ipc.read_schema()`.
|
|
5
|
+
*
|
|
6
|
+
* We serialize by writing an empty-batch IPC stream and extracting
|
|
7
|
+
* the bytes, which includes the schema message. Python's read_schema()
|
|
8
|
+
* uses `pa.ipc.read_schema(pa.py_buffer(bytes))` which expects
|
|
9
|
+
* the schema flatbuffer message bytes directly — but the Python side
|
|
10
|
+
* actually uses `schema.serialize()` which produces Schema message bytes.
|
|
11
|
+
*
|
|
12
|
+
* In arrow-js, we can get the equivalent by using Message.from(schema)
|
|
13
|
+
* and encoding it, or by serializing a zero-batch stream.
|
|
14
|
+
*
|
|
15
|
+
* The Python `schema.serialize()` produces the Schema flatbuffer message bytes,
|
|
16
|
+
* and `pa.ipc.read_schema()` expects an IPC stream containing a schema message.
|
|
17
|
+
* The actual format is: continuation marker (0xFFFFFFFF) + length + flatbuffer bytes.
|
|
18
|
+
*/
|
|
19
|
+
export declare function serializeSchema(schema: Schema): Uint8Array;
|
|
20
|
+
//# sourceMappingURL=schema.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"schema.d.ts","sourceRoot":"","sources":["../../src/util/schema.ts"],"names":[],"mappings":"AAGA,OAAO,EAA2B,KAAK,MAAM,EAAE,MAAM,cAAc,CAAC;AAEpE;;;;;;;;;;;;;;;;GAgBG;AACH,wBAAgB,eAAe,CAAC,MAAM,EAAE,MAAM,GAAG,UAAU,CAQ1D"}
|
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
/** Compress data with zstd at the given level (1-22). */
|
|
2
|
+
export declare function zstdCompress(data: Uint8Array, level: number): Uint8Array<ArrayBuffer>;
|
|
3
|
+
/** Decompress zstd-compressed data. */
|
|
4
|
+
export declare function zstdDecompress(data: Uint8Array): Uint8Array<ArrayBuffer>;
|
|
5
|
+
//# sourceMappingURL=zstd.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"zstd.d.ts","sourceRoot":"","sources":["../../src/util/zstd.ts"],"names":[],"mappings":"AAcA,yDAAyD;AACzD,wBAAgB,YAAY,CAAC,IAAI,EAAE,UAAU,EAAE,KAAK,EAAE,MAAM,GAAG,UAAU,CAAC,WAAW,CAAC,CAkBrF;AAED,uCAAuC;AACvC,wBAAgB,cAAc,CAAC,IAAI,EAAE,UAAU,GAAG,UAAU,CAAC,WAAW,CAAC,CAYxE"}
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import { type RecordBatch, type Schema } from "apache-arrow";
|
|
2
|
+
export interface StreamMessage {
|
|
3
|
+
schema: Schema;
|
|
4
|
+
batches: RecordBatch[];
|
|
5
|
+
}
|
|
6
|
+
/**
|
|
7
|
+
* Reads sequential IPC streams from a byte source (e.g., process.stdin).
|
|
8
|
+
* Uses autoDestroy: false + reset/open pattern to read multiple streams
|
|
9
|
+
* from the same underlying byte source.
|
|
10
|
+
*/
|
|
11
|
+
export declare class IpcStreamReader {
|
|
12
|
+
private reader;
|
|
13
|
+
private initialized;
|
|
14
|
+
/** True once readNextBatch() returns null (EOS reached for current stream). */
|
|
15
|
+
private streamEnded;
|
|
16
|
+
private constructor();
|
|
17
|
+
static create(input: ReadableStream<Uint8Array> | NodeJS.ReadableStream): Promise<IpcStreamReader>;
|
|
18
|
+
/**
|
|
19
|
+
* Read one complete IPC stream (schema + all batches).
|
|
20
|
+
* Returns null on EOF (no more streams).
|
|
21
|
+
*/
|
|
22
|
+
readStream(): Promise<StreamMessage | null>;
|
|
23
|
+
/**
|
|
24
|
+
* Open the next IPC stream and return its schema.
|
|
25
|
+
* Use readNextBatch() to read batches one at a time.
|
|
26
|
+
* Returns null on EOF.
|
|
27
|
+
*/
|
|
28
|
+
openNextStream(): Promise<Schema | null>;
|
|
29
|
+
/**
|
|
30
|
+
* Read the next batch from the currently open IPC stream.
|
|
31
|
+
* Returns null when the stream ends (EOS).
|
|
32
|
+
*
|
|
33
|
+
* Once EOS is reached, subsequent calls return null immediately without
|
|
34
|
+
* reading from the underlying byte source. This prevents the Arrow-JS
|
|
35
|
+
* reader from consuming bytes that belong to the next IPC stream.
|
|
36
|
+
*/
|
|
37
|
+
readNextBatch(): Promise<RecordBatch | null>;
|
|
38
|
+
cancel(): Promise<void>;
|
|
39
|
+
}
|
|
40
|
+
//# sourceMappingURL=reader.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"reader.d.ts","sourceRoot":"","sources":["../../src/wire/reader.ts"],"names":[],"mappings":"AAGA,OAAO,EAAqB,KAAK,WAAW,EAAE,KAAK,MAAM,EAAE,MAAM,cAAc,CAAC;AAEhF,MAAM,WAAW,aAAa;IAC5B,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,WAAW,EAAE,CAAC;CACxB;AAED;;;;GAIG;AACH,qBAAa,eAAe;IAC1B,OAAO,CAAC,MAAM,CAAoB;IAClC,OAAO,CAAC,WAAW,CAAS;IAC5B,+EAA+E;IAC/E,OAAO,CAAC,WAAW,CAAS;IAE5B,OAAO;WAIM,MAAM,CACjB,KAAK,EAAE,cAAc,CAAC,UAAU,CAAC,GAAG,MAAM,CAAC,cAAc,GACxD,OAAO,CAAC,eAAe,CAAC;IAS3B;;;OAGG;IACG,UAAU,IAAI,OAAO,CAAC,aAAa,GAAG,IAAI,CAAC;IA2BjD;;;;OAIG;IACG,cAAc,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAY9C;;;;;;;OAOG;IACG,aAAa,IAAI,OAAO,CAAC,WAAW,GAAG,IAAI,CAAC;IAiB5C,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC;CAG9B"}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import { type Schema, type RecordBatch } from "apache-arrow";
|
|
2
|
+
export interface ParsedRequest {
|
|
3
|
+
methodName: string;
|
|
4
|
+
requestVersion: string;
|
|
5
|
+
requestId: string | null;
|
|
6
|
+
schema: Schema;
|
|
7
|
+
params: Record<string, any>;
|
|
8
|
+
rawMetadata: Map<string, string>;
|
|
9
|
+
}
|
|
10
|
+
/**
|
|
11
|
+
* Parse a request from a RecordBatch with metadata.
|
|
12
|
+
* Extracts method name, version, and params from the batch.
|
|
13
|
+
*/
|
|
14
|
+
export declare function parseRequest(schema: Schema, batch: RecordBatch): ParsedRequest;
|
|
15
|
+
//# sourceMappingURL=request.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../src/wire/request.ts"],"names":[],"mappings":"AAGA,OAAO,EAAY,KAAK,MAAM,EAAE,KAAK,WAAW,EAAE,MAAM,cAAc,CAAC;AASvE,MAAM,WAAW,aAAa;IAC5B,UAAU,EAAE,MAAM,CAAC;IACnB,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IACzB,MAAM,EAAE,MAAM,CAAC;IACf,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;IAC5B,WAAW,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;CAClC;AAED;;;GAGG;AACH,wBAAgB,YAAY,CAC1B,MAAM,EAAE,MAAM,EACd,KAAK,EAAE,WAAW,GACjB,aAAa,CAqEf"}
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import { RecordBatch, Schema } from "apache-arrow";
|
|
2
|
+
/**
|
|
3
|
+
* Coerce values for Int64 schema fields from Number to BigInt.
|
|
4
|
+
* Handles both single values and arrays. Returns a new record with coerced values.
|
|
5
|
+
*/
|
|
6
|
+
export declare function coerceInt64(schema: Schema, values: Record<string, any>): Record<string, any>;
|
|
7
|
+
/**
|
|
8
|
+
* Build a 1-row result batch with optional metadata.
|
|
9
|
+
* For unary methods, `values` maps field names to single values.
|
|
10
|
+
*/
|
|
11
|
+
export declare function buildResultBatch(schema: Schema, values: Record<string, any>, serverId: string, requestId: string | null): RecordBatch;
|
|
12
|
+
/**
|
|
13
|
+
* Build a 0-row error batch with EXCEPTION metadata matching Python's Message.from_exception().
|
|
14
|
+
*/
|
|
15
|
+
export declare function buildErrorBatch(schema: Schema, error: Error, serverId: string, requestId: string | null): RecordBatch;
|
|
16
|
+
/**
|
|
17
|
+
* Build a 0-row log batch.
|
|
18
|
+
*/
|
|
19
|
+
export declare function buildLogBatch(schema: Schema, level: string, message: string, extra?: Record<string, any>, serverId?: string, requestId?: string | null): RecordBatch;
|
|
20
|
+
/**
|
|
21
|
+
* Build a 0-row batch from a schema with metadata.
|
|
22
|
+
* Used for error/log batches.
|
|
23
|
+
*/
|
|
24
|
+
export declare function buildEmptyBatch(schema: Schema, metadata?: Map<string, string>): RecordBatch;
|
|
25
|
+
//# sourceMappingURL=response.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"response.d.ts","sourceRoot":"","sources":["../../src/wire/response.ts"],"names":[],"mappings":"AAGA,OAAO,EACL,WAAW,EACX,MAAM,EAOP,MAAM,cAAc,CAAC;AAStB;;;GAGG;AACH,wBAAgB,WAAW,CAAC,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAc5F;AAED;;;GAGG;AACH,wBAAgB,gBAAgB,CAC9B,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,EAC3B,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,GAAG,IAAI,GACvB,WAAW,CA0Cb;AAED;;GAEG;AACH,wBAAgB,eAAe,CAC7B,MAAM,EAAE,MAAM,EACd,KAAK,EAAE,KAAK,EACZ,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,GAAG,IAAI,GACvB,WAAW,CAiBb;AAED;;GAEG;AACH,wBAAgB,aAAa,CAC3B,MAAM,EAAE,MAAM,EACd,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,KAAK,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,EAC3B,QAAQ,CAAC,EAAE,MAAM,EACjB,SAAS,CAAC,EAAE,MAAM,GAAG,IAAI,GACxB,WAAW,CAeb;AAED;;;GAGG;AACH,wBAAgB,eAAe,CAC7B,MAAM,EAAE,MAAM,EACd,QAAQ,CAAC,EAAE,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,GAC7B,WAAW,CAyBb"}
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { type RecordBatch, type Schema } from "apache-arrow";
|
|
2
|
+
/**
|
|
3
|
+
* Writes sequential IPC streams to a file descriptor (e.g., stdout).
|
|
4
|
+
* Each call to writeStream() writes a complete IPC stream: schema + batches + EOS.
|
|
5
|
+
*
|
|
6
|
+
* All writes use synchronous I/O (writeSync) to avoid deadlocks when
|
|
7
|
+
* interleaving stdout writes with blocking stdin reads.
|
|
8
|
+
*/
|
|
9
|
+
export declare class IpcStreamWriter {
|
|
10
|
+
private readonly fd;
|
|
11
|
+
constructor(fd?: number);
|
|
12
|
+
/**
|
|
13
|
+
* Write a complete IPC stream with the given schema and batches.
|
|
14
|
+
* Creates schema message, writes all batches (with their metadata), writes EOS.
|
|
15
|
+
*/
|
|
16
|
+
writeStream(schema: Schema, batches: RecordBatch[]): void;
|
|
17
|
+
/**
|
|
18
|
+
* Open an incremental IPC stream for writing batches one at a time.
|
|
19
|
+
* Used for streaming methods where output batches are produced incrementally.
|
|
20
|
+
* Bytes are written synchronously after each batch.
|
|
21
|
+
*/
|
|
22
|
+
openStream(schema: Schema): IncrementalStream;
|
|
23
|
+
}
|
|
24
|
+
/**
|
|
25
|
+
* An open IPC stream that supports incremental batch writes.
|
|
26
|
+
*
|
|
27
|
+
* Uses RecordBatchStreamWriter with internal buffering (no pipe to stdout).
|
|
28
|
+
* After each operation, drains the writer's internal AsyncByteQueue buffer
|
|
29
|
+
* and writes bytes synchronously via writeAll(). This avoids deadlocks
|
|
30
|
+
* caused by Node.js async stream piping when stdin reads block before
|
|
31
|
+
* stdout writes flush through the event loop.
|
|
32
|
+
*/
|
|
33
|
+
export declare class IncrementalStream {
|
|
34
|
+
private writer;
|
|
35
|
+
private readonly fd;
|
|
36
|
+
private closed;
|
|
37
|
+
constructor(fd: number, schema: Schema);
|
|
38
|
+
/**
|
|
39
|
+
* Write a single batch to the stream. Bytes are flushed synchronously.
|
|
40
|
+
*
|
|
41
|
+
* Uses _writeRecordBatch() directly to bypass the Arrow writer's schema
|
|
42
|
+
* comparison in write(). The public write() method calls compareSchemas()
|
|
43
|
+
* and auto-closes the writer if the batch's schema differs (e.g., in
|
|
44
|
+
* nullability), silently dropping the batch. Since our output schema is
|
|
45
|
+
* set at stream open time and all batches are structurally compatible,
|
|
46
|
+
* we skip the comparison.
|
|
47
|
+
*/
|
|
48
|
+
write(batch: RecordBatch): void;
|
|
49
|
+
/**
|
|
50
|
+
* Close the stream (writes EOS marker synchronously).
|
|
51
|
+
*/
|
|
52
|
+
close(): void;
|
|
53
|
+
/**
|
|
54
|
+
* Drain buffered bytes from the Arrow writer's internal queue
|
|
55
|
+
* and write them synchronously to the output fd.
|
|
56
|
+
*/
|
|
57
|
+
private drain;
|
|
58
|
+
}
|
|
59
|
+
//# sourceMappingURL=writer.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"writer.d.ts","sourceRoot":"","sources":["../../src/wire/writer.ts"],"names":[],"mappings":"AAGA,OAAO,EAEL,KAAK,WAAW,EAChB,KAAK,MAAM,EACZ,MAAM,cAAc,CAAC;AA6BtB;;;;;;GAMG;AACH,qBAAa,eAAe;IAC1B,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAS;gBAEhB,EAAE,GAAE,MAAkB;IAIlC;;;OAGG;IACH,WAAW,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,WAAW,EAAE,GAAG,IAAI;IAYzD;;;;OAIG;IACH,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,iBAAiB;CAG9C;AAED;;;;;;;;GAQG;AACH,qBAAa,iBAAiB;IAC5B,OAAO,CAAC,MAAM,CAA0B;IACxC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAS;IAC5B,OAAO,CAAC,MAAM,CAAS;gBAEX,EAAE,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM;IAQtC;;;;;;;;;OASG;IACH,KAAK,CAAC,KAAK,EAAE,WAAW,GAAG,IAAI;IAM/B;;OAEG;IACH,KAAK,IAAI,IAAI;IAQb;;;OAGG;IACH,OAAO,CAAC,KAAK;CAOd"}
|
package/package.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@query-farm/vgi-rpc",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"license": "Apache-2.0",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/index.js",
|
|
7
|
+
"types": "dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"import": "./dist/index.js",
|
|
11
|
+
"types": "./dist/index.d.ts",
|
|
12
|
+
"bun": "./src/index.ts"
|
|
13
|
+
}
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"dist",
|
|
17
|
+
"src"
|
|
18
|
+
],
|
|
19
|
+
"dependencies": {
|
|
20
|
+
"apache-arrow": "github:Query-farm/arrow-js#feat_query_farm_1"
|
|
21
|
+
},
|
|
22
|
+
"devDependencies": {
|
|
23
|
+
"@types/bun": "latest"
|
|
24
|
+
},
|
|
25
|
+
"scripts": {
|
|
26
|
+
"build": "bun run build:types && bun run build:js",
|
|
27
|
+
"build:types": "bunx tsc -p tsconfig.build.json",
|
|
28
|
+
"build:js": "bun build ./src/index.ts --outdir dist --target node --format esm --sourcemap=external --external apache-arrow",
|
|
29
|
+
"postinstall": "cd node_modules/apache-arrow && node -e \"const fs=require('fs');const p=JSON.parse(fs.readFileSync('package.json','utf8'));p.main='index.ts';fs.writeFileSync('package.json',JSON.stringify(p,null,2)+'\\n')\"",
|
|
30
|
+
"test": "bun test"
|
|
31
|
+
}
|
|
32
|
+
}
|
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
// © Copyright 2025-2026, Query.Farm LLC - https://query.farm
|
|
2
|
+
// SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
import { RecordBatch, Schema } from "apache-arrow";
|
|
5
|
+
import { STATE_KEY, LOG_LEVEL_KEY, DESCRIBE_METHOD_NAME } from "../constants.js";
|
|
6
|
+
import { ARROW_CONTENT_TYPE } from "../http/common.js";
|
|
7
|
+
import {
|
|
8
|
+
buildRequestIpc,
|
|
9
|
+
readResponseBatches,
|
|
10
|
+
dispatchLogOrError,
|
|
11
|
+
extractBatchRows,
|
|
12
|
+
readSequentialStreams,
|
|
13
|
+
} from "./ipc.js";
|
|
14
|
+
import { httpIntrospect, type MethodInfo, type ServiceDescription } from "./introspect.js";
|
|
15
|
+
import { HttpStreamSession } from "./stream.js";
|
|
16
|
+
import type { HttpConnectOptions, LogMessage, StreamSession } from "./types.js";
|
|
17
|
+
|
|
18
|
+
type CompressFn = (data: Uint8Array, level: number) => Uint8Array;
|
|
19
|
+
type DecompressFn = (data: Uint8Array) => Uint8Array;
|
|
20
|
+
|
|
21
|
+
export interface RpcClient {
|
|
22
|
+
call(method: string, params?: Record<string, any>): Promise<Record<string, any> | null>;
|
|
23
|
+
stream(method: string, params?: Record<string, any>): Promise<StreamSession>;
|
|
24
|
+
describe(): Promise<ServiceDescription>;
|
|
25
|
+
close(): void;
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
export function httpConnect(
|
|
29
|
+
baseUrl: string,
|
|
30
|
+
options?: HttpConnectOptions,
|
|
31
|
+
): RpcClient {
|
|
32
|
+
const prefix = (options?.prefix ?? "/vgi").replace(/\/+$/, "");
|
|
33
|
+
const onLog = options?.onLog;
|
|
34
|
+
const compressionLevel = options?.compressionLevel;
|
|
35
|
+
|
|
36
|
+
let methodCache: Map<string, MethodInfo> | null = null;
|
|
37
|
+
let compressFn: CompressFn | undefined;
|
|
38
|
+
let decompressFn: DecompressFn | undefined;
|
|
39
|
+
let compressionLoaded = false;
|
|
40
|
+
|
|
41
|
+
async function ensureCompression(): Promise<void> {
|
|
42
|
+
if (compressionLoaded || compressionLevel == null) return;
|
|
43
|
+
compressionLoaded = true;
|
|
44
|
+
try {
|
|
45
|
+
const mod = await import("../util/zstd.js");
|
|
46
|
+
compressFn = mod.zstdCompress;
|
|
47
|
+
decompressFn = mod.zstdDecompress;
|
|
48
|
+
} catch {
|
|
49
|
+
// zstd not available in this runtime
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
function buildHeaders(): Record<string, string> {
|
|
54
|
+
const headers: Record<string, string> = {
|
|
55
|
+
"Content-Type": ARROW_CONTENT_TYPE,
|
|
56
|
+
};
|
|
57
|
+
if (compressionLevel != null) {
|
|
58
|
+
headers["Content-Encoding"] = "zstd";
|
|
59
|
+
headers["Accept-Encoding"] = "zstd";
|
|
60
|
+
}
|
|
61
|
+
return headers;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
function prepareBody(content: Uint8Array): Uint8Array {
|
|
65
|
+
if (compressionLevel != null && compressFn) {
|
|
66
|
+
return compressFn(content, compressionLevel);
|
|
67
|
+
}
|
|
68
|
+
return content;
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
async function readResponse(resp: Response): Promise<Uint8Array<ArrayBuffer>> {
|
|
72
|
+
let body = new Uint8Array(await resp.arrayBuffer());
|
|
73
|
+
if (resp.headers.get("Content-Encoding") === "zstd" && decompressFn) {
|
|
74
|
+
body = new Uint8Array(decompressFn(body));
|
|
75
|
+
}
|
|
76
|
+
return body;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
async function ensureMethodCache(): Promise<Map<string, MethodInfo>> {
|
|
80
|
+
if (methodCache) return methodCache;
|
|
81
|
+
const desc = await httpIntrospect(baseUrl, { prefix });
|
|
82
|
+
methodCache = new Map(desc.methods.map((m) => [m.name, m]));
|
|
83
|
+
return methodCache;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
return {
|
|
87
|
+
async call(
|
|
88
|
+
method: string,
|
|
89
|
+
params?: Record<string, any>,
|
|
90
|
+
): Promise<Record<string, any> | null> {
|
|
91
|
+
await ensureCompression();
|
|
92
|
+
const methods = await ensureMethodCache();
|
|
93
|
+
const info = methods.get(method);
|
|
94
|
+
if (!info) {
|
|
95
|
+
throw new Error(`Unknown method: '${method}'`);
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
// Apply defaults
|
|
99
|
+
const fullParams = { ...(info.defaults ?? {}), ...(params ?? {}) };
|
|
100
|
+
|
|
101
|
+
const body = buildRequestIpc(info.paramsSchema, fullParams, method);
|
|
102
|
+
const resp = await fetch(`${baseUrl}${prefix}/${method}`, {
|
|
103
|
+
method: "POST",
|
|
104
|
+
headers: buildHeaders(),
|
|
105
|
+
body: prepareBody(body) as unknown as BodyInit,
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
const responseBody = await readResponse(resp);
|
|
109
|
+
const { batches } = await readResponseBatches(responseBody);
|
|
110
|
+
|
|
111
|
+
// Process batches: dispatch logs, find result
|
|
112
|
+
let resultBatch: RecordBatch | null = null;
|
|
113
|
+
for (const batch of batches) {
|
|
114
|
+
if (batch.numRows === 0) {
|
|
115
|
+
dispatchLogOrError(batch, onLog);
|
|
116
|
+
continue;
|
|
117
|
+
}
|
|
118
|
+
resultBatch = batch;
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
if (!resultBatch) {
|
|
122
|
+
// Void return (result schema has no fields)
|
|
123
|
+
return null;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
// Extract single-row result
|
|
127
|
+
const rows = extractBatchRows(resultBatch);
|
|
128
|
+
if (rows.length === 0) return null;
|
|
129
|
+
|
|
130
|
+
const result = rows[0];
|
|
131
|
+
// For void methods (empty result schema), return null
|
|
132
|
+
if (info.resultSchema.fields.length === 0) return null;
|
|
133
|
+
|
|
134
|
+
// For single-field results, return the whole object
|
|
135
|
+
return result;
|
|
136
|
+
},
|
|
137
|
+
|
|
138
|
+
/**
 * Initiate a streaming RPC call over HTTP.
 *
 * Encodes `params` as an Arrow IPC request and POSTs it to
 * `<baseUrl><prefix>/<method>/init`, then parses the init response into an
 * {@link HttpStreamSession} the caller can iterate for subsequent batches.
 *
 * Response layout depends on the method's describe info:
 *  - with `info.headerSchema`: two concatenated IPC streams (header, then
 *    data/state);
 *  - without: a single IPC stream carrying data batches, an optional state
 *    token, and log/error batches.
 *
 * Error-batch semantics: zero-row batches tagged `EXCEPTION` are deferred
 * into `pendingBatches` (surfaced during iteration) when data or a state
 * token is present; with neither, they are dispatched immediately via
 * `dispatchLogOrError`, which is expected to throw for init-only failures.
 *
 * @param method - RPC method name; must exist in the cached describe info.
 * @param params - Call parameters; merged over `info.defaults`.
 * @returns A session holding any already-received batches, the resumption
 *   state token, and the resolved output schema.
 * @throws Error if `method` is not present in the method cache.
 */
async stream(
  method: string,
  params?: Record<string, any>,
): Promise<HttpStreamSession> {
  // Lazy one-time setup: compression codec and the describe-info cache.
  await ensureCompression();
  const methods = await ensureMethodCache();
  const info = methods.get(method);
  if (!info) {
    throw new Error(`Unknown method: '${method}'`);
  }

  // Apply method defaults; explicit params win over defaults.
  const fullParams = { ...(info.defaults ?? {}), ...(params ?? {}) };

  // POST the IPC-encoded parameters to the method's /init endpoint.
  const body = buildRequestIpc(info.paramsSchema, fullParams, method);
  const resp = await fetch(`${baseUrl}${prefix}/${method}/init`, {
    method: "POST",
    headers: buildHeaders(),
    body: prepareBody(body) as unknown as BodyInit,
  });

  const responseBody = await readResponse(resp);

  // Parse the response: may contain header stream + data stream.
  let header: Record<string, any> | null = null;
  let stateToken: string | null = null;
  const pendingBatches: RecordBatch[] = [];
  let finished = false;
  let streamSchema: Schema | null = null;

  if (info.headerSchema) {
    // Response may contain two concatenated IPC streams:
    // 1. Header stream
    // 2. Data stream (with state token and/or data batches)
    const reader = await readSequentialStreams(responseBody);

    // First stream: header. Zero-row batches are log/error frames; the
    // first non-empty batch's first row becomes the session header.
    const headerStream = await reader.readStream();
    if (headerStream) {
      for (const batch of headerStream.batches) {
        if (batch.numRows === 0) {
          dispatchLogOrError(batch, onLog);
          continue;
        }
        const rows = extractBatchRows(batch);
        if (rows.length > 0) {
          header = rows[0];
        }
      }
    }

    // Second stream: data/state.
    const dataStream = await reader.readStream();
    if (dataStream) {
      streamSchema = dataStream.schema;
    }
    const headerErrorBatches: RecordBatch[] = [];
    if (dataStream) {
      for (const batch of dataStream.batches) {
        if (batch.numRows === 0) {
          // Zero-row batches carry out-of-band metadata. Check for a
          // state token first (it marks a resumable stream).
          const token = batch.metadata?.get(STATE_KEY);
          if (token) {
            stateToken = token;
            continue;
          }
          // EXCEPTION frames are collected; whether they throw now or
          // during iteration is decided after the whole stream is read.
          const level = batch.metadata?.get(LOG_LEVEL_KEY);
          if (level === "EXCEPTION") {
            headerErrorBatches.push(batch);
            continue;
          }
          dispatchLogOrError(batch, onLog);
          continue;
        }
        pendingBatches.push(batch);
      }
    }

    // Defer errors into the session when there is data or a state token
    // (mid-stream failure); otherwise dispatch immediately (init failure).
    if (headerErrorBatches.length > 0) {
      if (pendingBatches.length > 0 || stateToken !== null) {
        pendingBatches.push(...headerErrorBatches);
      } else {
        for (const batch of headerErrorBatches) {
          dispatchLogOrError(batch, onLog);
        }
      }
    }

    // NOTE(review): subsumed by the general emptiness check below
    // (no dataStream implies no pending batches from this branch).
    if (!dataStream && !stateToken) {
      finished = true;
    }
  } else {
    // Single IPC stream: data/state (no header).
    const { schema: responseSchema, batches } = await readResponseBatches(responseBody);
    streamSchema = responseSchema;

    // Collect error batches separately — only defer them if there are
    // data batches or state tokens (mid-stream errors). Otherwise throw
    // immediately (init-only errors like exchange_error_on_init).
    const errorBatches: RecordBatch[] = [];

    for (const batch of batches) {
      if (batch.numRows === 0) {
        // Check for state token
        const token = batch.metadata?.get(STATE_KEY);
        if (token) {
          stateToken = token;
          continue;
        }
        // Collect EXCEPTION batches for deferred dispatch
        const level = batch.metadata?.get(LOG_LEVEL_KEY);
        if (level === "EXCEPTION") {
          errorBatches.push(batch);
          continue;
        }
        dispatchLogOrError(batch, onLog);
        continue;
      }
      pendingBatches.push(batch);
    }

    // If we have data batches or a state token, defer errors to iteration.
    // Otherwise throw immediately (error on init).
    if (errorBatches.length > 0) {
      if (pendingBatches.length > 0 || stateToken !== null) {
        pendingBatches.push(...errorBatches);
      } else {
        // No data, no state — this is a pure init error. Throw now.
        for (const batch of errorBatches) {
          dispatchLogOrError(batch, onLog);
        }
      }
    }
  }

  // Nothing buffered and no resumption token: the stream is already done.
  if (pendingBatches.length === 0 && stateToken === null) {
    finished = true;
  }

  // Determine output schema: prefer the IPC stream schema from the init
  // response (it carries the server's actual output schema even for
  // zero-row token batches), then pending batch schemas, then describe info.
  const outputSchema =
    (streamSchema && streamSchema.fields.length > 0 ? streamSchema : null)
    ?? (pendingBatches.length > 0 ? pendingBatches[0].schema : null)
    ?? info.outputSchema ?? info.resultSchema;

  return new HttpStreamSession({
    baseUrl,
    prefix,
    method,
    stateToken,
    outputSchema,
    inputSchema: info.inputSchema,
    onLog,
    pendingBatches,
    finished,
    header,
    compressionLevel,
    compressFn,
    decompressFn,
  });
},
|
|
301
|
+
|
|
302
|
+
async describe(): Promise<ServiceDescription> {
|
|
303
|
+
return httpIntrospect(baseUrl, { prefix });
|
|
304
|
+
},
|
|
305
|
+
|
|
306
|
+
/**
 * Release client resources.
 *
 * Intentionally empty: the HTTP transport is stateless, so each request is
 * an independent call and there is nothing to tear down.
 */
close(): void {
  // No persistent connection or handle to release.
},
|
|
309
|
+
};
|
|
310
|
+
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
// © Copyright 2025-2026, Query.Farm LLC - https://query.farm
|
|
2
|
+
// SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
export { httpConnect, type RpcClient } from "./connect.js";
|
|
5
|
+
export { httpIntrospect, parseDescribeResponse, type ServiceDescription, type MethodInfo } from "./introspect.js";
|
|
6
|
+
export { HttpStreamSession } from "./stream.js";
|
|
7
|
+
export { pipeConnect, subprocessConnect, PipeStreamSession } from "./pipe.js";
|
|
8
|
+
export {
|
|
9
|
+
type HttpConnectOptions,
|
|
10
|
+
type LogMessage,
|
|
11
|
+
type StreamSession,
|
|
12
|
+
type PipeConnectOptions,
|
|
13
|
+
type SubprocessConnectOptions,
|
|
14
|
+
} from "./types.js";
|