@query-farm/vgi-rpc 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/LICENSE.md +191 -0
  2. package/README.md +332 -0
  3. package/dist/client/connect.d.ts +10 -0
  4. package/dist/client/connect.d.ts.map +1 -0
  5. package/dist/client/index.d.ts +6 -0
  6. package/dist/client/index.d.ts.map +1 -0
  7. package/dist/client/introspect.d.ts +30 -0
  8. package/dist/client/introspect.d.ts.map +1 -0
  9. package/dist/client/ipc.d.ts +34 -0
  10. package/dist/client/ipc.d.ts.map +1 -0
  11. package/dist/client/pipe.d.ts +63 -0
  12. package/dist/client/pipe.d.ts.map +1 -0
  13. package/dist/client/stream.d.ts +52 -0
  14. package/dist/client/stream.d.ts.map +1 -0
  15. package/dist/client/types.d.ts +25 -0
  16. package/dist/client/types.d.ts.map +1 -0
  17. package/dist/constants.d.ts +15 -0
  18. package/dist/constants.d.ts.map +1 -0
  19. package/dist/dispatch/describe.d.ts +14 -0
  20. package/dist/dispatch/describe.d.ts.map +1 -0
  21. package/dist/dispatch/stream.d.ts +20 -0
  22. package/dist/dispatch/stream.d.ts.map +1 -0
  23. package/dist/dispatch/unary.d.ts +9 -0
  24. package/dist/dispatch/unary.d.ts.map +1 -0
  25. package/dist/errors.d.ts +12 -0
  26. package/dist/errors.d.ts.map +1 -0
  27. package/dist/http/common.d.ts +16 -0
  28. package/dist/http/common.d.ts.map +1 -0
  29. package/dist/http/dispatch.d.ts +18 -0
  30. package/dist/http/dispatch.d.ts.map +1 -0
  31. package/dist/http/handler.d.ts +16 -0
  32. package/dist/http/handler.d.ts.map +1 -0
  33. package/dist/http/index.d.ts +4 -0
  34. package/dist/http/index.d.ts.map +1 -0
  35. package/dist/http/token.d.ts +24 -0
  36. package/dist/http/token.d.ts.map +1 -0
  37. package/dist/http/types.d.ts +30 -0
  38. package/dist/http/types.d.ts.map +1 -0
  39. package/dist/index.d.ts +9 -0
  40. package/dist/index.d.ts.map +1 -0
  41. package/dist/index.js +2493 -0
  42. package/dist/index.js.map +34 -0
  43. package/dist/protocol.d.ts +62 -0
  44. package/dist/protocol.d.ts.map +1 -0
  45. package/dist/schema.d.ts +38 -0
  46. package/dist/schema.d.ts.map +1 -0
  47. package/dist/server.d.ts +19 -0
  48. package/dist/server.d.ts.map +1 -0
  49. package/dist/types.d.ts +71 -0
  50. package/dist/types.d.ts.map +1 -0
  51. package/dist/util/schema.d.ts +20 -0
  52. package/dist/util/schema.d.ts.map +1 -0
  53. package/dist/util/zstd.d.ts +5 -0
  54. package/dist/util/zstd.d.ts.map +1 -0
  55. package/dist/wire/reader.d.ts +40 -0
  56. package/dist/wire/reader.d.ts.map +1 -0
  57. package/dist/wire/request.d.ts +15 -0
  58. package/dist/wire/request.d.ts.map +1 -0
  59. package/dist/wire/response.d.ts +25 -0
  60. package/dist/wire/response.d.ts.map +1 -0
  61. package/dist/wire/writer.d.ts +59 -0
  62. package/dist/wire/writer.d.ts.map +1 -0
  63. package/package.json +32 -0
  64. package/src/client/connect.ts +310 -0
  65. package/src/client/index.ts +14 -0
  66. package/src/client/introspect.ts +138 -0
  67. package/src/client/ipc.ts +225 -0
  68. package/src/client/pipe.ts +661 -0
  69. package/src/client/stream.ts +297 -0
  70. package/src/client/types.ts +31 -0
  71. package/src/constants.ts +22 -0
  72. package/src/dispatch/describe.ts +155 -0
  73. package/src/dispatch/stream.ts +151 -0
  74. package/src/dispatch/unary.ts +35 -0
  75. package/src/errors.ts +22 -0
  76. package/src/http/common.ts +89 -0
  77. package/src/http/dispatch.ts +340 -0
  78. package/src/http/handler.ts +247 -0
  79. package/src/http/index.ts +6 -0
  80. package/src/http/token.ts +149 -0
  81. package/src/http/types.ts +49 -0
  82. package/src/index.ts +52 -0
  83. package/src/protocol.ts +144 -0
  84. package/src/schema.ts +114 -0
  85. package/src/server.ts +159 -0
  86. package/src/types.ts +162 -0
  87. package/src/util/schema.ts +31 -0
  88. package/src/util/zstd.ts +49 -0
  89. package/src/wire/reader.ts +113 -0
  90. package/src/wire/request.ts +98 -0
  91. package/src/wire/response.ts +181 -0
  92. package/src/wire/writer.ts +137 -0
package/src/types.ts ADDED
@@ -0,0 +1,162 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ import { type Schema, RecordBatch, recordBatchFromArrays } from "apache-arrow";
5
+ import { buildLogBatch, coerceInt64 } from "./wire/response.js";
6
+
7
/** Kind of RPC method: unary (single request/response) or stream. */
export enum MethodType {
  UNARY = "unary",
  STREAM = "stream",
}

/** Logging interface available to handlers. */
export interface LogContext {
  /** Emit a client-directed log record with optional string key/value context. */
  clientLog(level: string, message: string, extra?: Record<string, string>): void;
}

/** Handler for unary (request-response) RPC methods. */
export type UnaryHandler = (
  params: Record<string, any>,
  ctx: LogContext,
) => Promise<Record<string, any>> | Record<string, any>;

/** Initialization function for producer streams. Returns the initial state object. */
export type ProducerInit<S = any> = (
  params: Record<string, any>,
) => Promise<S> | S;
/** Called repeatedly to produce output batches. Call `out.finish()` to end the stream. */
export type ProducerFn<S = any> = (
  state: S,
  out: OutputCollector,
) => Promise<void> | void;

/** Initialization function for exchange streams. Returns the initial state object. */
export type ExchangeInit<S = any> = (
  params: Record<string, any>,
) => Promise<S> | S;
/** Called once per input batch. Must emit exactly one output batch per call. */
export type ExchangeFn<S = any> = (
  state: S,
  input: RecordBatch,
  out: OutputCollector,
) => Promise<void> | void;

/** Produces a header batch sent before the first output batch in a stream. */
export type HeaderInit = (
  params: Record<string, any>,
  state: any,
  ctx: LogContext,
) => Record<string, any>;

/**
 * Declarative description of a single RPC method.
 * Which optional members are populated depends on `type`:
 * unary methods use `handler`; stream methods use the producer/exchange pairs.
 */
export interface MethodDefinition {
  name: string;
  type: MethodType;
  /** Arrow schema of the request parameters (one column per parameter). */
  paramsSchema: Schema;
  /** Arrow schema of the unary result batch. */
  resultSchema: Schema;
  /** Arrow schema of output batches for stream methods. */
  outputSchema?: Schema;
  /** Arrow schema of input batches for exchange streams. */
  inputSchema?: Schema;
  /** Unary request handler. */
  handler?: UnaryHandler;
  producerInit?: ProducerInit;
  producerFn?: ProducerFn;
  exchangeInit?: ExchangeInit;
  exchangeFn?: ExchangeFn;
  headerSchema?: Schema;
  headerInit?: HeaderInit;
  /** Optional human-readable documentation string. */
  doc?: string;
  /** Default values keyed by parameter name. */
  defaults?: Record<string, any>;
  /** Parameter name → type-name string map. */
  paramTypes?: Record<string, string>;
}

/** A RecordBatch plus optional Arrow IPC custom metadata to attach when it is written. */
export interface EmittedBatch {
  batch: RecordBatch;
  metadata?: Map<string, string>;
}
74
+
75
+ /**
76
+ * Accumulates output batches during a produce/exchange call.
77
+ * Enforces that exactly one data batch is emitted per call (plus any number of log batches).
78
+ */
79
+ export class OutputCollector implements LogContext {
80
+ private _batches: EmittedBatch[] = [];
81
+ private _dataBatchIdx: number | null = null;
82
+ private _finished = false;
83
+ private _producerMode: boolean;
84
+ private _outputSchema: Schema;
85
+ private _serverId: string;
86
+ private _requestId: string | null;
87
+
88
+ constructor(outputSchema: Schema, producerMode = true, serverId = "", requestId: string | null = null) {
89
+ this._outputSchema = outputSchema;
90
+ this._producerMode = producerMode;
91
+ this._serverId = serverId;
92
+ this._requestId = requestId;
93
+ }
94
+
95
+ get outputSchema(): Schema {
96
+ return this._outputSchema;
97
+ }
98
+
99
+ get finished(): boolean {
100
+ return this._finished;
101
+ }
102
+
103
+ get batches(): EmittedBatch[] {
104
+ return this._batches;
105
+ }
106
+
107
+ /** Emit a pre-built RecordBatch as the data batch for this call. */
108
+ emit(batch: RecordBatch, metadata?: Map<string, string>): void;
109
+ /** Emit a data batch from column arrays keyed by field name. Int64 Number values are coerced to BigInt. */
110
+ emit(columns: Record<string, any[]>): void;
111
+ emit(
112
+ batchOrColumns: RecordBatch | Record<string, any[]>,
113
+ metadata?: Map<string, string>,
114
+ ): void {
115
+ let batch: RecordBatch;
116
+ if (batchOrColumns instanceof RecordBatch) {
117
+ batch = batchOrColumns;
118
+ } else {
119
+ const coerced = coerceInt64(this._outputSchema, batchOrColumns);
120
+ batch = recordBatchFromArrays(coerced, this._outputSchema);
121
+ }
122
+ if (this._dataBatchIdx !== null) {
123
+ throw new Error("Only one data batch may be emitted per call");
124
+ }
125
+ this._dataBatchIdx = this._batches.length;
126
+ this._batches.push({ batch, metadata });
127
+ }
128
+
129
+ /** Single-row convenience. Wraps each value in `[value]` then calls `emit()`. */
130
+ emitRow(values: Record<string, any>): void {
131
+ const columns: Record<string, any[]> = {};
132
+ for (const [key, value] of Object.entries(values)) {
133
+ columns[key] = [value];
134
+ }
135
+ this.emit(columns);
136
+ }
137
+
138
+ /** Signal stream completion for producer streams. Throws if called on exchange streams. */
139
+ finish(): void {
140
+ if (!this._producerMode) {
141
+ throw new Error(
142
+ "finish() is not allowed on exchange streams; " +
143
+ "exchange streams must emit exactly one data batch per call",
144
+ );
145
+ }
146
+ this._finished = true;
147
+ }
148
+
149
+ /** Emit a zero-row client-directed log batch. */
150
+ clientLog(level: string, message: string, extra?: Record<string, string>): void {
151
+ const batch = buildLogBatch(
152
+ this._outputSchema,
153
+ level,
154
+ message,
155
+ extra,
156
+ this._serverId,
157
+ this._requestId,
158
+ );
159
+ this._batches.push({ batch });
160
+ }
161
+
162
+ }
@@ -0,0 +1,31 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ import { RecordBatchStreamWriter, type Schema } from "apache-arrow";
5
+
6
/**
 * Serialize a Schema to Arrow IPC stream bytes.
 *
 * The output is a complete IPC stream containing only the Schema message
 * followed by the end-of-stream marker (each message framed as:
 * continuation marker 0xFFFFFFFF + length + flatbuffer bytes).
 *
 * This format is readable by Python's `pa.ipc.read_schema()`, which accepts
 * an IPC stream whose first message is a Schema message.
 */
export function serializeSchema(schema: Schema): Uint8Array {
  // Write a zero-batch IPC stream: reset() with the schema writes the
  // Schema message, close() appends the EOS marker.
  const writer = new RecordBatchStreamWriter();
  writer.reset(undefined, schema);
  writer.close();
  // toUint8Array(true) returns the accumulated bytes synchronously.
  return writer.toUint8Array(true);
}
@@ -0,0 +1,49 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ /**
5
+ * Cross-runtime zstd compression/decompression.
6
+ *
7
+ * Uses Bun.zstd* when running on Bun, otherwise falls back to node:zlib
8
+ * (available on Node.js 22.15+ and Deno 2.6.9+).
9
+ */
10
+
11
+ import * as zlib from "node:zlib";
12
+
13
+ const isBun = typeof globalThis.Bun !== "undefined";
14
+
15
+ /** Compress data with zstd at the given level (1-22). */
16
+ export function zstdCompress(data: Uint8Array, level: number): Uint8Array<ArrayBuffer> {
17
+ if (isBun) {
18
+ return new Uint8Array(Bun.zstdCompressSync(data, { level }));
19
+ }
20
+ const fn = (zlib as any).zstdCompressSync;
21
+ if (typeof fn !== "function") {
22
+ throw new Error(
23
+ "zstd is not available in this runtime. " +
24
+ "Requires Bun, Node.js >= 22.15, or Deno >= 2.6.9.",
25
+ );
26
+ }
27
+ return new Uint8Array(
28
+ fn(data, {
29
+ params: {
30
+ [(zlib.constants as any).ZSTD_c_compressionLevel]: level,
31
+ },
32
+ }),
33
+ );
34
+ }
35
+
36
+ /** Decompress zstd-compressed data. */
37
+ export function zstdDecompress(data: Uint8Array): Uint8Array<ArrayBuffer> {
38
+ if (isBun) {
39
+ return new Uint8Array(Bun.zstdDecompressSync(data));
40
+ }
41
+ const fn = (zlib as any).zstdDecompressSync;
42
+ if (typeof fn !== "function") {
43
+ throw new Error(
44
+ "zstd is not available in this runtime. " +
45
+ "Requires Bun, Node.js >= 22.15, or Deno >= 2.6.9.",
46
+ );
47
+ }
48
+ return new Uint8Array(fn(data));
49
+ }
@@ -0,0 +1,113 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ import { RecordBatchReader, type RecordBatch, type Schema } from "apache-arrow";
5
+
6
/** One complete IPC stream: its schema plus every batch it contained. */
export interface StreamMessage {
  schema: Schema;
  batches: RecordBatch[];
}

/**
 * Reads sequential IPC streams from a byte source (e.g., process.stdin).
 * Uses autoDestroy: false + reset/open pattern to read multiple streams
 * from the same underlying byte source.
 */
export class IpcStreamReader {
  private reader: RecordBatchReader;
  // False until the first stream has been opened; readStream()/openNextStream()
  // only reset()+open() the reader on calls after the first.
  private initialized = false;
  /** True once readNextBatch() returns null (EOS reached for current stream). */
  private streamEnded = false;

  private constructor(reader: RecordBatchReader) {
    this.reader = reader;
  }

  /**
   * Create a reader over `input` and open the first IPC stream.
   * Throws if the byte source closes before any IPC message arrives.
   */
  static async create(
    input: ReadableStream<Uint8Array> | NodeJS.ReadableStream,
  ): Promise<IpcStreamReader> {
    const reader = await RecordBatchReader.from(input as any);
    // autoDestroy: false keeps the underlying source usable after a stream's
    // EOS so the next IPC stream can be read from the same bytes.
    await reader.open({ autoDestroy: false });
    if (reader.closed) {
      throw new Error("Input stream closed before first IPC message");
    }
    return new IpcStreamReader(reader);
  }

  /**
   * Read one complete IPC stream (schema + all batches).
   * Returns null on EOF (no more streams).
   */
  async readStream(): Promise<StreamMessage | null> {
    if (this.initialized) {
      // Advance to next stream
      await this.reader.reset().open();
      if (this.reader.closed) {
        return null;
      }
    }
    this.initialized = true;

    const schema = this.reader.schema;
    if (!schema) {
      return null;
    }

    const batches: RecordBatch[] = [];
    while (true) {
      const result = await this.reader.next();
      if (result.done) break;
      // Skip Arrow-JS synthetic placeholder for empty streams
      if (result.value.constructor.name === "_InternalEmptyPlaceholderRecordBatch") break;
      batches.push(result.value);
    }

    return { schema, batches };
  }

  /**
   * Open the next IPC stream and return its schema.
   * Use readNextBatch() to read batches one at a time.
   * Returns null on EOF.
   */
  async openNextStream(): Promise<Schema | null> {
    if (this.initialized) {
      await this.reader.reset().open();
      if (this.reader.closed) {
        return null;
      }
    }
    this.initialized = true;
    // New stream: clear the per-stream EOS latch used by readNextBatch().
    this.streamEnded = false;
    return this.reader.schema ?? null;
  }

  /**
   * Read the next batch from the currently open IPC stream.
   * Returns null when the stream ends (EOS).
   *
   * Once EOS is reached, subsequent calls return null immediately without
   * reading from the underlying byte source. This prevents the Arrow-JS
   * reader from consuming bytes that belong to the next IPC stream.
   */
  async readNextBatch(): Promise<RecordBatch | null> {
    if (this.streamEnded) return null;
    const result = await this.reader.next();
    if (result.done) {
      this.streamEnded = true;
      return null;
    }
    // Arrow-JS synthesizes a placeholder batch for streams with a schema but
    // zero real batches. Treat it as EOS so callers don't block trying to
    // read more bytes from a stream that has already ended.
    if (result.value.constructor.name === "_InternalEmptyPlaceholderRecordBatch") {
      this.streamEnded = true;
      return null;
    }
    return result.value;
  }

  /** Cancel the underlying Arrow reader. */
  async cancel(): Promise<void> {
    await this.reader.cancel();
  }
}
@@ -0,0 +1,98 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ import { DataType, type Schema, type RecordBatch } from "apache-arrow";
5
+ import {
6
+ RPC_METHOD_KEY,
7
+ REQUEST_VERSION_KEY,
8
+ REQUEST_VERSION,
9
+ REQUEST_ID_KEY,
10
+ } from "../constants.js";
11
+ import { RpcError, VersionError } from "../errors.js";
12
+
13
/** Fully parsed contents of a single request batch. */
export interface ParsedRequest {
  /** Method name taken from the 'vgi_rpc.method' metadata key. */
  methodName: string;
  /** Protocol version string from the request metadata. */
  requestVersion: string;
  /** Optional request correlation id, or null when the client sent none. */
  requestId: string | null;
  /** Schema of the request batch (one field per parameter). */
  schema: Schema;
  /** Parameter values extracted from the single-row batch, keyed by field name. */
  params: Record<string, any>;
  /** The complete custom_metadata map as received. */
  rawMetadata: Map<string, string>;
}
21
+
22
+ /**
23
+ * Parse a request from a RecordBatch with metadata.
24
+ * Extracts method name, version, and params from the batch.
25
+ */
26
+ export function parseRequest(
27
+ schema: Schema,
28
+ batch: RecordBatch,
29
+ ): ParsedRequest {
30
+ const metadata: Map<string, string> = batch.metadata ?? new Map();
31
+
32
+ const methodName = metadata.get(RPC_METHOD_KEY);
33
+ if (methodName === undefined) {
34
+ throw new RpcError(
35
+ "ProtocolError",
36
+ "Missing 'vgi_rpc.method' in request batch custom_metadata. " +
37
+ "Each request batch must carry a 'vgi_rpc.method' key in its Arrow IPC custom_metadata " +
38
+ "with the method name as a UTF-8 string.",
39
+ "",
40
+ );
41
+ }
42
+
43
+ const version = metadata.get(REQUEST_VERSION_KEY);
44
+ if (version === undefined) {
45
+ throw new VersionError(
46
+ "Missing 'vgi_rpc.request_version' in request batch custom_metadata. " +
47
+ `Set the 'vgi_rpc.request_version' custom_metadata value to '${REQUEST_VERSION}'.`,
48
+ );
49
+ }
50
+ if (version !== REQUEST_VERSION) {
51
+ throw new VersionError(
52
+ `Unsupported request version '${version}', expected '${REQUEST_VERSION}'. ` +
53
+ `Set the 'vgi_rpc.request_version' custom_metadata value to '${REQUEST_VERSION}'.`,
54
+ );
55
+ }
56
+
57
+ const requestId = metadata.get(REQUEST_ID_KEY) ?? null;
58
+
59
+ // Extract params from single-row batch
60
+ const params: Record<string, any> = {};
61
+ if (schema.fields.length > 0 && batch.numRows !== 1) {
62
+ throw new RpcError(
63
+ "ProtocolError",
64
+ `Expected 1 row in request batch, got ${batch.numRows}. ` +
65
+ "Each parameter is a column (not a row). The batch should have exactly 1 row.",
66
+ "",
67
+ );
68
+ }
69
+
70
+ for (let i = 0; i < schema.fields.length; i++) {
71
+ const field = schema.fields[i];
72
+ // Map_ columns have a broken .get() in arrow-js — pass through raw Data
73
+ if (DataType.isMap(field.type)) {
74
+ params[field.name] = batch.getChildAt(i)!.data[0];
75
+ continue;
76
+ }
77
+ let value = batch.getChildAt(i)?.get(0);
78
+ // Convert BigInt to Number when safe
79
+ if (typeof value === "bigint") {
80
+ if (
81
+ value >= BigInt(Number.MIN_SAFE_INTEGER) &&
82
+ value <= BigInt(Number.MAX_SAFE_INTEGER)
83
+ ) {
84
+ value = Number(value);
85
+ }
86
+ }
87
+ params[field.name] = value;
88
+ }
89
+
90
+ return {
91
+ methodName,
92
+ requestVersion: version,
93
+ requestId,
94
+ schema,
95
+ params,
96
+ rawMetadata: metadata,
97
+ };
98
+ }
@@ -0,0 +1,181 @@
1
+ // © Copyright 2025-2026, Query.Farm LLC - https://query.farm
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ import {
5
+ RecordBatch,
6
+ Schema,
7
+ Field,
8
+ Data,
9
+ DataType,
10
+ makeData,
11
+ Struct,
12
+ vectorFromArray,
13
+ } from "apache-arrow";
14
+ import {
15
+ LOG_LEVEL_KEY,
16
+ LOG_MESSAGE_KEY,
17
+ LOG_EXTRA_KEY,
18
+ SERVER_ID_KEY,
19
+ REQUEST_ID_KEY,
20
+ } from "../constants.js";
21
+
22
+ /**
23
+ * Coerce values for Int64 schema fields from Number to BigInt.
24
+ * Handles both single values and arrays. Returns a new record with coerced values.
25
+ */
26
+ export function coerceInt64(schema: Schema, values: Record<string, any>): Record<string, any> {
27
+ const result: Record<string, any> = { ...values };
28
+ for (const field of schema.fields) {
29
+ const val = result[field.name];
30
+ if (val === undefined) continue;
31
+ if (!DataType.isInt(field.type) || (field.type as any).bitWidth !== 64) continue;
32
+
33
+ if (Array.isArray(val)) {
34
+ result[field.name] = val.map((v: any) => (typeof v === "number" ? BigInt(v) : v));
35
+ } else if (typeof val === "number") {
36
+ result[field.name] = BigInt(val);
37
+ }
38
+ }
39
+ return result;
40
+ }
41
+
42
/**
 * Build a 1-row result batch with optional metadata.
 * For unary methods, `values` maps field names to single values.
 *
 * Throws TypeError when a non-nullable schema field is missing from `values`.
 */
export function buildResultBatch(
  schema: Schema,
  values: Record<string, any>,
  serverId: string,
  requestId: string | null,
): RecordBatch {
  // Response metadata always carries the server id; the request id is
  // echoed back only when the client supplied one.
  const metadata = new Map<string, string>();
  metadata.set(SERVER_ID_KEY, serverId);
  if (requestId !== null) {
    metadata.set(REQUEST_ID_KEY, requestId);
  }

  // Field-less schemas produce an empty (0-row) batch carrying only metadata.
  if (schema.fields.length === 0) {
    return buildEmptyBatch(schema, metadata);
  }

  // Validate required fields
  for (const field of schema.fields) {
    if (values[field.name] === undefined && !field.nullable) {
      const got = Object.keys(values);
      throw new TypeError(
        `Handler result missing required field '${field.name}'. Got keys: [${got.join(", ")}]`,
      );
    }
  }

  // Numbers destined for Int64 columns must become BigInt before vectorization.
  const coerced = coerceInt64(schema, values);

  const children = schema.fields.map((f: Field) => {
    let val = coerced[f.name];
    // Raw Data passthrough for Map_ types (whose .get() is broken in arrow-js)
    if (val instanceof Data) {
      return val;
    }
    // Wrap the single value in a 1-element vector and take its backing Data.
    const arr = vectorFromArray([val], f.type);
    return arr.data[0];
  });

  // Assemble the single row as a length-1 Struct over the schema's fields.
  const structType = new Struct(schema.fields);
  const data = makeData({
    type: structType,
    length: 1,
    children,
    nullCount: 0,
  });

  return new RecordBatch(schema, data, metadata);
}
94
+
95
+ /**
96
+ * Build a 0-row error batch with EXCEPTION metadata matching Python's Message.from_exception().
97
+ */
98
+ export function buildErrorBatch(
99
+ schema: Schema,
100
+ error: Error,
101
+ serverId: string,
102
+ requestId: string | null,
103
+ ): RecordBatch {
104
+ const metadata = new Map<string, string>();
105
+ metadata.set(LOG_LEVEL_KEY, "EXCEPTION");
106
+ metadata.set(LOG_MESSAGE_KEY, `${error.constructor.name}: ${error.message}`);
107
+
108
+ const extra: Record<string, any> = {
109
+ exception_type: error.constructor.name,
110
+ exception_message: error.message,
111
+ traceback: error.stack ?? "",
112
+ };
113
+ metadata.set(LOG_EXTRA_KEY, JSON.stringify(extra));
114
+ metadata.set(SERVER_ID_KEY, serverId);
115
+ if (requestId !== null) {
116
+ metadata.set(REQUEST_ID_KEY, requestId);
117
+ }
118
+
119
+ return buildEmptyBatch(schema, metadata);
120
+ }
121
+
122
+ /**
123
+ * Build a 0-row log batch.
124
+ */
125
+ export function buildLogBatch(
126
+ schema: Schema,
127
+ level: string,
128
+ message: string,
129
+ extra?: Record<string, any>,
130
+ serverId?: string,
131
+ requestId?: string | null,
132
+ ): RecordBatch {
133
+ const metadata = new Map<string, string>();
134
+ metadata.set(LOG_LEVEL_KEY, level);
135
+ metadata.set(LOG_MESSAGE_KEY, message);
136
+ if (extra) {
137
+ metadata.set(LOG_EXTRA_KEY, JSON.stringify(extra));
138
+ }
139
+ if (serverId != null) {
140
+ metadata.set(SERVER_ID_KEY, serverId);
141
+ }
142
+ if (requestId != null) {
143
+ metadata.set(REQUEST_ID_KEY, requestId);
144
+ }
145
+
146
+ return buildEmptyBatch(schema, metadata);
147
+ }
148
+
149
+ /**
150
+ * Build a 0-row batch from a schema with metadata.
151
+ * Used for error/log batches.
152
+ */
153
+ export function buildEmptyBatch(
154
+ schema: Schema,
155
+ metadata?: Map<string, string>,
156
+ ): RecordBatch {
157
+ const children = schema.fields.map((f: Field) => {
158
+ return makeData({ type: f.type, length: 0, nullCount: 0 });
159
+ });
160
+
161
+ if (schema.fields.length === 0) {
162
+ const structType = new Struct(schema.fields);
163
+ const data = makeData({
164
+ type: structType,
165
+ length: 0,
166
+ children: [],
167
+ nullCount: 0,
168
+ });
169
+ return new RecordBatch(schema, data, metadata);
170
+ }
171
+
172
+ const structType = new Struct(schema.fields);
173
+ const data = makeData({
174
+ type: structType,
175
+ length: 0,
176
+ children,
177
+ nullCount: 0,
178
+ });
179
+
180
+ return new RecordBatch(schema, data, metadata);
181
+ }