@databricks/zerobus-ingest-sdk 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +10 -10
- package/index.d.ts +0 -387
- package/index.js +0 -318
- package/schemas/air_quality_descriptor.pb +0 -9
- package/zerobus-sdk-ts.linux-x64-gnu.node +0 -0
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@databricks/zerobus-ingest-sdk",
-  "version": "0.1.0",
+  "version": "0.1.1",
   "description": "TypeScript/Node.js SDK for streaming data ingestion into Databricks Delta tables using Zerobus",
   "main": "index.js",
   "types": "index.d.ts",
@@ -32,7 +32,7 @@
     "*.node"
   ],
   "napi": {
-    "name": "zerobus-sdk-ts",
+    "name": "zerobus-ingest-sdk",
     "triples": {
       "defaults": true,
       "additional": [
@@ -82,12 +82,12 @@
     "glob": "^10.0.0"
   },
   "optionalDependencies": {
-    "@databricks/zerobus-ingest-sdk-win32-x64-msvc": "0.1.0",
-    "@databricks/zerobus-ingest-sdk-darwin-x64": "0.1.0",
-    "@databricks/zerobus-ingest-sdk-linux-x64-gnu": "0.1.0",
-    "@databricks/zerobus-ingest-sdk-linux-x64-musl": "0.1.0",
-    "@databricks/zerobus-ingest-sdk-linux-arm64-gnu": "0.1.0",
-    "@databricks/zerobus-ingest-sdk-darwin-arm64": "0.1.0",
-    "@databricks/zerobus-ingest-sdk-linux-arm64-musl": "0.1.0"
+    "@databricks/zerobus-ingest-sdk-win32-x64-msvc": "0.1.1",
+    "@databricks/zerobus-ingest-sdk-darwin-x64": "0.1.1",
+    "@databricks/zerobus-ingest-sdk-linux-x64-gnu": "0.1.1",
+    "@databricks/zerobus-ingest-sdk-linux-x64-musl": "0.1.1",
+    "@databricks/zerobus-ingest-sdk-linux-arm64-gnu": "0.1.1",
+    "@databricks/zerobus-ingest-sdk-darwin-arm64": "0.1.1",
+    "@databricks/zerobus-ingest-sdk-linux-arm64-musl": "0.1.1"
   }
-}
+}
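The napi "name" change is what renames the prebuilt binaries: the bundled 0.1.0 loader (index.js below) looks for local files prefixed `zerobus-sdk-ts.` (matching the removed `zerobus-sdk-ts.linux-x64-gnu.node`) and falls back to the per-platform optional dependencies pinned above. As a minimal sketch of that resolution order for one triple, assuming the NAPI-RS convention that `napi.name` becomes the binary filename prefix (the helper name here is illustrative, not part of the package):

```typescript
// Sketch (not part of the package): the resolution order the bundled 0.1.0
// loader uses for the linux-x64-gnu triple. The napi "name" field gives the
// locally shipped binary its "zerobus-sdk-ts." filename prefix, so renaming it
// to "zerobus-ingest-sdk" changes those filenames in later builds.
import { existsSync } from 'fs';
import { join } from 'path';

function loadLinuxX64GnuBinding(): unknown {
  const localFile = join(__dirname, 'zerobus-sdk-ts.linux-x64-gnu.node');
  if (existsSync(localFile)) {
    // Prebuilt binary shipped inside the main package (removed in 0.1.1).
    return require(localFile);
  }
  // Fallback: the per-platform optional dependency pinned to the SDK version.
  return require('@databricks/zerobus-ingest-sdk-linux-x64-gnu');
}
```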
package/index.d.ts
DELETED
@@ -1,387 +0,0 @@
-/* tslint:disable */
-/* eslint-disable */
-
-/* auto-generated by NAPI-RS */
-
-/**
- * Record serialization format.
- *
- * Specifies how records should be encoded when ingested into the stream.
- */
-export const enum RecordType {
-  /** JSON encoding - records are JSON-encoded strings */
-  Json = 0,
-  /** Protocol Buffers encoding - records are binary protobuf messages */
-  Proto = 1
-}
-/**
- * Configuration options for the Zerobus stream.
- *
- * These options control stream behavior including recovery, timeouts, and inflight limits.
- */
-export interface StreamConfigurationOptions {
-  /**
-   * Maximum number of unacknowledged requests that can be in flight.
-   * Default: 10,000
-   */
-  maxInflightRequests?: number
-  /**
-   * Enable automatic stream recovery on transient failures.
-   * Default: true
-   */
-  recovery?: boolean
-  /**
-   * Timeout for recovery operations in milliseconds.
-   * Default: 15,000 (15 seconds)
-   */
-  recoveryTimeoutMs?: number
-  /**
-   * Delay between recovery retry attempts in milliseconds.
-   * Default: 2,000 (2 seconds)
-   */
-  recoveryBackoffMs?: number
-  /**
-   * Maximum number of recovery attempts before giving up.
-   * Default: 4
-   */
-  recoveryRetries?: number
-  /**
-   * Timeout for flush operations in milliseconds.
-   * Default: 300,000 (5 minutes)
-   */
-  flushTimeoutMs?: number
-  /**
-   * Timeout waiting for server acknowledgments in milliseconds.
-   * Default: 60,000 (1 minute)
-   */
-  serverLackOfAckTimeoutMs?: number
-  /**
-   * Record serialization format.
-   * Use RecordType.Json for JSON encoding or RecordType.Proto for Protocol Buffers.
-   * Default: RecordType.Proto (Protocol Buffers)
-   */
-  recordType?: number
-}
-/**
- * Properties of the target Delta table for ingestion.
- *
- * Specifies which Unity Catalog table to write to and optionally the schema descriptor
- * for Protocol Buffers encoding.
- */
-export interface TableProperties {
-  /** Full table name in Unity Catalog (e.g., "catalog.schema.table") */
-  tableName: string
-  /**
-   * Optional Protocol Buffer descriptor as a base64-encoded string.
-   * If not provided, JSON encoding will be used.
-   */
-  descriptorProto?: string
-}
-/**
- * JavaScript headers provider callback wrapper.
- *
- * Allows TypeScript code to provide custom authentication headers
- * by implementing a getHeaders() function.
- */
-export interface JsHeadersProvider {
-  /** JavaScript function: () => Promise<Array<[string, string]>> */
-  getHeadersCallback: (...args: any[]) => any
-}
-/**
- * Custom error type for Zerobus operations.
- *
- * This error type includes information about whether the error is retryable,
- * which helps determine if automatic recovery can resolve the issue.
- */
-export declare class ZerobusError {
-  /** Returns true if this error can be automatically retried by the SDK. */
-  get isRetryable(): boolean
-  /** Get the error message. */
-  get message(): string
-}
-/**
- * A stream for ingesting data into a Databricks Delta table.
- *
- * The stream manages a bidirectional gRPC connection, handles acknowledgments,
- * and provides automatic recovery on transient failures.
- *
- * # Example
- *
- * ```typescript
- * const stream = await sdk.createStream(tableProps, clientId, clientSecret, options);
- * const ackPromise = await stream.ingestRecord(Buffer.from([1, 2, 3]));
- * const offset = await ackPromise;
- * await stream.close();
- * ```
- */
-export declare class ZerobusStream {
-  /**
-   * Ingests a single record into the stream.
-   *
-   * This method accepts either:
-   * - A Protocol Buffer encoded record as a Buffer (Vec<u8>)
-   * - A JSON string
-   *
-   * This method BLOCKS until the record is sent to the SDK's internal landing zone,
-   * then returns a Promise for the server acknowledgment. This allows you to send
-   * many records immediately without waiting for acknowledgments:
-   *
-   * ```typescript
-   * let lastAckPromise;
-   * for (let i = 0; i < 1000; i++) {
-   *   // This call blocks until record is sent (in SDK)
-   *   lastAckPromise = stream.ingestRecord(record);
-   * }
-   * // All 1000 records are now in the SDK's internal queue
-   * // Wait for the last acknowledgment
-   * await lastAckPromise;
-   * // Flush to ensure all records are acknowledged
-   * await stream.flush();
-   * ```
-   *
-   * # Arguments
-   *
-   * * `payload` - The record data. Accepts:
-   *   - Buffer (low-level proto bytes)
-   *   - string (low-level JSON string)
-   *   - Protobuf message object with .encode() method (high-level, auto-serializes)
-   *   - Plain JavaScript object (high-level, auto-stringifies to JSON)
-   *
-   * # Returns
-   *
-   * A Promise that resolves to the offset ID when the server acknowledges the record.
-   */
-  ingestRecord(payload: unknown): Promise<bigint>
-  /**
-   * Ingests multiple records as a single atomic batch.
-   *
-   * This method accepts an array of records (Protocol Buffer buffers or JSON strings)
-   * and ingests them as a batch. The batch receives a single acknowledgment from
-   * the server with all-or-nothing semantics.
-   *
-   * Similar to ingestRecord(), this BLOCKS until the batch is sent to the SDK's
-   * internal landing zone, then returns a Promise for the server acknowledgment.
-   *
-   * # Arguments
-   *
-   * * `records` - Array of record data (Buffer for protobuf, string for JSON)
-   *
-   * # Returns
-   *
-   * Promise resolving to:
-   * - `bigint`: offset ID for non-empty batches
-   * - `null`: for empty batches
-   *
-   * # Example
-   *
-   * ```typescript
-   * const buffers = records.map(r => Buffer.from(encode(r)));
-   * const offsetId = await stream.ingestRecords(buffers);
-   *
-   * if (offsetId !== null) {
-   *   console.log('Batch acknowledged at offset:', offsetId);
-   * }
-   * ```
-   */
-  ingestRecords(records: Array<unknown>): Promise<bigint | null>
-  /**
-   * Flushes all pending records and waits for acknowledgments.
-   *
-   * This method ensures all previously ingested records have been sent to the server
-   * and acknowledged. It's useful for checkpointing or ensuring data durability.
-   *
-   * # Errors
-   *
-   * - Timeout errors if flush takes longer than configured timeout
-   * - Network errors if the connection fails during flush
-   */
-  flush(): Promise<void>
-  /**
-   * Closes the stream gracefully.
-   *
-   * This method flushes all pending records, waits for acknowledgments, and then
-   * closes the underlying gRPC connection. Always call this method when done with
-   * the stream to ensure data integrity.
-   *
-   * # Errors
-   *
-   * - Returns an error if some records could not be acknowledged
-   * - Network errors during the close operation
-   */
-  close(): Promise<void>
-  /**
-   * Gets the list of unacknowledged records.
-   *
-   * This method should only be called after a stream failure to retrieve records
-   * that were sent but not acknowledged by the server. These records can be
-   * re-ingested into a new stream.
-   *
-   * # Returns
-   *
-   * An array of Buffers containing the unacknowledged record payloads.
-   */
-  getUnackedRecords(): Promise<Array<Buffer>>
-  /**
-   * Gets unacknowledged records grouped by their original batches.
-   *
-   * This preserves the batch structure from ingestion:
-   * - Each ingestRecord() call → 1-element batch
-   * - Each ingestRecords() call → N-element batch
-   *
-   * Should only be called after stream failure. All records returned as Buffers
-   * (JSON strings are converted to UTF-8 bytes).
-   *
-   * # Returns
-   *
-   * Array of batches, where each batch is an array of Buffers
-   *
-   * # Example
-   *
-   * ```typescript
-   * try {
-   *   await stream.ingestRecords(batch1);
-   *   await stream.ingestRecords(batch2);
-   * } catch (error) {
-   *   const unackedBatches = await stream.getUnackedBatches();
-   *
-   *   // Re-ingest with new stream
-   *   for (const batch of unackedBatches) {
-   *     await newStream.ingestRecords(batch);
-   *   }
-   * }
-   * ```
-   */
-  getUnackedBatches(): Promise<Array<Array<Buffer>>>
-}
-/**
- * The main SDK for interacting with the Databricks Zerobus service.
- *
- * This is the entry point for creating ingestion streams to Delta tables.
- *
- * # Example
- *
- * ```typescript
- * const sdk = new ZerobusSdk(
- *   "https://workspace-id.zerobus.region.cloud.databricks.com",
- *   "https://workspace.cloud.databricks.com"
- * );
- *
- * const stream = await sdk.createStream(
- *   { tableName: "catalog.schema.table" },
- *   "client-id",
- *   "client-secret"
- * );
- * ```
- */
-export declare class ZerobusSdk {
-  /**
-   * Creates a new Zerobus SDK instance.
-   *
-   * # Arguments
-   *
-   * * `zerobus_endpoint` - The Zerobus API endpoint URL
-   *   (e.g., "https://workspace-id.zerobus.region.cloud.databricks.com")
-   * * `unity_catalog_url` - The Unity Catalog endpoint URL
-   *   (e.g., "https://workspace.cloud.databricks.com")
-   *
-   * # Errors
-   *
-   * - Invalid endpoint URLs
-   * - Failed to extract workspace ID from the endpoint
-   */
-  constructor(zerobusEndpoint: string, unityCatalogUrl: string)
-  /**
-   * Creates a new ingestion stream to a Delta table.
-   *
-   * This method establishes a bidirectional gRPC connection to the Zerobus service
-   * and prepares it for data ingestion. By default, it uses OAuth 2.0 Client Credentials
-   * authentication. For custom authentication (e.g., Personal Access Tokens), provide
-   * a custom headers_provider.
-   *
-   * # Arguments
-   *
-   * * `table_properties` - Properties of the target table including name and optional schema
-   * * `client_id` - OAuth 2.0 client ID (ignored if headers_provider is provided)
-   * * `client_secret` - OAuth 2.0 client secret (ignored if headers_provider is provided)
-   * * `options` - Optional stream configuration (uses defaults if not provided)
-   * * `headers_provider` - Optional custom headers provider for authentication.
-   *   If not provided, uses OAuth with client_id and client_secret.
-   *
-   * # Returns
-   *
-   * A Promise that resolves to a ZerobusStream ready for data ingestion.
-   *
-   * # Errors
-   *
-   * - Authentication failures (invalid credentials)
-   * - Invalid table name or insufficient permissions
-   * - Network connectivity issues
-   * - Schema validation errors
-   *
-   * # Examples
-   *
-   * OAuth authentication (default):
-   * ```typescript
-   * const stream = await sdk.createStream(
-   *   { tableName: "catalog.schema.table" },
-   *   "client-id",
-   *   "client-secret"
-   * );
-   * ```
-   *
-   * Custom authentication with headers provider:
-   * ```typescript
-   * const headersProvider = {
-   *   getHeadersCallback: async () => [
-   *     ["authorization", `Bearer ${myToken}`],
-   *     ["x-databricks-zerobus-table-name", tableName]
-   *   ]
-   * };
-   * const stream = await sdk.createStream(
-   *   { tableName: "catalog.schema.table" },
-   *   "", // ignored
-   *   "", // ignored
-   *   undefined,
-   *   headersProvider
-   * );
-   * ```
-   */
-  createStream(tableProperties: TableProperties, clientId: string, clientSecret: string, options?: StreamConfigurationOptions | undefined | null, headersProvider?: JsHeadersProvider | undefined | null): Promise<ZerobusStream>
-  /**
-   * Recreates a stream with the same configuration and re-ingests unacknowledged batches.
-   *
-   * This method is the recommended approach for recovering from stream failures. It:
-   * 1. Retrieves all unacknowledged batches from the failed stream
-   * 2. Creates a new stream with identical configuration
-   * 3. Re-ingests all unacknowledged batches in order
-   * 4. Returns the new stream ready for continued ingestion
-   *
-   * # Arguments
-   *
-   * * `stream` - The failed or closed stream to recreate
-   *
-   * # Returns
-   *
-   * A Promise that resolves to a new ZerobusStream with all unacknowledged batches re-ingested.
-   *
-   * # Errors
-   *
-   * - Failed to retrieve unacknowledged batches from the original stream
-   * - Authentication failures when creating the new stream
-   * - Network connectivity issues during re-ingestion
-   *
-   * # Examples
-   *
-   * ```typescript
-   * try {
-   *   await stream.ingestRecords(batch);
-   * } catch (error) {
-   *   await stream.close();
-   *   // Recreate stream with all unacked batches re-ingested
-   *   const newStream = await sdk.recreateStream(stream);
-   *   // Continue ingesting with newStream
-   * }
-   * ```
-   */
-  recreateStream(stream: ZerobusStream): Promise<ZerobusStream>
-}
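Taken together, the declarations above are the public surface that 0.1.1 drops from the main package's entry point. For reference, a minimal end-to-end sketch against the 0.1.0 surface as declared; endpoint URLs, credentials, the table name, and the record shape are placeholders, not values from the package:

```typescript
// Sketch only: exercises the 0.1.0 API surface declared above.
import { ZerobusSdk, RecordType } from '@databricks/zerobus-ingest-sdk';

async function main(): Promise<void> {
  const sdk = new ZerobusSdk(
    'https://workspace-id.zerobus.region.cloud.databricks.com', // Zerobus endpoint
    'https://workspace.cloud.databricks.com'                    // Unity Catalog URL
  );

  // No descriptorProto on the table, so records are sent as JSON strings.
  let stream = await sdk.createStream(
    { tableName: 'catalog.schema.table' },
    'client-id',
    'client-secret',
    { recordType: RecordType.Json, maxInflightRequests: 1000 }
  );

  try {
    // ingestRecord() queues each record and returns a promise for the server ack.
    const acks: Array<Promise<bigint>> = [];
    for (let i = 0; i < 10; i++) {
      acks.push(stream.ingestRecord(JSON.stringify({ id: i, value: `row-${i}` })));
    }
    await Promise.all(acks); // wait for all acknowledgments (offset IDs)
    await stream.flush();    // make sure nothing is left pending
  } catch {
    // On failure, recreateStream() builds a new stream with the same
    // configuration and re-ingests any unacknowledged batches.
    await stream.close().catch(() => undefined);
    stream = await sdk.recreateStream(stream);
  } finally {
    await stream.close(); // flush remaining records and close the gRPC connection
  }
}

main().catch(console.error);
```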
package/index.js
DELETED
@@ -1,318 +0,0 @@
-/* tslint:disable */
-/* eslint-disable */
-/* prettier-ignore */
-
-/* auto-generated by NAPI-RS */
-
-const { existsSync, readFileSync } = require('fs')
-const { join } = require('path')
-
-const { platform, arch } = process
-
-let nativeBinding = null
-let localFileExisted = false
-let loadError = null
-
-function isMusl() {
-  // For Node 10
-  if (!process.report || typeof process.report.getReport !== 'function') {
-    try {
-      const lddPath = require('child_process').execSync('which ldd').toString().trim()
-      return readFileSync(lddPath, 'utf8').includes('musl')
-    } catch (e) {
-      return true
-    }
-  } else {
-    const { glibcVersionRuntime } = process.report.getReport().header
-    return !glibcVersionRuntime
-  }
-}
-
-switch (platform) {
-  case 'android':
-    switch (arch) {
-      case 'arm64':
-        localFileExisted = existsSync(join(__dirname, 'zerobus-sdk-ts.android-arm64.node'))
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.android-arm64.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-android-arm64')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      case 'arm':
-        localFileExisted = existsSync(join(__dirname, 'zerobus-sdk-ts.android-arm-eabi.node'))
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.android-arm-eabi.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-android-arm-eabi')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      default:
-        throw new Error(`Unsupported architecture on Android ${arch}`)
-    }
-    break
-  case 'win32':
-    switch (arch) {
-      case 'x64':
-        localFileExisted = existsSync(
-          join(__dirname, 'zerobus-sdk-ts.win32-x64-msvc.node')
-        )
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.win32-x64-msvc.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-win32-x64-msvc')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      case 'ia32':
-        localFileExisted = existsSync(
-          join(__dirname, 'zerobus-sdk-ts.win32-ia32-msvc.node')
-        )
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.win32-ia32-msvc.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-win32-ia32-msvc')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      case 'arm64':
-        localFileExisted = existsSync(
-          join(__dirname, 'zerobus-sdk-ts.win32-arm64-msvc.node')
-        )
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.win32-arm64-msvc.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-win32-arm64-msvc')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      default:
-        throw new Error(`Unsupported architecture on Windows: ${arch}`)
-    }
-    break
-  case 'darwin':
-    localFileExisted = existsSync(join(__dirname, 'zerobus-sdk-ts.darwin-universal.node'))
-    try {
-      if (localFileExisted) {
-        nativeBinding = require('./zerobus-sdk-ts.darwin-universal.node')
-      } else {
-        nativeBinding = require('@databricks/zerobus-ingest-sdk-darwin-universal')
-      }
-      break
-    } catch {}
-    switch (arch) {
-      case 'x64':
-        localFileExisted = existsSync(join(__dirname, 'zerobus-sdk-ts.darwin-x64.node'))
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.darwin-x64.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-darwin-x64')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      case 'arm64':
-        localFileExisted = existsSync(
-          join(__dirname, 'zerobus-sdk-ts.darwin-arm64.node')
-        )
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.darwin-arm64.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-darwin-arm64')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      default:
-        throw new Error(`Unsupported architecture on macOS: ${arch}`)
-    }
-    break
-  case 'freebsd':
-    if (arch !== 'x64') {
-      throw new Error(`Unsupported architecture on FreeBSD: ${arch}`)
-    }
-    localFileExisted = existsSync(join(__dirname, 'zerobus-sdk-ts.freebsd-x64.node'))
-    try {
-      if (localFileExisted) {
-        nativeBinding = require('./zerobus-sdk-ts.freebsd-x64.node')
-      } else {
-        nativeBinding = require('@databricks/zerobus-ingest-sdk-freebsd-x64')
-      }
-    } catch (e) {
-      loadError = e
-    }
-    break
-  case 'linux':
-    switch (arch) {
-      case 'x64':
-        if (isMusl()) {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-x64-musl.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-x64-musl.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-x64-musl')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        } else {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-x64-gnu.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-x64-gnu.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-x64-gnu')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        }
-        break
-      case 'arm64':
-        if (isMusl()) {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-arm64-musl.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-arm64-musl.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-arm64-musl')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        } else {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-arm64-gnu.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-arm64-gnu.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-arm64-gnu')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        }
-        break
-      case 'arm':
-        if (isMusl()) {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-arm-musleabihf.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-arm-musleabihf.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-arm-musleabihf')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        } else {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-arm-gnueabihf.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-arm-gnueabihf.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-arm-gnueabihf')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        }
-        break
-      case 'riscv64':
-        if (isMusl()) {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-riscv64-musl.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-riscv64-musl.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-riscv64-musl')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        } else {
-          localFileExisted = existsSync(
-            join(__dirname, 'zerobus-sdk-ts.linux-riscv64-gnu.node')
-          )
-          try {
-            if (localFileExisted) {
-              nativeBinding = require('./zerobus-sdk-ts.linux-riscv64-gnu.node')
-            } else {
-              nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-riscv64-gnu')
-            }
-          } catch (e) {
-            loadError = e
-          }
-        }
-        break
-      case 's390x':
-        localFileExisted = existsSync(
-          join(__dirname, 'zerobus-sdk-ts.linux-s390x-gnu.node')
-        )
-        try {
-          if (localFileExisted) {
-            nativeBinding = require('./zerobus-sdk-ts.linux-s390x-gnu.node')
-          } else {
-            nativeBinding = require('@databricks/zerobus-ingest-sdk-linux-s390x-gnu')
-          }
-        } catch (e) {
-          loadError = e
-        }
-        break
-      default:
-        throw new Error(`Unsupported architecture on Linux: ${arch}`)
-    }
-    break
-  default:
-    throw new Error(`Unsupported OS: ${platform}, architecture: ${arch}`)
-}
-
-if (!nativeBinding) {
-  if (loadError) {
-    throw loadError
-  }
-  throw new Error(`Failed to load native binding`)
-}
-
-const { RecordType, ZerobusError, ZerobusStream, ZerobusSdk } = nativeBinding
-
-module.exports.RecordType = RecordType
-module.exports.ZerobusError = ZerobusError
-module.exports.ZerobusStream = ZerobusStream
-module.exports.ZerobusSdk = ZerobusSdk
package/zerobus-sdk-ts.linux-x64-gnu.node
Binary file
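The isMusl() helper in the removed loader is what picks between the -gnu and -musl Linux bindings. A distilled sketch of the same check, limited to Node versions that expose process.report (the older-Node ldd fallback is omitted):

```typescript
// Sketch: how the removed loader's isMusl() distinguishes musl from glibc.
// A diagnostic report header without glibcVersionRuntime means the runtime
// is not linked against glibc, i.e. musl (e.g. Alpine Linux).
interface ReportHeader {
  glibcVersionRuntime?: string;
}

function isMuslLibc(): boolean {
  // getReport() returns a plain object; only the header field matters here.
  const report = process.report?.getReport() as { header?: ReportHeader } | undefined;
  return !report?.header?.glibcVersionRuntime;
}

console.log(`linking against ${isMuslLibc() ? 'musl' : 'glibc'}`);
```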