@playcanvas/splat-transform 1.3.0 → 1.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.mjs +103 -30
- package/dist/cli.mjs.map +1 -1
- package/dist/index.cjs +101 -29
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +101 -30
- package/dist/index.mjs.map +1 -1
- package/dist/lib/data-table/data-table.d.ts +2 -1
- package/dist/lib/index.d.cts +1 -1
- package/dist/lib/index.d.ts +1 -1
- package/dist/lib/io/read/buffered-read-stream.d.ts +26 -0
- package/dist/lib/io/read/index.d.ts +1 -0
- package/package.json +1 -1
package/dist/cli.mjs
CHANGED
@@ -11029,42 +11029,38 @@ class DataTable {
     * After calling, row `i` will contain the data that was previously at row `indices[i]`.
     *
     * This is a memory-efficient alternative to `permuteRows` that modifies the table
-    * in-place rather than creating a copy.
+    * in-place rather than creating a copy. It reuses ArrayBuffers between columns to
+    * minimize memory allocations.
     *
     * @param indices - Array of indices defining the permutation. Must have the same
     *                  length as the number of rows, and must be a valid permutation
     *                  (each index 0 to n-1 appears exactly once).
     */
    permuteRowsInPlace(indices) {
-        …
-        const …
-        const …
-        …
-            // Save values at position i
-            for (let c = 0; c < numCols; c++) {
-                temps[c] = this.columns[c].data[i];
+        // Cache for reusing ArrayBuffers by size
+        const cache = new Map();
+        const getBuffer = (size) => {
+            const cached = cache.get(size);
+            if (cached) {
+                cache.delete(size);
+                return cached;
            }
-            …
-            for (let c = 0; c < numCols; c++) {
-                this.columns[c].data[j] = this.columns[c].data[next];
-            }
-            j = next;
+            return new ArrayBuffer(size);
+        };
+        const returnBuffer = (buffer) => {
+            cache.set(buffer.byteLength, buffer);
+        };
+        const n = this.numRows;
+        for (const column of this.columns) {
+            const src = column.data;
+            const constructor = src.constructor;
+            const dst = new constructor(getBuffer(src.byteLength));
+            // Sequential writes are cache-friendly
+            for (let i = 0; i < n; i++) {
+                dst[i] = src[indices[i]];
            }
+            returnBuffer(src.buffer);
+            column.data = dst;
        }
    }
}
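The rewrite replaces the old cycle-walking permutation with a per-column gather pass: each column allocates (or recycles) a destination buffer, fills it with sequential writes, and hands its source buffer back to a size-keyed cache so the next column of the same byte length can reuse it. Peak overhead is one spare buffer per distinct column size instead of a full table copy. A minimal standalone sketch of the same technique in TypeScript (the `Column` shape and `permuteInPlace` name are illustrative, not the package's API):

```ts
type TypedArray = Float32Array | Uint32Array | Uint8Array;

// Illustrative column shape, not @playcanvas/splat-transform's API.
interface Column {
    data: TypedArray;
}

function permuteInPlace(columns: Column[], indices: Uint32Array): void {
    // One spare ArrayBuffer per distinct byte length is kept for reuse.
    const cache = new Map<number, ArrayBuffer>();

    for (const column of columns) {
        const src = column.data;
        const Ctor = src.constructor as new (buffer: ArrayBuffer) => TypedArray;

        // Recycle a released buffer of the same size, or allocate a fresh one.
        const buffer = cache.get(src.byteLength) ?? new ArrayBuffer(src.byteLength);
        cache.delete(src.byteLength);

        const dst = new Ctor(buffer);
        // Gather pass: sequential writes to dst, random reads from src.
        for (let i = 0; i < indices.length; i++) {
            dst[i] = src[indices[i]];
        }

        // Hand the old buffer back for the next same-sized column.
        cache.set(src.byteLength, src.buffer as ArrayBuffer);
        column.data = dst;
    }
}
```

Trading the random writes of cycle-walking for sequential writes (with random reads from `src`) is what the "cache-friendly" comment in the diff refers to.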
@@ -12388,6 +12384,81 @@ const readKsplat = async (source) => {
    return new DataTable(columns);
};

+/**
+ * ReadStream wrapper that adds read-ahead buffering to reduce async overhead.
+ * Reads larger chunks from the inner stream and buffers excess data for
+ * subsequent small reads. Useful for sources with high per-call overhead.
+ *
+ * @example
+ * // Wrap a stream with 4MB read-ahead buffering
+ * const buffered = new BufferedReadStream(rawStream, 4 * 1024 * 1024);
+ * const data = await buffered.readAll();
+ */
+class BufferedReadStream extends ReadStream {
+    inner;
+    chunkSize;
+    // Buffer state
+    buffer = null;
+    bufferOffset = 0;
+    /**
+     * Create a caching wrapper around a stream.
+     * @param inner - The underlying stream to read from
+     * @param chunkSize - Minimum bytes to read at once from inner stream (default 64KB)
+     */
+    constructor(inner, chunkSize = 65536) {
+        super(inner.expectedSize);
+        this.inner = inner;
+        this.chunkSize = chunkSize;
+    }
+    async pull(target) {
+        // Early return for zero-length requests (e.g., EOF check from readAll)
+        if (target.length === 0) {
+            return 0;
+        }
+        let written = 0;
+        // Serve from buffer first
+        if (this.buffer && this.bufferOffset < this.buffer.length) {
+            const available = this.buffer.length - this.bufferOffset;
+            const toCopy = Math.min(available, target.length);
+            target.set(this.buffer.subarray(this.bufferOffset, this.bufferOffset + toCopy));
+            this.bufferOffset += toCopy;
+            written += toCopy;
+            this.bytesRead += toCopy;
+            // Clear exhausted buffer
+            if (this.bufferOffset >= this.buffer.length) {
+                this.buffer = null;
+                this.bufferOffset = 0;
+            }
+            if (written >= target.length) {
+                return written;
+            }
+        }
+        // Read a chunk from inner stream
+        const remaining = target.length - written;
+        const readSize = Math.max(this.chunkSize, remaining);
+        const chunk = new Uint8Array(readSize);
+        const n = await this.inner.pull(chunk);
+        if (n === 0) {
+            return written;
+        }
+        // Copy what we need to target
+        const toCopy = Math.min(n, remaining);
+        target.set(chunk.subarray(0, toCopy), written);
+        written += toCopy;
+        this.bytesRead += toCopy;
+        // Cache the excess
+        if (toCopy < n) {
+            this.buffer = chunk.subarray(0, n);
+            this.bufferOffset = toCopy;
+        }
+        return written;
+    }
+    close() {
+        this.inner.close();
+        this.buffer = null;
+    }
+}
+
const _DRIVE_LETTER_START_RE = /^[A-Za-z]:\//;
function normalizeWindowsPath(input = "") {
    if (!input) {
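The point of the wrapper is amortization: a parser issuing thousands of small `pull` calls touches the inner stream only about once per `chunkSize` bytes, with the rest served from the read-ahead buffer. A rough sketch of that behavior, assuming `BufferedReadStream` and the duck-typed `expectedSize`/`pull`/`close` contract visible above are in scope (`CountingSource` is a stand-in, not part of the package):

```ts
// Stand-in source that counts how often it is actually pulled. It satisfies
// the same contract BufferedReadStream relies on: expectedSize, pull(target)
// returning bytes written, and close().
class CountingSource {
    expectedSize = 1 << 20;   // pretend the source holds 1 MiB
    pulls = 0;
    private offset = 0;

    async pull(target: Uint8Array): Promise<number> {
        this.pulls++;
        const n = Math.min(target.length, this.expectedSize - this.offset);
        target.fill(0xab, 0, n);   // stand-in for real I/O
        this.offset += n;
        return n;
    }

    close(): void {}
}

// 1000 reads of 32 bytes each: unbuffered, that would be 1000 awaited inner
// reads; through a 64KB BufferedReadStream, the first pull reads 64KB ahead
// and the remaining 999 are served from memory.
const source = new CountingSource();
const buffered = new BufferedReadStream(source, 65536);
const field = new Uint8Array(32);
for (let i = 0; i < 1000; i++) {
    await buffered.pull(field);
}
console.log(source.pulls);   // 1
```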
@@ -14224,7 +14295,7 @@ class CompressedChunk {
    }
}

-var version = "1.3.0";
+var version = "1.4.1";

const generatedByString = `Generated by splat-transform ${version}`;
const chunkProps = [
@@ -16344,7 +16415,9 @@ class NodeReadSource {
        // Clamp range to valid bounds
        const clampedStart = Math.max(0, Math.min(start, this.size));
        const clampedEnd = Math.max(clampedStart, Math.min(end, this.size));
-        return new NodeReadStream(this.fileHandle, clampedStart, clampedEnd);
+        // Wrap with BufferedReadStream to reduce async overhead from file reads
+        const raw = new NodeReadStream(this.fileHandle, clampedStart, clampedEnd);
+        return new BufferedReadStream(raw, 4 * 1024 * 1024); // 4MB chunks
    }
    close() {
        this.closed = true;