@ifc-lite/renderer 1.14.9 → 1.15.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/scene.js CHANGED
@@ -21,6 +21,8 @@ export class Scene {
21
21
  nextSplitId = 0; // Monotonic counter for sub-bucket keys
22
22
  nextBatchId = 0; // Monotonic counter for unique batch identifiers
23
23
  cachedMaxBufferSize = 0; // device.limits.maxBufferSize * safety factor (set on first use)
24
+ static STREAMING_FRAGMENT_MAX_INDICES = 180_000;
25
+ static STREAMING_FRAGMENT_MAX_VERTEX_BYTES = 8 * 1024 * 1024;
24
26
  // Sub-batch cache for partially visible batches (PERFORMANCE FIX)
25
27
  // Key = colorKey + ":" + sorted visible expressIds hash
26
28
  // This allows rendering partially visible batches as single draw calls instead of 10,000+ individual draws
@@ -41,11 +43,13 @@ export class Scene {
41
43
  // via queueMeshes() (instant, no GPU), and the animation loop drains
42
44
  // the queue via flushPending() with a per-frame time budget.
43
45
  meshQueue = [];
46
+ meshQueueReadIndex = 0;
44
47
  // ─── GPU-resident mode ──────────────────────────────────────────────
45
48
  // After releaseGeometryData(), JS-side typed arrays are freed.
46
49
  // Only lightweight metadata is retained for operations that don't need
47
50
  // raw vertex data (bounding boxes, color key lookups, expressId sets).
48
51
  geometryReleased = false;
52
+ ephemeralStreamingMode = false;
49
53
  /**
50
54
  * Add mesh to scene
51
55
  */
@@ -82,6 +86,25 @@ export class Scene {
82
86
  * Accumulates multiple mesh pieces per expressId (elements can have multiple geometry pieces)
83
87
  */
84
88
  addMeshData(meshData) {
89
+ // For color-merged batches with per-vertex entityIds, register the mesh
90
+ // under EVERY unique entity so picking/visibility/selection can find it.
91
+ if (meshData.entityIds && meshData.entityIds.length > 0) {
92
+ const seen = new Set();
93
+ for (let i = 0; i < meshData.entityIds.length; i++) {
94
+ const eid = meshData.entityIds[i];
95
+ if (seen.has(eid))
96
+ continue;
97
+ seen.add(eid);
98
+ const existing = this.meshDataMap.get(eid);
99
+ if (existing) {
100
+ existing.push(meshData);
101
+ }
102
+ else {
103
+ this.meshDataMap.set(eid, [meshData]);
104
+ }
105
+ }
106
+ return;
107
+ }
85
108
  const existing = this.meshDataMap.get(meshData.expressId);
86
109
  if (existing) {
87
110
  existing.push(meshData);
@@ -107,8 +130,37 @@ export class Scene {
107
130
  if (pieces.length === 0)
108
131
  return undefined;
109
132
  }
110
- if (pieces.length === 1)
111
- return pieces[0];
133
+ if (pieces.length === 1) {
134
+ const single = pieces[0];
135
+ // For color-merged batches, extract only the vertices belonging to
136
+ // this expressId so selection highlighting is per-entity, not the
137
+ // entire merged batch.
138
+ if (single.entityIds) {
139
+ return this.extractEntityFromMergedMesh(single, expressId);
140
+ }
141
+ return single;
142
+ }
143
+ // For multiple pieces where at least one is a color-merged batch,
144
+ // extract this entity from each merged piece; plain pieces pass through.
145
+ if (pieces.some(p => p.entityIds)) {
146
+ const extracted = [];
147
+ for (const piece of pieces) {
148
+ if (piece.entityIds) {
149
+ const ex = this.extractEntityFromMergedMesh(piece, expressId);
150
+ if (ex)
151
+ extracted.push(ex);
152
+ }
153
+ else {
154
+ extracted.push(piece);
155
+ }
156
+ }
157
+ if (extracted.length === 0)
158
+ return undefined;
159
+ if (extracted.length === 1)
160
+ return extracted[0];
161
+ pieces = extracted;
162
+ // Fall through to the normal multi-piece merge below
163
+ }
112
164
  // Check if all pieces have the same color (within tolerance)
113
165
  // This handles multi-material elements like windows (frame vs glass)
114
166
  const firstColor = pieces[0].color;
@@ -169,6 +221,70 @@ export class Scene {
169
221
  * @param expressId - The expressId to look up
170
222
  * @param modelIndex - Optional modelIndex to filter by (for multi-model support)
171
223
  */
224
+ /**
225
+ * Extract only the vertices/triangles belonging to `targetId` from a
226
+ * color-merged MeshData that contains many entities. Returns a new
227
+ * lightweight MeshData suitable for selection highlighting.
228
+ */
229
+ extractEntityFromMergedMesh(merged, targetId) {
230
+ const entityIds = merged.entityIds;
231
+ const positions = merged.positions;
232
+ const normals = merged.normals;
233
+ const indices = merged.indices;
234
+ // Build a vertex mask and remap table
235
+ const vertexCount = entityIds.length;
236
+ const keep = new Uint8Array(vertexCount);
237
+ let keptCount = 0;
238
+ for (let i = 0; i < vertexCount; i++) {
239
+ if (entityIds[i] === targetId) {
240
+ keep[i] = 1;
241
+ keptCount++;
242
+ }
243
+ }
244
+ if (keptCount === 0)
245
+ return undefined;
246
+ // Remap old vertex index → new compacted index
247
+ const remap = new Uint32Array(vertexCount);
248
+ let newIdx = 0;
249
+ for (let i = 0; i < vertexCount; i++) {
250
+ if (keep[i]) {
251
+ remap[i] = newIdx++;
252
+ }
253
+ }
254
+ // Compact positions & normals
255
+ const outPos = new Float32Array(keptCount * 3);
256
+ const outNorm = new Float32Array(keptCount * 3);
257
+ let outOff = 0;
258
+ for (let i = 0; i < vertexCount; i++) {
259
+ if (!keep[i])
260
+ continue;
261
+ const src = i * 3;
262
+ outPos[outOff] = positions[src];
263
+ outPos[outOff + 1] = positions[src + 1];
264
+ outPos[outOff + 2] = positions[src + 2];
265
+ outNorm[outOff] = normals[src];
266
+ outNorm[outOff + 1] = normals[src + 1];
267
+ outNorm[outOff + 2] = normals[src + 2];
268
+ outOff += 3;
269
+ }
270
+ // Compact indices (only triangles where ALL 3 vertices belong to target)
271
+ const tmpIdx = [];
272
+ for (let i = 0; i < indices.length; i += 3) {
273
+ const a = indices[i], b = indices[i + 1], c = indices[i + 2];
274
+ if (keep[a] && keep[b] && keep[c]) {
275
+ tmpIdx.push(remap[a], remap[b], remap[c]);
276
+ }
277
+ }
278
+ if (tmpIdx.length === 0)
279
+ return undefined;
280
+ return {
281
+ expressId: targetId,
282
+ positions: outPos,
283
+ normals: outNorm,
284
+ indices: new Uint32Array(tmpIdx),
285
+ color: merged.color,
286
+ };
287
+ }
172
288
  hasMeshData(expressId, modelIndex) {
173
289
  const pieces = this.meshDataMap.get(expressId);
174
290
  if (!pieces || pieces.length === 0)
@@ -183,13 +299,31 @@ export class Scene {
183
299
  * Optionally filter by modelIndex for multi-model safety.
184
300
  */
185
301
  getMeshDataPieces(expressId, modelIndex) {
186
- const pieces = this.meshDataMap.get(expressId);
302
+ let pieces = this.meshDataMap.get(expressId);
187
303
  if (!pieces || pieces.length === 0)
188
304
  return undefined;
189
- if (modelIndex === undefined)
190
- return pieces;
191
- const filtered = pieces.filter((p) => p.modelIndex === modelIndex);
192
- return filtered.length > 0 ? filtered : undefined;
305
+ if (modelIndex !== undefined) {
306
+ pieces = pieces.filter((p) => p.modelIndex === modelIndex);
307
+ if (pieces.length === 0)
308
+ return undefined;
309
+ }
310
+ // For color-merged batches, extract only this entity's vertices so
311
+ // selection highlighting is per-entity, not the entire merged batch.
312
+ if (pieces.some(p => p.entityIds)) {
313
+ const extracted = [];
314
+ for (const piece of pieces) {
315
+ if (piece.entityIds) {
316
+ const ex = this.extractEntityFromMergedMesh(piece, expressId);
317
+ if (ex)
318
+ extracted.push(ex);
319
+ }
320
+ else {
321
+ extracted.push(piece);
322
+ }
323
+ }
324
+ return extracted.length > 0 ? extracted : undefined;
325
+ }
326
+ return pieces;
193
327
  }
194
328
  /**
195
329
  * Generate color key for grouping meshes.
@@ -219,24 +353,29 @@ export class Scene {
219
353
  if (this.cachedMaxBufferSize === 0) {
220
354
  this.cachedMaxBufferSize = this.getMaxBufferSize(device);
221
355
  }
356
+ const retainStreamingGeometry = !(isStreaming && this.ephemeralStreamingMode);
222
357
  // Route each mesh into a size-aware bucket for its color
223
358
  for (const meshData of meshDataArray) {
224
359
  const baseKey = this.colorKey(meshData.color);
225
360
  const bucketKey = this.resolveActiveBucket(baseKey, meshData);
226
- // Accumulate mesh data in the bucket (always needed for final merge)
227
- let bucket = this.buckets.get(bucketKey);
228
- if (!bucket) {
229
- bucket = { key: bucketKey, meshData: [], batchedMesh: null, vertexBytes: 0 };
230
- this.buckets.set(bucketKey, bucket);
231
- }
232
- bucket.meshData.push(meshData);
233
- // Track reverse mapping for O(1) bucket lookup in updateMeshColors
234
- this.meshDataBucket.set(meshData, bucket);
235
- // Also store individual mesh data for visibility filtering
236
- this.addMeshData(meshData);
237
- // Track pending keys for non-streaming rebuild only
238
- if (!isStreaming) {
239
- this.pendingBatchKeys.add(bucketKey);
361
+ if (retainStreamingGeometry || !isStreaming) {
362
+ // Accumulate mesh data in the bucket when we need later rebatching or
363
+ // CPU-side lookups. Huge-file mode intentionally skips this to keep JS
364
+ // memory bounded while fragments render directly from GPU batches.
365
+ let bucket = this.buckets.get(bucketKey);
366
+ if (!bucket) {
367
+ bucket = { key: bucketKey, meshData: [], batchedMesh: null, vertexBytes: 0 };
368
+ this.buckets.set(bucketKey, bucket);
369
+ }
370
+ bucket.meshData.push(meshData);
371
+ // Track reverse mapping for O(1) bucket lookup in updateMeshColors
372
+ this.meshDataBucket.set(meshData, bucket);
373
+ // Also store individual mesh data for visibility filtering
374
+ this.addMeshData(meshData);
375
+ // Track pending keys for non-streaming rebuild only
376
+ if (!isStreaming) {
377
+ this.pendingBatchKeys.add(bucketKey);
378
+ }
240
379
  }
241
380
  }
242
381
  if (isStreaming) {
@@ -303,12 +442,18 @@ export class Scene {
303
442
  */
304
443
  queueMeshes(meshes) {
305
444
  for (let i = 0; i < meshes.length; i++) {
306
- this.meshQueue.push(meshes[i]);
445
+ const fragments = this.splitMeshForStreaming(meshes[i]);
446
+ for (let j = 0; j < fragments.length; j++) {
447
+ this.meshQueue.push(fragments[j]);
448
+ }
307
449
  }
308
450
  }
309
451
  /** True if the mesh queue has pending work. */
310
452
  hasQueuedMeshes() {
311
- return this.meshQueue.length > 0;
453
+ return this.meshQueueReadIndex < this.meshQueue.length;
454
+ }
455
+ setEphemeralStreamingMode(enabled) {
456
+ this.ephemeralStreamingMode = enabled;
312
457
  }
313
458
  /**
314
459
  * Drain the mesh queue with a per-frame time budget.
@@ -318,15 +463,35 @@ export class Scene {
318
463
  * @returns true if any meshes were processed (caller should render)
319
464
  */
320
465
  flushPending(device, pipeline) {
321
- if (this.meshQueue.length === 0)
466
+ if (!this.hasQueuedMeshes())
322
467
  return false;
323
- // Drain the entire queue in one appendToBatches call.
324
- // The queue coalesces multiple React batches into a single GPU upload,
325
- // which is already bounded by the WASM→JS batch interval (~50-200ms).
326
- const meshes = this.meshQueue;
327
- this.meshQueue = [];
328
- this.appendToBatches(meshes, device, pipeline, true);
329
- return true;
468
+ // Drain the queue in moderately sized chunks instead of all at once.
469
+ // This preserves the per-frame time budget while cutting appendToBatches()
470
+ // overhead and front-of-array churn during huge desktop streams.
471
+ const MAX_MESHES_PER_FLUSH = 4096;
472
+ const MESHES_PER_APPEND = 128;
473
+ const FLUSH_BUDGET_MS = 10;
474
+ const start = performance.now();
475
+ let processed = 0;
476
+ while (this.meshQueueReadIndex < this.meshQueue.length && processed < MAX_MESHES_PER_FLUSH) {
477
+ const chunkSize = Math.min(MESHES_PER_APPEND, MAX_MESHES_PER_FLUSH - processed, this.meshQueue.length - this.meshQueueReadIndex);
478
+ const chunk = this.meshQueue.slice(this.meshQueueReadIndex, this.meshQueueReadIndex + chunkSize);
479
+ this.meshQueueReadIndex += chunkSize;
480
+ this.appendToBatches(chunk, device, pipeline, true);
481
+ processed += chunk.length;
482
+ if (processed >= MESHES_PER_APPEND && performance.now() - start >= FLUSH_BUDGET_MS) {
483
+ break;
484
+ }
485
+ }
486
+ if (this.meshQueueReadIndex >= this.meshQueue.length) {
487
+ this.meshQueue.length = 0;
488
+ this.meshQueueReadIndex = 0;
489
+ }
490
+ else if (this.meshQueueReadIndex >= 8192 && this.meshQueueReadIndex * 2 >= this.meshQueue.length) {
491
+ this.meshQueue = this.meshQueue.slice(this.meshQueueReadIndex);
492
+ this.meshQueueReadIndex = 0;
493
+ }
494
+ return processed > 0;
330
495
  }
331
496
  /**
332
497
  * Create lightweight fragment batches from a single streaming batch.
@@ -339,13 +504,15 @@ export class Scene {
339
504
  // Group new meshes by color for efficient fragment batches
340
505
  const colorGroups = new Map();
341
506
  for (const meshData of meshDataArray) {
342
- const key = this.colorKey(meshData.color);
343
- let group = colorGroups.get(key);
344
- if (!group) {
345
- group = [];
346
- colorGroups.set(key, group);
507
+ for (const fragment of this.splitMeshForStreaming(meshData)) {
508
+ const key = this.colorKey(fragment.color);
509
+ let group = colorGroups.get(key);
510
+ if (!group) {
511
+ group = [];
512
+ colorGroups.set(key, group);
513
+ }
514
+ group.push(fragment);
347
515
  }
348
- group.push(meshData);
349
516
  }
350
517
  // Create one fragment batch per color group (with buffer limit splitting)
351
518
  for (const [, group] of colorGroups) {
@@ -358,6 +525,44 @@ export class Scene {
358
525
  }
359
526
  }
360
527
  }
528
+ splitMeshForStreaming(meshData) {
529
+ const vertexBytes = meshData.positions.byteLength + meshData.normals.byteLength;
530
+ if (meshData.indices.length <= Scene.STREAMING_FRAGMENT_MAX_INDICES &&
531
+ vertexBytes <= Scene.STREAMING_FRAGMENT_MAX_VERTEX_BYTES) {
532
+ return [meshData];
533
+ }
534
+ const maxIndexCount = Math.max(3, Math.floor(Scene.STREAMING_FRAGMENT_MAX_INDICES / 3) * 3);
535
+ const fragments = [];
536
+ for (let start = 0; start < meshData.indices.length; start += maxIndexCount) {
537
+ const end = Math.min(start + maxIndexCount, meshData.indices.length);
538
+ const sourceIndices = meshData.indices.subarray(start, end);
539
+ const remap = new Map();
540
+ const positions = [];
541
+ const normals = [];
542
+ const indices = new Uint32Array(sourceIndices.length);
543
+ for (let i = 0; i < sourceIndices.length; i++) {
544
+ const sourceIndex = sourceIndices[i];
545
+ let nextIndex = remap.get(sourceIndex);
546
+ if (nextIndex === undefined) {
547
+ nextIndex = remap.size;
548
+ remap.set(sourceIndex, nextIndex);
549
+ const base = sourceIndex * 3;
550
+ positions.push(meshData.positions[base], meshData.positions[base + 1], meshData.positions[base + 2]);
551
+ normals.push(meshData.normals[base], meshData.normals[base + 1], meshData.normals[base + 2]);
552
+ }
553
+ indices[i] = nextIndex;
554
+ }
555
+ fragments.push({
556
+ expressId: meshData.expressId,
557
+ ifcType: meshData.ifcType,
558
+ positions: new Float32Array(positions),
559
+ normals: new Float32Array(normals),
560
+ indices,
561
+ color: meshData.color,
562
+ });
563
+ }
564
+ return fragments;
565
+ }
361
566
  /**
362
567
  * Finalize streaming: destroy temporary fragment batches and do one full
363
568
  * O(N) merge of all accumulated mesh data into proper batches.
@@ -450,6 +655,10 @@ export class Scene {
450
655
  * @returns Promise that resolves when all batches are rebuilt
451
656
  */
452
657
  finalizeStreamingAsync(device, pipeline, budgetMs = 8) {
658
+ if (this.ephemeralStreamingMode) {
659
+ this.finishEphemeralStreaming();
660
+ return Promise.resolve();
661
+ }
453
662
  if (this.streamingFragments.length === 0)
454
663
  return Promise.resolve();
455
664
  // --- Synchronous preamble (fast O(N) bookkeeping) ---
@@ -540,6 +749,60 @@ export class Scene {
540
749
  processChunk();
541
750
  });
542
751
  }
752
+ finishEphemeralStreaming() {
753
+ if (this.streamingFragments.length === 0) {
754
+ this.ephemeralStreamingMode = false;
755
+ return;
756
+ }
757
+ // Preserve lightweight per-entity bounds so large-model picking and
758
+ // selection can continue to work after we discard CPU mesh arrays.
759
+ for (const [expressId, pieces] of this.meshDataMap) {
760
+ if (this.boundingBoxes.has(expressId))
761
+ continue;
762
+ let minX = Infinity, minY = Infinity, minZ = Infinity;
763
+ let maxX = -Infinity, maxY = -Infinity, maxZ = -Infinity;
764
+ for (const piece of pieces) {
765
+ const positions = piece.positions;
766
+ for (let i = 0; i < positions.length; i += 3) {
767
+ const x = positions[i];
768
+ const y = positions[i + 1];
769
+ const z = positions[i + 2];
770
+ if (x < minX)
771
+ minX = x;
772
+ if (y < minY)
773
+ minY = y;
774
+ if (z < minZ)
775
+ minZ = z;
776
+ if (x > maxX)
777
+ maxX = x;
778
+ if (y > maxY)
779
+ maxY = y;
780
+ if (z > maxZ)
781
+ maxZ = z;
782
+ }
783
+ }
784
+ this.boundingBoxes.set(expressId, {
785
+ min: { x: minX, y: minY, z: minZ },
786
+ max: { x: maxX, y: maxY, z: maxZ },
787
+ });
788
+ }
789
+ this.streamingFragments = [];
790
+ this.buckets.clear();
791
+ this.meshDataBucket = new Map();
792
+ this.meshDataMap.clear();
793
+ this.activeBucketKey.clear();
794
+ this.pendingBatchKeys.clear();
795
+ for (const batch of this.partialBatchCache.values()) {
796
+ batch.vertexBuffer.destroy();
797
+ batch.indexBuffer.destroy();
798
+ if (batch.uniformBuffer)
799
+ batch.uniformBuffer.destroy();
800
+ }
801
+ this.partialBatchCache.clear();
802
+ this.partialBatchCacheKeys.clear();
803
+ this.geometryReleased = true;
804
+ this.ephemeralStreamingMode = false;
805
+ }
543
806
  /**
544
807
  * Release JS-side mesh geometry data (positions, normals, indices) after
545
808
  * GPU batches have been built. This frees the ~1.9GB of typed arrays that
@@ -786,17 +1049,19 @@ export class Scene {
786
1049
  const positions = mesh.positions;
787
1050
  const normals = mesh.normals;
788
1051
  const vertexCount = positions.length / 3;
789
- // Interleave vertex data (position + normal)
1052
+ // Interleave vertex data (position + normal + entityId)
790
1053
  // This loop is O(n) per mesh and unavoidable for interleaving
791
1054
  let outIdx = vertexBase * 7;
1055
+ const perVertexEntityIds = mesh.entityIds; // color-merged batches
792
1056
  let entityId = mesh.expressId >>> 0;
793
- if (entityId > MAX_ENCODED_ENTITY_ID) {
1057
+ if (!perVertexEntityIds && entityId > MAX_ENCODED_ENTITY_ID) {
794
1058
  if (!warnedEntityIdRange) {
795
1059
  warnedEntityIdRange = true;
796
1060
  console.warn('[Renderer] expressId exceeds 24-bit seam-ID encoding range; seam lines may collide.');
797
1061
  }
798
1062
  entityId = entityId & MAX_ENCODED_ENTITY_ID;
799
1063
  }
1064
+ const hasNormals = normals.length > 0;
800
1065
  for (let i = 0; i < vertexCount; i++) {
801
1066
  const srcIdx = i * 3;
802
1067
  const px = positions[srcIdx];
@@ -805,10 +1070,10 @@ export class Scene {
805
1070
  vertexData[outIdx++] = px;
806
1071
  vertexData[outIdx++] = py;
807
1072
  vertexData[outIdx++] = pz;
808
- vertexData[outIdx++] = normals[srcIdx];
809
- vertexData[outIdx++] = normals[srcIdx + 1];
810
- vertexData[outIdx++] = normals[srcIdx + 2];
811
- vertexDataU32[outIdx++] = entityId;
1073
+ vertexData[outIdx++] = hasNormals ? normals[srcIdx] : 0;
1074
+ vertexData[outIdx++] = hasNormals ? normals[srcIdx + 1] : 0;
1075
+ vertexData[outIdx++] = hasNormals ? normals[srcIdx + 2] : 0;
1076
+ vertexDataU32[outIdx++] = perVertexEntityIds ? (perVertexEntityIds[i] >>> 0) : entityId;
812
1077
  // Update bounds
813
1078
  if (px < minX)
814
1079
  minX = px;
@@ -1146,7 +1411,9 @@ export class Scene {
1146
1411
  this.partialBatchCache.clear();
1147
1412
  this.partialBatchCacheKeys.clear();
1148
1413
  this.meshQueue = [];
1414
+ this.meshQueueReadIndex = 0;
1149
1415
  this.geometryReleased = false;
1416
+ this.ephemeralStreamingMode = false;
1150
1417
  }
1151
1418
  /**
1152
1419
  * Calculate bounding box from actual mesh vertex data
@@ -1461,8 +1728,17 @@ export class Scene {
1461
1728
  for (const piece of pieces) {
1462
1729
  const positions = piece.positions;
1463
1730
  const indices = piece.indices;
1731
+ const pieceEntityIds = piece.entityIds; // per-vertex IDs for merged meshes
1464
1732
  // Test each triangle
1465
1733
  for (let i = 0; i < indices.length; i += 3) {
1734
+ // For color-merged meshes, skip triangles that don't belong to
1735
+ // this entity. Without this check, hitting ANY triangle in the
1736
+ // merged batch would attribute it to the candidate expressId.
1737
+ if (pieceEntityIds) {
1738
+ const vertIdx = indices[i];
1739
+ if (pieceEntityIds[vertIdx] !== expressId)
1740
+ continue;
1741
+ }
1466
1742
  const i0 = indices[i] * 3;
1467
1743
  const i1 = indices[i + 1] * 3;
1468
1744
  const i2 = indices[i + 2] * 3;