@vitessce/neuroglancer 3.9.5 → 3.9.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,21 +1,41 @@
1
+ /* eslint-disable max-len */
1
2
  /* eslint-disable no-unused-vars */
2
- import React, { useCallback, useMemo, useRef, useEffect, useState } from 'react';
3
+ import React, { useCallback, useMemo, useRef, useEffect, useState, useReducer } from 'react';
3
4
  import {
4
5
  TitleInfo,
6
+ useReady,
7
+ useInitialCoordination,
5
8
  useCoordination,
6
- useObsSetsData,
7
- useLoaders,
8
- useObsEmbeddingData,
9
9
  useCoordinationScopes,
10
+ useCoordinationScopesBy,
11
+ useComplexCoordination,
12
+ useMultiCoordinationScopesNonNull,
13
+ useMultiCoordinationScopesSecondaryNonNull,
14
+ useComplexCoordinationSecondary,
15
+ useLoaders,
16
+ useMergeCoordination,
17
+ useMultiObsPoints,
18
+ usePointMultiObsFeatureMatrixIndices,
19
+ useMultiObsSegmentations,
20
+ useSegmentationMultiFeatureSelection,
21
+ useSegmentationMultiObsFeatureMatrixIndices,
22
+ useSegmentationMultiObsSets,
23
+ useGridItemSize,
10
24
  } from '@vitessce/vit-s';
11
25
  import {
12
26
  ViewHelpMapping,
13
27
  ViewType,
28
+ CoordinationType,
14
29
  COMPONENT_COORDINATION_TYPES,
15
30
  } from '@vitessce/constants-internal';
16
31
  import { mergeObsSets, getCellColors, setObsSelection } from '@vitessce/sets-utils';
32
+ import { MultiLegend } from '@vitessce/legend';
17
33
  import { NeuroglancerComp } from './Neuroglancer.js';
18
34
  import { useNeuroglancerViewerState } from './data-hook-ng-utils.js';
35
+ import {
36
+ useMemoCustomComparison,
37
+ customIsEqualForCellColors,
38
+ } from './use-memo-custom-comparison.js';
19
39
  import { useStyles } from './styles.js';
20
40
  import {
21
41
  quaternionToEuler,
@@ -50,17 +70,31 @@ function rgbToHex(rgb) {
50
70
 
51
71
  export function NeuroglancerSubscriber(props) {
52
72
  const {
73
+ uuid,
53
74
  coordinationScopes: coordinationScopesRaw,
75
+ coordinationScopesBy: coordinationScopesByRaw,
54
76
  closeButtonVisible,
55
77
  downloadButtonVisible,
56
78
  removeGridComponent,
57
79
  theme,
58
- title = 'Neuroglancer',
80
+ title = 'Spatial',
81
+ subtitle = 'Powered by Neuroglancer',
59
82
  helpText = ViewHelpMapping.NEUROGLANCER,
83
+ // Note: this is a temporary mechanism
84
+ // to pass an initial NG camera state.
85
+ // Ideally, all camera state should be passed via
86
+ // the existing spatialZoom, spatialTargetX, spatialRotationOrbit, etc,
87
+ // and then NeuroglancerSubscriber should internally convert
88
+ // to NG-compatible values, which would eliminate the need for this.
89
+ initialNgCameraState,
60
90
  } = props;
61
91
 
62
92
  const loaders = useLoaders();
93
+ const mergeCoordination = useMergeCoordination();
94
+
95
+ // Account for possible meta-coordination.
63
96
  const coordinationScopes = useCoordinationScopes(coordinationScopesRaw);
97
+ const coordinationScopesBy = useCoordinationScopesBy(coordinationScopes, coordinationScopesByRaw);
64
98
 
65
99
  const [{
66
100
  dataset,
@@ -90,32 +124,245 @@ export function NeuroglancerSubscriber(props) {
90
124
  // setSpatialRotationZ: setRotationZ,
91
125
  setSpatialRotationOrbit: setRotationOrbit,
92
126
  setSpatialZoom: setZoom,
93
- }] = useCoordination(COMPONENT_COORDINATION_TYPES[ViewType.NEUROGLANCER], coordinationScopes);
127
+ }] = useCoordination(
128
+ COMPONENT_COORDINATION_TYPES[ViewType.NEUROGLANCER],
129
+ coordinationScopes,
130
+ );
94
131
 
132
+ const [ngWidth, ngHeight, containerRef] = useGridItemSize();
95
133
 
96
- // console.log("NG Subs Render orbit", spatialRotationX, spatialRotationY, spatialRotationOrbit);
134
+ const [
135
+ segmentationLayerScopes,
136
+ segmentationChannelScopesByLayer,
137
+ ] = useMultiCoordinationScopesSecondaryNonNull(
138
+ CoordinationType.SEGMENTATION_CHANNEL,
139
+ CoordinationType.SEGMENTATION_LAYER,
140
+ coordinationScopes,
141
+ coordinationScopesBy,
142
+ );
97
143
 
98
- const { classes } = useStyles();
144
+ const pointLayerScopes = useMultiCoordinationScopesNonNull(
145
+ CoordinationType.POINT_LAYER,
146
+ coordinationScopes,
147
+ );
148
+
149
+ // Object keys are coordination scope names for spatialSegmentationLayer.
150
+ const segmentationLayerCoordination = useComplexCoordination(
151
+ [
152
+ CoordinationType.FILE_UID,
153
+ CoordinationType.SEGMENTATION_CHANNEL,
154
+ CoordinationType.SPATIAL_LAYER_VISIBLE,
155
+ CoordinationType.SPATIAL_LAYER_OPACITY,
156
+ ],
157
+ coordinationScopes,
158
+ coordinationScopesBy,
159
+ CoordinationType.SEGMENTATION_LAYER,
160
+ );
161
+
162
+ // Object keys are coordination scope names for spatialSegmentationChannel.
163
+ const segmentationChannelCoordination = useComplexCoordinationSecondary(
164
+ [
165
+ CoordinationType.OBS_TYPE,
166
+ CoordinationType.SPATIAL_TARGET_C,
167
+ CoordinationType.SPATIAL_CHANNEL_VISIBLE,
168
+ CoordinationType.SPATIAL_CHANNEL_OPACITY,
169
+ CoordinationType.SPATIAL_CHANNEL_COLOR,
170
+ CoordinationType.SPATIAL_SEGMENTATION_FILLED,
171
+ CoordinationType.SPATIAL_SEGMENTATION_STROKE_WIDTH,
172
+ CoordinationType.OBS_COLOR_ENCODING,
173
+ CoordinationType.FEATURE_SELECTION,
174
+ CoordinationType.FEATURE_AGGREGATION_STRATEGY,
175
+ CoordinationType.FEATURE_VALUE_COLORMAP,
176
+ CoordinationType.FEATURE_VALUE_COLORMAP_RANGE,
177
+ CoordinationType.OBS_SET_COLOR,
178
+ CoordinationType.OBS_SET_SELECTION,
179
+ CoordinationType.ADDITIONAL_OBS_SETS,
180
+ CoordinationType.OBS_HIGHLIGHT,
181
+ CoordinationType.TOOLTIPS_VISIBLE,
182
+ CoordinationType.TOOLTIP_CROSSHAIRS_VISIBLE,
183
+ CoordinationType.LEGEND_VISIBLE,
184
+ ],
185
+ coordinationScopes,
186
+ coordinationScopesBy,
187
+ CoordinationType.SEGMENTATION_LAYER,
188
+ CoordinationType.SEGMENTATION_CHANNEL,
189
+ );
190
+
191
+ // Point layer
192
+ const pointLayerCoordination = useComplexCoordination(
193
+ [
194
+ CoordinationType.OBS_TYPE,
195
+ CoordinationType.SPATIAL_LAYER_VISIBLE,
196
+ CoordinationType.SPATIAL_LAYER_OPACITY,
197
+ CoordinationType.OBS_COLOR_ENCODING,
198
+ CoordinationType.FEATURE_COLOR,
199
+ CoordinationType.FEATURE_FILTER_MODE,
200
+ CoordinationType.FEATURE_SELECTION,
201
+ CoordinationType.FEATURE_VALUE_COLORMAP,
202
+ CoordinationType.FEATURE_VALUE_COLORMAP_RANGE,
203
+ CoordinationType.SPATIAL_LAYER_COLOR,
204
+ CoordinationType.OBS_HIGHLIGHT,
205
+ CoordinationType.TOOLTIPS_VISIBLE,
206
+ CoordinationType.TOOLTIP_CROSSHAIRS_VISIBLE,
207
+ CoordinationType.LEGEND_VISIBLE,
208
+ ],
209
+ coordinationScopes,
210
+ coordinationScopesBy,
211
+ CoordinationType.POINT_LAYER,
212
+ );
213
+
214
+ // Points data
215
+ const [obsPointsData, obsPointsDataStatus, obsPointsUrls, obsPointsErrors] = useMultiObsPoints(
216
+ coordinationScopes, coordinationScopesBy, loaders, dataset,
217
+ mergeCoordination, uuid,
218
+ );
219
+
220
+ const [pointMultiIndicesData, pointMultiIndicesDataStatus, pointMultiIndicesDataErrors] = usePointMultiObsFeatureMatrixIndices(
221
+ coordinationScopes, coordinationScopesBy, loaders, dataset,
222
+ );
223
+
224
+
225
+ // Segmentations data
226
+ const [obsSegmentationsData, obsSegmentationsDataStatus, obsSegmentationsUrls, obsSegmentationsDataErrors] = useMultiObsSegmentations(
227
+ coordinationScopes, coordinationScopesBy, loaders, dataset,
228
+ mergeCoordination, uuid,
229
+ );
99
230
 
100
- const [{ obsSets: cellSets }] = useObsSetsData(
101
- loaders, dataset, false,
102
- { setObsSetSelection: setCellSetSelection, setObsSetColor: setCellSetColor },
103
- { cellSetSelection, obsSetColor: cellSetColor },
104
- { obsType },
231
+ const [obsSegmentationsSetsData, obsSegmentationsSetsDataStatus, obsSegmentationsSetsDataErrors] = useSegmentationMultiObsSets(
232
+ coordinationScopes, coordinationScopesBy, loaders, dataset,
105
233
  );
106
234
 
107
- const [{ obsIndex }] = useObsEmbeddingData(
108
- loaders, dataset, true, {}, {},
109
- { obsType, embeddingType: mapping },
235
+ const [
236
+ segmentationMultiExpressionData,
237
+ segmentationMultiLoadedFeatureSelection,
238
+ segmentationMultiExpressionExtents,
239
+ segmentationMultiExpressionNormData,
240
+ segmentationMultiFeatureSelectionStatus,
241
+ segmentationMultiFeatureSelectionErrors,
242
+ ] = useSegmentationMultiFeatureSelection(
243
+ coordinationScopes, coordinationScopesBy, loaders, dataset,
110
244
  );
111
245
 
112
- const [initalViewerState] = useNeuroglancerViewerState(
113
- loaders, dataset, false,
114
- undefined, undefined,
115
- { obsType: 'cell' },
246
+ const [segmentationMultiIndicesData, segmentationMultiIndicesDataStatus, segmentationMultiIndicesDataErrors] = useSegmentationMultiObsFeatureMatrixIndices(
247
+ coordinationScopes, coordinationScopesBy, loaders, dataset,
116
248
  );
117
249
 
118
- const latestViewerStateRef = useRef(initalViewerState);
250
+ const errors = [
251
+ ...obsPointsErrors,
252
+ ...obsSegmentationsDataErrors,
253
+ ...obsSegmentationsSetsDataErrors,
254
+ ...pointMultiIndicesDataErrors,
255
+ ...segmentationMultiFeatureSelectionErrors,
256
+ ...segmentationMultiIndicesDataErrors,
257
+ ];
258
+
259
+ const isReady = useReady([
260
+ // Points
261
+ obsPointsDataStatus,
262
+ pointMultiIndicesDataStatus,
263
+ // Segmentations
264
+ obsSegmentationsDataStatus,
265
+ obsSegmentationsSetsDataStatus,
266
+ segmentationMultiFeatureSelectionStatus,
267
+ segmentationMultiIndicesDataStatus,
268
+ ]);
269
+
270
+ // console.log("NG Subs Render orbit", spatialRotationX, spatialRotationY, spatialRotationOrbit);
271
+
272
+ const { classes } = useStyles();
273
+
274
+ const segmentationColorMapping = useMemoCustomComparison(() => {
275
+ // TODO: ultimately, segmentationColorMapping becomes cellColorMapping, and makes its way into the viewerState.
276
+ // It may make sense to merge the multiple useMemoCustomComparisons upstream of derivedViewerState into one.
277
+ // This would complicate the comparison function, but the multiple separate useMemos are not really necessary.
278
+ const result = {};
279
+ segmentationLayerScopes?.forEach((layerScope) => {
280
+ result[layerScope] = {};
281
+ segmentationChannelScopesByLayer?.[layerScope]?.forEach((channelScope) => {
282
+ const { obsSets: layerSets, obsIndex: layerIndex } = obsSegmentationsSetsData
283
+ ?.[layerScope]?.[channelScope] || {};
284
+ if (layerSets && layerIndex) {
285
+ const {
286
+ obsSetColor,
287
+ obsColorEncoding,
288
+ obsSetSelection,
289
+ additionalObsSets,
290
+ } = segmentationChannelCoordination[0][layerScope][channelScope];
291
+ const mergedCellSets = mergeObsSets(layerSets, additionalObsSets);
292
+ const cellColors = getCellColors({
293
+ cellSets: mergedCellSets,
294
+ cellSetSelection: obsSetSelection,
295
+ cellSetColor: obsSetColor,
296
+ obsIndex: layerIndex,
297
+ theme,
298
+ });
299
+ // Convert the list of colors to an object of hex strings, which NG requires.
300
+ const ngCellColors = {};
301
+ cellColors.forEach((color, i) => {
302
+ ngCellColors[i] = rgbToHex(color);
303
+ });
304
+ /* // TODO: Is this necessary?
305
+ const obsColorIndices = treeToCellSetColorIndicesBySetNames(
306
+ mergedLayerSets,
307
+ obsSetSelection,
308
+ obsSetColor,
309
+ );
310
+ */
311
+ result[layerScope][channelScope] = ngCellColors;
312
+ }
313
+ });
314
+ });
315
+ return result;
316
+ }, {
317
+ // The dependencies for the comparison,
318
+ // used by the custom equality function.
319
+ segmentationLayerScopes,
320
+ segmentationChannelScopesByLayer,
321
+ obsSegmentationsSetsData,
322
+ segmentationChannelCoordination,
323
+ theme,
324
+ }, customIsEqualForCellColors);
325
+
326
+
327
+ // Obtain the Neuroglancer viewerState object.
328
+ const initalViewerState = useNeuroglancerViewerState(
329
+ theme,
330
+ segmentationLayerScopes,
331
+ segmentationChannelScopesByLayer,
332
+ segmentationLayerCoordination,
333
+ segmentationChannelCoordination,
334
+ obsSegmentationsUrls,
335
+ obsSegmentationsData,
336
+ pointLayerScopes,
337
+ pointLayerCoordination,
338
+ obsPointsUrls,
339
+ obsPointsData,
340
+ pointMultiIndicesData,
341
+ );
342
+
343
+
344
+ const [latestViewerStateIteration, incrementLatestViewerStateIteration] = useReducer(x => x + 1, 0);
345
+ const latestViewerStateRef = useRef({
346
+ ...initalViewerState,
347
+ ...(initialNgCameraState ?? {}),
348
+ });
349
+
350
+ useEffect(() => {
351
+ const prevNgCameraState = {
352
+ position: latestViewerStateRef.current.position,
353
+ projectionOrientation: latestViewerStateRef.current.projectionOrientation,
354
+ projectionScale: latestViewerStateRef.current.projectionScale,
355
+ };
356
+ latestViewerStateRef.current = {
357
+ ...initalViewerState,
358
+ ...prevNgCameraState,
359
+ };
360
+ // Force a re-render by incrementing a piece of state.
361
+ // This works because we have made latestViewerStateIteration
362
+ // a dependency for derivedViewerState, triggering the useMemo downstream.
363
+ incrementLatestViewerStateIteration();
364
+ }, [initalViewerState]);
365
+
119
366
  const initialRotationPushedRef = useRef(false);
120
367
 
121
368
  const ngRotPushAtRef = useRef(0);
@@ -147,19 +394,6 @@ export function NeuroglancerSubscriber(props) {
147
394
  ty: spatialTargetY,
148
395
  });
149
396
 
150
- const mergedCellSets = useMemo(() => mergeObsSets(
151
- cellSets, additionalCellSets,
152
- ), [cellSets, additionalCellSets]);
153
-
154
- const cellColors = useMemo(() => getCellColors({
155
- cellSets: mergedCellSets,
156
- cellSetSelection,
157
- cellSetColor,
158
- obsIndex,
159
- theme,
160
- }), [mergedCellSets, theme,
161
- cellSetColor, cellSetSelection, obsIndex]);
162
-
163
397
  /*
164
398
  * handleStateUpdate - Interactions from NG to Vitessce are pushed here
165
399
  */
@@ -286,6 +520,10 @@ export function NeuroglancerSubscriber(props) {
286
520
  if (alreadySelectedId) {
287
521
  return;
288
522
  }
523
+ // TODO: update this now that we are using layer/channel-based organization of segmentations.
524
+ // There is no more "top-level" obsSets coordination; it is only on a per-layer basis.
525
+ // We should probably just assume the first segmentation layer/channel when updating the logic,
526
+ // since it is not clear how we would determine which layer/channel to update if there are multiple.
289
527
  setObsSelection(
290
528
  selectedCellIds, additionalCellSets, cellSetColor,
291
529
  setCellSetSelection, setAdditionalCellSets, setCellSetColor,
@@ -298,34 +536,26 @@ export function NeuroglancerSubscriber(props) {
298
536
  setCellColorEncoding, setCellSetColor, setCellSetSelection,
299
537
  ]);
300
538
 
301
- const batchedUpdateTimeoutRef = useRef(null);
302
- const [batchedCellColors, setBatchedCellColors] = useState(cellColors);
303
-
304
- useEffect(() => {
305
- if (batchedUpdateTimeoutRef.current) {
306
- clearTimeout(batchedUpdateTimeoutRef.current);
307
- }
308
- batchedUpdateTimeoutRef.current = setTimeout(() => {
309
- setBatchedCellColors(cellColors);
310
- }, 100);
311
-
312
- // TODO: look into deferredValue from React
313
- // startTransition(() => {
314
- // setBatchedCellColors(cellColors);
315
- // });
316
- }, [cellColors]);
317
- // TODO use a ref if slow - see prev commits
318
- const cellColorMapping = useMemo(() => {
319
- const colorMapping = {};
320
- batchedCellColors.forEach((color, cell) => {
321
- colorMapping[cell] = rgbToHex(color);
322
- });
323
- return colorMapping;
324
- }, [batchedCellColors]);
539
+ // Get the ultimate cellColorMapping to pass to NeuroglancerComp as a prop.
540
+ // For now, we take the first layer and channel for cell colors.
541
+ const cellColorMapping = useMemo(() => (segmentationColorMapping
542
+ ?.[segmentationLayerScopes?.[0]]
543
+ ?.[segmentationChannelScopesByLayer?.[segmentationLayerScopes?.[0]]?.[0]]
544
+ ?? {}
545
+ ), [segmentationColorMapping]);
325
546
 
326
547
 
548
+ // TODO: try to simplify using useMemoCustomComparison?
549
+ // This would allow us to refactor a lot of the checking-for-changes logic into a comparison function,
550
+ // simplify some of the manual bookkeeping like with prevCoordsRef and lastInteractionSource,
551
+ // and would allow us to potentially remove usage of some refs (e.g., latestViewerStateRef)
552
+ // by relying on the memoization to prevent unnecessary updates.
327
553
  const derivedViewerState = useMemo(() => {
328
554
  const { current } = latestViewerStateRef;
555
+ if (current.layers.length <= 0) {
556
+ return current;
557
+ }
558
+
329
559
  const nextSegments = Object.keys(cellColorMapping);
330
560
  const prevLayer = current?.layers?.[0] || {};
331
561
  const prevSegments = prevLayer.segments || [];
@@ -495,37 +725,58 @@ export function NeuroglancerSubscriber(props) {
495
725
 
496
726
  return updated;
497
727
  }, [cellColorMapping, spatialZoom, spatialRotationX, spatialRotationY,
498
- spatialRotationZ, spatialTargetX, spatialTargetY]);
728
+ spatialRotationZ, spatialTargetX, spatialTargetY, initalViewerState,
729
+ latestViewerStateIteration]);
499
730
 
500
731
  const onSegmentHighlight = useCallback((obsId) => {
501
732
  setCellHighlight(String(obsId));
502
- }, [obsIndex, setCellHighlight]);
733
+ }, [setCellHighlight]);
503
734
 
504
735
  // TODO: if all cells are deselected, a black view is shown, rather we want to show empty NG view?
505
736
  // if (!cellColorMapping || Object.keys(cellColorMapping).length === 0) {
506
737
  // return;
507
738
  // }
508
739
 
740
+ const hasLayers = derivedViewerState?.layers?.length > 0;
741
+ // console.log(derivedViewerState);
742
+
509
743
  return (
510
744
  <TitleInfo
511
745
  title={title}
746
+ info={subtitle}
512
747
  helpText={helpText}
513
748
  isSpatial
514
749
  theme={theme}
515
750
  closeButtonVisible={closeButtonVisible}
516
751
  downloadButtonVisible={downloadButtonVisible}
517
752
  removeGridComponent={removeGridComponent}
518
- isReady
753
+ isReady={isReady}
754
+ errors={errors}
519
755
  withPadding={false}
520
756
  >
521
- <NeuroglancerComp
522
- classes={classes}
523
- onSegmentClick={onSegmentClick}
524
- onSelectHoveredCoords={onSegmentHighlight}
525
- viewerState={derivedViewerState}
526
- cellColorMapping={cellColorMapping}
527
- setViewerState={handleStateUpdate}
528
- />
757
+ <div style={{ position: 'relative', width: '100%', height: '100%' }} ref={containerRef}>
758
+ <div style={{ position: 'absolute', top: 0, right: 0, zIndex: 50 }}>
759
+ <MultiLegend
760
+ theme="dark"
761
+ maxHeight={ngHeight}
762
+ segmentationLayerScopes={segmentationLayerScopes}
763
+ segmentationLayerCoordination={segmentationLayerCoordination}
764
+ segmentationChannelScopesByLayer={segmentationChannelScopesByLayer}
765
+ segmentationChannelCoordination={segmentationChannelCoordination}
766
+ />
767
+ </div>
768
+
769
+ {hasLayers ? (
770
+ <NeuroglancerComp
771
+ classes={classes}
772
+ onSegmentClick={onSegmentClick}
773
+ onSelectHoveredCoords={onSegmentHighlight}
774
+ viewerState={derivedViewerState}
775
+ cellColorMapping={cellColorMapping}
776
+ setViewerState={handleStateUpdate}
777
+ />
778
+ ) : null}
779
+ </div>
529
780
  </TitleInfo>
530
781
  );
531
782
  }
package/src/README.md ADDED
@@ -0,0 +1,28 @@
1
+ # Neuroglancer view
2
+
3
+ This view is powered by Neuroglancer.
4
+ Here, we provide developer-facing documentation on working with Neuroglancer and its `viewerState`:
5
+
6
+ ## viewerState
7
+
8
+ ### Camera position (zoom, translation, rotation)
9
+
10
+ The following properties in the viewerState control the camera:
11
+
12
+ - `viewerState.position`
13
+ - `viewerState.projectionScale`
14
+ - `viewerState.projectionOrientation`
15
+
16
+ The complete viewerState schema is available in the [Neuroglancer documentation](https://neuroglancer-docs.web.app/json/api/index.html).
17
+
18
+ ## Mesh format
19
+
20
+ See the Neuroglancer documentation to learn about the [precomputed multi-resolution mesh format](https://github.com/google/neuroglancer/blob/master/src/datasource/precomputed/meshes.md#multi-resolution-mesh-format).
21
+
22
+ ## Points format
23
+
24
+
25
+
26
+ ## Converting a SpatialData object to the Neuroglancer data formats
27
+
28
+ Use [tissue-map-tools](https://github.com/hms-dbmi/tissue-map-tools) to convert data from a SpatialData object to the mesh and point formats that are compatible with Neuroglancer.