nystrom-ncut 0.0.12__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nystrom_ncut/__init__.py CHANGED
@@ -1,5 +1,4 @@
 from .nystrom import (
-    DistanceRealization,
     NCut,
     axis_align,
 )
@@ -15,7 +14,7 @@ from .visualize_utils import (
     rgb_from_tsne_2d,
     rgb_from_umap_3d,
     rgb_from_umap_2d,
-    rgb_from_cosine_tsne_3d,
+    rgb_from_euclidean_tsne_3d,
     rotate_rgb_cube,
     convert_to_lab_color,
     get_mask,
nystrom_ncut/common.py CHANGED
@@ -24,7 +24,16 @@ def lazy_normalize(x: torch.Tensor, n: int = 1000, **normalize_kwargs: Any) -> t
     return Fn.normalize(x, **normalize_kwargs)
 
 
-def quantile_min_max(x, q1=0.01, q2=0.99, n_sample=10000):
+def to_euclidean(x: torch.Tensor, disttype: DistanceOptions) -> torch.Tensor:
+    if disttype == "cosine":
+        return lazy_normalize(x, p=2, dim=-1)
+    elif disttype == "rbf":
+        return x
+    else:
+        raise ValueError(f"to_euclidean not implemented for disttype {disttype}.")
+
+
+def quantile_min_max(x: torch.Tensor, q1: float, q2: float, n_sample: int = 10000):
     if x.shape[0] > n_sample:
         np.random.seed(0)
         random_idx = np.random.choice(x.shape[0], n_sample, replace=False)
@@ -34,7 +43,7 @@ def quantile_min_max(x, q1=0.01, q2=0.99, n_sample=10000):
     return vmin, vmax
 
 
-def quantile_normalize(x, q=0.95):
+def quantile_normalize(x: torch.Tensor, q: float = 0.95):
     """normalize each dimension of x to [0, 1], take 95-th percentage, this robust to outliers
     </br> 1. sort x
     </br> 2. take q-th quantile
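
The new to_euclidean helper is the pivot of this release: it maps features into a space where plain Euclidean distance is meaningful for the chosen metric, and the sampling and visualization code below now routes through it. A quick sketch of its contract (the feature tensor is a placeholder):

    import torch
    from nystrom_ncut.common import to_euclidean

    x = torch.randn(100, 64)
    # "rbf" features are treated as already Euclidean and pass through unchanged;
    # "cosine" features are L2-normalized so Euclidean distance tracks cosine distance.
    assert torch.allclose(to_euclidean(x, "rbf"), x)
    assert torch.allclose(to_euclidean(x, "cosine").norm(dim=-1), torch.ones(100))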
nystrom_ncut/nystrom/distance_realization.py CHANGED
@@ -122,6 +122,7 @@ class DistanceRealization(OnlineNystromSubsampleFit):
             n_components=n_components,
             kernel=GramKernel(distance, eig_solver),
             num_sample=num_sample,
+            distance=distance,
             sample_method=sample_method,
             eig_solver=eig_solver,
             chunk_size=chunk_size,
nystrom_ncut/nystrom/normalized_cut.py CHANGED
@@ -118,11 +118,11 @@ class NCut(OnlineNystromSubsampleFit):
             n_components=n_components,
             kernel=LaplacianKernel(affinity_focal_gamma, distance, eig_solver),
             num_sample=num_sample,
+            distance=distance,
             sample_method=sample_method,
             eig_solver=eig_solver,
             chunk_size=chunk_size,
         )
-        self.distance: DistanceOptions = distance
 
 
 def axis_align(eigen_vectors: torch.Tensor, max_iter=300):
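
Both subclasses now hand distance to the shared base class instead of NCut storing it locally, so farthest-point subsampling uses the same metric as the affinity kernel. A hedged construction sketch, assuming the remaining constructor defaults are unchanged from 0.0.12 and the base class keeps its sklearn-style fit_transform:

    from nystrom_ncut import NCut

    model = NCut(n_components=20, num_sample=10000, distance="rbf")
    eigenvectors = model.fit_transform(features)  # features: [n x d] torch.Tensor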
nystrom_ncut/nystrom/nystrom.py CHANGED
@@ -4,6 +4,7 @@ from typing import Literal, Tuple
 import torch
 
 from ..common import (
+    DistanceOptions,
     SampleOptions,
     ceildiv,
 )
@@ -145,6 +146,7 @@ class OnlineNystromSubsampleFit(OnlineNystrom):
         n_components: int,
         kernel: OnlineKernel,
         num_sample: int,
+        distance: DistanceOptions,
         sample_method: SampleOptions,
         eig_solver: EigSolverOptions = "svd_lowrank",
         chunk_size: int = 8192,
@@ -157,6 +159,7 @@ class OnlineNystromSubsampleFit(OnlineNystrom):
             chunk_size=chunk_size,
         )
         self.num_sample: int = num_sample
+        self.distance: DistanceOptions = distance
         self.sample_method: SampleOptions = sample_method
         self.anchor_indices: torch.Tensor = None
 
@@ -176,8 +179,9 @@ class OnlineNystromSubsampleFit(OnlineNystrom):
             self.anchor_indices = precomputed_sampled_indices
         else:
             self.anchor_indices = run_subgraph_sampling(
-                features,
-                self.num_sample,
+                features=features,
+                num_sample=self.num_sample,
+                disttype=self.distance,
                 sample_method=self.sample_method,
             )
         sampled_features = features[self.anchor_indices]
nystrom_ncut/propagation_utils.py CHANGED
@@ -9,15 +9,17 @@ from .common import (
     SampleOptions,
     ceildiv,
     lazy_normalize,
+    to_euclidean,
 )
 
 
-@torch.no_grad()
+# @torch.no_grad()
 def run_subgraph_sampling(
     features: torch.Tensor,
     num_sample: int,
+    disttype: DistanceOptions,
+    sample_method: SampleOptions,
     max_draw: int = 1000000,
-    sample_method: SampleOptions = "farthest",
 ):
     if num_sample >= features.shape[0]:
         # if too many samples, use all samples and bypass Nystrom-like approximation
@@ -28,6 +30,7 @@ def run_subgraph_sampling(
     else:
         # sample subgraph
        if sample_method == "farthest":  # default
+            features = to_euclidean(features, disttype)
            if num_sample > max_draw:
                logging.warning(
                    f"num_sample is larger than max_draw, apply farthest point sampling on random sampled {max_draw} samples"
@@ -144,12 +147,12 @@ def extrapolate_knn(
     anchor_features: torch.Tensor,          # [n x d]
     anchor_output: torch.Tensor,            # [n x d']
     extrapolation_features: torch.Tensor,   # [m x d]
+    distance: DistanceOptions,
     knn: int = 10,                          # k
-    distance: DistanceOptions = "cosine",
     affinity_focal_gamma: float = 1.0,
     chunk_size: int = 8192,
     device: str = None,
-    move_output_to_cpu: bool = False
+    move_output_to_cpu: bool = False,
 ) -> torch.Tensor:                          # [m x d']
     """A generic function to propagate new nodes using KNN.
 
@@ -168,7 +171,7 @@ def extrapolate_knn(
     >>> old_eigenvectors = torch.randn(3000, 20)
     >>> old_features = torch.randn(3000, 100)
     >>> new_features = torch.randn(200, 100)
-    >>> new_eigenvectors = extrapolate_knn(old_features,old_eigenvectors,new_features,knn=3)
+    >>> new_eigenvectors = extrapolate_knn(old_features, old_eigenvectors, new_features, knn=3)
     >>> # new_eigenvectors.shape = (200, 20)
 
     """
@@ -197,21 +200,24 @@ def extrapolate_knn(
         _V = _V.cpu()
         V_list.append(_V)
 
-    anchor_output = torch.cat(V_list, dim=0)
-    return anchor_output
+    extrapolation_output = torch.cat(V_list, dim=0)
+    return extrapolation_output
 
 
 # wrapper functions for adding new nodes to existing graph
 def extrapolate_knn_with_subsampling(
-    full_features: torch.Tensor,
-    full_output: torch.Tensor,
-    extrapolation_features: torch.Tensor,
-    knn: int,
-    num_sample: int,
+    full_features: torch.Tensor,            # [n x d]
+    full_output: torch.Tensor,              # [n x d']
+    extrapolation_features: torch.Tensor,   # [m x d]
+    num_sample: int,                        # n'
     sample_method: SampleOptions,
-    chunk_size: int,
-    device: str
-):
+    distance: DistanceOptions,
+    knn: int = 10,                          # k
+    affinity_focal_gamma: float = 1.0,
+    chunk_size: int = 8192,
+    device: str = None,
+    move_output_to_cpu: bool = False,
+) -> torch.Tensor:                          # [m x d']
     """Propagate eigenvectors to new nodes using KNN. Note: this is equivalent to the class API `NCUT.tranform(new_features)`, expect for the sampling is re-done in this function.
     Args:
         full_output (torch.Tensor): eigenvectors from existing nodes, shape (num_sample, num_eig)
@@ -237,8 +243,9 @@ def extrapolate_knn_with_subsampling(
 
     # sample subgraph
     anchor_indices = run_subgraph_sampling(
-        full_features,
-        num_sample,
+        features=full_features,
+        num_sample=num_sample,
+        disttype=distance,
         sample_method=sample_method,
     )
 
@@ -247,12 +254,15 @@ def extrapolate_knn_with_subsampling(
     extrapolation_features = extrapolation_features.to(device)
 
     # propagate eigenvectors from subgraph to new nodes
-    new_eigenvectors = extrapolate_knn(
+    extrapolation_output = extrapolate_knn(
         anchor_features,
         anchor_output,
         extrapolation_features,
+        distance,
         knn=knn,
+        affinity_focal_gamma=affinity_focal_gamma,
         chunk_size=chunk_size,
-        device=device
+        device=device,
+        move_output_to_cpu=move_output_to_cpu,
     )
-    return new_eigenvectors
+    return extrapolation_output
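
The wrapper's signature is reworked to mirror extrapolate_knn: num_sample moves ahead of the sampling options, distance becomes required, and knn, affinity_focal_gamma, chunk_size, device, and move_output_to_cpu gain the same defaults as the inner call. A hypothetical invocation under the new API:

    import torch
    from nystrom_ncut.propagation_utils import extrapolate_knn_with_subsampling

    new_output = extrapolate_knn_with_subsampling(
        full_features=torch.randn(3000, 100),
        full_output=torch.randn(3000, 20),
        extrapolation_features=torch.randn(200, 100),
        num_sample=1000,
        sample_method="farthest",
        distance="cosine",
    )
    # new_output.shape == (200, 20)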
nystrom_ncut/visualize_utils.py CHANGED
@@ -1,5 +1,5 @@
 import logging
-from typing import Any, Callable, Dict, Literal, Tuple
+from typing import Any, Callable, Dict, Literal
 
 import numpy as np
 import torch
@@ -8,88 +8,82 @@ from sklearn.base import BaseEstimator
 
 from .common import (
     lazy_normalize,
+    to_euclidean,
     quantile_min_max,
     quantile_normalize,
 )
-from .nystrom import (
-    DistanceRealization,
-)
 from .propagation_utils import (
     run_subgraph_sampling,
     extrapolate_knn,
 )
 
 
-def _identity(X: torch.Tensor) -> torch.Tensor:
-    return X
-
-
 def _rgb_with_dimensionality_reduction(
     features: torch.Tensor,
     num_sample: int,
-    metric: Literal["cosine", "euclidean"],
+    disttype: Literal["cosine", "euclidean"],
     rgb_func: Callable[[torch.Tensor, float], torch.Tensor],
     q: float,
     knn: int,
     reduction: Callable[..., BaseEstimator],
     reduction_dim: int,
     reduction_kwargs: Dict[str, Any],
-    transform_func: Callable[[torch.Tensor], torch.Tensor],
     seed: int,
     device: str,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> torch.Tensor:
 
     if True:
         _subgraph_indices = run_subgraph_sampling(
-            features,
+            features=features,
             num_sample=10000,
+            disttype=disttype,
             sample_method="farthest",
         )
         features = extrapolate_knn(
-            features[_subgraph_indices],
-            features[_subgraph_indices],
-            features,
-            distance="cosine",
+            anchor_features=features[_subgraph_indices],
+            anchor_output=features[_subgraph_indices],
+            extrapolation_features=features,
+            distance=disttype,
         )
 
     subgraph_indices = run_subgraph_sampling(
-        features,
-        num_sample,
+        features=features,
+        num_sample=num_sample,
+        disttype=disttype,
         sample_method="farthest",
     )
 
     _inp = features[subgraph_indices].numpy(force=True)
     _subgraph_embed = reduction(
         n_components=reduction_dim,
-        metric=metric,
+        metric=disttype,
         random_state=seed,
         **reduction_kwargs
     ).fit_transform(_inp)
 
     _subgraph_embed = torch.tensor(_subgraph_embed, dtype=torch.float32)
-    X_nd = transform_func(extrapolate_knn(
+    rgb = rgb_func(extrapolate_knn(
         features[subgraph_indices],
         _subgraph_embed,
         features,
+        disttype,
         knn=knn,
-        distance=metric,
         device=device,
         move_output_to_cpu=True
-    ))
-    rgb = rgb_func(X_nd, q)
-    return X_nd, rgb
+    ), q)
+    return rgb
 
 
 def rgb_from_tsne_2d(
     features: torch.Tensor,
     num_sample: int = 1000,
+    disttype: Literal["cosine", "euclidean"] = "cosine",
     perplexity: int = 150,
-    metric: Literal["cosine", "euclidean"] = "cosine",
     q: float = 0.95,
     knn: int = 10,
     seed: int = 0,
     device: str = None,
-):
+) -> torch.Tensor:
     """
     Returns:
         (torch.Tensor): Embedding in 2D, shape (n_samples, 2)
@@ -108,32 +102,32 @@ def rgb_from_tsne_2d(
         )
         perplexity = num_sample // 2
 
-    x2d, rgb = _rgb_with_dimensionality_reduction(
+    rgb = _rgb_with_dimensionality_reduction(
         features=features,
         num_sample=num_sample,
-        metric=metric,
+        disttype=disttype,
         rgb_func=rgb_from_2d_colormap,
         q=q,
         knn=knn,
         reduction=TSNE, reduction_dim=2, reduction_kwargs={
             "perplexity": perplexity,
-        }, transform_func=_identity,
+        },
         seed=seed,
         device=device,
     )
-    return x2d, rgb
+    return rgb
 
 
 def rgb_from_tsne_3d(
     features: torch.Tensor,
     num_sample: int = 1000,
+    disttype: Literal["cosine", "euclidean"] = "cosine",
     perplexity: int = 150,
-    metric: Literal["cosine", "euclidean"] = "cosine",
     q: float = 0.95,
     knn: int = 10,
     seed: int = 0,
     device: str = None,
-):
+) -> torch.Tensor:
     """
     Returns:
         (torch.Tensor): Embedding in 3D, shape (n_samples, 3)
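
Two breaking changes run through every rgb_from_* helper: the metric keyword is renamed disttype, and the return value is now the RGB tensor alone instead of an (embedding, rgb) tuple. A migration sketch for a 0.0.12 call site, with a placeholder feature tensor:

    import torch
    from nystrom_ncut import rgb_from_tsne_2d

    features = torch.randn(5000, 768)

    # 0.0.12: x2d, rgb = rgb_from_tsne_2d(features, metric="cosine")
    rgb = rgb_from_tsne_2d(features, disttype="cosine")  # rgb.shape == (5000, 3)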
@@ -152,31 +146,32 @@ def rgb_from_tsne_3d(
         )
         perplexity = num_sample // 2
 
-    x3d, rgb = _rgb_with_dimensionality_reduction(
+    rgb = _rgb_with_dimensionality_reduction(
         features=features,
         num_sample=num_sample,
-        metric=metric,
+        disttype=disttype,
         rgb_func=rgb_from_3d_rgb_cube,
         q=q,
         knn=knn,
         reduction=TSNE, reduction_dim=3, reduction_kwargs={
             "perplexity": perplexity,
-        }, transform_func=_identity,
+        },
         seed=seed,
         device=device,
     )
-    return x3d, rgb
+    return rgb
 
 
-def rgb_from_cosine_tsne_3d(
+def rgb_from_euclidean_tsne_3d(
     features: torch.Tensor,
     num_sample: int = 1000,
+    disttype: Literal["cosine", "euclidean"] = "cosine",
     perplexity: int = 150,
     q: float = 0.95,
     knn: int = 10,
     seed: int = 0,
     device: str = None
-):
+) -> torch.Tensor:
     """
     Returns:
         (torch.Tensor): Embedding in 3D, shape (n_samples, 3)
@@ -195,40 +190,36 @@ def rgb_from_euclidean_tsne_3d(
         )
         perplexity = num_sample // 2
 
-    def cosine_to_rbf(X: torch.Tensor) -> torch.Tensor:
-        dr = DistanceRealization(n_components=3, num_sample=20000, distance="cosine", eig_solver="svd_lowrank")
-        return dr.fit_transform(X)
-
-    def rgb_from_cosine(X_3d: torch.Tensor, q: float) -> torch.Tensor:
-        return rgb_from_3d_rgb_cube(cosine_to_rbf(X_3d), q=q)
+    def rgb_func(X_3d: torch.Tensor, q: float) -> torch.Tensor:
+        return rgb_from_3d_rgb_cube(to_euclidean(X_3d, disttype), q=q)
 
-    x3d, rgb = _rgb_with_dimensionality_reduction(
+    rgb = _rgb_with_dimensionality_reduction(
         features=features,
         num_sample=num_sample,
-        metric="cosine",
-        rgb_func=rgb_from_cosine,
+        disttype="cosine",
+        rgb_func=rgb_func,
         q=q,
         knn=knn,
         reduction=TSNE, reduction_dim=3, reduction_kwargs={
             "perplexity": perplexity,
-        }, transform_func=_identity,
+        },
         seed=seed,
         device=device,
     )
-    return x3d, rgb
+    return rgb
 
 
 def rgb_from_umap_2d(
     features: torch.Tensor,
     num_sample: int = 1000,
+    disttype: Literal["cosine", "euclidean"] = "cosine",
     n_neighbors: int = 150,
     min_dist: float = 0.1,
-    metric: Literal["cosine", "euclidean"] = "cosine",
     q: float = 0.95,
     knn: int = 10,
     seed: int = 0,
     device: str = None,
-):
+) -> torch.Tensor:
     """
     Returns:
         (torch.Tensor): Embedding in 2D, shape (n_samples, 2)
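
The rename from rgb_from_cosine_tsne_3d to rgb_from_euclidean_tsne_3d also swaps its internals: instead of re-embedding the t-SNE output through a 3-component DistanceRealization, it simply passes the embedding through to_euclidean before the RGB-cube mapping. Migrating is a one-line rename, keeping in mind the tuple-to-tensor return change above:

    from nystrom_ncut import rgb_from_euclidean_tsne_3d

    # 0.0.12: x3d, rgb = rgb_from_cosine_tsne_3d(features)
    rgb = rgb_from_euclidean_tsne_3d(features)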
@@ -239,34 +230,34 @@ def rgb_from_umap_2d(
     except ImportError:
         raise ImportError("umap import failed, please install `pip install umap-learn`")
 
-    x2d, rgb = _rgb_with_dimensionality_reduction(
+    rgb = _rgb_with_dimensionality_reduction(
         features=features,
         num_sample=num_sample,
-        metric=metric,
+        disttype=disttype,
         rgb_func=rgb_from_2d_colormap,
         q=q,
         knn=knn,
         reduction=UMAP, reduction_dim=2, reduction_kwargs={
             "n_neighbors": n_neighbors,
             "min_dist": min_dist,
-        }, transform_func=_identity,
+        },
         seed=seed,
         device=device,
     )
-    return x2d, rgb
+    return rgb
 
 
 def rgb_from_umap_sphere(
     features: torch.Tensor,
     num_sample: int = 1000,
+    disttype: Literal["cosine", "euclidean"] = "cosine",
     n_neighbors: int = 150,
     min_dist: float = 0.1,
-    metric: Literal["cosine", "euclidean"] = "cosine",
     q: float = 0.95,
     knn: int = 10,
     seed: int = 0,
     device: str = None,
-):
+) -> torch.Tensor:
     """
     Returns:
         (torch.Tensor): Embedding in 2D, shape (n_samples, 2)
@@ -277,37 +268,37 @@ def rgb_from_umap_sphere(
     except ImportError:
         raise ImportError("umap import failed, please install `pip install umap-learn`")
 
-    def transform_func(X: torch.Tensor) -> torch.Tensor:
-        return torch.stack((
+    def rgb_func(X: torch.Tensor, q: float) -> torch.Tensor:
+        return rgb_from_3d_rgb_cube(torch.stack((
             torch.sin(X[:, 0]) * torch.cos(X[:, 1]),
             torch.sin(X[:, 0]) * torch.sin(X[:, 1]),
             torch.cos(X[:, 0]),
-        ), dim=1)
+        ), dim=1), q=q)
 
-    x3d, rgb = _rgb_with_dimensionality_reduction(
+    rgb = _rgb_with_dimensionality_reduction(
         features=features,
         num_sample=num_sample,
-        metric=metric,
-        rgb_func=rgb_from_3d_rgb_cube,
+        disttype=disttype,
+        rgb_func=rgb_func,
         q=q,
         knn=knn,
         reduction=UMAP, reduction_dim=2, reduction_kwargs={
             "n_neighbors": n_neighbors,
             "min_dist": min_dist,
             "output_metric": "haversine",
-        }, transform_func=transform_func,
+        },
         seed=seed,
         device=device,
     )
-    return x3d, rgb
+    return rgb
 
 
 def rgb_from_umap_3d(
     features: torch.Tensor,
     num_sample: int = 1000,
+    disttype: Literal["cosine", "euclidean"] = "cosine",
     n_neighbors: int = 150,
     min_dist: float = 0.1,
-    metric: Literal["cosine", "euclidean"] = "cosine",
     q: float = 0.95,
     knn: int = 10,
     seed: int = 0,
@@ -323,31 +314,31 @@ def rgb_from_umap_3d(
     except ImportError:
         raise ImportError("umap import failed, please install `pip install umap-learn`")
 
-    x3d, rgb = _rgb_with_dimensionality_reduction(
+    rgb = _rgb_with_dimensionality_reduction(
         features=features,
         num_sample=num_sample,
-        metric=metric,
+        disttype=disttype,
         rgb_func=rgb_from_3d_rgb_cube,
         q=q,
         knn=knn,
         reduction=UMAP, reduction_dim=3, reduction_kwargs={
             "n_neighbors": n_neighbors,
             "min_dist": min_dist,
-        }, transform_func=_identity,
+        },
         seed=seed,
         device=device,
     )
-    return x3d, rgb
+    return rgb
 
 
-def flatten_sphere(X_3d):
-    x = np.arctan2(X_3d[:, 0], X_3d[:, 1])
-    y = -np.arccos(X_3d[:, 2])
-    X_2d = np.stack([x, y], axis=1)
+def flatten_sphere(X_3d: torch.Tensor) -> torch.Tensor:
+    x = torch.atan2(X_3d[:, 0], X_3d[:, 1])
+    y = -torch.acos(X_3d[:, 2])
+    X_2d = torch.stack((x, y), dim=1)
     return X_2d
 
 
-def rotate_rgb_cube(rgb, position=1):
+def rotate_rgb_cube(rgb: torch.Tensor, position: int = 1) -> torch.Tensor:
     """rotate RGB cube to different position
 
     Args:
@@ -371,7 +362,7 @@ def rotate_rgb_cube(rgb, position=1):
     return rgb
 
 
-def rgb_from_3d_rgb_cube(X_3d, q=0.95):
+def rgb_from_3d_rgb_cube(X_3d: torch.Tensor, q: float = 0.95) -> torch.Tensor:
     """convert 3D t-SNE to RGB color space
     Args:
         X_3d (torch.Tensor): 3D t-SNE embedding, shape (n_samples, 3)
@@ -389,6 +380,26 @@ def rgb_from_3d_rgb_cube(X_3d, q=0.95):
     return rgb
 
 
+def rgb_from_3d_lab_cube(X_3d: torch.Tensor, q: float = 0.95, full_range: bool = True) -> torch.Tensor:
+    from skimage import color
+    X_3d = X_3d - torch.mean(X_3d, dim=0)
+    U, S, VT = torch.linalg.svd(X_3d)
+    X_3d = torch.flip(U[:, :3] * S, dims=(1,))
+
+    AB_scale = 128.0 / torch.quantile(torch.linalg.norm(X_3d[:, 1:], dim=1), q=q, dim=0)
+    L_min, L_max = torch.quantile(X_3d[:, 0], q=torch.tensor(((1 - q) / 2, (1 + q) / 2)), dim=0)
+    L_scale = 100.0 / (L_max - L_min)
+
+    X_3d[:, 0] = X_3d[:, 0] - L_min
+    if full_range:
+        lab = X_3d * torch.tensor((L_scale, AB_scale, AB_scale))
+    else:
+        lab = X_3d * L_scale
+
+    rgb = torch.tensor(color.lab2rgb(lab.numpy(force=True)))
+    return rgb
+
+
 def convert_to_lab_color(rgb, full_range=True):
     from skimage import color
     import copy
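
The new rgb_from_3d_lab_cube centers the 3D embedding, rotates it onto its principal axes in reversed order (so the smallest-variance axis drives lightness), scales the first coordinate into CIELAB L (0 to 100) and the other two toward the a/b range of roughly ±128 when full_range=True, then converts LAB to RGB via scikit-image. A hedged usage sketch (requires scikit-image):

    import torch
    from nystrom_ncut.visualize_utils import rgb_from_3d_lab_cube

    X_3d = torch.randn(5000, 3)            # any 3D embedding, e.g. t-SNE output
    rgb = rgb_from_3d_lab_cube(X_3d, q=0.95)
    # rgb.shape == (5000, 3), values in [0, 1]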
@@ -407,7 +418,7 @@ def convert_to_lab_color(rgb, full_range=True):
     return lab_rgb
 
 
-def rgb_from_2d_colormap(X_2d, q=0.95):
+def rgb_from_2d_colormap(X_2d: torch.Tensor, q: float = 0.95):
     xy = X_2d.clone()
     for i in range(2):
         xy[:, i] = quantile_normalize(xy[:, i], q=q)
{nystrom_ncut-0.0.12.dist-info → nystrom_ncut-0.1.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: nystrom_ncut
-Version: 0.0.12
+Version: 0.1.1
 Summary: Normalized Cut and Nyström Approximation
 Author-email: Huzheng Yang <huze.yann@gmail.com>, Wentinn Liao <wentinn.liao@gmail.com>
 Project-URL: Documentation, https://github.com/JophiArcana/Nystrom-NCUT/
nystrom_ncut-0.1.1.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nystrom_ncut/__init__.py,sha256=ffExLdGTaPsUweHcYc61Ose6a5A5Tfo9hm48zjEl6ho,441
+nystrom_ncut/common.py,sha256=2nffH0I2UrE6-7gQV6NlA7xSXeAl2GPR5F_5Or1yMt4,2275
+nystrom_ncut/propagation_utils.py,sha256=79M61iJfp_RWj_xLOn51PHiextWcEWTQ7NWl2T51-3Y,10907
+nystrom_ncut/visualize_utils.py,sha256=EQKsNTfCssLYJv7HhwHxAyI18GLrkwbnOIrIKVoYZ1w,17344
+nystrom_ncut/nystrom/__init__.py,sha256=4EpxD3Cmc8Fif4vo8DG-6FpTfCnNanD5zCZxK3WrMwQ,121
+nystrom_ncut/nystrom/distance_realization.py,sha256=FGH7VjbtRrSROH0d8OPuCUxLQy5j7Z8BuE4hrSGGZG4,6031
+nystrom_ncut/nystrom/normalized_cut.py,sha256=s9ZS3-tQbWnxAlPc01v9l7fqBhl28lvOalaCO2y-Gd8,7175
+nystrom_ncut/nystrom/nystrom.py,sha256=OV5o9UL9fkrz9HdsD6rXh7MTsenPKrtCNRIczMuDS_4,12779
+nystrom_ncut-0.1.1.dist-info/LICENSE,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+nystrom_ncut-0.1.1.dist-info/METADATA,sha256=XQymsKFQrtzWDKjwUbyVv8KiWwdbsMSCGIKPS9WcBGk,6058
+nystrom_ncut-0.1.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+nystrom_ncut-0.1.1.dist-info/top_level.txt,sha256=gM8IWWHYysIRTCvCTcdS4RShOyl9pxpylgSwPUZR2XM,22
+nystrom_ncut-0.1.1.dist-info/RECORD,,
nystrom_ncut-0.0.12.dist-info/RECORD DELETED
@@ -1,14 +0,0 @@
-__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nystrom_ncut/__init__.py,sha256=JKfF6atok5T9V692RhlhgeRO5a2cN-bfAVa9irmTLfs,463
-nystrom_ncut/common.py,sha256=RMPQvg9R2s7V-q7zAStN9YCZt7gpc5Ut-KSKtvELBQ4,1934
-nystrom_ncut/propagation_utils.py,sha256=WeWKxRBm01ITILMgjsit5_fCe9oW1kJOPmAjjcmliMo,10340
-nystrom_ncut/visualize_utils.py,sha256=JkDyWML6k7k6S2Z7xnpbUvMWssEXcXqXu7gBy8wnids,16809
-nystrom_ncut/nystrom/__init__.py,sha256=4EpxD3Cmc8Fif4vo8DG-6FpTfCnNanD5zCZxK3WrMwQ,121
-nystrom_ncut/nystrom/distance_realization.py,sha256=MWSdfPfUnr7BdiKFkogjQvcGagvj7OzLQklnVp0fPx8,6000
-nystrom_ncut/nystrom/normalized_cut.py,sha256=_U3zrbe6V-5TQ4uWmqckxs2JTIhygQlnRDTFBI1ghD4,7194
-nystrom_ncut/nystrom/nystrom.py,sha256=nL-zxbEE_ygJEZEPmeNrUpVeffvxdrsTcbxFanFuXQY,12613
-nystrom_ncut-0.0.12.dist-info/LICENSE,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
-nystrom_ncut-0.0.12.dist-info/METADATA,sha256=pM-WT6Ly-IKYJ3DV2d-oOyc--K4VOyArB0sT5gHfHL4,6059
-nystrom_ncut-0.0.12.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-nystrom_ncut-0.0.12.dist-info/top_level.txt,sha256=gM8IWWHYysIRTCvCTcdS4RShOyl9pxpylgSwPUZR2XM,22
-nystrom_ncut-0.0.12.dist-info/RECORD,,