nystrom-ncut 0.0.2__tar.gz → 0.0.3__tar.gz
- {nystrom_ncut-0.0.2/src/nystrom_ncut.egg-info → nystrom_ncut-0.0.3}/PKG-INFO +1 -1
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/pyproject.toml +1 -1
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/ncut_pytorch.py +11 -19
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/propagation_utils.py +4 -4
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/visualize_utils.py +2 -2
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3/src/nystrom_ncut.egg-info}/PKG-INFO +1 -1
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/LICENSE +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/MANIFEST.in +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/README.md +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/requirements.txt +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/setup.cfg +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/__init__.py +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/common.py +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/nystrom.py +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut.egg-info/SOURCES.txt +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut.egg-info/dependency_links.txt +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut.egg-info/top_level.txt +0 -0
- {nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/tests/test.py +0 -0
{nystrom_ncut-0.0.2/src/nystrom_ncut.egg-info → nystrom_ncut-0.0.3}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nystrom_ncut
-Version: 0.0.2
+Version: 0.0.3
 Summary: Normalized Cut and Nyström Approximation
 Author-email: Huzheng Yang <huze.yann@gmail.com>, Wentinn Liao <wentinn.liao@gmail.com>
 Project-URL: Documentation, https://github.com/JophiArcana/Nystrom-NCUT/
```
{nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/ncut_pytorch.py

```diff
@@ -94,20 +94,19 @@ class NCUT(OnlineNystrom):
 
     def __init__(
         self,
-
+        n_components: int = 100,
         affinity_focal_gamma: float = 1.0,
         num_sample: int = 10000,
         sample_method: Literal["farthest", "random"] = "farthest",
         distance: DistanceOptions = "cosine",
         eig_solver: EigSolverOptions = "svd_lowrank",
         normalize_features: bool = None,
-        device: str = None,
         move_output_to_cpu: bool = False,
-
+        chunk_size: int = 8192,
     ):
         """
         Args:
-
+            n_components (int): number of top eigenvectors to return
            affinity_focal_gamma (float): affinity matrix temperature, lower t reduce the not-so-connected edge weights,
                smaller t result in more sharp eigenvectors.
            num_sample (int): number of samples for Nystrom-like approximation,
```
```diff
@@ -118,17 +117,15 @@ class NCUT(OnlineNystrom):
            eig_solver (str): eigen decompose solver, ['svd_lowrank', 'lobpcg', 'svd', 'eigh'].
            normalize_features (bool): normalize input features before computing affinity matrix,
                default 'None' is True for cosine distance, False for euclidean distance and rbf
-            device (str): device to use for eigen computation,
-                move to GPU to speeds up a bit (~5x faster)
            move_output_to_cpu (bool): move output to CPU, set to True if you have memory issue
-
+            chunk_size (int): chunk size for large-scale matrix multiplication
        """
        OnlineNystrom.__init__(
            self,
-            n_components=
+            n_components=n_components,
            kernel=LaplacianKernel(affinity_focal_gamma, distance, eig_solver),
            eig_solver=eig_solver,
-            chunk_size=
+            chunk_size=chunk_size,
        )
        self.num_sample = num_sample
        self.sample_method = sample_method
```
```diff
@@ -140,19 +137,14 @@ class NCUT(OnlineNystrom):
         if distance in ["euclidean", "rbf"]:
             self.normalize_features = False
 
-        self.device = device
         self.move_output_to_cpu = move_output_to_cpu
-        self.
+        self.chunk_size = chunk_size
 
     def _fit_helper(
         self,
         features: torch.Tensor,
         precomputed_sampled_indices: torch.Tensor,
     ) -> Tuple[torch.Tensor, torch.Tensor]:
-        # move subgraph gpu to speed up
-        original_device = features.device
-        device = original_device if self.device is None else self.device
-
         _n = features.shape[0]
         if self.num_sample >= _n:
             logging.info(
```
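With the `device` argument removed from `NCUT.__init__` (and `self.device` gone from the constructor body), the compute device in 0.0.3 simply follows the device of the input features. The sketch below constructs `NCUT` using only the keyword arguments visible in this diff; the `from nystrom_ncut import NCUT` import path and the `fit_transform` call are assumptions about the package interface, not something shown here.

```python
import torch
from nystrom_ncut import NCUT  # import path assumed from the package layout above

# Construct NCUT with the 0.0.3 signature: no `device` argument; `chunk_size`
# bounds the size of the large matrix multiplications.
ncut = NCUT(
    n_components=100,
    affinity_focal_gamma=1.0,
    num_sample=10000,
    sample_method="farthest",
    distance="cosine",
    chunk_size=8192,
)

# The eigen computation now runs wherever the features live, so moving the
# features to the GPU (if available) is how you pick the device.
features = torch.randn(50000, 768)
if torch.cuda.is_available():
    features = features.cuda()

# Assumed entry point (OnlineNystrom-style fit/transform); check the package API.
eigenvectors, eigenvalues = ncut.fit_transform(features)
```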
```diff
@@ -184,13 +176,13 @@ class NCUT(OnlineNystrom):
                 num_sample=self.num_sample,
                 sample_method=self.sample_method,
             )
-        sampled_features = features[sampled_indices]
+        sampled_features = features[sampled_indices]
         OnlineNystrom.fit(self, sampled_features)
 
         _n_not_sampled = _n - len(sampled_features)
         if _n_not_sampled > 0:
-            unsampled_indices = torch.full((_n,), True).
-            unsampled_features = features[unsampled_indices]
+            unsampled_indices = torch.full((_n,), True, device=features.device).scatter_(0, sampled_indices, False)
+            unsampled_features = features[unsampled_indices]
             V_unsampled, _ = OnlineNystrom.update(self, unsampled_features)
         else:
             unsampled_indices = V_unsampled = None
```
```diff
@@ -231,7 +223,7 @@ class NCUT(OnlineNystrom):
         V_sampled, L = OnlineNystrom.transform(self)
 
         if unsampled_indices is not None:
-            V = torch.zeros((len(unsampled_indices), self.n_components))
+            V = torch.zeros((len(unsampled_indices), self.n_components), device=features.device)
             V[~unsampled_indices] = V_sampled
             V[unsampled_indices] = V_unsampled
         else:
```
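The two hunks above replace device bookkeeping with masks and buffers built directly on `features.device`. A small self-contained sketch of that pattern in plain PyTorch (an illustration, not the library's code): build a boolean mask of unsampled rows with `torch.full(...).scatter_(...)`, then merge the per-subset results back into one matrix on the same device.

```python
import torch

def merge_sampled_unsampled(
    features: torch.Tensor,         # (n, d) input features; their device drives everything
    sampled_indices: torch.Tensor,  # (m,) long indices chosen by the sampler
    V_sampled: torch.Tensor,        # (m, k) eigenvectors for the sampled rows
    V_unsampled: torch.Tensor,      # (n - m, k) eigenvectors for the remaining rows
) -> torch.Tensor:
    n, k = features.shape[0], V_sampled.shape[1]
    # Boolean mask of unsampled rows, allocated on the features' device and
    # flipped to False at the sampled positions (mirrors the 0.0.3 change).
    unsampled_mask = torch.full((n,), True, device=features.device).scatter_(0, sampled_indices, False)
    # Output buffer on the same device, filled per subset via the mask.
    # Note: boolean-mask assignment fills rows in ascending index order, so
    # V_sampled is assumed to be ordered by ascending sampled index here.
    V = torch.zeros((n, k), device=features.device)
    V[~unsampled_mask] = V_sampled
    V[unsampled_mask] = V_unsampled
    return V

# Tiny usage example on CPU.
feats = torch.randn(10, 4)
idx = torch.tensor([0, 3, 7])
V = merge_sampled_unsampled(feats, idx, torch.ones(3, 2), -torch.ones(7, 2))
```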
{nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/propagation_utils.py

```diff
@@ -43,7 +43,7 @@ def run_subgraph_sampling(
         sampled_indices = torch.randperm(features.shape[0])[:num_sample]
     else:
         raise ValueError("sample_method should be 'farthest' or 'random'")
-    return sampled_indices
+    return sampled_indices.to(features.device)
 
 
 def farthest_point_sampling(
```
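Why the `.to(features.device)` matters: index arguments to in-place ops like `scatter_` (used in the `_fit_helper` hunk above) must live on the same device as the tensor being written, so CPU-resident indices would not work against CUDA features. A short plain-PyTorch illustration, guarded so it only exercises the GPU path when one is present:

```python
import torch

n = 1000
sampled_cpu = torch.randperm(n)[:100]  # e.g. indices produced on the CPU

if torch.cuda.is_available():
    features = torch.randn(n, 64, device="cuda")
    mask = torch.full((n,), True, device=features.device)
    # mask.scatter_(0, sampled_cpu, False)  # device mismatch: index on CPU, mask on CUDA
    mask.scatter_(0, sampled_cpu.to(features.device), False)  # what returning device-matched indices enables
```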
```diff
@@ -139,7 +139,7 @@ def propagate_knn(
     knn: int = 10,
     distance: Literal["cosine", "euclidean", "rbf"] = "cosine",
     affinity_focal_gamma: float = 1.0,
-    chunk_size: int =
+    chunk_size: int = 8192,
     device: str = None,
     move_output_to_cpu: bool = False,
 ):
```
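The `chunk_size` parameter that now defaults to 8192 here (and in the helpers that follow) controls how many query rows are processed per step when forming large similarity matrices. A generic sketch of that idea, not the library's `propagate_knn` implementation: compute cosine similarities and take the top-k neighbors one 8192-row chunk at a time, so the full n_query × n_subgraph matrix is never materialized at once.

```python
import torch
import torch.nn.functional as F

def chunked_topk_cosine(
    inp_features: torch.Tensor,       # (n_query, d)
    subgraph_features: torch.Tensor,  # (n_subgraph, d)
    knn: int = 10,
    chunk_size: int = 8192,
):
    """Top-k cosine similarities per query row, computed chunk by chunk."""
    inp = F.normalize(inp_features, dim=-1)
    sub = F.normalize(subgraph_features, dim=-1)
    values, indices = [], []
    for start in range(0, inp.shape[0], chunk_size):
        chunk = inp[start:start + chunk_size]  # (<=chunk_size, d)
        sim = chunk @ sub.T                    # (<=chunk_size, n_subgraph)
        v, i = sim.topk(knn, dim=-1)
        values.append(v)
        indices.append(i)
    return torch.cat(values), torch.cat(indices)

# Example: 20k queries against a 1k-point subgraph, in 8192-row chunks.
vals, idxs = chunked_topk_cosine(torch.randn(20000, 32), torch.randn(1000, 32))
```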
```diff
@@ -206,7 +206,7 @@ def propagate_nearest(
     inp_features: torch.Tensor,
     subgraph_features: torch.Tensor,
     distance: Literal["cosine", "euclidean", "rbf"] = "cosine",
-    chunk_size: int =
+    chunk_size: int = 8192,
     device: str = None,
     move_output_to_cpu: bool = False,
 ):
```
```diff
@@ -254,7 +254,7 @@ def propagate_eigenvectors(
        knn (int): number of KNN to propagate eigenvectors, default 3
        num_sample (int): number of samples for subgraph sampling, default 50000
        sample_method (str): sample method, 'farthest' (default) or 'random'
-        chunk_size (int): chunk size for matrix multiplication, default
+        chunk_size (int): chunk size for matrix multiplication, default 8192
        device (str): device to use for computation, if None, will not change device
    Returns:
        torch.Tensor: propagated eigenvectors, shape (n_new_samples, num_eig)
```
{nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3}/src/nystrom_ncut/visualize_utils.py

```diff
@@ -420,7 +420,7 @@ def propagate_rgb_color(
     knn: int = 10,
     num_sample: int = 1000,
     sample_method: Literal["farthest", "random"] = "farthest",
-    chunk_size: int =
+    chunk_size: int = 8192,
     device: str = None,
 ):
     """Propagate RGB color to new nodes using KNN.
```
```diff
@@ -431,7 +431,7 @@ def propagate_rgb_color(
        knn (int): number of KNN to propagate RGB color, default 1
        num_sample (int): number of samples for subgraph sampling, default 50000
        sample_method (str): sample method, 'farthest' (default) or 'random'
-        chunk_size (int): chunk size for matrix multiplication, default
+        chunk_size (int): chunk size for matrix multiplication, default 8192
        device (str): device to use for computation, if None, will not change device
    Returns:
        torch.Tensor: propagated RGB color for each data sample, shape (n_new_samples, 3)
```
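The two `visualize_utils.py` hunks only change the `chunk_size` default, but the surrounding docstring describes what `propagate_rgb_color` does: push RGB values from a sampled subgraph onto new nodes via KNN in feature space. A toy illustration of that propagation step follows; `knn_mean_color` is a hypothetical helper written for this sketch, not the library function, and the chunking from the previous sketch is omitted for brevity. Each new node takes the mean color of its k nearest subgraph neighbors.

```python
import torch
import torch.nn.functional as F

def knn_mean_color(
    subgraph_rgb: torch.Tensor,       # (n_subgraph, 3) colors on the sampled nodes
    subgraph_features: torch.Tensor,  # (n_subgraph, d)
    new_features: torch.Tensor,       # (n_new, d)
    knn: int = 10,
) -> torch.Tensor:
    # Cosine similarity between new nodes and the sampled subgraph.
    sim = F.normalize(new_features, dim=-1) @ F.normalize(subgraph_features, dim=-1).T
    _, nbr = sim.topk(knn, dim=-1)        # (n_new, knn) neighbor indices
    return subgraph_rgb[nbr].mean(dim=1)  # (n_new, 3) averaged colors

new_rgb = knn_mean_color(torch.rand(1000, 3), torch.randn(1000, 64), torch.randn(5000, 64))
```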
{nystrom_ncut-0.0.2 → nystrom_ncut-0.0.3/src/nystrom_ncut.egg-info}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nystrom_ncut
-Version: 0.0.2
+Version: 0.0.3
 Summary: Normalized Cut and Nyström Approximation
 Author-email: Huzheng Yang <huze.yann@gmail.com>, Wentinn Liao <wentinn.liao@gmail.com>
 Project-URL: Documentation, https://github.com/JophiArcana/Nystrom-NCUT/
```