nystrom-ncut 0.0.4__tar.gz → 0.0.6__tar.gz

@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: nystrom_ncut
-Version: 0.0.4
+Version: 0.0.6
 Summary: Normalized Cut and Nyström Approximation
 Author-email: Huzheng Yang <huze.yann@gmail.com>, Wentinn Liao <wentinn.liao@gmail.com>
 Project-URL: Documentation, https://github.com/JophiArcana/Nystrom-NCUT/
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "nystrom_ncut"
-version = "0.0.4"
+version = "0.0.6"
 authors = [
     { name = "Huzheng Yang", email = "huze.yann@gmail.com" },
     { name = "Wentinn Liao", email = "wentinn.liao@gmail.com" },
@@ -173,9 +173,10 @@ class NCUT(OnlineNystrom):
         else:
             sampled_indices = run_subgraph_sampling(
                 features,
-                num_sample=self.num_sample,
+                self.num_sample,
                 sample_method=self.sample_method,
             )
+        sampled_indices = torch.sort(sampled_indices).values
         sampled_features = features[sampled_indices]
         OnlineNystrom.fit(self, sampled_features)
 
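The newly added torch.sort(sampled_indices).values line normalizes the order of the sampled rows before they are gathered. A minimal sketch of the effect, with made-up tensors for illustration:

    import torch

    sampled_indices = torch.tensor([57, 3, 48, 12])       # e.g. order produced by farthest-point sampling
    sampled_indices = torch.sort(sampled_indices).values  # tensor([ 3, 12, 48, 57])
    features = torch.randn(64, 8)
    sampled_features = features[sampled_indices]          # rows gathered in ascending index order

Presumably the point is that the anchor set now has a deterministic, ascending ordering regardless of which sampling method produced the indices.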
@@ -11,7 +11,7 @@ from .common import ceildiv, lazy_normalize
 @torch.no_grad()
 def run_subgraph_sampling(
     features: torch.Tensor,
-    num_sample: int = 300,
+    num_sample: int,
     max_draw: int = 1000000,
     sample_method: Literal["farthest", "random"] = "farthest",
 ):
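With the default of 300 removed, num_sample becomes a required argument, and the updated call sites in this release pass it positionally. A hedged sketch of a call under the new signature; the features tensor and sample size here are illustrative:

    import torch

    features = torch.randn(5000, 64)
    sampled_indices = run_subgraph_sampling(
        features,
        1000,                        # num_sample: now mandatory, passed positionally
        sample_method="farthest",
    )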
@@ -96,7 +96,6 @@ def distance_from_features(
         D = D / (2 * features.var(dim=0).sum())
     else:
         raise ValueError("distance should be 'cosine' or 'euclidean', 'rbf'")
-
     return D
 
 
@@ -184,13 +183,37 @@ def propagate_knn(
     V_list = []
     for _v in torch.chunk(inp_features, n_chunks, dim=0):
         _v = _v.to(device)
-        _A = affinity_from_features(subgraph_features, _v, affinity_focal_gamma, distance).mT
 
-        if knn is not None:
-            mask = torch.full_like(_A, True, dtype=torch.bool)
-            mask[torch.arange(len(_v))[:, None], _A.topk(knn, dim=-1, largest=True).indices] = False
-            _A[mask] = 0.0
-            _A = F.normalize(_A, p=1, dim=-1)
+        # _A = affinity_from_features(subgraph_features, _v, affinity_focal_gamma, distance).mT
+        # if knn is not None:
+        #     mask = torch.full_like(_A, True, dtype=torch.bool)
+        #     mask[torch.arange(len(_v))[:, None], _A.topk(knn, dim=-1, largest=True).indices] = False
+        #     _A[mask] = 0.0
+        #     _A = F.normalize(_A, p=1, dim=-1)
+
+        if distance == 'cosine':
+            _A = _v @ subgraph_features.T
+        elif distance == 'euclidean':
+            _A = - torch.cdist(_v, subgraph_features, p=2)
+        elif distance == 'rbf':
+            _A = - torch.cdist(_v, subgraph_features, p=2) ** 2
+        else:
+            raise ValueError("distance should be 'cosine' or 'euclidean', 'rbf'")
+
+        # keep topk KNN for each row
+        topk_sim, topk_idx = _A.topk(knn, dim=-1, largest=True)
+        row_id = torch.arange(topk_idx.shape[0], device=_A.device)[:, None].expand(
+            -1, topk_idx.shape[1]
+        )
+        _A = torch.sparse_coo_tensor(
+            torch.stack([row_id, topk_idx], dim=-1).reshape(-1, 2).T,
+            topk_sim.reshape(-1),
+            size=(_A.shape[0], _A.shape[1]),
+            device=_A.device,
+        )
+        _A = _A.to_dense().to(dtype=subgraph_output.dtype)
+        _D = _A.sum(-1)
+        _A /= _D[:, None]
 
         _V = _A @ subgraph_output
         if move_output_to_cpu:
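The replacement body computes the chunk-to-anchor similarity directly for each distance mode, keeps only the knn largest entries per row through a sparse scatter, and row-normalizes before multiplying into subgraph_output. A self-contained sketch of the same top-k propagation idea, written without the sparse round-trip; the function name knn_propagate and the shapes are assumptions for illustration, not the package's API:

    import torch

    def knn_propagate(queries, anchors, anchor_values, knn=10):
        # Cosine-mode sketch: similarity, per-row top-k, row-stochastic weights.
        # Assumes non-negative similarities so the row sums are valid normalizers.
        sim = queries @ anchors.T                                 # (q, a)
        topk_sim, topk_idx = sim.topk(knn, dim=-1, largest=True)  # (q, knn)
        weights = topk_sim / topk_sim.sum(dim=-1, keepdim=True)   # row-normalize
        return (anchor_values[topk_idx] * weights[..., None]).sum(dim=1)

    q = torch.nn.functional.normalize(torch.rand(8, 16), dim=-1)
    a = torch.nn.functional.normalize(torch.rand(100, 16), dim=-1)
    v = torch.randn(100, 4)
    out = knn_propagate(q, a, v, knn=5)                           # (8, 4)

Gathering anchor_values[topk_idx] directly avoids materializing the dense (q, a) weight matrix that the sparse-to-dense conversion in the diff reconstructs. Note that unlike the removed code path, the new code applies topk unconditionally, so it assumes knn is not None.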
@@ -272,7 +295,7 @@ def propagate_eigenvectors(
     # sample subgraph
     subgraph_indices = run_subgraph_sampling(
         features,
-        num_sample=num_sample,
+        num_sample,
        sample_method=sample_method,
     )
 
@@ -34,7 +34,7 @@ def _rgb_with_dimensionality_reduction(
 ) -> Tuple[torch.Tensor, torch.Tensor]:
     subgraph_indices = run_subgraph_sampling(
         features,
-        num_sample=num_sample,
+        num_sample,
         sample_method="farthest",
     )
 
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: nystrom_ncut
-Version: 0.0.4
+Version: 0.0.6
 Summary: Normalized Cut and Nyström Approximation
 Author-email: Huzheng Yang <huze.yann@gmail.com>, Wentinn Liao <wentinn.liao@gmail.com>
 Project-URL: Documentation, https://github.com/JophiArcana/Nystrom-NCUT/
@@ -38,27 +38,28 @@ if __name__ == "__main__":
     # raise Exception(
 
     torch.set_printoptions(precision=8, sci_mode=False, linewidth=400)
-    torch.set_default_dtype(torch.float64)
-    # torch.manual_seed(1212)
-    # np.random.seed(1212)
+    torch.set_default_dtype(torch.float32)
+    torch.manual_seed(1212)
+    np.random.seed(1212)
 
-    M = torch.rand((200, 12))
-    NC = NCUT(n_components=12, num_sample=80, sample_method="random", chunk_size=20)
+    M = torch.rand((1200, 12))
+    NC = NCUT(n_components=30, num_sample=1000, sample_method="farthest", eig_solver="svd")
 
     torch.manual_seed(1212)
     np.random.seed(1212)
     X, eigs = NC.fit_transform(M)
     print(eigs)
-    raise Exception()
+    # print(X.mT @ X)
 
     normalized_M = Fn.normalize(M, p=2, dim=-1)
     A = torch.exp(-(1 - normalized_M @ normalized_M.mT))
     R = torch.diag(torch.sum(A, dim=-1) ** -0.5)
     L = R @ A @ R
     # print(L)
-    print(X @ torch.diag(eigs) @ X.mT)
-    print(L)
-    print(torch.abs(X @ torch.diag(eigs) @ X.mT / L - 1))
+    # print(X @ torch.diag(eigs) @ X.mT)
+    # print(L)
+    RE = torch.abs(X @ torch.diag(eigs) @ X.mT / L - 1)
+    print(RE.max().item(), RE.mean().item())
 
     # torch.manual_seed(1212)
     # np.random.seed(1212)
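The test now reports the Nyström reconstruction quality as an elementwise relative error between X @ torch.diag(eigs) @ X.mT and the exact normalized affinity L = R @ A @ R, instead of printing the full matrices. A minimal self-contained sketch of the same check; the helper name reconstruction_error is made up, and with an exact eigh decomposition both numbers come out near zero, so larger values isolate the Nyström approximation error:

    import torch

    def reconstruction_error(X, eigs, L):
        # Elementwise relative error |X diag(eigs) X^T / L - 1|, as in the test.
        RE = torch.abs(X @ torch.diag(eigs) @ X.mT / L - 1)
        return RE.max().item(), RE.mean().item()

    M = torch.nn.functional.normalize(torch.rand(50, 12), p=2, dim=-1)
    A = torch.exp(-(1 - M @ M.mT))                # cosine-based affinity, entries > 0
    R = torch.diag(torch.sum(A, dim=-1) ** -0.5)  # D^{-1/2}
    L = R @ A @ R                                 # symmetrically normalized affinity
    eigs, X = torch.linalg.eigh(L)                # exact decomposition for comparison
    print(reconstruction_error(X, eigs, L))       # both values ~ 0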