nystrom-ncut 0.0.1.tar.gz
- nystrom_ncut-0.0.1/LICENSE +19 -0
- nystrom_ncut-0.0.1/MANIFEST.in +9 -0
- nystrom_ncut-0.0.1/PKG-INFO +164 -0
- nystrom_ncut-0.0.1/README.md +150 -0
- nystrom_ncut-0.0.1/pyproject.toml +23 -0
- nystrom_ncut-0.0.1/requirements.txt +6 -0
- nystrom_ncut-0.0.1/setup.cfg +4 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut/__init__.py +22 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut/ncut_pytorch.py +561 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut/new_ncut_pytorch.py +241 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut/nystrom.py +170 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut/propagation_utils.py +371 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut/visualize_utils.py +655 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut.egg-info/PKG-INFO +164 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut.egg-info/SOURCES.txt +15 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut.egg-info/dependency_links.txt +1 -0
- nystrom_ncut-0.0.1/src/nystrom_ncut.egg-info/top_level.txt +1 -0
nystrom_ncut-0.0.1/LICENSE

@@ -0,0 +1,19 @@
Copyright (c) 2018 The Python Packaging Authority

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
nystrom_ncut-0.0.1/PKG-INFO

@@ -0,0 +1,164 @@
Metadata-Version: 2.1
Name: nystrom_ncut
Version: 0.0.1
Summary: Normalized Cut and Nyström Approximation
Author-email: Huzheng Yang <huze.yann@gmail.com>, Wentinn Liao <wentinn.liao@gmail.com>
Project-URL: Documentation, https://github.com/JophiArcana/Nystrom-NCUT/
Project-URL: Github, https://github.com/JophiArcana/Nystrom-NCUT/
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Requires-Python: >=3
Description-Content-Type: text/markdown
License-File: LICENSE
nystrom_ncut-0.0.1/README.md

@@ -0,0 +1,150 @@
<div style="text-align: center;">
<img src="./docs/images/ncut.svg" alt="NCUT" style="width: 80%; filter: brightness(60%) grayscale(100%);"/>
</div>

### [🌐Documentation](https://ncut-pytorch.readthedocs.io/) | [🤗HuggingFace Demo](https://huggingface.co/spaces/huzey/ncut-pytorch)

## NCUT: Nyström Normalized Cut

**Normalized Cut**, also known as spectral clustering, is a graph-based method that analyzes data grouping in the eigenvector space of the affinity matrix. It was widely used for unsupervised segmentation in the 2000s.

**Nyström Normalized Cut** is a new approximation algorithm developed for large-scale graph cuts: a graph of one million nodes can be processed in under 10 s (CPU) or 2 s (GPU).

https://github.com/user-attachments/assets/f0d40b1f-b8a5-4077-ab5f-e405f3ffb70f

<div align="center">
Video: NCUT applied to image encoder features from the Segment Anything Model.
</div>
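For intuition: instead of eigendecomposing the full N×N affinity matrix, the Nyström trick solves the normalized-cut eigenproblem on a small set of sampled landmark nodes and then extends the eigenvectors to all nodes. Below is a rough, self-contained sketch of that idea in PyTorch. The function and parameter names are illustrative, the degree estimate is simplified, and this is not the package's exact implementation:

```py
import torch

def nystrom_ncut_sketch(features: torch.Tensor, num_eig: int = 20,
                        num_sample: int = 256, gamma: float = 0.5):
    """Illustrative Nyström NCut: eigendecompose a landmark subgraph, then extend."""
    n = features.shape[0]
    idx = torch.randperm(n)[:num_sample]   # 1) sample landmark nodes
    landmarks = features[idx]

    # 2) RBF affinities: landmark-to-landmark (A) and all-nodes-to-landmark (B)
    A = torch.exp(-gamma * torch.cdist(landmarks, landmarks) ** 2)
    B = torch.exp(-gamma * torch.cdist(features, landmarks) ** 2)

    # 3) normalized-cut degree scaling; degrees are only estimated from the
    #    landmark columns here, which is a crude approximation
    d = B.sum(dim=1, keepdim=True)         # (n, 1) approximate node degrees
    d_l = d[idx]                           # (m, 1) landmark degrees
    A = A / (d_l * d_l.T).sqrt()
    B = B / (d * d_l.T).sqrt()

    # 4) eigendecompose the small m x m landmark matrix, keep the top num_eig
    eigvals, eigvecs = torch.linalg.eigh(A)
    eigvals, eigvecs = eigvals[-num_eig:], eigvecs[:, -num_eig:]

    # 5) Nyström extension to all n nodes: V_full ≈ B V diag(1 / eigvals)
    full_eigvecs = B @ eigvecs / eigvals
    return full_eigvecs, eigvals
```

The cost is dominated by the m×m eigendecomposition and the N×m affinity, so the method scales roughly linearly in the number of nodes rather than cubically.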
---

## Installation

#### 1. Install PyTorch

<div style="text-align:">
<pre><code class="language-shell">conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
</code></pre>
</div>

#### 2. Install `nystrom-ncut`

<div style="text-align:">
<pre><code class="language-shell">pip install nystrom-ncut</code></pre>
</div>


#### Troubleshooting

If the `pip` install fails, try installing the build dependencies first:

Option A:
<div style="text-align:">
<pre><code class="language-shell">sudo apt-get update && sudo apt-get install build-essential cargo rustc -y</code></pre>
</div>

Option B:
<div style="text-align:">
<pre><code class="language-shell">conda install rust -c conda-forge</code></pre>
</div>

Option C:
<div style="text-align:">
<pre><code class="language-shell">curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh && . "$HOME/.cargo/env"</code></pre>
</div>

## Quick Start

A minimal example of how to run NCUT:

```py linenums="1"
import torch
from ncut_pytorch import NCUT, rgb_from_tsne_3d

model_features = torch.rand(20, 64, 64, 768)  # (B, H, W, C)

inp = model_features.reshape(-1, 768)  # flatten
eigvectors, eigvalues = NCUT(num_eig=100, device='cuda:0').fit_transform(inp)
tsne_x3d, tsne_rgb = rgb_from_tsne_3d(eigvectors, device='cuda:0')

eigvectors = eigvectors.reshape(20, 64, 64, 100)  # (B, H, W, num_eig)
tsne_rgb = tsne_rgb.reshape(20, 64, 64, 3)  # (B, H, W, 3)
```
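The resulting `tsne_rgb` can be previewed one image at a time, for example with matplotlib. This snippet is illustrative and not part of the package; it assumes `tsne_rgb` is a torch tensor with values in [0, 1]:

```py
import matplotlib.pyplot as plt

# tsne_rgb from the example above, shape (B, H, W, 3)
plt.imshow(tsne_rgb[0].cpu().numpy())  # show the coloring for the first image
plt.axis("off")
plt.show()
```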
#### Load Feature Extractor Model

Any backbone model works as a plug-in feature extractor.
We have implemented a number of backbone models; here is the list of available models:

```py
from ncut_pytorch.backbone import list_models
print(list_models())
[
    'SAM2(sam2_hiera_t)', 'SAM2(sam2_hiera_s)', 'SAM2(sam2_hiera_b+)', 'SAM2(sam2_hiera_l)',
    'SAM(sam_vit_b)', 'SAM(sam_vit_l)', 'SAM(sam_vit_h)', 'MobileSAM(TinyViT)',
    'DiNOv2reg(dinov2_vits14_reg)', 'DiNOv2reg(dinov2_vitb14_reg)', 'DiNOv2reg(dinov2_vitl14_reg)', 'DiNOv2reg(dinov2_vitg14_reg)',
    'DiNOv2(dinov2_vits14)', 'DiNOv2(dinov2_vitb14)', 'DiNOv2(dinov2_vitl14)', 'DiNOv2(dinov2_vitg14)',
    'DiNO(dino_vits8_896)', 'DiNO(dino_vitb8_896)', 'DiNO(dino_vits8_672)', 'DiNO(dino_vitb8_672)', 'DiNO(dino_vits8_448)', 'DiNO(dino_vitb8_448)', 'DiNO(dino_vits16_448)', 'DiNO(dino_vitb16_448)',
    'Diffusion(stabilityai/stable-diffusion-2)', 'Diffusion(CompVis/stable-diffusion-v1-4)', 'Diffusion(stabilityai/stable-diffusion-3-medium-diffusers)',
    'CLIP(ViT-B-16/openai)', 'CLIP(ViT-L-14/openai)', 'CLIP(ViT-H-14/openai)', 'CLIP(ViT-B-16/laion2b_s34b_b88k)',
    'CLIP(convnext_base_w_320/laion_aesthetic_s13b_b82k)', 'CLIP(convnext_large_d_320/laion2b_s29b_b131k_ft_soup)', 'CLIP(convnext_xxlarge/laion2b_s34b_b82k_augreg_soup)',
    'CLIP(eva02_base_patch14_448/mim_in22k_ft_in1k)', 'CLIP(eva02_large_patch14_448/mim_m38m_ft_in22k_in1k)',
    'MAE(vit_base)', 'MAE(vit_large)', 'MAE(vit_huge)',
    'ImageNet(vit_base)'
]
```

#### Image model example:

```py linenums="1"
import torch
from ncut_pytorch import NCUT, rgb_from_tsne_3d
from ncut_pytorch.backbone import load_model, extract_features

model = load_model(model_name="SAM(sam_vit_b)")
images = torch.rand(20, 3, 1024, 1024)
model_features = extract_features(images, model, node_type='attn', layer=6)
# model_features = model(images)['attn'][6]  # this also works

inp = model_features.reshape(-1, 768)  # flatten
eigvectors, eigvalues = NCUT(num_eig=100, device='cuda:0').fit_transform(inp)
tsne_x3d, tsne_rgb = rgb_from_tsne_3d(eigvectors, device='cuda:0')

eigvectors = eigvectors.reshape(20, 64, 64, 100)  # (B, H, W, num_eig)
tsne_rgb = tsne_rgb.reshape(20, 64, 64, 3)  # (B, H, W, 3)
```

#### Text model example:

```py linenums="1"
import os
from ncut_pytorch import NCUT, rgb_from_tsne_3d
from ncut_pytorch.backbone_text import load_text_model

os.environ['HF_ACCESS_TOKEN'] = "your_huggingface_token"
llama = load_text_model("meta-llama/Meta-Llama-3.1-8B").cuda()
output_dict = llama("The quick white fox jumps over the lazy cat.")

model_features = output_dict['block'][31].squeeze(0)  # 32nd block output
token_texts = output_dict['token_texts']
eigvectors, eigvalues = NCUT(num_eig=5, device='cuda:0').fit_transform(model_features)
tsne_x3d, tsne_rgb = rgb_from_tsne_3d(eigvectors, device='cuda:0')
# eigvectors.shape[0] == tsne_rgb.shape[0] == len(token_texts)
```

---

> paper in prep, Yang 2024
>
> AlignedCut: Visual Concepts Discovery on Brain-Guided Universal Feature Space, Huzheng Yang, James Gee\*, Jianbo Shi\*, 2024
>
> Normalized Cuts and Image Segmentation, Jianbo Shi and Jitendra Malik, 2000
nystrom_ncut-0.0.1/pyproject.toml

@@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "nystrom_ncut"
version = "0.0.1"
authors = [
    { name = "Huzheng Yang", email = "huze.yann@gmail.com" },
    { name = "Wentinn Liao", email = "wentinn.liao@gmail.com" },
]
description = "Normalized Cut and Nyström Approximation"
readme = "README.md"
requires-python = ">=3"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

[project.urls]
Documentation = "https://github.com/JophiArcana/Nystrom-NCUT/"
Github = "https://github.com/JophiArcana/Nystrom-NCUT/"
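Once the package is installed, the metadata declared above is queryable at runtime through the standard library; a small sketch:

```py
from importlib.metadata import metadata, version

print(version("nystrom_ncut"))  # "0.0.1"
info = metadata("nystrom_ncut")
print(info["Summary"])          # "Normalized Cut and Nyström Approximation"
```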
nystrom_ncut-0.0.1/src/nystrom_ncut/__init__.py

@@ -0,0 +1,22 @@
from .ncut_pytorch import NCUT
from .propagation_utils import (
    affinity_from_features,
    propagate_eigenvectors,
    propagate_knn,
    quantile_normalize,
)
from .visualize_utils import (
    eigenvector_to_rgb,
    rgb_from_tsne_3d,
    rgb_from_umap_sphere,
    rgb_from_tsne_2d,
    rgb_from_umap_3d,
    rgb_from_umap_2d,
    rgb_from_cosine_tsne_3d,
    rotate_rgb_cube,
    convert_to_lab_color,
    propagate_rgb_color,
    get_mask,
)
from .ncut_pytorch import nystrom_ncut, ncut
from .ncut_pytorch import kway_ncut, axis_align