neuro-sam 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. neuro_sam/__init__.py +1 -0
  2. neuro_sam/brightest_path_lib/__init__.py +5 -0
  3. neuro_sam/brightest_path_lib/algorithm/__init__.py +3 -0
  4. neuro_sam/brightest_path_lib/algorithm/astar.py +586 -0
  5. neuro_sam/brightest_path_lib/algorithm/waypointastar.py +449 -0
  6. neuro_sam/brightest_path_lib/algorithm/waypointastar_speedup.py +1007 -0
  7. neuro_sam/brightest_path_lib/connected_componen.py +329 -0
  8. neuro_sam/brightest_path_lib/cost/__init__.py +8 -0
  9. neuro_sam/brightest_path_lib/cost/cost.py +33 -0
  10. neuro_sam/brightest_path_lib/cost/reciprocal.py +90 -0
  11. neuro_sam/brightest_path_lib/cost/reciprocal_transonic.py +86 -0
  12. neuro_sam/brightest_path_lib/heuristic/__init__.py +2 -0
  13. neuro_sam/brightest_path_lib/heuristic/euclidean.py +101 -0
  14. neuro_sam/brightest_path_lib/heuristic/heuristic.py +29 -0
  15. neuro_sam/brightest_path_lib/image/__init__.py +1 -0
  16. neuro_sam/brightest_path_lib/image/stats.py +197 -0
  17. neuro_sam/brightest_path_lib/input/__init__.py +1 -0
  18. neuro_sam/brightest_path_lib/input/inputs.py +14 -0
  19. neuro_sam/brightest_path_lib/node/__init__.py +2 -0
  20. neuro_sam/brightest_path_lib/node/bidirectional_node.py +240 -0
  21. neuro_sam/brightest_path_lib/node/node.py +125 -0
  22. neuro_sam/brightest_path_lib/visualization/__init__.py +4 -0
  23. neuro_sam/brightest_path_lib/visualization/flythrough.py +133 -0
  24. neuro_sam/brightest_path_lib/visualization/flythrough_all.py +394 -0
  25. neuro_sam/brightest_path_lib/visualization/tube_data.py +385 -0
  26. neuro_sam/brightest_path_lib/visualization/tube_flythrough.py +227 -0
  27. neuro_sam/napari_utils/anisotropic_scaling.py +503 -0
  28. neuro_sam/napari_utils/color_utils.py +135 -0
  29. neuro_sam/napari_utils/contrasting_color_system.py +169 -0
  30. neuro_sam/napari_utils/main_widget.py +1016 -0
  31. neuro_sam/napari_utils/path_tracing_module.py +1016 -0
  32. neuro_sam/napari_utils/punet_widget.py +424 -0
  33. neuro_sam/napari_utils/segmentation_model.py +769 -0
  34. neuro_sam/napari_utils/segmentation_module.py +649 -0
  35. neuro_sam/napari_utils/visualization_module.py +574 -0
  36. neuro_sam/plugin.py +260 -0
  37. neuro_sam/punet/__init__.py +0 -0
  38. neuro_sam/punet/deepd3_model.py +231 -0
  39. neuro_sam/punet/prob_unet_deepd3.py +431 -0
  40. neuro_sam/punet/prob_unet_with_tversky.py +375 -0
  41. neuro_sam/punet/punet_inference.py +236 -0
  42. neuro_sam/punet/run_inference.py +145 -0
  43. neuro_sam/punet/unet_blocks.py +81 -0
  44. neuro_sam/punet/utils.py +52 -0
  45. neuro_sam-0.1.0.dist-info/METADATA +269 -0
  46. neuro_sam-0.1.0.dist-info/RECORD +93 -0
  47. neuro_sam-0.1.0.dist-info/WHEEL +5 -0
  48. neuro_sam-0.1.0.dist-info/entry_points.txt +2 -0
  49. neuro_sam-0.1.0.dist-info/licenses/LICENSE +21 -0
  50. neuro_sam-0.1.0.dist-info/top_level.txt +2 -0
  51. sam2/__init__.py +11 -0
  52. sam2/automatic_mask_generator.py +454 -0
  53. sam2/benchmark.py +92 -0
  54. sam2/build_sam.py +174 -0
  55. sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
  56. sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
  57. sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
  58. sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
  59. sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
  60. sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
  61. sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
  62. sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
  63. sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
  64. sam2/configs/train.yaml +335 -0
  65. sam2/modeling/__init__.py +5 -0
  66. sam2/modeling/backbones/__init__.py +5 -0
  67. sam2/modeling/backbones/hieradet.py +317 -0
  68. sam2/modeling/backbones/image_encoder.py +134 -0
  69. sam2/modeling/backbones/utils.py +93 -0
  70. sam2/modeling/memory_attention.py +169 -0
  71. sam2/modeling/memory_encoder.py +181 -0
  72. sam2/modeling/position_encoding.py +239 -0
  73. sam2/modeling/sam/__init__.py +5 -0
  74. sam2/modeling/sam/mask_decoder.py +295 -0
  75. sam2/modeling/sam/prompt_encoder.py +202 -0
  76. sam2/modeling/sam/transformer.py +311 -0
  77. sam2/modeling/sam2_base.py +911 -0
  78. sam2/modeling/sam2_utils.py +323 -0
  79. sam2/sam2.1_hiera_b+.yaml +116 -0
  80. sam2/sam2.1_hiera_l.yaml +120 -0
  81. sam2/sam2.1_hiera_s.yaml +119 -0
  82. sam2/sam2.1_hiera_t.yaml +121 -0
  83. sam2/sam2_hiera_b+.yaml +113 -0
  84. sam2/sam2_hiera_l.yaml +117 -0
  85. sam2/sam2_hiera_s.yaml +116 -0
  86. sam2/sam2_hiera_t.yaml +118 -0
  87. sam2/sam2_image_predictor.py +475 -0
  88. sam2/sam2_video_predictor.py +1222 -0
  89. sam2/sam2_video_predictor_legacy.py +1172 -0
  90. sam2/utils/__init__.py +5 -0
  91. sam2/utils/amg.py +348 -0
  92. sam2/utils/misc.py +349 -0
  93. sam2/utils/transforms.py +118 -0
neuro_sam/punet/run_inference.py
@@ -0,0 +1,145 @@
+
+ import argparse
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ import tifffile as tiff
+ from tqdm import tqdm
+
+ from prob_unet_with_tversky import ProbabilisticUnetDualLatent
+
+
+ def pad_to_multiple(img: np.ndarray, multiple: int = 32):
+     """Pad HxW to the next multiple, using reflect padding to keep context."""
+     H, W = img.shape
+     pad_h = (multiple - H % multiple) % multiple
+     pad_w = (multiple - W % multiple) % multiple
+     if pad_h == 0 and pad_w == 0:
+         return img, (0, 0)
+     img_p = np.pad(img, ((0, pad_h), (0, pad_w)), mode="reflect")
+     return img_p, (pad_h, pad_w)
+
+
+ @torch.no_grad()
+ def infer_slice(model, device, img_2d: np.ndarray, mc_samples: int = 8):
+     """
+     img_2d: float32 in [0,1], shape HxW
+     returns: (prob_dend, prob_spine) each HxW float32
+     """
+     # pad for UNet down/upsampling safety
+     x_np, (ph, pw) = pad_to_multiple(img_2d, multiple=32)
+     # to tensor [B,C,H,W] = [1,1,H,W]
+     x = torch.from_numpy(x_np).unsqueeze(0).unsqueeze(0).to(device)
+
+
+     model.forward(x, training=False)
+
+     # Multi-sample averaging from prior
+     pd_list, ps_list = [], []
+     for _ in range(max(1, mc_samples)):
+         ld, ls = model.sample(testing=True, use_posterior=False)
+         pd_list.append(torch.sigmoid(ld))
+         ps_list.append(torch.sigmoid(ls))
+
+     pd = torch.stack(pd_list, 0).mean(0)  # [1,1,H,W]
+     ps = torch.stack(ps_list, 0).mean(0)
+
+     # back to numpy, remove padding
+     pd_np = pd.squeeze().float().cpu().numpy()
+     ps_np = ps.squeeze().float().cpu().numpy()
+     if ph or pw:
+         pd_np = pd_np[: pd_np.shape[0] - ph, : pd_np.shape[1] - pw]
+         ps_np = ps_np[: ps_np.shape[0] - ph, : ps_np.shape[1] - pw]
+     return pd_np.astype(np.float32), ps_np.astype(np.float32)
+
+
+ def main():
+     ap = argparse.ArgumentParser(description="Inference on DeepD3_Benchmark.tif with Dual-Latent Prob-UNet")
+     ap.add_argument("--weights", required=True, help="Path to checkpoint .pth (with model_state_dict)")
+     ap.add_argument("--tif", required=True, help="Path to DeepD3_Benchmark.tif")
+     ap.add_argument("--out", required=True, help="Output directory")
+     ap.add_argument("--samples", type=int, default=16, help="MC samples per slice (default: 16)")
+     ap.add_argument("--thr_d", type=float, default=0.5, help="Threshold for dendrite mask save")
+     ap.add_argument("--thr_s", type=float, default=0.5, help="Threshold for spine mask save")
+     ap.add_argument("--save_bin", action="store_true", help="Also save thresholded uint8 masks")
+     args = ap.parse_args()
+
+     outdir = Path(args.out)
+     outdir.mkdir(parents=True, exist_ok=True)
+
+     if torch.cuda.is_available():
+         device = torch.device("cuda")
+     elif torch.backends.mps.is_available():
+         device = torch.device("mps")
+     else:
+         device = torch.device("cpu")
+     print(f"Device: {device}")
+
+     model = ProbabilisticUnetDualLatent(
+         input_channels=1,
+         num_classes=1,
+         num_filters=[32, 64, 128, 192],
+         latent_dim_dendrite=12,
+         latent_dim_spine=12,
+         no_convs_fcomb=4,
+         recon_loss="tversky",
+         tversky_alpha=0.3, tversky_beta=0.7, tversky_gamma=1.0,
+         beta_dendrite=1.0, beta_spine=1.0,
+         loss_weight_dendrite=1.0, loss_weight_spine=1.0,
+     ).to(device)
+     model.eval()
+
+     # Load checkpoint
+     print(f"Loading checkpoint: {args.weights}")
+     ckpt = torch.load(args.weights, map_location=device, weights_only=False)
+     state = ckpt.get("model_state_dict", ckpt)
+     model.load_state_dict(state, strict=True)
+
+     print(f"Reading: {args.tif}")
+     vol = tiff.imread(args.tif)  # shape: (Z,H,W) or (H,W)
+     if vol.ndim == 2:
+         vol = vol[np.newaxis, ...]
+     Z, H, W = vol.shape
+     print(f"Volume shape: Z={Z}, H={H}, W={W}")
+
+     # Output arrays (float32)
+     prob_d = np.zeros((Z, H, W), dtype=np.float32)
+     prob_s = np.zeros((Z, H, W), dtype=np.float32)
+
+     # ----- Run inference per slice -----
+     for z in tqdm(range(Z), desc="Inferring"):
+         img = vol[z].astype(np.float32)
+         # per-slice min-max normalization, avoiding division by zero
+         vmin, vmax = float(img.min()), float(img.max())
+         if vmax > vmin:
+             img = (img - vmin) / (vmax - vmin)
+         else:
+             img = np.zeros_like(img, dtype=np.float32)
+
+         pd, ps = infer_slice(model, device, img, mc_samples=args.samples)
+         prob_d[z] = pd
+         prob_s[z] = ps
+
+     prob_d_path = outdir / "DeepD3_Benchmark_prob_dendrite.tif"
+     prob_s_path = outdir / "DeepD3_Benchmark_prob_spine.tif"
+     tiff.imwrite(prob_d_path.as_posix(), prob_d, dtype=np.float32)
+     tiff.imwrite(prob_s_path.as_posix(), prob_s, dtype=np.float32)
+     print(f"Saved: {prob_d_path}")
+     print(f"Saved: {prob_s_path}")
+
+     if args.save_bin:
+         bin_d = (prob_d >= args.thr_d).astype(np.uint8) * 255
+         bin_s = (prob_s >= args.thr_s).astype(np.uint8) * 255
+         bin_d_path = outdir / f"DeepD3_Benchmark_mask_dendrite_thr{args.thr_d:.2f}.tif"
+         bin_s_path = outdir / f"DeepD3_Benchmark_mask_spine_thr{args.thr_s:.2f}.tif"
+         tiff.imwrite(bin_d_path.as_posix(), bin_d, dtype=np.uint8)
+         tiff.imwrite(bin_s_path.as_posix(), bin_s, dtype=np.uint8)
+         print(f"Saved: {bin_d_path}")
+         print(f"Saved: {bin_s_path}")
+
+     print("Done.")
+
+
+ if __name__ == "__main__":
+     main()
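
Editor's note: for readers who want to reuse the per-slice routine above outside the CLI, here is a minimal, hedged sketch. It assumes the script's flat imports resolve (i.e. it is run from `neuro_sam/punet/`), uses placeholder file paths, and simply mirrors the constructor arguments from `main()`; it is not part of the package.

```python
# Hedged sketch (not part of the package): calling infer_slice() programmatically.
# Paths are placeholders; constructor arguments mirror main() in run_inference.py.
import numpy as np
import tifffile as tiff
import torch

from prob_unet_with_tversky import ProbabilisticUnetDualLatent
from run_inference import infer_slice

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ProbabilisticUnetDualLatent(
    input_channels=1, num_classes=1, num_filters=[32, 64, 128, 192],
    latent_dim_dendrite=12, latent_dim_spine=12, no_convs_fcomb=4,
    recon_loss="tversky", tversky_alpha=0.3, tversky_beta=0.7, tversky_gamma=1.0,
    beta_dendrite=1.0, beta_spine=1.0, loss_weight_dendrite=1.0, loss_weight_spine=1.0,
).to(device)
ckpt = torch.load("checkpoint.pth", map_location=device, weights_only=False)  # placeholder path
model.load_state_dict(ckpt.get("model_state_dict", ckpt))
model.eval()

vol = tiff.imread("stack.tif")                       # placeholder path, assumed (Z, H, W)
img = (vol[0] if vol.ndim == 3 else vol).astype(np.float32)
img = (img - img.min()) / max(float(img.max() - img.min()), 1e-8)  # min-max normalize to [0, 1]
prob_dend, prob_spine = infer_slice(model, device, img, mc_samples=8)
print(prob_dend.shape, prob_spine.shape)
```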
neuro_sam/punet/unet_blocks.py
@@ -0,0 +1,81 @@
+ import torch
+ import torch.nn as nn
+ from torch.autograd import Variable
+ import numpy as np
+ from utils import init_weights
+
+ class DownConvBlock(nn.Module):
+     """
+     A block of three convolutional layers, each followed by a non-linear activation function.
+     Between blocks, a pooling operation is applied.
+     """
+     def __init__(self, input_dim, output_dim, initializers, padding, pool=True):
+         super(DownConvBlock, self).__init__()
+         layers = []
+
+         if pool:
+             # layers.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True))
+             layers.append(nn.AdaptiveAvgPool2d((None, None)))
+             layers.append(nn.AvgPool2d(kernel_size=2, stride=2))
+
+         layers.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=1, padding=int(padding)))
+         layers.append(nn.ReLU(inplace=True))
+         layers.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=int(padding)))
+         layers.append(nn.ReLU(inplace=True))
+         layers.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=int(padding)))
+         layers.append(nn.ReLU(inplace=True))
+
+         self.layers = nn.Sequential(*layers)
+
+         self.layers.apply(init_weights)
+
+     def forward(self, patch):
+         return self.layers(patch)
+
+
+ class UpConvBlock(nn.Module):
+     """
+     A block consisting of an upsampling layer, followed by a convolutional layer to reduce the number of channels, and then a DownConvBlock.
+     If bilinear is set to False, a transposed convolution is used instead of upsampling.
+     """
+     def __init__(self, input_dim, output_dim, initializers, padding, bilinear=True):
+         super(UpConvBlock, self).__init__()
+         self.bilinear = bilinear
+
+         if not self.bilinear:
+             self.upconv_layer = nn.ConvTranspose2d(input_dim, output_dim, kernel_size=2, stride=2)
+             self.upconv_layer.apply(init_weights)
+
+         self.conv_block = DownConvBlock(input_dim, output_dim, initializers, padding, pool=False)
+
+     # def forward(self, x, bridge):
+     #     if self.bilinear:
+     #         up = nn.functional.interpolate(x, mode='bilinear', scale_factor=2, align_corners=True)
+     #     else:
+     #         up = self.upconv_layer(x)
+
+     #     assert up.shape[3] == bridge.shape[3]
+     #     out = torch.cat([up, bridge], 1)
+     #     out = self.conv_block(out)
+
+     #     return out
+
+     def forward(self, x, bridge):
+         # if self.bilinear:
+         #     up = nn.functional.interpolate(x, size=bridge.shape[2:], mode='bilinear', align_corners=True)
+         # else:
+         #     up = self.upconv_layer(x)
+         if self.bilinear:
+             up = nn.functional.interpolate(x, size=bridge.shape[2:], mode='bilinear', align_corners=True)
+         else:
+             up = self.upconv_layer(x)
+
+         # Instead of assert, handle mismatches dynamically
+         diffY = bridge.size()[2] - up.size()[2]
+         diffX = bridge.size()[3] - up.size()[3]
+
+         up = nn.functional.pad(up, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
+
+         out = torch.cat([up, bridge], 1)
+         out = self.conv_block(out)
+         return out
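
Editor's note: the pad-and-concatenate step in `UpConvBlock.forward` is the part most likely to trip readers up, so here is a small standalone illustration of the same logic on toy tensors. The shapes are made up; only public PyTorch calls are used.

```python
# Standalone illustration of the skip-connection padding used in UpConvBlock.forward:
# the upsampled decoder feature map is padded so its spatial size matches the encoder
# "bridge" tensor before concatenation along the channel axis.
import torch
import torch.nn.functional as F

up = torch.randn(1, 64, 31, 31)      # upsampled decoder features (odd spatial size)
bridge = torch.randn(1, 64, 32, 32)  # encoder skip connection

diffY = bridge.size(2) - up.size(2)
diffX = bridge.size(3) - up.size(3)
# pad order is (left, right, top, bottom) for the last two dimensions
up = F.pad(up, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])

out = torch.cat([up, bridge], dim=1)
print(out.shape)  # torch.Size([1, 128, 32, 32])
```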
neuro_sam/punet/utils.py
@@ -0,0 +1,52 @@
+ # File: utils.py (Updated for Hierarchical Probabilistic U-Net)
+
+ import torch
+ import torch.nn as nn
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ def truncated_normal_(tensor, mean=0, std=1):
+     size = tensor.shape
+     tmp = tensor.new_empty(size + (4,)).normal_()
+     valid = (tmp < 2) & (tmp > -2)
+     ind = valid.max(-1, keepdim=True)[1]
+     tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
+     tensor.data.mul_(std).add_(mean)
+
+ def init_weights(m):
+     if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
+         nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
+         truncated_normal_(m.bias, mean=0, std=0.001)
+
+ def init_weights_orthogonal_normal(m):
+     if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
+         nn.init.orthogonal_(m.weight)
+         truncated_normal_(m.bias, mean=0, std=0.001)
+
+ def l2_regularisation(m):
+     l2_reg = None
+     for W in m.parameters():
+         if l2_reg is None:
+             l2_reg = W.norm(2)
+         else:
+             l2_reg = l2_reg + W.norm(2)
+     return l2_reg
+
+ # Latent Visualization for Experimentation
+ def visualize_latent_distribution(latent_list, title_prefix=""):
+     for idx, z in enumerate(latent_list):
+         mean = z.base_dist.loc.mean().item()
+         std = z.base_dist.scale.mean().item()
+         print(f"{title_prefix} Latent Level {idx+1} -> Mean: {mean:.4f}, Std: {std:.4f}")
+
+ # Save Prediction and Mask Images for Debugging
+ def save_mask_prediction_example(mask, pred, iter_id, save_path='images/'):
+     plt.imshow(pred, cmap='Greys')
+     plt.axis('off')
+     plt.savefig(f'{save_path}{iter_id}_prediction.png', bbox_inches='tight')
+     plt.close()
+
+     plt.imshow(mask, cmap='Greys')
+     plt.axis('off')
+     plt.savefig(f'{save_path}{iter_id}_mask.png', bbox_inches='tight')
+     plt.close()
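
Editor's note: a brief usage sketch for the initializers above. It assumes `neuro_sam/punet/` is on the import path so `utils` resolves; the small convolutional stack is only illustrative.

```python
# Hedged usage sketch for init_weights(): Kaiming weights plus truncated-normal biases.
import torch.nn as nn
from utils import init_weights  # assumes neuro_sam/punet/ is on the path

block = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(32, 32, kernel_size=3, padding=1),
)
# apply() walks every submodule; only Conv2d/ConvTranspose2d layers are re-initialized
block.apply(init_weights)
```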
neuro_sam-0.1.0.dist-info/METADATA
@@ -0,0 +1,269 @@
+ Metadata-Version: 2.4
+ Name: neuro-sam
+ Version: 0.1.0
+ Summary: Neuro-SAM: Foundation Models for Dendrite and Dendritic Spine Segmentation
+ Author-email: Nipun Arora <nipun.arora@fau.de>
+ License: MIT License
+
+ Copyright (c) 2024 Nipun Arora
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ Project-URL: Homepage, https://github.com/nipunarora8/Neuro-SAM
+ Project-URL: Bug Tracker, https://github.com/nipunarora8/Neuro-SAM/issues
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Framework :: napari
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: napari
+ Requires-Dist: numpy
+ Requires-Dist: scipy
+ Requires-Dist: imageio
+ Requires-Dist: torch>=2.0.0
+ Requires-Dist: torchvision>=0.15.0
+ Requires-Dist: hydra-core>=1.3.2
+ Requires-Dist: iopath>=0.1.10
+ Requires-Dist: pillow>=9.4.0
+ Requires-Dist: tqdm>=4.66.1
+ Requires-Dist: vispy
+ Requires-Dist: qtpy
+ Requires-Dist: superqt
+ Requires-Dist: magicgui
+ Requires-Dist: scikit-image
+ Requires-Dist: tifffile
+ Dynamic: license-file
+
+ <div align="center">
+
+ # Neuro-SAM
+ #### Foundation Models for Dendrite and Dendritic Spine Segmentation
+
+ [![python](https://img.shields.io/badge/-Python_3.10-blue?logo=python&logoColor=white)](https://github.com/pre-commit/pre-commit)
+ [![pytorch](https://img.shields.io/badge/PyTorch_2.0+-ee4c2c?logo=pytorch&logoColor=white)](https://pytorch.org/get-started/locally/)
+ [![wandb](https://img.shields.io/badge/Weights_&_Biases-FFCC33?logo=WeightsAndBiases&logoColor=black)](https://wandb.ai/site)
+
+ This project demonstrates an interactive UI to segment dendrites and dendritic spines.
+ The model of choice is SAMv2, and the framework used is PyTorch.
+
+ ![A stack of neural dendrites and dendritic spines](./assets/merged_dendrites_spines.gif "Dendrites and Dendritic Spines")
+
+ </div>
+
+ ### 📝 Table of Contents
+
+ - [Neuro-SAM](#neuro-sam)
+   - [📑 Table of Contents](#--table-of-contents)
+   - [🧠 Overview](#-overview)
+   - [📦 Built With](#-built-with)
+   - [📂 Repository Structure](#-repository-structure)
+   - [🚀 Installation](#-installation)
+   - [📊 Usage](#-usage)
+   - [🛠 Workflow](#-workflow)
+   - [🧑‍💻 Model Training](#-model-training)
+   - [📁 Data Format](#-data-format)
+   - [📄 License](#-license)
+   - [📬 Contact](#-contact)
+
+
+ ### 🧠 Overview
+
+ Neuro-SAM provides an end-to-end pipeline for analyzing neural structures from 3D microscopy data, featuring:
+
+ - **Path Tracing**: Waypoint-based A* pathfinding
+ - **Dendrite Segmentation**: SAM2-based dendrite segmentation
+ - **Smart Spine Detection**: Multi-view analysis for spine detection
+ - **Spine Segmentation**: Individual spine segmentation using a trained SAM2 model
+
+ ### 📦 Built With
+
+ [PyTorch](https://pytorch.org) - an open-source machine learning library for Python, widely used for deep learning applications.
+
+ [Segment Anything Model](https://segment-anything.com) - a foundation model for segmentation, built by Meta AI.
+
+ [Weights and Biases](https://wandb.ai/site) - a tool for tracking and visualizing machine learning experiments.
+
+ [Visual Studio Code](https://code.visualstudio.com/) - a code editor redefined and optimized for building applications.
+
+ [FAU High Performance Computing](https://doc.nhr.fau.de/) - a high-performance computing cluster at Friedrich-Alexander-Universität Erlangen-Nürnberg.
+
+ ### 📁 Repository Structure
+
+ ```
+ Neuro-SAM/
+ ├── Train-SAMv2/                     # SAM2 training infrastructure
+ │   ├── sam2/                        # SAM2 model implementation
+ │   ├── checkpoints/                 # Model checkpoints
+ │   ├── results/                     # Trained model outputs
+ │   ├── utils/                       # Training utilities
+ │   ├── train_dendrites.py           # Dendrite model training
+ │   └── train_spines.py              # Spine model training
+ ├── brightest_path_lib/              # Advanced pathfinding algorithms
+ │   ├── algorithm/                   # A* and waypoint search implementations
+ │   ├── cost/                        # Cost function definitions
+ │   ├── heuristic/                   # Heuristic functions
+ │   ├── visualization/               # Path visualization tools
+ │   └── ...
+ ├── napari_utils/                    # Napari plugin components
+ │   ├── main_widget.py               # Main interface with anisotropic scaling
+ │   ├── path_tracing_module.py       # Interactive path tracing
+ │   ├── segmentation_module.py       # Dendrite segmentation interface
+ │   ├── spine_detection_module.py    # Spine detection with smart tracking
+ │   ├── spine_segmentation_module.py # Individual spine segmentation
+ │   └── visualization_module.py      # Path management and visualization
+ └── neuro_sam_plugin.py              # Main plugin entry point
+ ```
+
+ ### 🚀 Installation
+
+ #### Prerequisites
+
+ - Python 3.10+
+ - CUDA-compatible GPU (recommended)
+ - Conda/Miniconda
+
+ #### Environment Setup
+
+ 1. **Clone the repository:**
+    ```bash
+    git clone https://github.com/nipunarora8/Neuro-SAM.git
+    cd Neuro-SAM
+    ```
+
+ 2. **Create local environment:**
+    ```bash
+    conda create -p ./.venv python=3.10 -c conda-forge
+    conda activate ./.venv
+    ```
+
+ 3. **Install dependencies:**
+    ```bash
+    pip install uv
+    uv sync
+    ```
+
+ 4. **Download SAM2 checkpoints:**
+    ```bash
+    cd Train-SAMv2/checkpoints
+    bash download_ckpts.sh
+    ```
+
+ ### 📊 Usage
+
+ #### Quick Start
+
+ ```python
+ from neuro_sam_plugin import run_neuro_sam
+
+ # Launch with default spacing (94nm x 94nm x 500nm)
+ viewer = run_neuro_sam(image_path="your_image.tif")
+
+ # Launch with custom voxel spacing
+ viewer = run_neuro_sam(
+     image_path="your_image.tif",
+     spacing_xyz=(100.0, 100.0, 300.0)  # X, Y, Z spacing in nm
+ )
+ ```
+
+ #### Command Line Interface
+
+ ```bash
+ # Basic usage
+ python neuro_sam_plugin.py --image_path /path/to/your/image.tif
+
+ # Custom spacing
+ python neuro_sam_plugin.py --image_path image.tif \
+     --x-spacing 100.0 --y-spacing 100.0 --z-spacing 300.0
+
+ # Load benchmark dataset
+ python neuro_sam_plugin.py
+ ```
+
+ ### 🔬 Workflow
+
+ #### 1. **Configure Voxel Spacing**
+ Set accurate X, Y, Z voxel spacing in the "Path Tracing" tab for proper anisotropic scaling:
+ - Typical two-photon: 94nm × 94nm × 500nm
+ - Confocal: varies by objective and zoom
+
+ #### 2. **Trace Dendritic Paths**
+ - Click waypoints along dendrite structures
+ - The algorithm automatically finds optimal brightest paths
+
+ #### 3. **Segment Dendrites**
+ - Load the pre-trained SAMv2 dendrite model
+ - Segment individual paths with SAMv2
+
+ #### 4. **Detect Spines**
+ - Smart multi-view detection using tube data generation
+ - Angle-based matching between 2D and tubular views
+
+ #### 5. **Segment Spines**
+ - Fine-grained spine segmentation using a specialized SAMv2 model
+ - Dendrite mask overlay to suppress background signal
+ - Manual point extension across frames
+ - Contrasting color system for visualization
+
+ ### 🔧 Model Training
+
+ #### Dendrite Model
+ ```bash
+ cd Train-SAMv2
+ python train_dendrites.py --ppn 20 --pnn 10 --batch_size 32 --model_name "small"
+ ```
+
+ #### Spine Model
+ ```bash
+ python train_spines.py --model_name "small" --batch_size 16
+ ```
+
+ ### 📁 Data Format
+
+ #### Input Requirements
+ - **Image Format**: TIFF, .d3set (to reproduce training results)
+ - **Dimensions**: 3D volumes (Z×Y×X)
+ - **Bit Depth**: 8-bit or 16-bit grayscale
+ - **Size**: Tested up to 2048×2048×500 voxels
+
+ #### Output Formats
+ - **Paths**: NumPy arrays with coordinates
+ - **Masks**: Binary TIFF volumes
+
+ ### 📄 License
+
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+
+ #### Useful VSCode Extensions
+
+ - [Remote Explorer](https://marketplace.visualstudio.com/items?itemName=ms-vscode.remote-explorer) - Open projects on remote servers.
+ - [Log Viewer](https://marketplace.visualstudio.com/items?itemName=berublan.vscode-log-viewer) - A log monitoring extension.
+ - [Black Formatter](https://marketplace.visualstudio.com/items?itemName=ms-python.black-formatter) - Python auto code formatter.
+ - [Markdown All in One](https://marketplace.visualstudio.com/items?itemName=yzhang.markdown-all-in-one) - Markdown preview and editing.
+
+ ### 📬 Contact
+
+ Nipun Arora - nipun.arora@fau.de
+
+ ---
+ <div align="center">
+ <b>Made with ♥️ at Anki Lab 🧠✨</b>
+ </div>
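
Editor's note: the README above states that masks are written as binary TIFF volumes. A minimal sketch for inspecting such an output, using the `tifffile` dependency declared in the metadata, could look like this; the file name is a placeholder.

```python
# Hedged sketch: loading a saved binary mask volume and reporting simple statistics.
import numpy as np
import tifffile as tiff

mask = tiff.imread("dendrite_mask.tif")  # placeholder name; expected shape (Z, Y, X)
print("shape:", mask.shape, "dtype:", mask.dtype)
print("foreground voxels:", int(np.count_nonzero(mask)))
```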
neuro_sam-0.1.0.dist-info/RECORD
@@ -0,0 +1,93 @@
+ neuro_sam/__init__.py,sha256=0pbMqpI_nQyhP0_pfTaIg97FVEcFkS5w8gQrsMiBcG4,34
+ neuro_sam/plugin.py,sha256=pL2zJtslbuinkXUOJFVbuGwSIVIf45wVGiPAJ2XUZ-I,10092
+ neuro_sam/brightest_path_lib/__init__.py,sha256=vU3VvX26D2c9B26Lid09uThzKMQJYPx622SkPhxNlDI,123
+ neuro_sam/brightest_path_lib/connected_componen.py,sha256=x_kjDGZ_8U2ks9kZJOOyM8_ow84UugAsPUByI-NlXFk,12734
+ neuro_sam/brightest_path_lib/algorithm/__init__.py,sha256=XFYxFyx97FG7lK3_j7uA8GgunpfkOcAo2NIJU0GUn40,170
+ neuro_sam/brightest_path_lib/algorithm/astar.py,sha256=3F506tW29_XFn6HCAqf-DucDNF1Fwlj_Zg5-FCvp_-I,24198
+ neuro_sam/brightest_path_lib/algorithm/waypointastar.py,sha256=5XgrlRowyWVtwXCCTOCzFQ98ARFMb5FzcKGTVxUuPI0,17900
+ neuro_sam/brightest_path_lib/algorithm/waypointastar_speedup.py,sha256=t8LySe-aDeGSRdITQqkMOU9RLgyAczBW7MQz0AoPDtg,44666
+ neuro_sam/brightest_path_lib/cost/__init__.py,sha256=Fd92fQaAEu8BMxIJW6whLLTarxqI0WsA4-j0ZCbOsXk,223
+ neuro_sam/brightest_path_lib/cost/cost.py,sha256=huHU_XMCIzgPMF3IZfs4T6b2cHkAjlW55xqswkTDLSc,788
+ neuro_sam/brightest_path_lib/cost/reciprocal.py,sha256=lzGIUBBAJU0ao94hC6RW7XyLnhHknujD32qj5UW-CDQ,3203
+ neuro_sam/brightest_path_lib/cost/reciprocal_transonic.py,sha256=lGAD1GbiYrq1BuuDrolsWsathz0ppp4C1eMqtKcc3-Q,2749
+ neuro_sam/brightest_path_lib/heuristic/__init__.py,sha256=Fj7lnYrEGwaj-PrmFr_6XTs-uMEt32TthAVPamqlXnU,66
+ neuro_sam/brightest_path_lib/heuristic/euclidean.py,sha256=i9YCgajbOvRAagSJcNcOIYiqZXRwFp8l5F6EDDGP6uE,3680
+ neuro_sam/brightest_path_lib/heuristic/heuristic.py,sha256=mUli5G_HI-I79zoUHx593GcHBFJeySNEw3zlDku1bTk,936
+ neuro_sam/brightest_path_lib/image/__init__.py,sha256=uFceEO4Y2eei_jhIlvKE3bGDRnwudbeOEzEhF2l-lVA,29
+ neuro_sam/brightest_path_lib/image/stats.py,sha256=4KHxApJQ-Xqfer2Oeq8g9K-beXSwHe-9xkCRH4NTDOg,5951
+ neuro_sam/brightest_path_lib/input/__init__.py,sha256=GneTu78tyNUnzPznl7zW43IIUpfsGIH5JMou9x18qaQ,67
+ neuro_sam/brightest_path_lib/input/inputs.py,sha256=GqF5jnGs4eg3TC3083tpHquK434a_O3ppu6bk4iKrGs,290
+ neuro_sam/brightest_path_lib/node/__init__.py,sha256=ADsvx8Okxzg-mPwBGFmK94oCjvHJwJvi7mgHzE8E5P0,72
+ neuro_sam/brightest_path_lib/node/bidirectional_node.py,sha256=8goerVtkk8wXxD9HdwIU2kBA5UQt22yv7L-0eJPywOQ,9264
+ neuro_sam/brightest_path_lib/node/node.py,sha256=Pfq8Swm3XWu6gErJlXzN7JyJ9J4bFsvJH1PDoELww8I,3779
+ neuro_sam/brightest_path_lib/visualization/__init__.py,sha256=WZ1QT3JqhHquh3dxwXKeQ5vmfuBj70sNI0_JBzrNKNY,195
+ neuro_sam/brightest_path_lib/visualization/flythrough.py,sha256=bs-rqgwEXUZOYUkN6FOZAl2FkcRus7Ma1jWtZ1VRA1Q,5087
+ neuro_sam/brightest_path_lib/visualization/flythrough_all.py,sha256=3MIYTl2q3oVszuaLOl-QS4zQISsCk1JlVcURCtc-pw0,17886
+ neuro_sam/brightest_path_lib/visualization/tube_data.py,sha256=b0YfKUWzk4FT6_mUdLexIp-s4ipLsiGDt20diweA4BM,15570
+ neuro_sam/brightest_path_lib/visualization/tube_flythrough.py,sha256=Lt5sLqvj20AiiXl8SXlt6vQlHXQXtB91-FNFGl-YmU8,9160
+ neuro_sam/napari_utils/anisotropic_scaling.py,sha256=VA6Sd9zEhIAhzjAGto2cOjE9mocNx9NFWXec2TQpvew,19266
+ neuro_sam/napari_utils/color_utils.py,sha256=Hf5R8f0rh7b9CY1VT72o3tLGfGnnjRREkX8iWsiiu7k,4243
+ neuro_sam/napari_utils/contrasting_color_system.py,sha256=a-lt_3zJLDL9YyIdWJhFDGMYzBb6yH85cV7BNCabbdI,6771
+ neuro_sam/napari_utils/main_widget.py,sha256=yahfPLwmhBt_hImpRykIObzfMwbVZvVJTEKKzMZ11bw,48588
+ neuro_sam/napari_utils/path_tracing_module.py,sha256=0mMAtrMmtgK_ujMzaWzIguYVDPr8nfzalaTAwgF3NaQ,44062
+ neuro_sam/napari_utils/punet_widget.py,sha256=GzY8fGzx5xvitkIiD24TjWktW1ex4F7ujYjCu3Rd6xA,17058
+ neuro_sam/napari_utils/segmentation_model.py,sha256=uAnNvMdZVrtlaST36TpCdyoYiaNtVsQLeaPcroD-aT0,33939
+ neuro_sam/napari_utils/segmentation_module.py,sha256=MKp53jn3UM5fJurC9kAV6BcHpTJWjsmbno8bbSDMhHc,28988
+ neuro_sam/napari_utils/visualization_module.py,sha256=JtZlBoKlfIwVLa2Sqg7b2KTr07fNlAcwR0M7fHsn2oM,24723
+ neuro_sam/punet/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ neuro_sam/punet/deepd3_model.py,sha256=nGVEqzCPz_E4cFA6QmknW2CffDcjxH7VsdYAyTdAtY0,7509
+ neuro_sam/punet/prob_unet_deepd3.py,sha256=qZcKo4dZeEemqD7PxLHZu5YB_h3Q0--W3vTRhXjYxS0,14695
+ neuro_sam/punet/prob_unet_with_tversky.py,sha256=D-Ur5AfGsVl2dH35jbZL8BaHdJHzbTrHLsS-cagBzoQ,12843
+ neuro_sam/punet/punet_inference.py,sha256=U3ne-sP1y732PISFYlV50cPbXN6eEREQue1Jf6w1Io8,8032
+ neuro_sam/punet/run_inference.py,sha256=c9ATKWJvhOzNEaww_sUCI5fFS1q0bQ4GYUwNUqxWcwA,5312
+ neuro_sam/punet/unet_blocks.py,sha256=xc-nZql_CSsyEeCxKIY-WIhiXG1KyRiCSIESztGJKEs,3170
+ neuro_sam/punet/utils.py,sha256=ibwcpkqqZ3_3Afz2VYxzplz8_8FWQ5qYQqjJiKS8hIo,1786
+ neuro_sam-0.1.0.dist-info/licenses/LICENSE,sha256=akmTIN8IuZn3Y7UK_8qVQnyKDWSDcVUwB8RPGNXCojw,1068
+ sam2/__init__.py,sha256=uHyh6VzVS4F2box0rPDpN5UmOVKeQNK0CIaTKG9JQZ4,395
+ sam2/automatic_mask_generator.py,sha256=Zt8mbb4UQSMFrjOY8OwbshswOpMhaxAtdn5sTuXUw9c,18461
+ sam2/benchmark.py,sha256=m3o1BriIQuwJAx-3zQ_B0_7YLhN84G28oQSV5sGA3ak,2811
+ sam2/build_sam.py,sha256=hW1k1x5EQHQBKXtVGIT7TNGrqAgt4P9fxg-sPdQvCU0,6405
+ sam2/sam2.1_hiera_b+.yaml,sha256=N9bFawen-NCLqqMUMVxg3Dqr4u3GbNkrrG0e1QA454g,3650
+ sam2/sam2.1_hiera_l.yaml,sha256=Hb1stt_r6vWIxwBu4iLG77-pBJp61HKjzfsvXZGegQc,3798
+ sam2/sam2.1_hiera_s.yaml,sha256=9esKA_9iFSgbSn5Q6VVenPBPO6m3eSSf-h-plULkPk4,3760
+ sam2/sam2.1_hiera_t.yaml,sha256=-TLqwcYkHpEAMbLwAKgc2fio1IluInerX_tyHzeLGI0,3855
+ sam2/sam2_hiera_b+.yaml,sha256=ZE0amqp-FBg4AiRt7k9DYn4GfXfdifH0DYNlfbGNA4s,3548
+ sam2/sam2_hiera_l.yaml,sha256=DunyA3_ZhIkhK7C3ilbzOsgusXfPXlLIi-NxTLhO6iY,3696
+ sam2/sam2_hiera_s.yaml,sha256=D8DFUj7pnY2tLv6UpAsUInpHKxUlas_1axCJSbTe_Jk,3658
+ sam2/sam2_hiera_t.yaml,sha256=Mzmcc80Tu8PQwc6oQ_ovwlX9sgE5_gfwQnQ-BGrEa2s,3753
+ sam2/sam2_image_predictor.py,sha256=ti8Ik23gi5FyQUVxRqcWWz6WEb8qaIgyQE1V-7U3_94,20295
+ sam2/sam2_video_predictor.py,sha256=kGYJyUxkBJDQBvODdMeHKai7K8Z6sPQXYb9WNIMAULQ,58836
+ sam2/sam2_video_predictor_legacy.py,sha256=4MBUES8hv6Y2IPICbIuy4PTihLE40JQmoI058uiCw_I,58777
+ sam2/configs/train.yaml,sha256=MYgDHFACwrBODNKiJa5CsdF77USu8vP4_vbsD9lYGb0,10923
+ sam2/configs/sam2/sam2_hiera_b+.yaml,sha256=ZE0amqp-FBg4AiRt7k9DYn4GfXfdifH0DYNlfbGNA4s,3548
+ sam2/configs/sam2/sam2_hiera_l.yaml,sha256=DunyA3_ZhIkhK7C3ilbzOsgusXfPXlLIi-NxTLhO6iY,3696
+ sam2/configs/sam2/sam2_hiera_s.yaml,sha256=D8DFUj7pnY2tLv6UpAsUInpHKxUlas_1axCJSbTe_Jk,3658
+ sam2/configs/sam2/sam2_hiera_t.yaml,sha256=Mzmcc80Tu8PQwc6oQ_ovwlX9sgE5_gfwQnQ-BGrEa2s,3753
+ sam2/configs/sam2.1/sam2.1_hiera_b+.yaml,sha256=N9bFawen-NCLqqMUMVxg3Dqr4u3GbNkrrG0e1QA454g,3650
+ sam2/configs/sam2.1/sam2.1_hiera_l.yaml,sha256=Hb1stt_r6vWIxwBu4iLG77-pBJp61HKjzfsvXZGegQc,3798
+ sam2/configs/sam2.1/sam2.1_hiera_s.yaml,sha256=Dza5HobljQbIfkKZcWYhJGi4i5i2Dk2BbV5NTQiLb1U,3761
+ sam2/configs/sam2.1/sam2.1_hiera_t.yaml,sha256=-TLqwcYkHpEAMbLwAKgc2fio1IluInerX_tyHzeLGI0,3855
+ sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml,sha256=2WkZ4VdScRbDqAxuyWEVG9qY5euYiuTdhb5ufvvStR8,11256
+ sam2/modeling/__init__.py,sha256=NL2AacVHZOe41zp4kF2-ZGcUCi9zFwh1Eo9spNjN0Ko,197
+ sam2/modeling/memory_attention.py,sha256=BzWMt_WOw3iOiP9OZBX4qdYoo5Y1KdbiyRVJSlNHyOI,5509
+ sam2/modeling/memory_encoder.py,sha256=c_cImuX9rN_Krz3socptn4TYnn3rnPl1iWIqnvF7pC8,5657
+ sam2/modeling/position_encoding.py,sha256=tRQEcYwNOPOBKTyOXgChXRKWUbfwmxFYAC2JdKMJZ7U,8980
+ sam2/modeling/sam2_base.py,sha256=yMZRXd-q1BB3o5L_oR1g_8iiyiCb-bxg1-LNei-92LE,47056
+ sam2/modeling/sam2_utils.py,sha256=41u_E7wuVEoCcs0_lTmvOKdSZ2rDzXRL4xzYIgr-6AQ,13173
+ sam2/modeling/backbones/__init__.py,sha256=NL2AacVHZOe41zp4kF2-ZGcUCi9zFwh1Eo9spNjN0Ko,197
+ sam2/modeling/backbones/hieradet.py,sha256=A3hVgcowTQRRrg33oI7gvx4dvmb60oUGbmuf_A2I1k8,10003
+ sam2/modeling/backbones/image_encoder.py,sha256=FuqtIyIgOG9RDKjfq4ZV5jaZl3FJpNr5zpxvN05nd6k,4706
+ sam2/modeling/backbones/utils.py,sha256=xKBlfbKpK9oqf_EWzUAX9rHFmvZAgJPYh06wig30dq0,3007
+ sam2/modeling/sam/__init__.py,sha256=NL2AacVHZOe41zp4kF2-ZGcUCi9zFwh1Eo9spNjN0Ko,197
+ sam2/modeling/sam/mask_decoder.py,sha256=yjUjxYNlV0-t3xv7VPN05LS-ugUScYXqKxfSK5FuCZo,12657
+ sam2/modeling/sam/prompt_encoder.py,sha256=SWXMtKRQSqTXJGsq1m_-rNJiZBXIaiYdwcZf_Yrh1A0,7541
+ sam2/modeling/sam/transformer.py,sha256=zaGQUjMed1GQzosRWe_LgopGMtkxpti9TUcQnxIXgvE,10761
+ sam2/utils/__init__.py,sha256=NL2AacVHZOe41zp4kF2-ZGcUCi9zFwh1Eo9spNjN0Ko,197
+ sam2/utils/amg.py,sha256=t7MwkOKvcuBNu4FcjzKv9BpO0av5Zo9itZ8b3WQMpdg,12842
+ sam2/utils/misc.py,sha256=AWAMAcFhzQedcQb7HU2oRc-RqjGrK87K-MsVG21tIKI,13090
+ sam2/utils/transforms.py,sha256=ujpk9GAMYvIJIGpt87QOP88TPtrjL61liDG7DCptEUY,4885
+ neuro_sam-0.1.0.dist-info/METADATA,sha256=qmJWJ17j0Qlm65saFelge2KlmpYQYdU48uq0CKykyJM,9509
+ neuro_sam-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ neuro_sam-0.1.0.dist-info/entry_points.txt,sha256=Mw5mxn4u8N-boA1NOR8vfbPh_JbSiKpiIuqJ-styL9M,52
+ neuro_sam-0.1.0.dist-info/top_level.txt,sha256=yPbWxFcw79sErTk8zohihUHMK9LL31i3bXir2MrS4OQ,15
+ neuro_sam-0.1.0.dist-info/RECORD,,
neuro_sam-0.1.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
neuro_sam-0.1.0.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+ [napari.manifest]
+ neuro-sam = neuro_sam:napari.yaml
neuro_sam-0.1.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Nipun Arora
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
neuro_sam-0.1.0.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+ neuro_sam
+ sam2