lbm_suite2p_python 2.0.0__tar.gz → 2.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. {lbm_suite2p_python-2.0.0/lbm_suite2p_python.egg-info → lbm_suite2p_python-2.0.1}/PKG-INFO +17 -22
  2. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/README.md +15 -12
  3. lbm_suite2p_python-2.0.1/lbm_suite2p_python/__init__.py +30 -0
  4. lbm_suite2p_python-2.0.1/lbm_suite2p_python/merging.py +282 -0
  5. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/postprocessing.py +3 -2
  6. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/run_lsp.py +23 -17
  7. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/zplane.py +251 -41
  8. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1/lbm_suite2p_python.egg-info}/PKG-INFO +17 -22
  9. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python.egg-info/SOURCES.txt +0 -1
  10. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python.egg-info/requires.txt +1 -9
  11. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/pyproject.toml +137 -151
  12. lbm_suite2p_python-2.0.0/lbm_suite2p_python/VERSION +0 -1
  13. lbm_suite2p_python-2.0.0/lbm_suite2p_python/__init__.py +0 -36
  14. lbm_suite2p_python-2.0.0/lbm_suite2p_python/merging.py +0 -561
  15. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/LICENSE.md +0 -0
  16. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/MANIFEST.in +0 -0
  17. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/__main__.py +0 -0
  18. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/_benchmarking.py +0 -0
  19. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/default_ops.py +0 -0
  20. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/utils.py +0 -0
  21. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python/volume.py +0 -0
  22. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python.egg-info/dependency_links.txt +0 -0
  23. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python.egg-info/entry_points.txt +0 -0
  24. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/lbm_suite2p_python.egg-info/top_level.txt +0 -0
  25. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/setup.cfg +0 -0
  26. {lbm_suite2p_python-2.0.0 → lbm_suite2p_python-2.0.1}/tests/test_run_volume.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lbm_suite2p_python
3
- Version: 2.0.0
3
+ Version: 2.0.1
4
4
  Summary: Light Beads Microscopy Pipeline using Suite2p
5
5
  License-Expression: BSD-3-Clause
6
6
  Project-URL: homepage, https://github.com/MillerBrainObservatory/LBM-Suite2p-Python
@@ -11,15 +11,7 @@ Classifier: Programming Language :: Python :: 3 :: Only
11
11
  Requires-Python: <3.12.10,>=3.12.7
12
12
  Description-Content-Type: text/markdown
13
13
  License-File: LICENSE.md
14
- Requires-Dist: mkl_fft
15
- Requires-Dist: mbo_utilities[gui,suite3d]>=1.0.7
16
- Requires-Dist: suite2p_mbo
17
- Requires-Dist: tqdm
18
- Requires-Dist: pandas
19
- Requires-Dist: matplotlib
20
- Requires-Dist: dask>=2025.9.1
21
- Requires-Dist: zarr==3.1.3
22
- Requires-Dist: seaborn>=0.13.2
14
+ Requires-Dist: mbo_utilities>=2.0.2
23
15
  Provides-Extra: cpsam
24
16
  Requires-Dist: cellpose==4.0.6; extra == "cpsam"
25
17
  Requires-Dist: pytorch; extra == "cpsam"
@@ -38,11 +30,11 @@ Dynamic: license-file
38
30
 
39
31
  [![Documentation](https://img.shields.io/badge/Documentation-blue?style=for-the-badge&logo=readthedocs&logoColor=white)](https://millerbrainobservatory.github.io/LBM-Suite2p-Python/index.html)
40
32
 
41
- This package is still in a *beta* stage of development.
33
+ This package is still in a *late-beta* stage of development.
42
34
 
43
- A pipeline for processing 2-photon Light Beads Microscopy (LBM) datasets.
35
+ A pipeline for processing volumetric 2-photon Light Beads Microscopy (LBM) datasets.
44
36
 
45
- This pipeline uses the following software:
37
+ This pipeline uses the following open-source software:
46
38
 
47
39
  - [suite2p](https://github.com/MouseLand/suite2p)
48
40
  - [cellpose](https://github.com/MouseLand/cellpose)
@@ -57,30 +49,33 @@ This pipeline uses the following software:
57
49
 
58
50
  ## Installation
59
51
 
60
- See the [installation documentation](https://millerbrainobservatory.github.io/LBM-Suite2p-Python/install.html) for more details.
52
+ This pipeline is installable with `pip`:
61
53
 
62
- This pipeline is fully installable with `pip`.
54
+ ```bash
55
+ pip install lbm_suite2p_python
56
+ # with uv: uv pip install lbm_suite2p_python
57
+ ```
63
58
 
64
- `conda` can still be used for the virual environment, but be mindful to only install packages with `conda install` when absolutely necessary.
59
+ We highly encourage the use of a virtual environment. If you are unfamiliar with virtual environments, see our documentation [here](https://millerbrainobservatory.github.io/mbo_utilities/venvs.html).
65
60
 
66
- We recommend cloning the repository, until a more established workflow is available to upload to PyPi.
61
+ You may also use git to clone and install locally for updates not yet released to pypi:
67
62
 
68
- ```
63
+ ```bash
69
64
  git clone https://github.com/MillerBrainObservatory/LBM-Suite2p-Python.git
70
- git clone https://github.com/Suite3D # TODO
71
65
  cd LBM-Suite2p-Python
72
66
 
73
67
  # make sure your virtual environment is active
74
- pip install ".[all]" # optional, contains ".[gui, notebook]"
68
+ pip install "."
75
69
  ```
76
70
 
77
71
  ## Issues
72
+
78
73
  Widgets may throw "Invalid Rect" errors. This can be safely ignored until it is [resolved](https://github.com/pygfx/wgpu-py/issues/716#issuecomment-2880853089).
79
74
 
80
75
  ---
81
76
 
82
77
  ## Acknowledgements
83
78
 
84
- Thank you to the developers of [scanreader](https://github.com/atlab/scanreader), which provides a clean interface to ScanImage metadata using only tifffile and numpy.
79
+ This pipeline is mostly a volumetric wrapper around [suite2p](https://github.com/MouseLand/suite2p), [cellpose](https://github.com/MouseLand/cellpose) and [Suite3D](https://github.com/alihaydaroglu/suite3d). We thank the contributors to those projects.
85
80
 
86
- We vendor this repository because it is not published to an indexable Python package registry like PyPI.
81
+ Thank you to the developers of [scanreader](https://github.com/atlab/scanreader), which provides a clean interface to ScanImage metadata using only tifffile and numpy.
@@ -4,11 +4,11 @@
4
4
 
5
5
  [![Documentation](https://img.shields.io/badge/Documentation-blue?style=for-the-badge&logo=readthedocs&logoColor=white)](https://millerbrainobservatory.github.io/LBM-Suite2p-Python/index.html)
6
6
 
7
- This package is still in a *beta* stage of development.
7
+ This package is still in a *late-beta* stage of development.
8
8
 
9
- A pipeline for processing 2-photon Light Beads Microscopy (LBM) datasets.
9
+ A pipeline for processing volumetric 2-photon Light Beads Microscopy (LBM) datasets.
10
10
 
11
- This pipeline uses the following software:
11
+ This pipeline uses the following open-source software:
12
12
 
13
13
  - [suite2p](https://github.com/MouseLand/suite2p)
14
14
  - [cellpose](https://github.com/MouseLand/cellpose)
@@ -23,30 +23,33 @@ This pipeline uses the following software:
23
23
 
24
24
  ## Installation
25
25
 
26
- See the [installation documentation](https://millerbrainobservatory.github.io/LBM-Suite2p-Python/install.html) for more details.
26
+ This pipeline is installable with `pip`:
27
27
 
28
- This pipeline is fully installable with `pip`.
28
+ ```bash
29
+ pip install lbm_suite2p_python
30
+ # with uv: uv pip install lbm_suite2p_python
31
+ ```
29
32
 
30
- `conda` can still be used for the virual environment, but be mindful to only install packages with `conda install` when absolutely necessary.
33
+ We highly encourage the use of a virtual environment. If you are unfamiliar with virtual environments, see our documentation [here](https://millerbrainobservatory.github.io/mbo_utilities/venvs.html).
31
34
 
32
- We recommend cloning the repository, until a more established workflow is available to upload to PyPi.
35
+ You may also use git to clone and install locally for updates not yet released to pypi:
33
36
 
34
- ```
37
+ ```bash
35
38
  git clone https://github.com/MillerBrainObservatory/LBM-Suite2p-Python.git
36
- git clone https://github.com/Suite3D # TODO
37
39
  cd LBM-Suite2p-Python
38
40
 
39
41
  # make sure your virtual environment is active
40
- pip install ".[all]" # optional, contains ".[gui, notebook]"
42
+ pip install "."
41
43
  ```
42
44
 
43
45
  ## Issues
46
+
44
47
  Widgets may throw "Invalid Rect" errors. This can be safely ignored until it is [resolved](https://github.com/pygfx/wgpu-py/issues/716#issuecomment-2880853089).
45
48
 
46
49
  ---
47
50
 
48
51
  ## Acknowledgements
49
52
 
50
- Thank you to the developers of [scanreader](https://github.com/atlab/scanreader), which provides a clean interface to ScanImage metadata using only tifffile and numpy.
53
+ This pipeline is mostly a volumetric wrapper around [suite2p](https://github.com/MouseLand/suite2p), [cellpose](https://github.com/MouseLand/cellpose) and [Suite3D](https://github.com/alihaydaroglu/suite3d). We thank the contributors to those projects.
51
54
 
52
- We vendor this repository because it is not published to an indexable Python package registry like PyPI.
55
+ Thank you to the developers of [scanreader](https://github.com/atlab/scanreader), which provides a clean interface to ScanImage metadata using only tifffile and numpy.
@@ -0,0 +1,30 @@
1
"""Public API for lbm_suite2p_python."""
from importlib.metadata import PackageNotFoundError, version

from lbm_suite2p_python.default_ops import default_ops
from lbm_suite2p_python.run_lsp import *
from lbm_suite2p_python.utils import *
from lbm_suite2p_python.volume import *
from lbm_suite2p_python.zplane import *

# Names re-exported as the package's public surface.
__all__ = [
    "run_volume",
    "run_plane",
    "plot_traces",
    "plot_masks",
    "plot_rastermap",
    "plot_traces_noise",
    "plot_volume_signal",
    "plot_projection",
    "plot_execution_time",
    "plot_noise_distribution",
    "dff_rolling_percentile",
    "load_ops",
    "load_planar_results",
    "default_ops",
]

try:
    __version__ = version("lbm_suite2p_python")
except PackageNotFoundError:
    # Editable/source checkouts without installed metadata.
    __version__ = "0.0.0"
@@ -0,0 +1,282 @@
1
+ from collections import defaultdict
2
+ from pathlib import Path
3
+
4
+ import numpy as np
5
+ from tqdm.auto import tqdm
6
+
7
+ from lbm_suite2p_python import plot_zplane_figures
8
+ from mbo_utilities.lazy_array import Suite2pArray
9
+
10
+
11
def group_plane_rois(input_dir):
    """Group per-ROI suite2p output folders by z-plane.

    Scans *input_dir* for subdirectories named like ``plane01_roi1`` and
    returns a mapping of plane tag to the list of matching directories,
    e.g. ``{"plane01": [Path("plane01_roi1"), Path("plane01_roi2")]}``.
    ``.zarr`` stores and non-directories are ignored.
    """
    root = Path(input_dir)
    grouped = defaultdict(list)

    for entry in root.iterdir():
        # Only plain directories; zarr stores are directories too, skip them.
        if not entry.is_dir() or entry.name.endswith(".zarr"):
            continue
        stem = entry.stem
        if not (stem.startswith("plane") and "_roi" in stem):
            continue
        pieces = stem.split("_")
        # Strictly "plane<NN>_roi<M>" — anything else is ignored.
        if len(pieces) == 2 and pieces[1].startswith("roi"):
            grouped[pieces[0]].append(entry)

    return grouped
28
+
29
+
30
def _merge_images(
    ops_list,
    keys_full=("refImg", "meanImg", "meanImgE"),
    keys_cropped=("max_proj", "Vcorr"),
):
    """Stitch per-ROI summary images into one side-by-side canvas.

    Full-FOV images (*keys_full*) are tiled horizontally, each ROI placed
    at its cumulative ``Lx`` offset. Cropped images (*keys_cropped*) are
    placed at their recorded ``yrange``/``xrange`` within that same
    combined canvas. A key is merged only when present in every ops dict;
    returns a dict of the merged images.
    """
    out = {}

    # Canvas spans the tallest ROI and the summed full widths.
    canvas_h = max(o["Ly"] for o in ops_list)
    canvas_w = sum(o["Lx"] for o in ops_list)

    # Full-FOV keys: each ROI occupies a full-Lx column slot.
    for key in keys_full:
        if any(key not in o for o in ops_list):
            continue
        canvas = np.zeros((canvas_h, canvas_w), dtype=ops_list[0][key].dtype)
        x0 = 0
        for o in ops_list:
            img = o[key]
            img_h, img_w = img.shape
            slot_w = o["Lx"]
            # Zero-pad / crop the image into its slot before insertion.
            buf = np.zeros((canvas_h, slot_w), dtype=img.dtype)
            use_h = min(img_h, canvas_h)
            use_w = min(img_w, slot_w)
            buf[:use_h, :use_w] = img[:use_h, :use_w]
            canvas[:, x0:x0 + slot_w] = buf
            x0 += slot_w
        out[key] = canvas

    # Cropped keys: honor each ROI's yrange/xrange inside its slot.
    for key in keys_cropped:
        if any(key not in o for o in ops_list):
            continue
        canvas = np.zeros((canvas_h, canvas_w), dtype=ops_list[0][key].dtype)
        x0 = 0
        for o in ops_list:
            img = o[key]
            img_h, img_w = img.shape
            y_lo, y_hi = o.get("yrange", [0, canvas_h])
            x_lo, x_hi = o.get("xrange", [0, o["Lx"]])
            slot_h = y_hi - y_lo
            slot_w = x_hi - x_lo

            buf = np.zeros((slot_h, slot_w), dtype=img.dtype)
            use_h = min(img_h, slot_h)
            use_w = min(img_w, slot_w)
            buf[:use_h, :use_w] = img[:use_h, :use_w]

            # Shift the x placement by this ROI's cumulative offset.
            col = x0 + x_lo
            canvas[y_lo:y_lo + slot_h, col:col + slot_w] = buf
            x0 += o["Lx"]
        out[key] = canvas

    return out
87
+
88
def merge_mrois(input_dir, output_dir, overwrite=True):
    """
    Merge Suite2p outputs from multiple ROIs into per-plane outputs.

    For every ``planeXX_roiY`` folder in *input_dir*, the per-ROI
    ``ops``/``stat``/``iscell``/trace arrays and registration binaries are
    stitched horizontally (ROIs side by side) and written to
    ``output_dir/planeXX``. Missing files are skipped gracefully.

    Parameters
    ----------
    input_dir : str or Path
        Directory containing ``planeXX_roiY`` suite2p output folders.
    output_dir : str or Path
        Destination for the merged ``planeXX`` folders (created if needed).
    overwrite : bool, optional
        If False, planes whose merged ``ops.npy`` already exists are skipped.
    """
    input_dir = Path(input_dir)
    output_dir = Path(output_dir)
    # parents=True: don't fail when the destination's parent doesn't exist yet.
    output_dir.mkdir(parents=True, exist_ok=True)

    grouped = group_plane_rois(input_dir)

    for plane, dirs in tqdm(sorted(grouped.items()), desc="Merging mROIs", unit="plane"):
        out_dir = output_dir / plane
        out_ops = out_dir / "ops.npy"

        if out_ops.exists() and not overwrite:
            print(f"Skipping {plane}, merged outputs already exist")
            continue

        out_dir.mkdir(parents=True, exist_ok=True)

        # --- load per-ROI results (every file is optional except ops.npy)
        ops_list, stat_list, iscell_list = [], [], []
        F_list, Fneu_list, spks_list = [], [], []
        bin_paths = []
        for d in sorted(dirs):
            ops_path = d / "ops.npy"
            if not ops_path.exists():
                print(f"Skipping {d}, no ops.npy")
                continue
            ops = np.load(ops_path, allow_pickle=True).item()
            ops_list.append(ops)

            if (d / "stat.npy").exists():
                stat_list.append(np.load(d / "stat.npy", allow_pickle=True))
            if (d / "iscell.npy").exists():
                iscell_list.append(np.load(d / "iscell.npy", allow_pickle=True))
            if (d / "F.npy").exists():
                F_list.append(np.load(d / "F.npy"))
            if (d / "Fneu.npy").exists():
                Fneu_list.append(np.load(d / "Fneu.npy"))
            if (d / "spks.npy").exists():
                spks_list.append(np.load(d / "spks.npy"))

            # Prefer the raw binary when present, fall back to registered data.
            if (d / "data_raw.bin").exists():
                bin_paths.append(d / "data_raw.bin")
            elif (d / "data.bin").exists():
                bin_paths.append(d / "data.bin")

        if not ops_list:
            print(f"No valid ROIs found for {plane}, skipping merge")
            continue

        # --- dimensions
        # Use each ROI's full frame width (Lx) for offsets so mask coordinates
        # line up with the hstacked binary and with the images tiled by
        # _merge_images (both place each ROI at cumulative-Lx offsets).
        # Deriving offsets from the cropped ``xrange`` widths misaligns masks
        # whenever registration cropped any ROI horizontally.
        Ly = max(ops["Ly"] for ops in ops_list)  # matches _merge_images canvas height
        widths = [int(ops["Lx"]) for ops in ops_list]
        total_Lx = int(sum(widths))

        # --- merge stat + traces: shift x coordinates by the ROI's offset
        stat = None
        if stat_list:
            for i, st in enumerate(stat_list):
                xoff = int(sum(widths[:i]))
                for s in st:
                    s["xpix"] = np.asarray(s["xpix"], int) + xoff
                    s["ypix"] = np.asarray(s["ypix"], int)
                    # med is (y, x); only the x component shifts.
                    s["med"] = [float(s["med"][0]), float(s["med"][1]) + xoff]
                    if "lam" in s:
                        s["lam"] = np.asarray(s["lam"], float).ravel()
                    if "ipix_neuropil" in s:
                        # NOTE(review): column-major flat index (y + x * Ly);
                        # confirm this matches suite2p's neuropil indexing
                        # (row-major would be y * total_Lx + x).
                        ypix, xpix = s["ypix"], s["xpix"]
                        s["ipix_neuropil"] = ypix + xpix * Ly
            stat = np.concatenate(stat_list)

        iscell = np.concatenate(iscell_list, 0) if iscell_list else None
        F = np.concatenate(F_list, 0) if F_list else None
        Fneu = np.concatenate(Fneu_list, 0) if Fneu_list else None
        spks = np.concatenate(spks_list, 0) if spks_list else None

        # --- merge binaries frame by frame, ROIs side by side.
        merged_bin = out_dir / "data.bin"
        if bin_paths:
            arrays = [Suite2pArray(p) for p in bin_paths]
            nframes = min(arr.nframes for arr in arrays)  # truncate to shortest
            dtype = arrays[0].dtype
            with open(merged_bin, "wb") as f:
                for i in range(nframes):
                    frames = [arr[i] for arr in arrays]
                    f.write(np.hstack(frames).astype(dtype).tobytes())
            for arr in arrays:
                arr.close()
        else:
            merged_bin = None

        # --- merged ops header describing the combined canvas
        merged_ops = dict(ops_list[0])
        merged_ops.update({
            "Ly": Ly,
            "Lx": total_Lx,
            "yrange": [0, Ly],
            "xrange": [0, total_Lx],
            "ops_path": str(out_ops.resolve()),
            "save_path": str(out_dir.resolve()),
            "nrois": len(ops_list),
        })
        if merged_bin:
            merged_ops["reg_file"] = str(merged_bin.resolve())

        # Stitched summary images (refImg/meanImg/max_proj/Vcorr/...).
        merged_ops.update(_merge_images(ops_list))

        # Per-frame registration metadata is kept only when identical across ROIs.
        for key in ["yoff", "xoff", "corrXY", "badframes"]:
            arrays = [ops[key] for ops in ops_list if key in ops]
            if arrays and all(np.array_equal(a, arrays[0]) for a in arrays[1:]):
                merged_ops[key] = arrays[0]

        np.save(out_ops, merged_ops)
        if stat is not None:
            np.save(out_dir / "stat.npy", stat)
        if iscell is not None:
            np.save(out_dir / "iscell.npy", iscell)
        if F is not None:
            np.save(out_dir / "F.npy", F)
        if Fneu is not None:
            np.save(out_dir / "Fneu.npy", Fneu)
        if spks is not None:
            np.save(out_dir / "spks.npy", spks)

        # Best effort: figures are non-essential and must never fail the merge.
        try:
            plot_zplane_figures(out_dir, run_rastermap=False)
        except Exception:
            pass
216
+
217
+
218
def merge_zarr_rois(input_dir, output_dir=None, overwrite=True):
    """
    Concatenate roi1 + roi2 .zarr stores for each plane into a single planeXX.zarr.

    Parameters
    ----------
    input_dir : Path or str
        Directory containing planeXX_roi1, planeXX_roi2 subfolders with ops.npy + data.zarr.
    output_dir : Path or str, optional
        Where to write merged planeXX.zarr. Defaults to ``<input_dir>_merged``
        created next to *input_dir*.
    overwrite : bool
        If True, existing outputs are replaced.

    Returns
    -------
    None
    """
    import shutil
    import dask.array as da

    z_merged = None
    input_dir = Path(input_dir)
    output_dir = (
        Path(output_dir)
        if output_dir
        else input_dir.parent / (input_dir.name + "_merged")
    )
    output_dir.mkdir(parents=True, exist_ok=True)

    # NOTE(review): roi1/roi2 dirs are paired by sort order; this assumes the
    # same set of planes exists for both ROIs — verify for partial runs.
    roi1_dirs = sorted(input_dir.glob("*plane*_roi1*"))
    roi2_dirs = sorted(input_dir.glob("*plane*_roi2*"))
    if not roi1_dirs or not roi2_dirs:
        print("No roi1 or roi2 in input dir")
        return None
    assert len(roi1_dirs) == len(roi2_dirs), "Mismatched ROI dirs"

    for roi1, roi2 in zip(roi1_dirs, roi2_dirs):
        zplane = roi1.stem.split("_")[0]  # "plane01"
        out_path = output_dir / f"{zplane}.zarr"
        if out_path.exists():
            if overwrite:
                shutil.rmtree(out_path)
            else:
                print(f"Skipping {zplane}, {out_path} exists")
                continue

        z1 = da.from_zarr(roi1)
        z2 = da.from_zarr(roi2)

        # Sanity check: frame count and height must match before stitching widths.
        assert z1.shape[0] == z2.shape[0], "Frame count mismatch"
        assert z1.shape[1] == z2.shape[1], "Height mismatch"

        # Concatenate along width (axis=2).
        z_merged = da.concatenate([z1, z2], axis=2)
        z_merged.to_zarr(out_path, overwrite=overwrite)

    # Bug fix: ``if z_merged:`` called bool() on a dask array, which raises
    # "truth value is ambiguous" for non-scalar arrays; test identity instead.
    if z_merged is not None:
        print(f"{z_merged}")

    return None
277
+
278
+
279
if __name__ == "__main__":
    # Ad-hoc entry point for local testing; adjust the path for your machine.
    # (Removed leftover debug statement ``x = 2``.)
    fpath = Path(r"D:\W2_DATA\kbarber\07_27_2025\mk355\raw\anatomical_3_roi")
    merge_mrois(fpath, fpath.parent / "anatomical_3_merged")
@@ -6,7 +6,6 @@ from scipy.ndimage import percentile_filter
6
6
  from scipy.stats import norm
7
7
 
8
8
 
9
- import numpy as np
10
9
 
11
10
  def _normalize_iscell(iscell):
12
11
  """Ensure iscell is 1D boolean array."""
@@ -197,7 +196,7 @@ def ops_to_json(ops: dict | str | Path, outpath=None, indent=2):
197
196
  return outpath
198
197
 
199
198
 
200
- def normalize_traces(F, mode="per_neuron"):
199
+ def normalize_traces(F, mode="percentile"):
201
200
  """
202
201
  Normalize fluorescence traces F to [0, 1] range.
203
202
  Parameters
@@ -429,3 +428,5 @@ def load_ops(ops_input: str | Path | list[str | Path]) -> dict:
429
428
  return ops_input
430
429
  print("Warning: No valid ops file provided, returning empty dict.")
431
430
  return {}
431
+
432
+
@@ -12,17 +12,17 @@ import numpy as np
12
12
 
13
13
  import suite2p
14
14
  from suite2p.io.binary import BinaryFile
15
- from lbm_suite2p_python.merging import remake_plane_figures
16
15
  from lbm_suite2p_python.postprocessing import (
17
16
  ops_to_json,
18
17
  load_planar_results,
19
18
  load_ops,
20
- filter_by_area
21
19
  )
22
20
  from mbo_utilities.log import get as get_logger
23
- import mbo_utilities as mbo # noqa
24
21
 
25
- from lbm_suite2p_python.zplane import save_pc_panels_and_metrics
22
+ from lbm_suite2p_python.zplane import (
23
+ save_pc_panels_and_metrics,
24
+ plot_zplane_figures
25
+ )
26
26
 
27
27
  logger = get_logger("run_lsp")
28
28
 
@@ -31,7 +31,6 @@ from lbm_suite2p_python.volume import (
31
31
  plot_volume_signal,
32
32
  plot_volume_neuron_counts,
33
33
  get_volume_stats,
34
- plot_execution_time,
35
34
  )
36
35
  from mbo_utilities.file_io import get_plane_from_filename
37
36
 
@@ -90,7 +89,7 @@ def run_volume(
90
89
  ops: dict | str | Path = None,
91
90
  keep_reg: bool = True,
92
91
  keep_raw: bool = False,
93
- force_reg: bool = True,
92
+ force_reg: bool = False,
94
93
  force_detect: bool = False,
95
94
  dff_window_size: int = 500,
96
95
  dff_percentile: int = 20,
@@ -162,6 +161,7 @@ def run_volume(
162
161
  - Traces animation over time and neurons
163
162
  - Optional rastermap clustering results
164
163
  """
164
+ from mbo_utilities.file_io import get_files, get_plane_from_filename
165
165
  start = time.time()
166
166
  if save_path is None:
167
167
  save_path = Path(input_files[0]).parent
@@ -173,8 +173,8 @@ def run_volume(
173
173
  for z, file in enumerate(input_files):
174
174
  tag = derive_tag_from_filename(Path(file).name)
175
175
  plane_num = get_plane_from_filename(tag, fallback=len(all_ops))
176
- subdir = f"plane{plane_num:02d}"
177
- plane_save_path = Path(save_path).joinpath(subdir)
176
+ # subdir = f"plane{tag:02d}"
177
+ plane_save_path = Path(save_path).joinpath(tag)
178
178
  plane_save_path.mkdir(exist_ok=True)
179
179
 
180
180
  start_file = time.time()
@@ -205,12 +205,16 @@ def run_volume(
205
205
 
206
206
  if "roi" in Path(input_files[0]).stem.lower():
207
207
  print("Detected mROI data, merging ROIs for each z-plane...")
208
- from .merging import merge_mrois, remake_plane_figures
208
+ from .merging import merge_mrois
209
209
  merged_savepath = save_path.joinpath("merged_mrois")
210
210
  merge_mrois(save_path, merged_savepath)
211
- all_ops = sorted(mbo.get_files(merged_savepath, "ops.npy", 2))
211
+ save_path = merged_savepath
212
212
 
213
- print(f"Planes found after merge: {len(all_ops)}")
213
+ all_ops = sorted(get_files(merged_savepath, "ops.npy", 2))
214
+ print(f"Planes found after merge: {len(all_ops)}")
215
+ else:
216
+ all_ops = sorted(get_files(save_path, "ops.npy", 2))
217
+ print(f"No mROI data detected, planes found: {len(all_ops)}")
214
218
 
215
219
  try:
216
220
  zstats_file = get_volume_stats(all_ops, overwrite=True)
@@ -220,7 +224,7 @@ def run_volume(
220
224
  zstats_file, os.path.join(save_path, "mean_volume_signal.png")
221
225
  )
222
226
  # todo: why is suite2p not saving timings to ops.npy?
223
- plot_execution_time(zstats_file, os.path.join(save_path, "execution_time.png"))
227
+ # plot_execution_time(zstats_file, os.path.join(save_path, "execution_time.png"))
224
228
 
225
229
  res_z = [
226
230
  load_planar_results(ops_path, z_plane=i)
@@ -429,6 +433,8 @@ def run_plane(
429
433
  >> output_ops = lsp.run_plane(input_files[0], save_path="D://data//outputs", keep_raw=True, keep_registered=True, force_reg=True, force_detect=True)
430
434
  """
431
435
  from mbo_utilities.array_types import MboRawArray
436
+ from mbo_utilities.lazy_array import imread, imwrite
437
+ from mbo_utilities.metadata import get_metadata
432
438
 
433
439
  if "debug" in kwargs:
434
440
  logger.setLevel(logging.DEBUG)
@@ -466,7 +472,7 @@ def run_plane(
466
472
  ops["diameter"]) > 1 and ops["aspect"] == 1.0:
467
473
  ops["aspect"] = ops["diameter"][0] / ops["diameter"][1] # noqa
468
474
 
469
- file = mbo.imread(input_path)
475
+ file = imread(input_path)
470
476
  if isinstance(file, MboRawArray):
471
477
  raise TypeError(
472
478
  "Input file appears to be a raw array. Please provide a planar input file."
@@ -474,7 +480,7 @@ def run_plane(
474
480
  if hasattr(file, "metadata"):
475
481
  metadata = file.metadata # noqa
476
482
  else:
477
- metadata = mbo.get_metadata(input_path)
483
+ metadata = get_metadata(input_path)
478
484
 
479
485
  if "plane" in ops:
480
486
  plane = ops["plane"]
@@ -484,7 +490,7 @@ def run_plane(
484
490
  ops["plane"] = plane
485
491
  else:
486
492
  # get the plane from the filename
487
- plane = mbo.get_plane_from_filename(input_path, ops.get("plane", None))
493
+ plane = get_plane_from_filename(input_path, ops.get("plane", None))
488
494
  ops["plane"] = plane
489
495
  metadata["plane"] = plane
490
496
 
@@ -530,7 +536,7 @@ def run_plane(
530
536
 
531
537
  if _should_write_bin(ops_file, force=kwargs.get("force_save", False)):
532
538
  md_combined = {**metadata, **ops}
533
- mbo.imwrite(file, plane_dir, ext=".bin", metadata=md_combined, register_z=False)
539
+ imwrite(file, plane_dir, ext=".bin", metadata=md_combined, register_z=False)
534
540
  else:
535
541
  print(
536
542
  f"Skipping data_raw.bin write, already exists and passes data validation checks."
@@ -600,7 +606,7 @@ def run_plane(
600
606
  save_pc_panels_and_metrics(ops_file, plane_dir / "pc_metrics")
601
607
 
602
608
  try:
603
- remake_plane_figures(
609
+ plot_zplane_figures(
604
610
  plane_dir,
605
611
  dff_percentile=dff_percentile,
606
612
  dff_window_size=dff_window_size,