waveorder-2.2.1-py3-none-any.whl → waveorder-3.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- waveorder/_version.py +16 -3
- waveorder/acq/__init__.py +0 -0
- waveorder/acq/acq_functions.py +166 -0
- waveorder/assets/HSV_legend.png +0 -0
- waveorder/assets/JCh_legend.png +0 -0
- waveorder/assets/waveorder_plugin_logo.png +0 -0
- waveorder/calib/Calibration.py +1512 -0
- waveorder/calib/Optimization.py +470 -0
- waveorder/calib/__init__.py +0 -0
- waveorder/calib/calibration_workers.py +464 -0
- waveorder/cli/apply_inverse_models.py +328 -0
- waveorder/cli/apply_inverse_transfer_function.py +379 -0
- waveorder/cli/compute_transfer_function.py +432 -0
- waveorder/cli/gui_widget.py +58 -0
- waveorder/cli/main.py +39 -0
- waveorder/cli/monitor.py +163 -0
- waveorder/cli/option_eat_all.py +47 -0
- waveorder/cli/parsing.py +122 -0
- waveorder/cli/printing.py +16 -0
- waveorder/cli/reconstruct.py +67 -0
- waveorder/cli/settings.py +187 -0
- waveorder/cli/utils.py +175 -0
- waveorder/filter.py +1 -2
- waveorder/focus.py +136 -25
- waveorder/io/__init__.py +0 -0
- waveorder/io/_reader.py +61 -0
- waveorder/io/core_functions.py +272 -0
- waveorder/io/metadata_reader.py +195 -0
- waveorder/io/utils.py +175 -0
- waveorder/io/visualization.py +160 -0
- waveorder/models/inplane_oriented_thick_pol3d_vector.py +3 -3
- waveorder/models/isotropic_fluorescent_thick_3d.py +92 -0
- waveorder/models/isotropic_fluorescent_thin_3d.py +331 -0
- waveorder/models/isotropic_thin_3d.py +73 -72
- waveorder/models/phase_thick_3d.py +103 -4
- waveorder/napari.yaml +36 -0
- waveorder/plugin/__init__.py +9 -0
- waveorder/plugin/gui.py +1094 -0
- waveorder/plugin/gui.ui +1440 -0
- waveorder/plugin/job_manager.py +42 -0
- waveorder/plugin/main_widget.py +1605 -0
- waveorder/plugin/tab_recon.py +3294 -0
- waveorder/scripts/__init__.py +0 -0
- waveorder/scripts/launch_napari.py +13 -0
- waveorder/scripts/repeat-cal-acq-rec.py +147 -0
- waveorder/scripts/repeat-calibration.py +31 -0
- waveorder/scripts/samples.py +85 -0
- waveorder/scripts/simulate_zarr_acq.py +204 -0
- waveorder/util.py +1 -1
- waveorder/visuals/napari_visuals.py +1 -1
- waveorder-3.0.0.dist-info/METADATA +350 -0
- waveorder-3.0.0.dist-info/RECORD +69 -0
- {waveorder-2.2.1.dist-info → waveorder-3.0.0.dist-info}/WHEEL +1 -1
- waveorder-3.0.0.dist-info/entry_points.txt +5 -0
- {waveorder-2.2.1.dist-info → waveorder-3.0.0.dist-info}/licenses/LICENSE +13 -1
- waveorder-2.2.1.dist-info/METADATA +0 -188
- waveorder-2.2.1.dist-info/RECORD +0 -27
- {waveorder-2.2.1.dist-info → waveorder-3.0.0.dist-info}/top_level.txt +0 -0
waveorder/scripts/repeat-cal-acq-rec.py
ADDED
@@ -0,0 +1,147 @@
+# This script can be modified to debug and test calibrations
+
+import random
+import time
+from contextlib import contextmanager
+
+import napari
+from pycromanager import Core
+
+from waveorder.plugin.main_widget import MainWidget
+
+SAVE_DIR = "."
+SWING = 0.05
+CAL_REPEATS = 3
+BKG_REPEATS = 3
+
+
+@contextmanager
+def stage_detour(app: MainWidget, dx: float, dy: float, wait=5):
+    """Context manager to temporarily move the stage to a new XY-position.
+
+    Parameters
+    ----------
+    app : MainWidget
+        waveorder main widget instance
+    dx : float
+        relative x to translate
+    dy : float
+        relative y to translate
+    wait : int, optional
+        time to wait for the stage to complete movement, by default 5
+
+    Yields
+    ------
+    MainWidget
+        waveorder main widget instance
+
+    Usage
+    -----
+    ```py
+    with stage_detour(app) as app:
+        pass  # do something at the new location
+    ```
+    """
+    xy_stage = app.mmc.getXYStageDevice()
+    # get the original position
+    ox = app.mmc.getXPosition(xy_stage)
+    oy = app.mmc.getYPosition(xy_stage)
+    # go to a translated position
+    # TODO: args are floored due to a pycromanager bug: https://github.com/micro-manager/pycro-manager/issues/67
+    app.mmc.setRelativeXYPosition(int(dx), int(dy))
+    time.sleep(wait)
+    try:
+        yield app
+    finally:
+        # go back to the original position
+        # TODO: args are floored due to a pycromanager bug: https://github.com/micro-manager/pycro-manager/issues/67
+        app.mmc.setXYPosition(int(ox), int(oy))
+        time.sleep(wait)
+
+
+def measure_fov(mmc: Core):
+    """Calculate the MM FOV in micrometers.
+
+    Parameters
+    ----------
+    mmc : Core
+        MMCore object via pycromanager (with CamelCase set to `True`)
+
+    Returns
+    -------
+    tuple[float, float]
+        FOV size (x, y)
+    """
+    pixel_size = float(mmc.getPixelSizeUm())
+    if pixel_size == 0:
+        float(
+            input(
+                "Pixel size is not calibrated. Please provide an estimate (in micrometers):"
+            )
+        )
+    fov_x = pixel_size * float(mmc.getImageWidth())
+    fov_y = pixel_size * float(mmc.getImageHeight())
+    return fov_x, fov_y
+
+
+def rand_shift(length: float):
+    """Randomly signed shift of a certain length.
+
+    Parameters
+    ----------
+    length : float
+        absolote length in micrometers
+
+    Returns
+    -------
+    float
+        +length or -length
+    """
+    sign = random.randint(0, 1) * 2 - 1
+    return sign * length
+
+
+def main():
+    viewer = napari.Viewer()
+    app = MainWidget(viewer)
+    viewer.window.add_dock_widget(app)
+    app.ui.qbutton_gui_mode.click()
+    app.calib_scheme = "5-State"
+    app.directory = SAVE_DIR
+    app.save_directory = SAVE_DIR
+
+    fov_x, fov_y = measure_fov(app.mmc)
+
+    input("Please center the target in the FOV and hit <Enter>")
+
+    for cal_repeat in range(CAL_REPEATS):
+        dx = rand_shift(fov_x)
+        dy = rand_shift(fov_y)
+        # run calibration
+        with stage_detour(app, dx, dy) as app:
+            print(f"Calibration repeat # {cal_repeat}")
+            app.swing = SWING
+
+            print(f"Calibrating with swing = {SWING}")
+            app.run_calibration()
+            time.sleep(90)
+
+        for bkg_repeat in range(BKG_REPEATS):
+            # capture background
+            with stage_detour(app, dx, dy) as app:
+                print(f">>> Background repeat # {bkg_repeat}")
+                app.last_calib_meta_file = app.calib.meta_file
+                app.capture_bg()
+                time.sleep(20)
+                app.ui.cb_bg_method.setCurrentIndex(
+                    1
+                )  # Set to "Measured" bg correction
+                app.enter_bg_correction()
+                app.save_name = f"cal-{cal_repeat}-bkg-{bkg_repeat}"
+                app.enter_acq_bg_path()
+                app.acq_ret_ori()
+                time.sleep(15)
+
+
+if __name__ == "__main__":
+    main()
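Two small caveats for readers of the script above, both visible in the diff itself: the `Usage` example in the `stage_detour` docstring calls `stage_detour(app)` even though the signature requires `dx` and `dy`, and `measure_fov` prompts for a pixel size when none is calibrated but never assigns the entered value. The intended call pattern is the one `main()` uses; a minimal sketch (values illustrative):

    dx, dy = rand_shift(fov_x), rand_shift(fov_y)
    with stage_detour(app, dx, dy) as app:
        app.run_calibration()  # runs at the shifted position; the stage is restored on exit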
waveorder/scripts/repeat-calibration.py
ADDED
@@ -0,0 +1,31 @@
+# This script can be modified to debug and test calibrations
+
+import time
+
+import napari
+
+from waveorder.plugin.main_widget import MainWidget
+
+SAVE_DIR = "./"
+SWINGS = [0.1, 0.03, 0.01, 0.005]
+REPEATS = 5
+
+
+def main():
+    viewer = napari.Viewer()
+    waveorder = MainWidget(viewer)
+    viewer.window.add_dock_widget(waveorder)
+    waveorder.ui.qbutton_connect_to_mm.click()
+    waveorder.calib_scheme = "5-State"
+
+    for repeat in range(REPEATS):
+        for swing in SWINGS:
+            print("Calibrating with swing = " + str(swing))
+            waveorder.swing = swing
+            waveorder.directory = SAVE_DIR
+            waveorder.run_calibration()
+            time.sleep(100)
+
+
+if __name__ == "__main__":
+    main()
waveorder/scripts/samples.py
ADDED
@@ -0,0 +1,85 @@
+import shutil
+from pathlib import Path
+from typing import Literal
+
+from iohub import open_ome_zarr
+from iohub.ngff import Plate
+from napari.utils.notifications import show_warning
+from platformdirs import user_data_dir
+from wget import download
+
+
+def _build_layer_list(dataset: Plate, layer_names: list[str]):
+    layer_list = []
+    for channel_name in layer_names:
+        channel_index = dataset.channel_names.index(channel_name)
+        position = dataset["0/0/0"]
+        data = (position["0"][:, channel_index],)
+        layer_dict = {"name": channel_name, "scale": position.scale[3:]}
+        layer_list.append((data, layer_dict))
+
+    return layer_list
+
+
+def download_and_unzip(data_type: Literal["target", "embryo"]) -> tuple[Path]:
+    """Downloads sample data .zip from zenodo, unzips, and returns Paths to the .zarr datasets.
+
+    Skips the download if the files already exist.
+
+    Uses platformdirs.user_data_dir to store data.
+    """
+
+    # Delete old data
+    old_data_dirs = ["waveorder-sample-v1.4"]
+    for old_data_dir in old_data_dirs:
+        old_data_path = Path(user_data_dir(old_data_dir))
+        if old_data_path.exists():
+            shutil.rmtree(str(old_data_path))
+
+    temp_dirpath = Path(user_data_dir("waveorder-sample-v1.5"))
+    temp_dirpath.mkdir(exist_ok=True, parents=True)
+
+    if data_type == "target":
+        data_dirpath = temp_dirpath / "sample_contribution"
+        data_size = "10 MB"
+        data_url = "https://zenodo.org/record/8386856/files/sample_contribution.zip?download=1"
+    elif data_type == "embryo":
+        data_dirpath = temp_dirpath / "sample_contribution_embryo"
+        data_size = "92 MB"
+        data_url = "https://zenodo.org/record/8386856/files/sample_contribution_embryo.zip?download=1"
+
+    if not data_dirpath.with_suffix(".zip").exists():
+        show_warning(
+            f"Downloading {data_size} sample contribution. This might take a moment..."
+        )
+        download(data_url, out=str(temp_dirpath))
+
+    if not data_dirpath.exists():
+        shutil.unpack_archive(
+            data_dirpath.with_suffix(".zip"), extract_dir=temp_dirpath
+        )
+
+    data_path = data_dirpath / "raw_data.zarr"
+    recon_path = data_dirpath / "reconstruction.zarr"
+    return data_path, recon_path
+
+
+def read_polarization_target_data():
+    """Returns the polarization data sample contribution"""
+    data_path, _ = download_and_unzip("target")
+    dataset = open_ome_zarr(data_path)
+    return _build_layer_list(dataset, dataset.channel_names)
+
+
+def read_polarization_target_reconstruction():
+    """Returns the polarization target reconstruction sample contribution"""
+    _, recon_path = download_and_unzip("target")
+    dataset = open_ome_zarr(recon_path)
+    return _build_layer_list(dataset, ["Phase3D", "Retardance", "Orientation"])
+
+
+def read_zebrafish_embryo_reconstruction():
+    """Returns the embryo reconstruction sample contribution"""
+    _, recon_path = download_and_unzip("embryo")
+    dataset = open_ome_zarr(recon_path)
+    return _build_layer_list(dataset, ["Retardance", "Orientation"])
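The readers above return napari-style layer tuples `(data, kwargs)` in which `data` is a one-element tuple holding a single-channel `(T, Z, Y, X)` array (see `_build_layer_list`). They are presumably wired up as napari sample-data contributions through the new `waveorder/napari.yaml`, but they can also be called directly; a minimal sketch, assuming waveorder 3.0.0 and napari are installed:

    import napari

    from waveorder.scripts.samples import read_polarization_target_data

    viewer = napari.Viewer()
    for data, kwargs in read_polarization_target_data():
        # data is a 1-tuple wrapping one channel's (T, Z, Y, X) array
        viewer.add_image(data[0], name=kwargs["name"])
    napari.run()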
waveorder/scripts/simulate_zarr_acq.py
ADDED
@@ -0,0 +1,204 @@
+import os
+import shutil
+import subprocess
+import threading
+import time
+from pathlib import Path
+
+from iohub.convert import TIFFConverter
+from iohub.ngff import open_ome_zarr
+
+from waveorder.cli.utils import create_empty_hcs_zarr
+
+# This script is a demo .zarr acquisition simulation from an acquired .zarr store
+# The script copies and writes additional metadata to .zattrs inserting two keys
+# The two keys are "FinalDimensions" and "CurrentDimensions".
+# The "FinalDimensions" key with (t,p,z,c) needs to be inserted when the dataset is created
+# and then should be updated at close to ensure aborted acquisitions represent correct dimensions.
+# The "CurrentDimensions" key should have the same (t,p,z,c) information and should be written out
+# either with every new image, end of dimension OR at frequent intervals.
+# Refer further notes below in the example regarding encountered issues.
+#
+# Refer to steps at the end of the file on steps to run this file
+
+
+# %% #############################################
+def convert_data(
+    tif_path, latest_out_path, prefix="", data_type_str="ometiff"
+):
+    converter = TIFFConverter(
+        os.path.join(tif_path, prefix),
+        latest_out_path,
+        data_type=data_type_str,
+        grid_layout=False,
+    )
+    converter.run()
+
+
+def run_convert(ome_tif_path):
+    out_path = os.path.join(
+        Path(ome_tif_path).parent.absolute(),
+        ("raw_" + Path(ome_tif_path).name + ".zarr"),
+    )
+    convert_data(ome_tif_path, out_path)
+
+
+# %% #############################################
+
+
+def run_acq(input_path="", waitBetweenT=30):
+
+    output_store_path = os.path.join(
+        Path(input_path).parent.absolute(),
+        ("acq_sim_" + Path(input_path).name),
+    )
+
+    if Path(output_store_path).exists():
+        shutil.rmtree(output_store_path)
+        time.sleep(1)
+
+    input_data = open_ome_zarr(input_path, mode="r")
+    channel_names = input_data.channel_names
+
+    position_keys: list[tuple[str]] = []
+
+    for path, pos in input_data.positions():
+        shape = pos["0"].shape
+        dtype = pos["0"].dtype
+        chunks = pos["0"].chunks
+        scale = (1, 1, 1, 1, 1)
+        position_keys.append(path.split("/"))
+
+    create_empty_hcs_zarr(
+        output_store_path,
+        position_keys,
+        shape,
+        chunks,
+        scale,
+        channel_names,
+        dtype,
+        {},
+    )
+    output_dataset = open_ome_zarr(output_store_path, mode="r+")
+
+    if "Summary" in input_data.zattrs.keys():
+        output_dataset.zattrs["Summary"] = input_data.zattrs["Summary"]
+
+    output_dataset.zattrs.update(
+        {
+            "FinalDimensions": {
+                "channel": shape[1],
+                "position": len(position_keys),
+                "time": shape[0],
+                "z": shape[2],
+            }
+        }
+    )
+
+    total_time = shape[0]
+    total_pos = len(position_keys)
+    total_z = shape[2]
+    total_c = shape[1]
+    for t in range(total_time):
+        for p in range(total_pos):
+            for z in range(total_z):
+                for c in range(total_c):
+                    position_key_string = "/".join(position_keys[p])
+                    img_src = input_data[position_key_string][0][t, c, z]
+
+                    img_data = output_dataset[position_key_string][0]
+                    img_data[t, c, z] = img_src
+
+                    # Note: On-The-Fly dataset reconstruction will throw Permission Denied when being written
+                    # Maybe we can read the zaatrs directly in that case as a file which is less blocking
+                    # If this write/read is a constant issue then the zattrs 'CurrentDimensions' key
+                    # should be updated less frequently, instead of current design of updating with
+                    # each image
+                    output_dataset.zattrs.update(
+                        {
+                            "CurrentDimensions": {
+                                "channel": total_c,
+                                "position": p + 1,
+                                "time": t + 1,
+                                "z": z + 1,
+                            }
+                        }
+                    )
+
+                    required_order = ["time", "position", "z", "channel"]
+                    my_dict = output_dataset.zattrs["CurrentDimensions"]
+                    sorted_dict_acq = {
+                        k: my_dict[k]
+                        for k in sorted(my_dict, key=lambda x: required_order.index(x))
+                    }
+                    print("Writer thread - Acquisition Dim:", sorted_dict_acq)
+
+        # reconThread = threading.Thread(target=doReconstruct, args=(output_store_path, t))
+        # reconThread.start()
+
+        time.sleep(waitBetweenT)  # sleep after every t
+
+    output_dataset.close
+
+
+def do_reconstruct(input_path, time_point):
+
+    config_path = os.path.join(
+        Path(input_path).parent.absolute(), "Bire-" + str(time_point) + ".yml"
+    )
+    output_path = os.path.join(
+        Path(input_path).parent.absolute(), "Recon_" + Path(input_path).name
+    )
+
+    print(
+        "Processing {input} time_point={tp}".format(
+            input=input_path, tp=time_point
+        )
+    )
+
+    try:
+        proc = subprocess.run(
+            [
+                "waveorder",
+                "reconstruct",
+                "-i",
+                input_path,
+                "-c",
+                config_path,
+                "-o",
+                output_path,
+                "-uid",
+                "test",
+            ]
+        )
+        if proc.returncode != 0:
+            raise Exception(
+                "An error occurred in processing ! Check terminal output."
+            )
+    except Exception as exc:
+        print(exc.args)
+
+
+# %% #############################################
+def run_acquire(input_path, waitBetweenT):
+    runThread1Acq = threading.Thread(
+        target=run_acq, args=(input_path, waitBetweenT)
+    )
+    runThread1Acq.start()
+
+
+# %% #############################################
+# Step 1:
+# Convert an existing ome-tif waveorder acquisition, preferably with all dims (t, p, z, c)
+# This will convert an existing ome-tif to a .zarr storage
+
+# ome_tif_path = "/ome-zarr_data/waveorderAcq/test/snap_6D_ometiff_1"
+# runConvert(ome_tif_path)
+
+# %% #############################################
+# Step 2:
+# run the test to simulate Acquiring a waveorder .zarr store
+
+input_path = "/ome-zarr_data/waveorderAcq/test/raw_snap_6D_ometiff_1.zarr"
+waitBetweenT = 60
+run_acquire(input_path, waitBetweenT)
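The header comments in the script above describe the two `.zattrs` keys it maintains: `FinalDimensions` is written once when the output store is created, and `CurrentDimensions` is rewritten after every image with the 1-based indices of the most recently written frame (its `channel` entry always holds the total channel count). A minimal sketch of how a separate process could poll that key, assuming the Step 2 paths above (the printed values are illustrative only):

    from iohub.ngff import open_ome_zarr

    # run_acq() writes to "acq_sim_" + the input store's name
    store_path = "/ome-zarr_data/waveorderAcq/test/acq_sim_raw_snap_6D_ometiff_1.zarr"
    dataset = open_ome_zarr(store_path, mode="r")
    dims = dataset.zattrs["CurrentDimensions"]
    print(dims)  # e.g. {"channel": 4, "position": 1, "time": 2, "z": 7}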
waveorder/util.py
CHANGED
@@ -714,7 +714,7 @@ def inten_normalization(img_stack, bg_filter=True):
                 img_stack[i], size=X // 2
             )
         else:
-            img_norm_stack[i] = img_stack[i]
+            img_norm_stack[i] = img_stack[i]
         img_norm_stack[i] /= torch.mean(img_norm_stack[i])
         img_norm_stack[i] -= 1

waveorder/visuals/napari_visuals.py
CHANGED
@@ -13,7 +13,7 @@ def add_transfer_function_to_viewer(
     complex_rgb: bool = False,
 ):
     zyx_shape = transfer_function.shape[-3:]
-    lim = torch.max(torch.abs(transfer_function)) * clim_factor
+    lim = (torch.max(torch.abs(transfer_function)) * clim_factor).item()
     voxel_scale = np.array(
         [
             zyx_shape[0] * zyx_scale[0],