waveorder 2.2.0__py3-none-any.whl → 2.2.1b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- waveorder/_version.py +9 -4
- waveorder/background_estimator.py +2 -2
- waveorder/correction.py +1 -1
- waveorder/filter.py +206 -0
- waveorder/focus.py +5 -3
- waveorder/models/inplane_oriented_thick_pol3d.py +6 -2
- waveorder/models/inplane_oriented_thick_pol3d_vector.py +29 -60
- waveorder/models/isotropic_fluorescent_thick_3d.py +18 -7
- waveorder/models/isotropic_thin_3d.py +19 -16
- waveorder/models/phase_thick_3d.py +29 -19
- waveorder/optics.py +11 -14
- waveorder/reconstruct.py +28 -0
- waveorder/stokes.py +4 -3
- waveorder/util.py +9 -9
- waveorder/visuals/jupyter_visuals.py +19 -22
- waveorder/visuals/matplotlib_visuals.py +4 -4
- waveorder/visuals/napari_visuals.py +2 -3
- waveorder/visuals/utils.py +1 -2
- waveorder/waveorder_reconstructor.py +10 -9
- waveorder/waveorder_simulator.py +6 -6
- {waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/METADATA +3 -2
- waveorder-2.2.1b0.dist-info/RECORD +27 -0
- {waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/WHEEL +1 -1
- waveorder-2.2.0.dist-info/RECORD +0 -25
- {waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/LICENSE +0 -0
- {waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/top_level.txt +0 -0
waveorder/_version.py
CHANGED
@@ -1,8 +1,13 @@
-# file generated by
+# file generated by setuptools-scm
 # don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
 TYPE_CHECKING = False
 if TYPE_CHECKING:
-    from typing import Tuple
+    from typing import Tuple
+    from typing import Union
+
     VERSION_TUPLE = Tuple[Union[int, str], ...]
 else:
     VERSION_TUPLE = object
@@ -12,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '2.2.0'
-__version_tuple__ = version_tuple = (2, 2, 0)
+__version__ = version = '2.2.1b0'
+__version_tuple__ = version_tuple = (2, 2, 1)
waveorder/correction.py
CHANGED
waveorder/filter.py
ADDED
@@ -0,0 +1,206 @@
+import itertools
+
+import torch
+
+
+def apply_filter_bank(
+    io_filter_bank: torch.Tensor,
+    i_input_array: torch.Tensor,
+) -> torch.Tensor:
+    """
+    Applies a filter bank to an input array.
+
+    io_filter_bank.shape must be smaller or equal to i_input_array.shape in all
+    dimensions. When io_filter_bank is smaller, it is effectively "stretched"
+    to apply the filter.
+
+    io_filter_bank is in "wrapped" format, i.e., the zero frequency is the
+    zeroth element.
+
+    i_input_array and io_filter_bank must have inverse sample spacing, i.e.,
+    is input_array contains samples spaced by dx, then io_filter_bank must
+    have extent 1/dx. Note that there is no need for io_filter_bank to have
+    sample spacing 1/(n*dx) because io_filter_bank will be stretched.
+
+    Parameters
+    ----------
+    io_filter_bank : torch.Tensor
+        The filter bank to be applied in the frequency domain.
+        The spatial extent of io_filter_bank must be 1/dx, where dx is the
+        sample spacing of i_input_array.
+
+        Leading dimensions are the input and output dimensions.
+        io_filter_bank.shape[:2] == (num_input_channels, num_output_channels)
+
+        Trailing dimensions are spatial frequency dimensions.
+        io_filter_bank.shape[2:] == (Z', Y', X') or (Y', X')
+
+    i_input_array : torch.Tensor
+        The real-valued input array with sample spacing dx to be filtered.
+
+        Leading dimension is the input dimension, matching the filter bank.
+        i_input_array.shape[0] == i
+
+        Trailing dimensions are spatial dimensions.
+        i_input_array.shape[1:] == (Z, Y, X) or (Y, X)
+
+    Returns
+    -------
+    torch.Tensor
+        The filtered real-valued output array with shape
+        (num_output_channels, Z, Y, X) or (num_output_channels, Y, X).
+
+    """
+
+    # Ensure all dimensions of transfer_function are smaller than or equal to input_array
+    if any(
+        t > i
+        for t, i in zip(io_filter_bank.shape[2:], i_input_array.shape[1:])
+    ):
+        raise ValueError(
+            "All spatial dimensions of io_filter_bank must be <= i_input_array."
+        )
+
+    # Ensure the number of spatial dimensions match
+    if io_filter_bank.ndim - i_input_array.ndim != 1:
+        raise ValueError(
+            "io_filter_bank and i_input_array must have the same number of spatial dimensions."
+        )
+
+    # Ensure the input dimensions match
+    if io_filter_bank.shape[0] != i_input_array.shape[0]:
+        raise ValueError(
+            "io_filter_bank.shape[0] and i_input_array.shape[0] must be the same."
+        )
+
+    num_input_channels, num_output_channels = io_filter_bank.shape[:2]
+    spatial_dims = io_filter_bank.shape[2:]
+
+    # Pad input_array until each dimension is divisible by transfer_function
+    pad_sizes = [
+        (0, (t - (i % t)) % t)
+        for t, i in zip(
+            io_filter_bank.shape[2:][::-1], i_input_array.shape[1:][::-1]
+        )
+    ]
+    flat_pad_sizes = list(itertools.chain(*pad_sizes))
+    padded_input_array = torch.nn.functional.pad(i_input_array, flat_pad_sizes)
+
+    # Apply the transfer function in the frequency domain
+    fft_dims = [d for d in range(1, i_input_array.ndim)]
+    padded_input_spectrum = torch.fft.fftn(padded_input_array, dim=fft_dims)
+
+    # Matrix-vector multiplication over f
+    # If this is a bottleneck, consider extending `stretched_multiply` to
+    # a `stretched_matrix_multiply` that uses an call like
+    # torch.einsum('io..., i... -> o...', io_filter_bank, padded_input_spectrum)
+    #
+    # Further optimization is likely with a combination of
+    # torch.baddbmm, torch.pixel_shuffle, torch.pixel_unshuffle.
+    padded_output_spectrum = torch.zeros(
+        (num_output_channels,) + spatial_dims,
+        dtype=padded_input_spectrum.dtype,
+        device=padded_input_spectrum.device,
+    )
+    for input_channel_idx in range(num_input_channels):
+        for output_channel_idx in range(num_output_channels):
+            padded_output_spectrum[output_channel_idx] += stretched_multiply(
+                io_filter_bank[input_channel_idx, output_channel_idx],
+                padded_input_spectrum[input_channel_idx],
+            )
+
+    # Cast to real, ignoring imaginary part
+    padded_result = torch.real(
+        torch.fft.ifftn(padded_output_spectrum, dim=fft_dims)
+    )
+
+    # Remove padding and return
+    slices = tuple(slice(0, i) for i in i_input_array.shape)
+    return padded_result[slices]
+
+
+def stretched_multiply(
+    small_array: torch.Tensor, large_array: torch.Tensor
+) -> torch.Tensor:
+    """
+    Effectively "stretches" small_array onto large_array before multiplying.
+
+    Each dimension of large_array must be divisible by each dimension of small_array.
+
+    Instead of upsampling small_array, this function uses a "block element-wise"
+    multiplication by breaking the large_array into blocks before element-wise
+    multiplication with the small_array.
+
+    For example, a `stretched_multiply` of a 3x3 array by a 99x99 array will
+    divide the 99x99 array into 33x33 blocks
+    [[33x33, 33x33, 33x33],
+     [33x33, 33x33, 33x33],
+     [33x33, 33x33, 33x33]]
+    and multiply each block by the corresponding element in the 3x3 array.
+
+    Returns an array with the same shape as large_array.
+
+    Works for arbitrary dimensions.
+
+    Parameters
+    ----------
+    small_array : torch.Tensor
+        A smaller array whose elements will be "stretched" onto blocks in the large array.
+    large_array : torch.Tensor
+        A larger array that will be divided into blocks and multiplied by the small array.
+
+    Returns
+    -------
+    torch.Tensor
+        Resulting tensor with shape matching large_array.
+
+    Example
+    -------
+    small_array = torch.tensor([[1, 2],
+                                [3, 4]])
+
+    large_array = torch.tensor([[1, 2, 3, 4],
+                                [5, 6, 7, 8],
+                                [9, 10, 11, 12],
+                                [13, 14, 15, 16]])
+
+    stretched_multiply(small_array, large_array) returns
+
+    [[ 1,  2,  6,  8],
+     [ 5,  6, 14, 16],
+     [27, 30, 44, 48],
+     [39, 42, 60, 64]]
+    """
+
+    # Ensure each dimension of large_array is divisible by each dimension of small_array
+    if any(l % s != 0 for s, l in zip(small_array.shape, large_array.shape)):
+        raise ValueError(
+            "Each dimension of large_array must be divisible by each dimension of small_array"
+        )
+
+    # Ensure the number of dimensions match
+    if small_array.ndim != large_array.ndim:
+        raise ValueError(
+            "small_array and large_array must have the same number of dimensions"
+        )
+
+    # Get shapes
+    s_shape = small_array.shape
+    l_shape = large_array.shape
+
+    # Reshape both array into blocks
+    block_shape = tuple(p // s for p, s in zip(l_shape, s_shape))
+    new_large_shape = tuple(itertools.chain(*zip(s_shape, block_shape)))
+    new_small_shape = tuple(
+        itertools.chain(*zip(s_shape, small_array.ndim * (1,)))
+    )
+    reshaped_large_array = large_array.reshape(new_large_shape)
+    reshaped_small_array = small_array.reshape(new_small_shape)
+
+    # Multiply the reshaped arrays
+    reshaped_result = reshaped_large_array * reshaped_small_array
+
+    # Reshape the result back to the large array shape
+    result = reshaped_result.reshape(l_shape)
+
+    return result
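For orientation, here is a minimal usage sketch of the two functions added in waveorder/filter.py, based only on the docstrings above. The shapes are chosen so that no padding or stretching is needed, and the expected output of `stretched_multiply` reproduces its docstring example.

```python
import torch

from waveorder.filter import apply_filter_bank, stretched_multiply

# Block-wise multiply: each element of the 2x2 array scales one 2x2 block
# of the 4x4 array (this reproduces the docstring example above).
small = torch.tensor([[1, 2], [3, 4]])
large = torch.arange(1, 17).reshape(4, 4)
print(stretched_multiply(small, large))
# tensor([[ 1,  2,  6,  8],
#         [ 5,  6, 14, 16],
#         [27, 30, 44, 48],
#         [39, 42, 60, 64]])

# A 2-input, 1-output filter bank applied to a 2-channel 2D image.
# An all-ones (all-pass) bank in wrapped frequency format simply sums the
# two input channels; spatial sizes match, so no stretching occurs.
filter_bank = torch.ones((2, 1, 16, 16), dtype=torch.complex64)
image = torch.randn(2, 16, 16)
filtered = apply_filter_bank(filter_bank, image)
print(filtered.shape)  # torch.Size([1, 16, 16])
```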
waveorder/focus.py
CHANGED
@@ -1,9 +1,11 @@
-
+import warnings
 from typing import Literal, Optional
-
+
 import matplotlib.pyplot as plt
 import numpy as np
-import
+from scipy.signal import peak_widths
+
+from waveorder import util
 
 
 def focus_from_transverse_band(
waveorder/models/inplane_oriented_thick_pol3d.py
CHANGED
@@ -7,7 +7,9 @@ from torch import Tensor
 from waveorder import correction, stokes, util
 
 
-def generate_test_phantom(
+def generate_test_phantom(
+    yx_shape: Tuple[int, int],
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
     star, theta, _ = util.generate_star_target(yx_shape, blur_px=0.1)
     retardance = 0.25 * star
     orientation = (theta % np.pi) * (star > 1e-3)
@@ -23,7 +25,9 @@ def calculate_transfer_function(
     return stokes.calculate_intensity_to_stokes_matrix(swing, scheme=scheme)
 
 
-def visualize_transfer_function(
+def visualize_transfer_function(
+    viewer, intensity_to_stokes_matrix: Tensor
+) -> None:
     viewer.add_image(
         intensity_to_stokes_matrix.cpu().numpy(),
         name="Intensity to stokes matrix",
waveorder/models/inplane_oriented_thick_pol3d_vector.py
CHANGED
@@ -1,10 +1,12 @@
-import
-import numpy as np
+from typing import Literal
 
+import numpy as np
+import torch
 from torch import Tensor
-from
-
+from torch.nn.functional import avg_pool3d
+
 from waveorder import optics, sampling, stokes, util
+from waveorder.filter import apply_filter_bank
 from waveorder.visuals.napari_visuals import add_transfer_function_to_viewer
 
 
@@ -39,7 +41,6 @@ def calculate_transfer_function(
     numerical_aperture_detection: float,
     invert_phase_contrast: bool = False,
     fourier_oversample_factor: int = 1,
-    transverse_downsample_factor: int = 1,
 ) -> tuple[
     torch.Tensor, torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]
 ]:
@@ -65,38 +66,25 @@ def calculate_transfer_function(
 
     tf_calculation_shape = (
         zyx_shape[0] * z_factor * fourier_oversample_factor,
-        int(
-
-                zyx_shape[1]
-                * yx_factor
-                * fourier_oversample_factor
-                / transverse_downsample_factor
-            )
-        ),
-        int(
-            np.ceil(
-                zyx_shape[2]
-                * yx_factor
-                * fourier_oversample_factor
-                / transverse_downsample_factor
-            )
-        ),
+        int(np.ceil(zyx_shape[1] * yx_factor * fourier_oversample_factor)),
+        int(np.ceil(zyx_shape[2] * yx_factor * fourier_oversample_factor)),
     )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    (
+        sfZYX_transfer_function,
+        intensity_to_stokes_matrix,
+    ) = _calculate_wrap_unsafe_transfer_function(
+        swing,
+        scheme,
+        tf_calculation_shape,
+        yx_pixel_size / yx_factor,
+        z_pixel_size / z_factor,
+        wavelength_illumination,
+        z_padding,
+        index_of_refraction_media,
+        numerical_aperture_illumination,
+        numerical_aperture_detection,
+        invert_phase_contrast=invert_phase_contrast,
     )
 
     # avg_pool3d does not support complex numbers
@@ -123,25 +111,12 @@ def calculate_transfer_function(
     )
 
     # Compute singular system on cropped and downsampled
-
-
-    # Interpolate to final size in YX
-    def complex_interpolate(
-        tensor: torch.Tensor, zyx_shape: tuple[int, int, int]
-    ) -> torch.Tensor:
-        interpolated_real = interpolate(tensor.real, size=zyx_shape)
-        interpolated_imag = interpolate(tensor.imag, size=zyx_shape)
-        return interpolated_real + 1j * interpolated_imag
-
-    full_cropped = complex_interpolate(cropped, zyx_shape)
-    full_U = complex_interpolate(U, zyx_shape)
-    full_S = interpolate(S[None], size=zyx_shape)[0]  # S is real
-    full_Vh = complex_interpolate(Vh, zyx_shape)
+    singular_system = calculate_singular_system(cropped)
 
     return (
-
+        cropped,
         intensity_to_stokes_matrix,
-
+        singular_system,
     )
 
 
@@ -332,20 +307,14 @@ def apply_inverse_transfer_function(
     TV_rho_strength: float = 1e-3,
     TV_iterations: int = 10,
 ):
-    sZYX_data = torch.fft.fftn(szyx_data, dim=(1, 2, 3))
-
     # Key computation
     print("Computing inverse filter")
    U, S, Vh = singular_system
     S_reg = S / (S**2 + regularization_strength)
-
-    ZYXsf_inverse_filter = torch.einsum(
+    sfzyx_inverse_filter = torch.einsum(
         "sjzyx,jzyx,jfzyx->sfzyx", U, S_reg, Vh
     )
 
-
-    fZYX_reconstructed = torch.einsum(
-        "szyx,sfzyx->fzyx", sZYX_data, ZYXsf_inverse_filter
-    )
+    fzyx_recon = apply_filter_bank(sfzyx_inverse_filter, szyx_data)
 
-    return
+    return fzyx_recon
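The "key computation" above assembles a Tikhonov-regularized pseudo-inverse filter bank from the per-frequency singular system (U, S, Vh) and hands it to `apply_filter_bank`. The following is a hedged, self-contained sketch of just the filter-assembly step; the shapes, the random stand-in transfer function, and the `torch.linalg.svd` call are illustrative only (in the package the singular system comes from `calculate_singular_system`).

```python
import torch

# Illustrative shapes only: 3 Stokes channels (s), 2 object channels (f),
# on a small (Z, Y, X) frequency grid.
S_dim, F_dim, Z, Y, X = 3, 2, 4, 8, 8
A = torch.randn(Z, Y, X, S_dim, F_dim, dtype=torch.complex64)

# Per-frequency SVD; j indexes the singular values.
U_, S_, Vh_ = torch.linalg.svd(A, full_matrices=False)
U = U_.permute(3, 4, 0, 1, 2)    # (s, j, z, y, x)
S = S_.permute(3, 0, 1, 2)       # (j, z, y, x)
Vh = Vh_.permute(3, 4, 0, 1, 2)  # (j, f, z, y, x)

# Regularized inversion of the singular values, then reassembly into a
# (s, f, z, y, x) inverse filter bank, mirroring the diff above.
regularization_strength = 1e-3
S_reg = (S / (S**2 + regularization_strength)).to(U.dtype)  # cast for einsum
sfzyx_inverse_filter = torch.einsum("sjzyx,jzyx,jfzyx->sfzyx", U, S_reg, Vh)
print(sfzyx_inverse_filter.shape)  # torch.Size([3, 2, 4, 8, 8])
```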
waveorder/models/isotropic_fluorescent_thick_3d.py
CHANGED
@@ -5,6 +5,8 @@ import torch
 from torch import Tensor
 
 from waveorder import optics, sampling, util
+from waveorder.filter import apply_filter_bank
+from waveorder.reconstruct import tikhonov_regularized_inverse_filter
 from waveorder.visuals.napari_visuals import add_transfer_function_to_viewer
 
 
@@ -30,7 +32,6 @@ def calculate_transfer_function(
     index_of_refraction_media: float,
     numerical_aperture_detection: float,
 ) -> Tensor:
-
     transverse_nyquist = sampling.transverse_nyquist(
         wavelength_emission,
         numerical_aperture_detection,  # ill = det for fluorescence
@@ -108,7 +109,11 @@ def _calculate_wrap_unsafe_transfer_function(
     return optical_transfer_function
 
 
-def visualize_transfer_function(
+def visualize_transfer_function(
+    viewer,
+    optical_transfer_function: Tensor,
+    zyx_scale: tuple[float, float, float],
+) -> None:
     add_transfer_function_to_viewer(
         viewer,
         torch.real(optical_transfer_function),
@@ -118,7 +123,10 @@ def visualize_transfer_function(viewer, optical_transfer_function: Tensor, zyx_s
 
 
 def apply_transfer_function(
-    zyx_object: Tensor,
+    zyx_object: Tensor,
+    optical_transfer_function: Tensor,
+    z_padding: int,
+    background: int = 10,
 ) -> Tensor:
     """Simulate imaging by applying a transfer function
 
@@ -205,12 +213,15 @@ def apply_inverse_transfer_function(
 
     # Reconstruct
     if reconstruction_algorithm == "Tikhonov":
-
-
-            optical_transfer_function,
-            reg_re=regularization_strength,
+        inverse_filter = tikhonov_regularized_inverse_filter(
+            optical_transfer_function, regularization_strength
         )
 
+        # [None]s and [0] are for applying a 1x1 "bank" of filters.
+        # For further uniformity, consider returning (1, Z, Y, X)
+        f_real = apply_filter_bank(
+            inverse_filter[None, None], zyx_padded[None]
+        )[0]
     elif reconstruction_algorithm == "TV":
         raise NotImplementedError
         f_real = util.single_variable_admm_tv_deconvolution_3D(
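The `[None, None]` / `[0]` indexing noted in the comments above promotes a single 3D filter and a single 3D volume to a 1x1 filter bank so that the shared `apply_filter_bank` path can be reused. A minimal hedged sketch, where `otf` and `zyx` are arbitrary stand-ins for the optical transfer function and the padded data volume:

```python
import torch

from waveorder.filter import apply_filter_bank
from waveorder.reconstruct import tikhonov_regularized_inverse_filter

otf = torch.fft.fftn(torch.randn(8, 32, 32))  # stand-in (Z, Y, X) transfer function
zyx = torch.randn(8, 32, 32)                  # stand-in (Z, Y, X) data volume

inverse_filter = tikhonov_regularized_inverse_filter(otf, 1e-3)

# Promote to a (1, 1, Z, Y, X) bank and a (1, Z, Y, X) input, filter,
# then drop the singleton output-channel axis again.
f_real = apply_filter_bank(inverse_filter[None, None], zyx[None])[0]
print(f_real.shape)  # torch.Size([8, 32, 32])
```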
waveorder/models/isotropic_thin_3d.py
CHANGED
@@ -6,6 +6,7 @@ from torch import Tensor
 
 from waveorder import optics, sampling, util
 
+
 def generate_test_phantom(
     yx_shape: Tuple[int, int],
     yx_pixel_size: float,
@@ -50,20 +51,21 @@ def calculate_transfer_function(
     )
     yx_factor = int(np.ceil(yx_pixel_size / transverse_nyquist))
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    (
+        absorption_2d_to_3d_transfer_function,
+        phase_2d_to_3d_transfer_function,
+    ) = _calculate_wrap_unsafe_transfer_function(
+        (
+            yx_shape[0] * yx_factor,
+            yx_shape[1] * yx_factor,
+        ),
+        yx_pixel_size / yx_factor,
+        z_position_list,
+        wavelength_illumination,
+        index_of_refraction_media,
+        numerical_aperture_illumination,
+        numerical_aperture_detection,
+        invert_phase_contrast=invert_phase_contrast,
     )
 
     absorption_2d_to_3d_transfer_function_out = torch.zeros(
@@ -151,9 +153,9 @@ def visualize_transfer_function(
     absorption_2d_to_3d_transfer_function: Tensor,
     phase_2d_to_3d_transfer_function: Tensor,
 ) -> None:
-    """Note: unlike other `visualize_transfer_function` calls, this transfer
+    """Note: unlike other `visualize_transfer_function` calls, this transfer
     function is a mixed 3D-to-2D transfer function, so it cannot reuse
-    util.add_transfer_function_to_viewer. If more 3D-to-2D transfer functions
+    util.add_transfer_function_to_viewer. If more 3D-to-2D transfer functions
     are added, consider refactoring.
     """
     arrays = [
@@ -286,6 +288,7 @@ def apply_inverse_transfer_function(
     zyx_data_hat = torch.fft.fft2(zyx_data_normalized, dim=(1, 2))
 
     # TODO AHA and b_vec calculations should be moved into tikhonov/tv calculations
+    # TODO Reformulate to use filter.apply_filter_bank
     AHA = [
         torch.sum(torch.abs(absorption_2d_to_3d_transfer_function) ** 2, dim=0)
         + regularization_strength,
waveorder/models/phase_thick_3d.py
CHANGED
@@ -5,7 +5,9 @@ import torch
 from torch import Tensor
 
 from waveorder import optics, sampling, util
+from waveorder.filter import apply_filter_bank
 from waveorder.models import isotropic_fluorescent_thick_3d
+from waveorder.reconstruct import tikhonov_regularized_inverse_filter
 from waveorder.visuals.napari_visuals import add_transfer_function_to_viewer
 
 
@@ -56,22 +58,23 @@ def calculate_transfer_function(
     yx_factor = int(np.ceil(yx_pixel_size / transverse_nyquist))
     z_factor = int(np.ceil(z_pixel_size / axial_nyquist))
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    (
+        real_potential_transfer_function,
+        imag_potential_transfer_function,
+    ) = _calculate_wrap_unsafe_transfer_function(
+        (
+            zyx_shape[0] * z_factor,
+            zyx_shape[1] * yx_factor,
+            zyx_shape[2] * yx_factor,
+        ),
+        yx_pixel_size / yx_factor,
+        z_pixel_size / z_factor,
+        wavelength_illumination,
+        z_padding,
+        index_of_refraction_media,
+        numerical_aperture_illumination,
+        numerical_aperture_detection,
+        invert_phase_contrast=invert_phase_contrast,
     )
 
     zyx_out_shape = (zyx_shape[0] + 2 * z_padding,) + zyx_shape[1:]
@@ -167,7 +170,10 @@ def visualize_transfer_function(
 
 
 def apply_transfer_function(
-    zyx_object: np.ndarray,
+    zyx_object: np.ndarray,
+    real_potential_transfer_function: np.ndarray,
+    z_padding: int,
+    brightness: float,
 ) -> np.ndarray:
     # This simplified forward model only handles phase, so it resuses the fluorescence forward model
     # TODO: extend to absorption
@@ -249,10 +255,14 @@ def apply_inverse_transfer_function(
 
     # Reconstruct
     if reconstruction_algorithm == "Tikhonov":
-
-
+        inverse_filter = tikhonov_regularized_inverse_filter(
+            effective_transfer_function, regularization_strength
         )
 
+        # [None]s and [0] are for applying a 1x1 "bank" of filters.
+        # For further uniformity, consider returning (1, Z, Y, X)
+        f_real = apply_filter_bank(inverse_filter[None, None], zyx[None])[0]
+
     elif reconstruction_algorithm == "TV":
         raise NotImplementedError
         f_real = util.single_variable_admm_tv_deconvolution_3D(
waveorder/optics.py
CHANGED
@@ -1,9 +1,8 @@
+import itertools
+
 import numpy as np
 import torch
-
-import gc
-import itertools
-from numpy.fft import fft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
+from numpy.fft import fft2, fftn, fftshift, ifft2, ifftn, ifftshift
 
 
 def Jones_sample(Ein, t, sa):
@@ -272,7 +271,7 @@ def generate_vector_source_defocus_pupil(
     # TEMPORARY SIMPLIFY ROTATIONS "TURN OFF ROTATIONS"
     # 3x2 IDENTITY MATRIX
     rotations = torch.zeros_like(rotations)
-    rotations[1, 0, ...] = 1
+    rotations[1, 0, ...] = 1
     rotations[2, 1, ...] = 1
 
     # Main calculation in the frequency domain
@@ -718,10 +717,10 @@ def gen_dyadic_Greens_tensor(G_real, ps, psz, lambda_in, space="real"):
 
 
 def generate_greens_tensor_spectrum(
-
-
-
-
+    zyx_shape,
+    zyx_pixel_size,
+    wavelength,
+):
     """
     Parameters
     ----------
@@ -733,14 +732,12 @@ def generate_greens_tensor_spectrum(
     Returns
    -------
     torch.tensor
-        Green's tensor spectrum
+        Green's tensor spectrum
     """
     Z, Y, X = zyx_shape
     dZ, dY, dX = zyx_pixel_size
 
-    z_step = torch.fft.ifftshift(
-        (torch.arange(Z) - Z // 2) * dZ
-    )
+    z_step = torch.fft.ifftshift((torch.arange(Z) - Z // 2) * dZ)
     y_step = torch.fft.ifftshift((torch.arange(Y) - Y // 2) * dY)
     x_step = torch.fft.ifftshift((torch.arange(X) - X // 2) * dX)
 
@@ -769,7 +766,7 @@ def generate_greens_tensor_spectrum(
     G_3D /= torch.amax(torch.abs(G_3D))
 
     return G_3D
-
+
 
 def compute_weak_object_transfer_function_2d(
     illumination_pupil, detection_pupil
waveorder/reconstruct.py
ADDED
@@ -0,0 +1,28 @@
+import torch
+
+
+def tikhonov_regularized_inverse_filter(
+    forward_filter: torch.Tensor, regularization_strength: float
+):
+    """Compute the Tikhonov regularized inverse filter from a forward filter.
+
+    Parameters
+    ----------
+    forward_filter : torch.Tensor
+        The forward filter tensor.
+    regularization_strength : float
+        The strength of the regularization term.
+    Returns
+    -------
+    torch.Tensor
+        The Tikhonov regularized inverse filter.
+    """
+
+    if forward_filter.ndim == 3:
+        forward_filter_conj = torch.conj(forward_filter)
+        return forward_filter_conj / (
+            (forward_filter_conj * forward_filter) + regularization_strength
+        )
+    else:
+        # TC TODO INTEGRATE THE 5D FILTER BANK CASE
+        raise NotImplementedError("Only 3D tensors are supported.")
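A quick, hedged sanity check of the formula implemented above, conj(H) / (|H|**2 + regularization_strength): for well-conditioned frequencies the regularized inverse approaches 1/H, while near-zero frequencies roll off to zero instead of amplifying noise. The values below are arbitrary.

```python
import torch

from waveorder.reconstruct import tikhonov_regularized_inverse_filter

strong = torch.full((1, 1, 1), 2.0, dtype=torch.complex64)   # |H| >> sqrt(reg)
weak = torch.full((1, 1, 1), 1e-4, dtype=torch.complex64)    # |H| -> 0

print(tikhonov_regularized_inverse_filter(strong, 1e-3))  # ~0.5, close to 1/H
print(tikhonov_regularized_inverse_filter(weak, 1e-3))    # ~0.1, not 1/H = 1e4
```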
waveorder/stokes.py
CHANGED
@@ -2,7 +2,7 @@
 Overview
 --------
 
-This module collects Stokes- and Mueller-related calculations.
+This module collects Stokes- and Mueller-related calculations.
 
 The functions are roughly organized into groups:
 
@@ -29,8 +29,8 @@ y = mmul(A, x)
 Usage
 -----
 
-All functions are intended to be used with torch.Tensors with Stokes- or
-Mueller-indices as the first axes.
+All functions are intended to be used with torch.Tensors with Stokes- or
+Mueller-indices as the first axes.
 
 For example, the following usage modes of stokes_after_adr are valid:
 
@@ -46,6 +46,7 @@ For example, the following usage modes of stokes_after_adr are valid:
 >>> stokes_after_adr(*adr_params)  # * expands along the first axis
 
 """
+
 import numpy as np
 import torch
 
waveorder/util.py
CHANGED
@@ -1,15 +1,14 @@
+import re
+import time
+from collections import namedtuple
+
 import numpy as np
-import matplotlib.pyplot as plt
 import pywt
-import time
 import torch
-
-from numpy.fft import fft, ifft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
+from numpy.fft import fft, fft2, fftn, fftshift, ifft, ifftn, ifftshift
 from scipy.ndimage import uniform_filter
-from collections import namedtuple
-from .optics import scattering_potential_tensor_to_3D_orientation_PN
 
-import
+from .optics import scattering_potential_tensor_to_3D_orientation_PN
 
 numbers = re.compile(r"(\d+)")
 
@@ -2289,5 +2288,6 @@ def gellmann():
             [[e, 0, 0], [0, d, 0], [0, 0, d]],
             [[0, 0, c], [0, 0, 0], [c, 0, 0]],
             [[0, 0, 0], [0, -c, 0], [0, 0, c]],  #
-        ],
-
+        ],
+        dtype=torch.complex64,
+    )
waveorder/visuals/jupyter_visuals.py
CHANGED
@@ -1,22 +1,16 @@
-import numpy as np
-import matplotlib.pyplot as plt
-import ipywidgets as widgets
-import os
 import io
+import os
+
+import ipywidgets as widgets
+import matplotlib.pyplot as plt
+import numpy as np
+from ipywidgets import HBox, Image, Layout, interact
+from matplotlib.colors import Normalize, hsv_to_rgb
+from numpy.typing import NDArray
 from PIL import Image as PImage
-from ipywidgets import (
-    Image,
-    Layout,
-    interact,
-    HBox,
-)
-from matplotlib.colors import hsv_to_rgb
-from matplotlib.colors import Normalize
 from scipy.ndimage import uniform_filter
 from scipy.stats import binned_statistic_2d
 
-from numpy.typing import NDArray
-
 
 def im_bit_convert(im, bit=16, norm=False, limit=[]):
     im = im.astype(
@@ -172,7 +166,7 @@ def image_stack_viewer_fast(
     else:
         raise ValueError('origin can only be either "upper" or "lower"')
 
-    im_wgt = Image(
+    im_wgt = Image(
         value=im_dict[0],
         layout=Layout(height=str(size[0]) + "px", width=str(size[1]) + "px"),
     )
@@ -1046,15 +1040,17 @@ def plotVectorField(
     # plot vector field representaiton of the orientation map
 
     # Compute U, V such that they are as long as line-length when anisotropy = 1.
-    U, V =
-        2 * orientation
-
+    U, V = (
+        anisotropy * linelength * np.cos(2 * orientation),
+        anisotropy * linelength * np.sin(2 * orientation),
+    )
     USmooth = uniform_filter(U, (window, window))  # plot smoothed vector field
     VSmooth = uniform_filter(V, (window, window))  # plot smoothed vector field
     azimuthSmooth = 0.5 * np.arctan2(VSmooth, USmooth)
     RSmooth = np.sqrt(USmooth**2 + VSmooth**2)
-    USmooth, VSmooth =
-        azimuthSmooth
+    USmooth, VSmooth = (
+        RSmooth * np.cos(azimuthSmooth),
+        RSmooth * np.sin(azimuthSmooth),
     )
 
     nY, nX = img.shape
@@ -1639,8 +1635,9 @@ def plot3DVectorField(
     VSmooth = uniform_filter(V, (window, window))  # plot smoothed vector field
     azimuthSmooth = 0.5 * np.arctan2(VSmooth, USmooth)
     RSmooth = np.sqrt(USmooth**2 + VSmooth**2)
-    USmooth, VSmooth =
-        azimuthSmooth
+    USmooth, VSmooth = (
+        RSmooth * np.cos(azimuthSmooth),
+        RSmooth * np.sin(azimuthSmooth),
     )
 
     nY, nX = img.shape
waveorder/visuals/matplotlib_visuals.py
CHANGED
@@ -23,7 +23,7 @@ def plot_5d_ortho(
     Plot 5D multi-channel data in a grid or ortho-slice views.
 
     Input data is a 6D array with (row, column, channels, Z, Y, X) dimensions.
-
+
     `color_funcs` permits different RGB color maps for each row and column.
 
     Parameters
@@ -32,7 +32,7 @@ def plot_5d_ortho(
         5D array with shape (R, C, Ch, Z, Y, X) containing the data to plot.
         [r]ows and [c]olumns form a grid
         [C]hannels contain multiple color channels
-        [ZYX] contain 3D volumes.
+        [ZYX] contain 3D volumes.
     filename : str
         Path to save the output plot.
     voxel_size : tuple[float, float, float]
@@ -40,8 +40,8 @@ def plot_5d_ortho(
     zyx_slice : tuple[int, int, int]
         Indices of the ortho-slices to plot in (Z, Y, X) indices.
     color_funcs : list[list[callable]]
-        A list of lists of callables, one for each element of the plot grid,
-        with len(color_funcs) == R and len(colors_funcs[0] == C).
+        A list of lists of callables, one for each element of the plot grid,
+        with len(color_funcs) == R and len(colors_funcs[0] == C).
         Each callable accepts [C]hannel arguments and returns RGB color values,
         enabling different RGB color maps for each member of the grid.
     row_labels : list[str], optional
waveorder/visuals/napari_visuals.py
CHANGED
@@ -1,9 +1,8 @@
-from waveorder.visuals.utils import complex_tensor_to_rgb
-from typing import TYPE_CHECKING
-
 import numpy as np
 import torch
 
+from waveorder.visuals.utils import complex_tensor_to_rgb
+
 
 def add_transfer_function_to_viewer(
     viewer: "napari.Viewer",
waveorder/visuals/utils.py
CHANGED
@@ -1,11 +1,10 @@
-import numpy as np
 import matplotlib.colors as mcolors
+import numpy as np
 
 
 # Main function to convert a complex-valued torch tensor to RGB numpy array
 # with red at +1, green at +i, blue at -1, and purple at -i
 def complex_tensor_to_rgb(array, saturate_clim_fraction=1.0):
-
     # Calculate magnitude and phase for the entire array
     magnitude = np.abs(array)
     phase = np.angle(array)
waveorder/waveorder_reconstructor.py
CHANGED
@@ -1,15 +1,15 @@
-import numpy as np
-import matplotlib.pyplot as plt
 import itertools
 import time
-import os
 import warnings
-
+
+import matplotlib.pyplot as plt
+import numpy as np
 from IPython import display
-from
-
-from .optics import *
+from numpy.fft import fft2, fftn, fftshift, ifft, ifft2, ifftn, ifftshift
+
 from .background_estimator import *
+from .optics import *
+from .util import *
 
 
 def intensity_mapping(img_stack):
@@ -1474,8 +1474,9 @@ class waveorder_microscopy:
                 torch.tensor(z.astype("complex64").transpose((2, 1, 0))),
                 torch.tensor(self.psz),
             )
-            return
-                (1, 2, 0)
+            return (
+                H_re.numpy().transpose((1, 2, 0)),
+                H_im.numpy().transpose((1, 2, 0)),
             )
 
         for i in range(self.N_pattern):
waveorder/waveorder_simulator.py
CHANGED
@@ -1,13 +1,13 @@
-import numpy as np
-import matplotlib.pyplot as plt
 import itertools
 import time
-import os
-import torch
-from numpy.fft import fft, ifft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
 from concurrent.futures import ProcessPoolExecutor
-
+
+import numpy as np
+import torch
+from numpy.fft import fft2, fftn, fftshift, ifft2, ifftn, ifftshift
+
 from .optics import *
+from .util import *
 
 
 def Jones_PC_forward_model(
{waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: waveorder
-Version: 2.2.0
+Version: 2.2.1b0
 Summary: Wave-optical simulations and deconvolution of optical properties
 Author-email: CZ Biohub SF <compmicro@czbiohub.org>
 Maintainer-email: Talon Chandler <talon.chandler@czbiohub.org>, Shalin Mehta <shalin.mehta@czbiohub.org>
@@ -62,6 +62,7 @@ Requires-Dist: torch>=2.4.1
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: black==25.1.0; extra == "dev"
 Provides-Extra: examples
 Requires-Dist: napari[all]; extra == "examples"
 Requires-Dist: jupyter; extra == "examples"
waveorder-2.2.1b0.dist-info/RECORD
ADDED
@@ -0,0 +1,27 @@
+waveorder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+waveorder/_version.py,sha256=GmZKo0FXy1i2hmm_Tns9LgX3RbtJXrUCizylCrNMoI4,513
+waveorder/background_estimator.py,sha256=GFy3N0qqp5M6JVZBbIUvTYhSin8Njg7zA2WN69pKLAE,12398
+waveorder/correction.py,sha256=uAWDKXq-FYwi1obxxWq0A-suNVf1cvqUnPsDC-LIlsM,3460
+waveorder/filter.py,sha256=BSNYTMYy-HrqtupcXMm9OzWPMlX_NF7KSNjeN0-0ghQ,7350
+waveorder/focus.py,sha256=ELI-bhtoodSFL7mKlXt0S8HqCc02gJMr7gCl3kYW5bE,6314
+waveorder/optics.py,sha256=RikR3kkPFoVUS86jiGQZey7Jtz5Ga-xKWnXsexo3Okc,44242
+waveorder/reconstruct.py,sha256=nF00-uxYF67QVcNClp1VIB31ElUfbjO3RNxbxc0ELH4,832
+waveorder/sampling.py,sha256=OAqlfjEemX6tQV2a2S8X0wQx9JCSykcvVxvKr1CY3H0,2521
+waveorder/stokes.py,sha256=G_hcqS-GbslVnCWmS13y77K1YZ8cvwIl-DDKyC84LSI,15184
+waveorder/util.py,sha256=K5wdR_da1MYSVHUSp0O-m_FzMKRX60_KXCv3IXtCgKA,71243
+waveorder/waveorder_reconstructor.py,sha256=-MluWmnZnCZm7Xu-8V8QWX9ma0_5oAZO2Xlyrg1bRes,152072
+waveorder/waveorder_simulator.py,sha256=uRRX_wcWzJzlVcfToLpIlh4e8Xt9NjTvdonyGEf2Z1c,45805
+waveorder/models/inplane_oriented_thick_pol3d.py,sha256=bx5yViNz7wY5BBXUea1Tw0RhYsEzErENRnAbgpa34C0,5992
+waveorder/models/inplane_oriented_thick_pol3d_vector.py,sha256=tAHkIV02CZ1DbjvikKON2bvkrRfR6jEiUenDhrTtdtI,9754
+waveorder/models/isotropic_fluorescent_thick_3d.py,sha256=mrai8u3aQ9TxI_TZZXFXZWei6g_ZcxkLxjlUDv1rHJA,7050
+waveorder/models/isotropic_thin_3d.py,sha256=ADn1py_-bkXKD9_Uw4av1xE-X-Y6Wyp3uSdaA9C7JPU,10956
+waveorder/models/phase_thick_3d.py,sha256=sOHphv6_SfarLxrhj4bcZwM-5bdjXS4-j6RCreOBTmA,8654
+waveorder/visuals/jupyter_visuals.py,sha256=6kxICjEtP1qc1EuETc_NJ6Y4A7nVaC-bP3wl_CQNPfg,58096
+waveorder/visuals/matplotlib_visuals.py,sha256=v1zi0ZlXEV5CcpNzTWL0vDJ2Md2-RSHnc3rAB61wimg,10915
+waveorder/visuals/napari_visuals.py,sha256=jgyKRlWqJv1PvCRWfpgqRUv6rwIsI4AXQGLL5IftPnM,2352
+waveorder/visuals/utils.py,sha256=QC5WSc2yzPMjk66IjA39iNFgO_0It6evma201hH8Lg4,1001
+waveorder-2.2.1b0.dist-info/LICENSE,sha256=auz4oGH1A-xZtoiR2zuXIk-Hii4v9aGgFVBqn7nfpms,1509
+waveorder-2.2.1b0.dist-info/METADATA,sha256=ZFf0LKaV4-RlN6AkT9K6_BzLqqG72i9wRPQDc9bscms,9724
+waveorder-2.2.1b0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+waveorder-2.2.1b0.dist-info/top_level.txt,sha256=i3zReXiiMTnyPk93W7aEz_oEfsLnfR_Kzl7PW7kUslA,10
+waveorder-2.2.1b0.dist-info/RECORD,,
waveorder-2.2.0.dist-info/RECORD
DELETED
@@ -1,25 +0,0 @@
-waveorder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-waveorder/_version.py,sha256=pbL_Q6fDSZl5UbKP04ZFdzrJpd1PO1gH7IwJCwLV7mk,411
-waveorder/background_estimator.py,sha256=gCIO6-232H0CGH4o6gnqW9KSYGOrXf5E9nD67WeF304,12399
-waveorder/correction.py,sha256=N0Ic6mqw3U7mqow4dKTOkNx2QYOLwedGNH7HiKV-M6s,3460
-waveorder/focus.py,sha256=4mg84Fe4V-oFplsuaU_VQU1_TEDoEfPggIAv6Is2dE4,6312
-waveorder/optics.py,sha256=Z0N9IN5AJ563eOISlFVJLS5K7lyfSjOcHVkzcLSKP1E,44325
-waveorder/sampling.py,sha256=OAqlfjEemX6tQV2a2S8X0wQx9JCSykcvVxvKr1CY3H0,2521
-waveorder/stokes.py,sha256=Wk9ZimzICIZLh1CkB0kQSCSBLeugkDeydwXTPd-M-po,15186
-waveorder/util.py,sha256=goT2OD6Zej4v3RT1tu_XAQtDWi94HiDlL4pJk2itt6s,71272
-waveorder/waveorder_reconstructor.py,sha256=SSSru4TOLB1VUOWMLHzMSboMbazgfslFXrjOpnwmqFk,152107
-waveorder/waveorder_simulator.py,sha256=_HCmDZkACUGzgwnaI-q0PjsL1gRE55IQuaWw-wtAjCU,45856
-waveorder/models/inplane_oriented_thick_pol3d.py,sha256=jEpMcAZ6tIg9Kg-lHpbH0vBAphSGZGUAqLyDc0hv_bs,5979
-waveorder/models/inplane_oriented_thick_pol3d_vector.py,sha256=09-Qu6Ka3S2GmkcmGIyVbQCYP1I_S9O1HvDBZDQ7AlQ,10817
-waveorder/models/isotropic_fluorescent_thick_3d.py,sha256=TuE5QlScy6pdCZyrWJifJ6UJMQT15lv8HhkNa_cQfFs,6713
-waveorder/models/isotropic_thin_3d.py,sha256=c-1eKknNWKxDiMNUi0wBf6YeXhUQiUR-zguH8M7En_k,10941
-waveorder/models/phase_thick_3d.py,sha256=2gq5pd6BPxDmkqnf9bvbOfraD2CeGBr0GU1s9cYBTms,8374
-waveorder/visuals/jupyter_visuals.py,sha256=w-vlMtfyl3I1ACNfYIW4fbS9TIMAVittNj3GbjlRYz4,58121
-waveorder/visuals/matplotlib_visuals.py,sha256=e-4LrHPFU--j3gbUoZrO8WHpDIYNZDFu8vqBZyhyiG4,10922
-waveorder/visuals/napari_visuals.py,sha256=gI420pgOzun4Elx__txdk1eEBcTIBB6Gpln-6n8Wo1k,2385
-waveorder/visuals/utils.py,sha256=6vdZmpvFGHwSwxeV8vCKWQ0MBOrDokSIJhjdBtJLHeM,1002
-waveorder-2.2.0.dist-info/LICENSE,sha256=auz4oGH1A-xZtoiR2zuXIk-Hii4v9aGgFVBqn7nfpms,1509
-waveorder-2.2.0.dist-info/METADATA,sha256=D7tFPrufseWBEz4-kPpR6pejV_GtA91ll7SsO4u3qas,9677
-waveorder-2.2.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-waveorder-2.2.0.dist-info/top_level.txt,sha256=i3zReXiiMTnyPk93W7aEz_oEfsLnfR_Kzl7PW7kUslA,10
-waveorder-2.2.0.dist-info/RECORD,,
{waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/LICENSE
File without changes
{waveorder-2.2.0.dist-info → waveorder-2.2.1b0.dist-info}/top_level.txt
File without changes