BOSlib-0.0.1-py3-none-any.whl
- BOSlib/__init__.py +9 -0
- BOSlib/culculate_refractiveindex.py +89 -0
- BOSlib/evaluation.py +344 -0
- BOSlib/reconstruction.py +167 -0
- BOSlib/reconstruction_utils.py +47 -0
- BOSlib/shift.py +126 -0
- BOSlib/shift_utils.py +306 -0
- BOSlib/utils.py +204 -0
- BOSlib-0.0.1.dist-info/LICENSE +674 -0
- BOSlib-0.0.1.dist-info/METADATA +84 -0
- BOSlib-0.0.1.dist-info/RECORD +13 -0
- BOSlib-0.0.1.dist-info/WHEEL +5 -0
- BOSlib-0.0.1.dist-info/top_level.txt +1 -0
BOSlib/shift.py
ADDED
@@ -0,0 +1,126 @@
from skimage.metrics import structural_similarity as ssm
import numpy as np
from PIL import Image
import BOSlib.shift_utils as ib

def SSIM(ref_array : np.ndarray, exp_array : np.ndarray):
    """
    Compute the inverted Structural Similarity Index (SSIM) difference matrix between two grayscale images.

    Parameters
    ----------
    ref_array : np.ndarray
        The reference grayscale image array.
    exp_array : np.ndarray
        The experimental grayscale image array.

    Returns
    -------
    np.ndarray
        The inverted SSIM difference matrix, where higher values indicate greater dissimilarity between the two images.
    """
    # Compute the structural similarity map of the two grayscale images
    (score, diff) = ssm(ref_array, exp_array, full=True)
    diff_inv = -diff
    return diff_inv

def SP_BOS(ref_array : np.ndarray, exp_array : np.ndarray, binarization : str = "HPfilter", thresh : int = 128, freq : int = 500):
    """
    Calculate the displacement map of stripe patterns in experimental images using the Background Oriented Schlieren (BOS) method.

    This function computes the relative displacement between stripes in a reference image and an experimental image while compensating for background movement and noise. The displacement map is obtained by resizing the images, binarizing them, detecting stripe boundaries, reducing noise, calculating the displacement, and compensating for background motion.

    Parameters
    ----------
    ref_array : np.ndarray
        The reference grayscale image array. This image represents the original, undisturbed pattern.
    exp_array : np.ndarray
        The experimental grayscale image array. This image represents the pattern after deformation due to external factors.
    binarization : str, optional, default="HPfilter"
        The method used for binarization of the images. Options are:
        - "thresh" : Use thresholding for binarization.
        - "HPfilter" : Use high-pass filtering for binarization.
    thresh : int, optional, default=128
        The threshold value used for binarization when `binarization="thresh"`. Pixels with values above the threshold are set to 1, and those below are set to 0.
    freq : int, optional, default=500
        The frequency parameter used for high-pass filtering when `binarization="HPfilter"`.

    Returns
    -------
    np.ndarray
        A 2D array representing the displacement map of the stripe patterns, with background movement compensated. Each value represents the relative displacement between the reference and experimental images, with noise and background displacements removed.

    Notes
    -----
    The method performs the following steps:
    1. Vertically stretches both the reference and experimental images by a factor of 10.
    2. Binarizes the images using either thresholding or high-pass filtering.
    3. Identifies the upper and lower boundaries of the stripes and calculates their centers for both images.
    4. Filters out noise by removing displacements larger than a certain threshold.
    5. Computes the displacement between the stripe centers.
    6. Compensates for background movement by subtracting the mean displacement over a reference region.
    """

    im_ref = Image.fromarray(ref_array)
    im_exp = Image.fromarray(exp_array)

    # Stretch the images vertically by a factor of 10
    im_ref = im_ref.resize((im_ref.size[0], im_ref.size[1] * 10))
    im_exp = im_exp.resize((im_exp.size[0], im_exp.size[1] * 10))

    ar_ref = np.array(im_ref)
    ar_exp = np.array(im_exp)

    if binarization == "thresh":
        # Binarization by thresholding
        bin_ref = ib._biner_thresh(ar_ref, thresh)
        bin_exp = ib._biner_thresh(ar_exp, thresh)

        print("Binarization", bin_ref.shape, bin_exp.shape)
    elif binarization == "HPfilter":
        # Binarization by high-pass filtering
        bin_ref = ib._biner_HP(ar_ref, freq)
        bin_exp = ib._biner_HP(ar_exp, freq)
        print("Binarization", bin_ref.shape, bin_exp.shape)
    else:
        raise ValueError('binarization must be "thresh" or "HPfilter"')

    # Detect the coordinates of the color boundaries in the binarized reference image
    ref_u, ref_d = ib._bin_indexer(bin_ref)
    ref_u = np.nan_to_num(ref_u)
    ref_d = np.nan_to_num(ref_d)
    print("bin_indexer_ref", ref_u.shape, ref_d.shape)
    # Detect the coordinates of the color boundaries in the binarized experimental image
    # u is the upper boundary of a white stripe, d is the lower boundary
    exp_u, exp_d = ib._bin_indexer(bin_exp)
    exp_u = np.nan_to_num(exp_u)
    exp_d = np.nan_to_num(exp_d)
    print("bin_indexer_exp", exp_u.shape, exp_d.shape)

    # Remove data with abnormally large displacements as noise
    ref_u, exp_u = ib._noize_reducer_2(ref_u, exp_u, 10)
    ref_d, exp_d = ib._noize_reducer_2(ref_d, exp_d, 10)
    print("noize_reducer_2", exp_u.shape, exp_d.shape)
    print("noize_reducer_2", ref_u.shape, ref_d.shape)

    # Combine the upper and lower boundary data to calculate the center of each stripe
    ref = ib._mixing(ref_u, ref_d)
    exp = ib._mixing(exp_u, exp_d)

    print("mixing", ref.shape, exp.shape)

    # Calculate the displacement (upward displacement is positive)
    diff = -(exp - ref)

    # Rearrange the displacement values into the correct positions and interpolate gaps
    diff_comp = ib._complementer(ref, diff)

    print("complementer", diff_comp.shape)

    # Remove the overall background movement by subtracting the mean displacement over a reference region
    diff_comp = diff_comp - np.nanmean(diff_comp[0:1000, 10:100])

    return diff_comp
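For orientation, a minimal usage sketch of the two public functions in this module. The file names and the use of Pillow to load the images are illustrative assumptions, not part of the package:

# Minimal usage sketch (assumed file names; any 8-bit grayscale arrays of equal shape work)
import numpy as np
from PIL import Image
import BOSlib.shift as shift

ref_array = np.array(Image.open("reference.png").convert("L"))   # undisturbed stripe background
exp_array = np.array(Image.open("experiment.png").convert("L"))  # background seen through the flow

ssim_map = shift.SSIM(ref_array, exp_array)                      # inverted SSIM map; larger values = more dissimilar
displacement = shift.SP_BOS(ref_array, exp_array,
                            binarization="HPfilter", freq=500)   # stripe displacement map in pixels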
BOSlib/shift_utils.py
ADDED
@@ -0,0 +1,306 @@
import numpy as np
from PIL import Image
import pandas as pd
from scipy import signal

def _biner_thresh(ar_in: np.ndarray, thresh: int) -> np.ndarray:
    """
    Binarize an array based on a threshold value.

    Parameters
    ----------
    ar_in : np.ndarray
        Input array to be binarized.
    thresh : int
        Threshold value for binarization.

    Returns
    -------
    np.ndarray
        Binarized array where values above the threshold are `True` and values below are `False`.
    """
    ar_bin = ar_in > thresh
    return ar_bin

def _bin_indexer(ar_in: np.ndarray) -> tuple:
    """
    Detect color boundary coordinates in a binarized image by finding gradient edges.

    Parameters
    ----------
    ar_in : np.ndarray
        Input binarized image array.

    Returns
    -------
    tuple
        Arrays containing the y-coordinates of the detected upper and lower boundaries of the stripes.
    """
    # Convert to int for differentiation
    ar_in = ar_in.astype(np.int8)
    ar2 = np.delete(ar_in, 0, 0)
    ar3 = np.delete(ar_in, ar_in.shape[0] - 1, 0)
    ar4 = ar2 - ar3

    # Detect positive gradients (upper boundaries)
    u_tuple = np.where(ar4 > 0)
    u_index = np.stack([u_tuple[1], u_tuple[0]]).T

    # Convert to a DataFrame for processing
    df = pd.DataFrame(u_index, columns=["X", "Y"])
    df = df.pivot(index="Y", columns="X", values="Y")

    # Group by index to average the boundary positions
    df["index"] = df.index / 10
    df["index"] = df[["index"]].astype(int)
    df = df.groupby("index").mean() / 10
    df.columns = range(df.shape[1])
    u_index = np.array(df)

    # Initialize an array to store the upper boundary positions
    u_index_2 = np.zeros([1000, ar_in.shape[1]])

    # Process each column, pushing NaN values to the end
    for x in range(ar_in.shape[1]):
        ar_loop = u_index[:, x][np.where(~np.isnan(u_index[:, x]))[0]]
        u_index_2[:, x] = np.concatenate([ar_loop, np.full(1000 - ar_loop.shape[0], np.nan)])

    # Detect negative gradients (lower boundaries)
    d_tuple = np.where(ar4 < 0)
    d_index = np.stack([d_tuple[1], d_tuple[0]]).T

    # Process the lower boundaries in the same way as the upper ones
    df = pd.DataFrame(d_index, columns=["X", "Y"])
    df = df.pivot(index="Y", columns="X", values="Y")
    df["index"] = df.index / 10
    df["index"] = df[["index"]].astype(int)
    df = df.groupby("index").mean() / 10
    df.columns = range(df.shape[1])
    d_index = np.array(df)

    # Initialize an array for the lower boundary positions
    d_index_2 = np.zeros([1000, ar_in.shape[1]])

    # Process each column, pushing NaN values to the end
    for x in range(ar_in.shape[1]):
        ar_loop = d_index[:, x][~np.isnan(d_index[:, x])]
        d_index_2[:, x] = np.concatenate([ar_loop, np.full(1000 - ar_loop.shape[0], np.nan)])

    return u_index_2, d_index_2

def _noize_reducer_2(ar_ref: np.ndarray, ar_exp: np.ndarray, diff_thresh: int) -> tuple:
    """
    Remove noise by aligning arrays based on a displacement threshold.

    Parameters
    ----------
    ar_ref : np.ndarray
        Reference array.
    ar_exp : np.ndarray
        Experimental array.
    diff_thresh : int
        Threshold for detecting displacement.

    Returns
    -------
    tuple
        Arrays with noise filtered based on the displacement threshold.
    """
    for x in range(ar_ref.shape[1]):
        ref = ar_ref[:, x]
        exp = ar_exp[:, x]

        while np.any(abs(exp - ref) > diff_thresh):
            if np.any(exp - ref > diff_thresh):
                y = np.where(exp - ref > diff_thresh)[0].min()
                ref = np.delete(ref, y)
                ref = np.insert(ref, ref.shape[0], np.nan)

            if np.any(exp - ref < -diff_thresh):
                y = np.where(exp - ref < -diff_thresh)[0].min()
                exp = np.delete(exp, y)
                exp = np.insert(exp, exp.shape[0], np.nan)

        ar_ref[:, x] = ref
        ar_exp[:, x] = exp

    return ar_ref, ar_exp

def _mixing(u_ar: np.ndarray, d_ar: np.ndarray) -> np.ndarray:
    """
    Calculate the center positions between upper and lower boundaries.

    Parameters
    ----------
    u_ar : np.ndarray
        Array of upper boundary coordinates.
    d_ar : np.ndarray
        Array of lower boundary coordinates.

    Returns
    -------
    np.ndarray
        Array with center positions between boundaries.
    """
    ar = np.full([u_ar.shape[0] * 2, u_ar.shape[1]], np.nan)
    ar[::2] = u_ar
    ar[1::2] = d_ar
    ar2 = np.delete(ar, 0, 0)
    ar3 = np.delete(ar, ar.shape[0] - 1, 0)
    ar = (ar2 + ar3) / 2

    return ar

def _complementer(ref_ar: np.ndarray, diff_ar: np.ndarray) -> np.ndarray:
    """
    Rearrange displacement data to correct positions and interpolate gaps.

    Parameters
    ----------
    ref_ar : np.ndarray
        Reference array containing stripe positions.
    diff_ar : np.ndarray
        Array of displacement values.

    Returns
    -------
    np.ndarray
        Compensated displacement array with interpolated gaps.
    """
    max_ar = int(np.nanmax(ref_ar))
    diff_2 = np.vstack([np.full(max_ar, -1), range(max_ar), np.zeros(max_ar)]).T

    for x in range(ref_ar.shape[1]):
        ar_loop = np.vstack([np.full_like(ref_ar[:, x], x), ref_ar[:, x], diff_ar[:, x]]).T
        ar_loop = ar_loop[~np.isnan(ar_loop).any(axis=1)]
        diff_2 = np.concatenate([diff_2, ar_loop])

    diff_2[:, 1] = diff_2[:, 1].astype(int)
    diff_df = pd.DataFrame(diff_2)
    diff_df = diff_df.pivot_table(columns=0, index=1, values=2)
    diff_df = diff_df.interpolate(limit=50)

    diff_comp = diff_df.values

    return diff_comp

def _stretch_image_vertically(image: np.ndarray, scale_factor: int) -> np.ndarray:
    """
    Stretch a grayscale image vertically by a given scale factor.

    Parameters
    ----------
    image : np.ndarray
        Input grayscale image as a 2D numpy array.
    scale_factor : int
        The factor by which to stretch the image vertically.

    Returns
    -------
    np.ndarray
        Vertically stretched image.
    """
    # Verify that the input image is 2D
    if image.ndim != 2:
        raise ValueError("Input image must be a 2D numpy array.")

    # Vertically stretch the image by repeating each row 'scale_factor' times
    stretched_image = np.repeat(image, scale_factor, axis=0)

    return stretched_image

def _cycle(ref_array: np.ndarray):
    """
    Calculate the cycle length of stripes in a reference image.

    Parameters
    ----------
    ref_array : np.ndarray
        The reference image to be analyzed, provided as a 2D numpy array.

    Returns
    -------
    float
        The calculated cycle length based on the detected boundaries in the reference image.
    """

    # Vertically stretch the reference image by a factor of 10
    im_ref = Image.fromarray(ref_array)
    im_ref = im_ref.resize((im_ref.size[0], im_ref.size[1] * 10))

    # Convert the stretched image to a numpy array
    ar_ref = np.array(im_ref)

    # Binarize the stretched image using a threshold of 128
    bin_ref = _biner_thresh(ar_ref, 128)

    # Detect upper and lower boundaries in the binarized image
    ref_u, ref_d = _bin_indexer(bin_ref)

    # Mix the upper and lower boundary coordinates to find the midpoints
    ref = _mixing(ref_u, ref_d)

    # Calculate the intervals between midpoints by taking differences
    ref_interbal = np.delete(ref, 0, 0) - np.delete(ref, ref.shape[0] - 1, 0)

    # Count the number of valid intervals (non-NaN values)
    count = np.count_nonzero(~np.isnan(ref_interbal[:, 0]))

    # Trim the intervals array to exclude unnecessary values
    ref_interbal = ref_interbal[0:count - 2]

    # Calculate the cycle length as twice the average interval length,
    # which accounts for both peaks and valleys in the detected boundaries
    cycle = np.nanmean(ref_interbal) * 2

    return cycle

def _biner_HP(ar_in, cycle):
    """Binarize an image by high-pass filtering each column along the vertical axis."""
    # Filter parameters
    sample_freq = 1          # Sampling frequency [1/px]
    cutoff_freq = 1 / cycle  # Cut-off frequency [1/px]
    filter_order = 8         # Filter order

    # Build the high-pass filter
    sos_high = signal.butter(filter_order, cutoff_freq, 'highpass', output='sos', fs=sample_freq)
    #sos_low = signal.butter(filter_order, cutoff_freq, 'lowpass', output='sos', fs=sample_freq)

    # Output array (explicitly signed 16-bit; otherwise the dtype silently becomes unsigned)
    ar_h = np.zeros_like(ar_in).astype(np.int16)

    # Process one column at a time
    for x in range(ar_in.shape[1]):
        ar_x = ar_in[:, x]

        # Apply the filter
        ar_h[:, x] = signal.sosfiltfilt(sos_high, ar_x)
        #yf_l = signal.sosfiltfilt(sos_low, y)

    #plt.plot(yf_h[100:1500])
    ar_bin = ar_h > 0
    #y_bin = pd.DataFrame(y_bin*1)

    #if ar_bin[0,:].sum()==0 or ar_bin[0,:].sum()==ar_bin.shape[1]:
    # Flip the leading run of each column so that every column starts in the same binary state
    first_state = ar_bin[0, 0]
    for x in range(ar_bin.shape[1]):
        y = 0
        state = ar_bin[0, x] != first_state

        while state == False:
            ar_bin[y, x] = 1 - ar_bin[y, x]
            y = y + 1
            state = ar_bin[y, x] != first_state

    # Mask the noisy rows at the top edge
    x = 0
    count = np.nan
    while count != 0:
        count = ar_bin[x - 1, :].sum()
        ar_bin[x, :] = 0
        x = x + 1

    return ar_bin
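These helpers are internal to SP_BOS, but their interplay is easier to see on a synthetic stripe image. A small sketch follows; the image size, stripe width, and the synthetic pattern standing in for a real background photograph are assumptions for illustration only:

# Sketch of the helper pipeline on a synthetic 200 x 64 px image with 10 px stripes (assumed sizes)
import numpy as np
import BOSlib.shift_utils as ib

stripes = np.tile(np.repeat(np.array([0, 255], dtype=np.uint8), 10), 10)   # one column: 10 px black / 10 px white
img = np.repeat(stripes[:, None], 64, axis=1)                              # 200 x 64 stripe image

img10 = ib._stretch_image_vertically(img, 10)   # x10 vertical stretch (SP_BOS does this with a PIL resize)
bin_img = ib._biner_thresh(img10, 128)          # boolean stripe mask
upper, lower = ib._bin_indexer(bin_img)         # y-coordinates of rising/falling stripe edges per column
centers = ib._mixing(upper, lower)              # stripe center lines between the edges
period = ib._cycle(img)                         # stripe period in pixels (about 20 here)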
BOSlib/utils.py
ADDED
@@ -0,0 +1,204 @@
import numpy as np
import BOSlib.shift_utils as ib
from tqdm import tqdm, trange
import matplotlib.pyplot as plt

def shift2angle(shift: np.ndarray, ref_array: np.ndarray, sensor_pitch: float, resolution_of_background: float, Lb: float, Lci: float, binarization: str = "HPfilter", thresh: int = 128, freq: int = 500):
    """
    Convert the background image displacement to the angle of light refraction.

    Parameters
    ----------
    shift : np.ndarray
        Displacement values from the background image.
    ref_array : np.ndarray
        Reference image array used for the calculation.
    sensor_pitch : float
        The pitch of the image sensor in mm.
    resolution_of_background : float
        The resolution of the background pattern, given as the number of line pairs per mm.
    Lb : float
        Distance from the background to the object being captured (mm).
    Lci : float
        Distance from the image sensor to the object being captured (mm).
    binarization : str, optional, default="HPfilter"
        The method used for binarization of the images. Options are:
        - "thresh" : Use thresholding for binarization.
        - "HPfilter" : Use high-pass filtering for binarization.
    thresh : int, optional, default=128
        The threshold value used for binarization when `binarization="thresh"`. Pixels with values above the threshold are set to 1, and those below are set to 0.
    freq : int, optional, default=500
        The frequency parameter used for high-pass filtering when `binarization="HPfilter"`.

    Returns
    -------
    tuple
        - angle : np.ndarray
            The calculated angles of light refraction.
        - Lc : float
            The distance from the object to the lens.
        - Li : float
            The distance from the lens to the image sensor.
        - projection_ratio : float
            The projection (magnification) ratio of the imaging setup.
    """
    Lb = Lb * 10**-3
    Lci = Lci * 10**-3

    # Size of one line pair (LP) in pixels
    dpLP = ib._cycle(ref_array)

    sensor_pitch = sensor_pitch * 10**-3  # Convert sensor pitch from mm to m
    BGmpLP = 1 / resolution_of_background * 10**-3  # Convert pattern resolution from LP/mm to m/LP

    # Size of one LP on the projection plane (m/LP)
    mpLP = dpLP * sensor_pitch

    # Magnification of the imaging
    projection_ratio = mpLP / BGmpLP

    # Total length
    Lbi = Lci + Lb

    Lc = Lbi / (projection_ratio + 1) - Lb  # Distance from the object to the lens
    Li = Lci - Lc  # Distance from the lens to the image sensor

    # Calculate the refraction angle from the shift and the projection geometry
    angle = shift * (sensor_pitch) / (projection_ratio * Lb)
    np.nan_to_num(angle, copy=False)  # Replace NaN values with zero in the angle array

    return angle, Lc, Li, projection_ratio

def get_gladstone_dale_constant(temperature, pressure, humidity):
    """
    Calculate the Gladstone-Dale constant from temperature, pressure, and humidity without using metpy.

    Parameters
    ----------
    temperature : float
        Temperature in degrees Celsius (°C).
    pressure : float
        Pressure in hectopascals (hPa).
    humidity : float
        Relative humidity as a percentage (%).

    Returns
    -------
    tuple
        - G : float
            The calculated Gladstone-Dale constant.
        - density : float
            The density of the humid air.
    """

    # Constants
    R_dry = 287.058  # Specific gas constant for dry air, J/(kg·K)
    R_water_vapor = 461.495  # Specific gas constant for water vapor, J/(kg·K)

    # Convert input values
    T_kelvin = temperature + 273.15  # Convert temperature to Kelvin
    p_pa = pressure * 100  # Convert pressure to Pascals
    e_saturation = 6.1078 * 10 ** ((7.5 * temperature) / (237.3 + temperature))  # Saturation vapor pressure in hPa
    e_actual = e_saturation * (humidity / 100)  # Actual vapor pressure in hPa
    p_dry = p_pa - e_actual * 100  # Partial pressure of dry air in Pa

    # Calculate densities
    density_dry = p_dry / (R_dry * T_kelvin)  # Density of dry air
    density_vapor = (e_actual * 100) / (R_water_vapor * T_kelvin)  # Density of water vapor

    # Total density of humid air
    density_air = density_dry + density_vapor

    # Gladstone-Dale constant calculation
    n_air = 1.0003  # Refractive index of air
    G = (n_air - 1) / density_air

    return G, density_air

def _compute_laplacian_chunk(array_chunk):
    """
    Compute the Laplacian for a chunk of an array.

    Parameters
    ----------
    array_chunk : ndarray
        A chunk of the original array, assumed to be 3D.

    Returns
    -------
    laplacian_chunk : ndarray
        The Laplacian of the input array chunk.
    """
    grad_yy = np.gradient(array_chunk, axis=1)
    grad_zz = np.gradient(array_chunk, axis=2)
    laplacian_chunk = grad_yy + grad_zz
    return laplacian_chunk

def compute_laplacian_in_chunks(array, chunk_size):
    """
    Compute the Laplacian of a 3D array in chunks to reduce memory usage.

    Parameters
    ----------
    array : ndarray
        The 3D input array for which the Laplacian is calculated.
    chunk_size : int
        The size of each chunk along each dimension.

    Returns
    -------
    laplacian : ndarray
        The computed Laplacian of the input array.
    """
    # Get the shape of the input array
    shape = array.shape

    # Create an array to store the result
    laplacian = np.zeros_like(array)

    # Process each chunk
    for i in trange(0, shape[0], chunk_size):
        for j in range(0, shape[1], chunk_size):
            for k in range(0, shape[2], chunk_size):
                # Extract the current chunk
                chunk = array[i:i+chunk_size, j:j+chunk_size, k:k+chunk_size]

                # Compute the Laplacian for the chunk
                laplacian_chunk = _compute_laplacian_chunk(chunk)

                # Store the result in the corresponding position of the output array
                laplacian[i:i+chunk_size, j:j+chunk_size, k:k+chunk_size] = laplacian_chunk

    return laplacian


def stripe_generator(width: int, height: int, stripe_width: int):
    """
    Generate a horizontal stripe pattern image and save it as a PNG file.

    Args:
        width (int): The width of the image in pixels.
        height (int): The height of the image in pixels.
        stripe_width (int): The width of each stripe in pixels.

    Returns:
        None
    """

    # Generate the horizontal stripe pattern
    image = np.zeros((height, width), dtype=np.uint8)
    for i in range(height):
        if (i // stripe_width) % 2 == 0:
            image[i, :] = 255  # White (monochrome: 255)

    # Display the image
    plt.imshow(image, cmap='binary', interpolation='nearest')
    plt.axis('off')
    plt.show()

    # Save the image
    plt.imsave(f'horizontal_stripes{stripe_width}px.png', image, cmap='binary')