opensfdi 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,22 @@
1
+ Metadata-Version: 2.1
2
+ Name: opensfdi
3
+ Version: 0.1.0
4
+ Summary:
5
+ Author: Daniel Weston
6
+ Author-email: psydw2@nottingham.ac.uk
7
+ Requires-Python: ==3.11.7
8
+ Classifier: Programming Language :: Python :: 3
9
+ Requires-Dist: imageio (>=2.34.2,<3.0.0)
10
+ Requires-Dist: matplotlib (>=3.9.1,<4.0.0)
11
+ Requires-Dist: numpy (>=2.0.1,<3.0.0)
12
+ Requires-Dist: numpy-stl (>=3.1.1,<4.0.0)
13
+ Requires-Dist: opencv-python (>=4.10.0.84,<5.0.0.0)
14
+ Requires-Dist: pillow (>=10.4.0,<11.0.0)
15
+ Requires-Dist: scikit-image (>=0.24.0,<0.25.0)
16
+ Requires-Dist: scikit-learn (>=1.5.1,<2.0.0)
17
+ Requires-Dist: scipy (>=1.14.0,<2.0.0)
18
+ Description-Content-Type: text/markdown
19
+
20
+ # OPTIMlab-benchtop-sfdi
21
+
22
+ TODO
@@ -0,0 +1,3 @@
1
+ # OPTIMlab-benchtop-sfdi
2
+
3
+ TODO
@@ -0,0 +1,22 @@
1
+ [tool.poetry]
2
+ name = "opensfdi"
3
+ version = "0.1.0"
4
+ description = ""
5
+ authors = ["Daniel Weston <psydw2@nottingham.ac.uk>"]
6
+ readme = "README.md"
7
+
8
+ [tool.poetry.dependencies]
9
+ python = "3.11.7"
10
+ matplotlib = "^3.9.1"
11
+ scipy = "^1.14.0"
12
+ numpy = "^2.0.1"
13
+ numpy-stl = "^3.1.1"
14
+ imageio = "^2.34.2"
15
+ opencv-python = "^4.10.0.84"
16
+ pillow = "^10.4.0"
17
+ scikit-learn = "^1.5.1"
18
+ scikit-image = "^0.24.0"
19
+
20
+ [build-system]
21
+ requires = ["poetry-core"]
22
+ build-backend = "poetry.core.masonry.api"
@@ -0,0 +1,99 @@
1
+ from skimage.restoration import unwrap_phase
2
+
3
+ import numpy as np
4
+ import cv2
5
+ from matplotlib import pyplot as plt
6
+
7
+ import logging
8
+ import sys
9
+ import argparse
10
+
11
# Parse only the --debug flag; parse_known_args() ignores any other CLI
# arguments so the package can be imported by scripts with their own parsers.
parser = argparse.ArgumentParser()

parser.add_argument('--debug', action='store_true')

args, unknown = parser.parse_known_args()

args = vars(args)

# Package-wide debug switch, set from the command line at import time
DEBUG = args["debug"]

# Package-wide logger writing to stdout; DEBUG level when --debug was passed
logger = logging.getLogger('opensfdi')

#formatter = logging.Formatter(fmt='%(threadName)s:%(message)s')
formatter = logging.Formatter(fmt='[%(levelname)s] %(message)s')

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)

logger.addHandler(handler)
logger.setLevel(logging.DEBUG if DEBUG else logging.INFO)
31
+
32
def show_surface(data):
    """Render a 2D array (or the channel mean of a 3D array) as a 3D surface."""
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')

    # plot_surface expects 2D x/y grids matching the data shape
    grid_x, grid_y = np.meshgrid(range(len(data[0])), range(len(data)))
    surface = np.mean(data, axis=2) if data.ndim == 3 else data
    axes.plot_surface(grid_x, grid_y, surface)

    plt.show()
42
+
43
def display_image(img, grey=False, title='', vmin=0.0, vmax=1.0):
    """Show an image with matplotlib.

    Greyscale images are shown with the 'gray' colormap; colour images are
    converted BGR->RGB, min-max normalised to [0, 1] and shown with 'jet'.
    """
    cmap = 'gray' if grey else 'jet'

    if not grey:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.normalize(img, None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)

    plt.imshow(img, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.title(title)
    plt.show()
54
+
55
def centre_crop_img(img, x1, y1, x2:int = 0, y2:int = 0):
    """Crop img[y1:y2, x1:x2].

    A zero end index mirrors the start margin (negative index), or spans the
    whole axis when the start margin is 0 too.
    """
    rows, cols = img.shape[0], img.shape[1]
    end_x = x2 if x2 != 0 else (cols if x1 == 0 else -x1)
    end_y = y2 if y2 != 0 else (rows if y1 == 0 else -y1)
    return img[y1:end_y, x1:end_x]
63
+
64
def normalise_image(img):
    """Linearly rescale img so its values span [0, 1]."""
    lo, hi = img.min(), img.max()
    return (img - lo) / (hi - lo)
66
+
67
def rgb2grey(img):
    """Convert an RGB image (channels last) to greyscale with BT.601 luma weights."""
    red = img[:, :, 0]
    green = img[:, :, 1]
    blue = img[:, :, 2]
    return red * 0.2989 + green * 0.5870 + blue * 0.1140
70
+
71
def unwrapped_phase(phi_imgs):
    # Unwrap a wrapped phase map using skimage's phase unwrapper.
    return unwrap_phase(phi_imgs)
73
+
74
def wrapped_phase(imgs):
    """Compute the wrapped phase map of N phase-shifted fringe images.

    Standard N-step formula: -atan2(sum I_k*sin(d_k), sum I_k*cos(d_k)) with
    shifts d_k = 2*pi*k/N.
    """
    n = len(imgs)
    sin_acc = np.zeros(imgs[0].shape, dtype=np.float32)
    cos_acc = np.zeros(imgs[0].shape, dtype=np.float32)

    for k, img in enumerate(imgs):
        shift = (2.0 * np.pi * k) / n
        sin_acc = np.add(sin_acc, img * np.sin(shift), dtype=np.float32)
        cos_acc = np.add(cos_acc, img * np.cos(shift), dtype=np.float32)

    return np.negative(np.arctan2(sin_acc, cos_acc), dtype=np.float32)
84
+
85
def ac_imgs(imgs: list):
    """Pixel-wise arithmetic mean of the image stack."""
    return np.sum(imgs, axis=0) / len(imgs)
87
+
88
def dc_imgs(imgs: list):
    """Per-pixel modulation amplitude (2/N) * sqrt(S^2 + C^2) of an N-step stack.

    NOTE(review): despite the name this is the AC (modulation) amplitude of the
    N-step formula, while ac_imgs computes the mean — the names look swapped;
    confirm before renaming anything callers depend on.
    """
    n = len(imgs)
    sin_acc = np.zeros(imgs[0].shape, dtype=np.float32)
    cos_acc = np.zeros(imgs[0].shape, dtype=np.float32)

    for k, img in enumerate(imgs):
        shift = (2.0 * np.pi * k) / n

        sin_acc = sin_acc + img * np.sin(shift)
        cos_acc = cos_acc + img * np.cos(shift)

    return (2.0 / n) * np.sqrt(sin_acc * sin_acc + cos_acc * cos_acc)
@@ -0,0 +1,200 @@
1
+ import cv2
2
+ import logging
3
+ import numpy as np
4
+
5
+ import matplotlib.pyplot as plt
6
+
7
+ from opensfdi import DEBUG
8
+ from opensfdi import rgb2grey, display_image
9
+ from opensfdi.definitions import CALIBRATION_DIR
10
+ from opensfdi.io.std import Serializable
11
+
12
+ from abc import ABC, abstractmethod
13
+
14
def apply_correction(img, coeffs, x1=0.0, x2=1.0):
    """Apply a polynomial intensity correction and clamp the result to [x1, x2]."""
    corrected = np.poly1d(coeffs)(img)

    # Clamp both tails in one pass instead of two boolean-mask assignments
    return np.clip(corrected, x1, x2)
23
+
24
class Calibration(Serializable, ABC):
    """Base class for calibration routines; subclasses implement calibrate()."""

    def __init__(self):
        # Shared package logger
        self.logger = logging.getLogger("opensfdi")

    @abstractmethod
    def calibrate(self):
        raise NotImplementedError
31
+
32
class GammaCalibration(Calibration):
    """Calibrates the intensity (gamma) response of a camera/projector pair.

    Projects a series of flat frames of increasing intensity, measures the mean
    camera response over a central region of interest, and fits a polynomial
    mapping measured intensity -> projected intensity.
    """

    def __init__(self, camera, projector, delta, crop_size=0.25, order=5, intensity_count=32):
        super().__init__()

        self.camera = camera
        self.projector = projector

        self._delta = delta                      # minimum step treated as a detectable intensity change
        self._crop_size = crop_size              # fraction of frame width used for the central ROI
        self._order = order                      # polynomial fit order
        self._intensity_count = intensity_count  # number of projected intensity levels

        self.coeffs = None   # fitted polynomial coefficients (highest order first)
        self.visible = None  # projected intensities over which the response is detectable

    def calibrate(self):
        """Project flat frames, measure the response curve and fit the polynomial.

        Returns (coeffs, visible). Sets self.coeffs / self.visible as a side effect.
        """
        intensities = np.linspace(0.0, 1.0, self._intensity_count, dtype=np.float32)
        w, h = self.camera.resolution
        self.projector.imgs = np.array([(np.ones((h, w, 3), dtype=np.float32) * i) for i in intensities]) # 3 channels for rgb

        # Need to get some images
        captured_imgs = np.empty((self._intensity_count,), dtype=np.ndarray)

        # Capture all of the images
        for i in range(self._intensity_count):
            self.projector.display()
            captured_imgs[i] = self.camera.capture()

        cap_height, cap_width, _ = captured_imgs[0].shape

        # Calculate region of interest values
        # NOTE(review): roi is derived from the frame WIDTH but also used for
        # the row (height) range below — for wide frames / large crop_size this
        # can index outside the image. Confirm intended.
        roi = int(cap_width * self._crop_size)
        mid_height = int(cap_height / 2)
        mid_width = int(cap_width / 2)
        rows = [x + mid_height for x in range(-roi, roi)]
        cols = [x + mid_width for x in range(-roi, roi)]

        # Calculate average pixel value for each image
        averages = [np.mean(x[rows, cols]) for x in captured_imgs]

        # Find first observable change of values for averages (left and right sides) i.e >= delta
        s, f = self._detectable_indices(averages, self._delta)

        # NOTE(review): if no change >= delta exists, s/f are None and the
        # slices below silently misbehave — consider validating.
        vis_averages = averages[s:f+1]
        vis_intensities = intensities[s:f+1]

        self.coeffs = np.polyfit(vis_averages, vis_intensities, self._order)
        self.visible = intensities[s:f+1]

        if DEBUG: # Plot results
            plt.plot(vis_averages, vis_intensities, 'o')
            trendpoly = np.poly1d(self.coeffs)
            plt.title('Gamma Calibration Curve Results')

            plt.xlabel("Measured")
            plt.ylabel("Actual")

            plt.plot(vis_averages, trendpoly(vis_averages))
            plt.show()

        return self.coeffs, self.visible

    def serialize(self):
        # Calibration results as plain lists (pickle/JSON friendly)
        return {
            "coeffs" : self.coeffs.tolist(),
            "visible_intensities" : self.visible.tolist()
        }

    def deserialize(self):
        return None

    def _detectable_indices(self, values, delta):
        # Scan inward from both ends for the first adjacent pair differing by
        # at least delta; returns (start, finish), either may be None if no
        # such pair is found.
        start = finish = None

        for i in range(1, len(values) - 1):
            x1 = values[i - 1]
            x2 = values[i]

            y1 = values[len(values) - i - 1]
            y2 = values[len(values) - i]

            if not start and abs(x1 - x2) >= delta:
                start = i

            if not finish and abs(y1 - y2) >= delta:
                finish = len(values) - i - 1

        return start, finish
120
+
121
class CameraCalibration(Calibration):
    """Lens calibration for a single camera using OpenCV checkerboard detection.

    Captures img_count checkerboard frames, detects inner corners, and solves
    for the camera matrix, distortion coefficients and an optimal new camera
    matrix.
    """

    def __init__(self, camera, img_count=10, cb_size=(8, 6)):
        super().__init__()

        self.camera = camera

        self._cb_size = cb_size  # checkerboard squares (cols, rows); inner corners are one less each way

        if img_count < 10:
            raise Exception("10 or more images are required to calibrate cameras")

        self._img_count = img_count

        self.cam_mat = self.dist_mat = self.optimal_mat = None

    def on_checkboard_change(self, i):
        '''
        (i) = Image number just completed (doesn't take into account multiple cameras)

        Hook for subclasses, e.g. to prompt the user to reposition the board.
        '''
        pass

    def calibrate(self):
        """Capture images and run OpenCV camera calibration.

        Returns (cam_mat, dist_mat, optimal_mat); raises if OpenCV cannot
        produce a calibration from the captured images.
        """
        self.logger.info(f"Using {self._img_count} checkerboard images to calibrate {self.camera.name}")

        imgs = np.empty((self._img_count), dtype=np.ndarray)

        for i in range(len(imgs)):
            imgs[i] = self.camera.capture()

            # BUG FIX: previously called self.on_checkerboard_change(i), which
            # does not exist (the hook is named on_checkboard_change) and
            # raised AttributeError on every capture.
            self.on_checkboard_change(i)

        CHECKERBOARD = (self._cb_size[0] - 1, self._cb_size[1] - 1)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        threedpoints = []
        twodpoints = []

        # Planar checkerboard model points (z = 0)
        objectp3d = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
        objectp3d[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)

        grey_img = None

        for i, image in enumerate(imgs):
            image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
            grey_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            ret, corners = cv2.findChessboardCorners(grey_img, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)

            if ret:
                threedpoints.append(objectp3d)

                # Refine corner locations to sub-pixel accuracy
                corners2 = cv2.cornerSubPix(grey_img, corners, (11, 11), (-1, -1), criteria)

                twodpoints.append(corners2)
            else:
                self.logger.warning(f'{self.camera.name} failed to find checkerboard on image {i}')

        h, w = imgs[0].shape[:2]

        ret, self.cam_mat, self.dist_mat, r_vecs, t_vecs = cv2.calibrateCamera(threedpoints, twodpoints, grey_img.shape[::-1], None, None)

        # BUG FIX: was a bare `raise` outside an except block, which is a
        # RuntimeError ("No active exception to re-raise") rather than a
        # meaningful failure message.
        if not ret:
            raise Exception(f"Camera calibration failed for {self.camera.name}")

        self.optimal_mat, roi = cv2.getOptimalNewCameraMatrix(self.cam_mat, self.dist_mat, (w, h), 1, (w, h))

        return self.cam_mat, self.dist_mat, self.optimal_mat

    def serialize(self):
        # Calibration results as plain lists (pickle/JSON friendly)
        return {
            "checkerboard_size" : self._cb_size,
            "image_count" : self._img_count,
            "cam_mat" : self.cam_mat.tolist(),
            "dist_mat" : self.dist_mat.tolist(),
            "optimal_mat" : self.optimal_mat.tolist()
        }

    def deserialize(self):
        return None
@@ -0,0 +1,43 @@
1
+ import os
2
+
3
+ from pathlib import Path
4
+
5
# Filesystem layout used by the package. These names are rebound by
# update_root(), so import the module (not the names) to see current values.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) # Root of the entire codebase

DATA_DIR = os.path.join(ROOT_DIR, 'data') # IO data location

RESULTS_DIR = os.path.join(DATA_DIR, "results") # Directory for results to be written to

FRINGES_DIR = os.path.join(DATA_DIR, "fringes") # Fringes are to be used from this directory

CALIBRATION_DIR = os.path.join(DATA_DIR, "calibration") # Location where calibration data is dumped
14
+
15
def make_structure():
    """Create the data directory tree (root, data, results, fringes, calibration).

    Order matters: parents are created before their children, and existing
    directories are left untouched.
    """
    for directory in (ROOT_DIR, DATA_DIR, RESULTS_DIR, FRINGES_DIR, CALIBRATION_DIR):
        Path(directory).mkdir(exist_ok=True)
27
+
28
def update_root(new_root, mkdirs=True):
    """Repoint the module-level directory constants at new_root.

    When mkdirs is true the new directory tree is created immediately.
    """
    global ROOT_DIR, DATA_DIR, RESULTS_DIR, FRINGES_DIR, CALIBRATION_DIR

    ROOT_DIR = new_root
    DATA_DIR = os.path.join(ROOT_DIR, 'data')               # IO data location
    RESULTS_DIR = os.path.join(DATA_DIR, "results")         # results output
    FRINGES_DIR = os.path.join(DATA_DIR, "fringes")         # fringe images
    CALIBRATION_DIR = os.path.join(DATA_DIR, "calibration") # calibration data

    if mkdirs:
        make_structure()
42
+
43
+ make_structure()
@@ -0,0 +1,266 @@
1
+
2
+ import numpy as np
3
+
4
+ import logging
5
+
6
+ from time import sleep
7
+ from scipy.ndimage import gaussian_filter
8
+ from scipy.interpolate import griddata
9
+
10
+ from opensfdi.profilometry import ClassicPhaseHeight
11
+ from opensfdi.video import FringeProjector
12
+ from opensfdi.utils import maths
13
+
14
class Photogrammetry:
    """Captures one frame from each of two or more cameras, optionally after a delay."""

    def __init__(self, cameras, delay):
        if len(cameras) < 2: raise Exception("You need at least 2 cameras to run an experiment")

        self.logger = logging.getLogger('opensfdi')
        self.cameras = cameras
        self.delay = delay  # seconds to wait before capturing (<= 0 disables)

    def run(self):
        """Wait for the configured delay, then capture a frame per camera."""
        if self.delay > 0:
            sleep(self.delay)

        captures = []
        for cam in self.cameras:
            captures.append(cam.capture())

        return captures
25
+
26
class FringeProjection:
    """Drives a fringe projector and captures one frame per camera each run."""

    def __init__(self, cameras, projector: FringeProjector, delay=0.0):
        if projector is None:
            raise Exception("You need a projector to run an experiment")

        if len(cameras) == 0:
            raise Exception("You need at least 1 camera to run an experiment")

        self.logger = logging.getLogger('opensfdi')

        self.cameras = cameras
        self.projector = projector
        self.delay = delay  # seconds to wait between display and capture (<= 0 disables)

    def run(self):
        """Display the current fringe pattern, wait, then capture from every camera."""
        self.projector.display()
        if self.delay > 0:
            sleep(self.delay)
        return [cam.capture() for cam in self.cameras]

    def next(self):
        """Advance the projector to its next fringe pattern."""
        self.projector.next()

    # def stream(self):
    #     self.stream = True
    #
    #     while self.stream:
    #         yield self.run()
    #
    #     self.logger.info('Finished streaming')
56
+
57
class LightCalc:
    """Estimates optical properties (absorption / reduced scattering) from SFDI
    images using the diffusion approximation.

    mu_a: assumed absorption coefficient of the reference material
    mu_sp: assumed reduced scattering coefficient of the reference material
    refr_index: refractive index of the reference material
    sf: [DC, AC] spatial frequencies used for the measurement
    std_dev: gaussian smoothing applied to the reference AC/DC images (0 disables)
    """

    def __init__(self, mu_a, mu_sp, refr_index, sf=None, std_dev=3):
        # BUG FIX: calculate() used self.logger, which was never created here.
        self.logger = logging.getLogger('opensfdi')

        self.mu_a = mu_a
        self.mu_sp = mu_sp
        self.refr_index = refr_index
        # FIX: avoid a shared mutable default argument; the default value is unchanged.
        self.sf = [0.0, 0.2] if sf is None else sf
        self.std_dev = std_dev

    def __calculate(self, mu_a, mu_sp, refr_index):
        """Derived diffusion-approximation quantities for the given properties."""
        # Effective Reflection Coefficient
        R_eff = 0.0636 * refr_index + 0.668 + 0.710 / refr_index - 1.44 / (refr_index ** 2)

        A = (1 - R_eff) / (2 * (1 + R_eff)) # Proportionality Constant
        mu_tr = mu_sp + mu_a # Transport Coefficient
        ap = mu_sp / mu_tr # Reduced albedo

        return R_eff, A, mu_tr, ap

    def calculate(self, imgs, ref_imgs):
        """Recover mean absorption and scattering (plus std devs) from measurement
        and reference image stacks.

        Returns (absorption, scattering, absorption_std, scattering_std).
        """
        R_eff, A, mu_tr, ap = self.__calculate(self.mu_a, self.mu_sp, self.refr_index)

        # BUG FIX: imgs_ac / imgs_dc were referenced below but never computed
        # (NameError). They mirror the reference-stack computation — TODO
        # confirm maths.AC / maths.DC are the intended decomposition.
        imgs_ac = maths.AC(imgs)
        imgs_dc = maths.DC(imgs)

        ref_img_ac = maths.AC(ref_imgs)
        ref_img_dc = maths.DC(ref_imgs)

        # Apply some gaussian filtering if necessary
        if 0 < self.std_dev:
            ref_img_dc = gaussian_filter(ref_img_dc, self.std_dev)
            ref_img_ac = gaussian_filter(ref_img_ac, self.std_dev)

        # Get AC/DC Reflectance values using diffusion approximation
        # BUG FIX: `f` was undefined; the spatial frequencies live in self.sf.
        r_ac, r_dc = maths.diffusion_approximation(A, ap, mu_tr, self.sf[1])

        R_d_AC2 = (imgs_ac / ref_img_ac) * r_ac
        R_d_DC2 = (imgs_dc / ref_img_dc) * r_dc

        # Pair up the measured (DC, AC) diffuse reflectance per pixel
        rows, cols = R_d_AC2.shape
        xi = [[R_d_DC2[i][j], R_d_AC2[i][j]] for i in range(rows) for j in range(cols)]

        # Sweep a grid of candidate optical properties
        mu_a = np.arange(0, 0.5, 0.001)
        mu_sp = np.arange(0.1, 5, 0.01)

        # THE DIFFUSION APPROXIMATION
        # Model (DC, AC) diffuse reflectance for every candidate property pair
        Reflectance_AC = []
        Reflectance_DC = []
        op_mua = []
        op_sp = []
        for i in range(len(mu_a)):
            for j in range(len(mu_sp)):
                # 1.43 is the refractive index of tissue
                R_eff, A, mu_tr, ap = self.__calculate(mu_a[i], mu_sp[j], 1.43)

                g = lambda mu_effp: (3 * A * ap) / (((mu_effp / mu_tr) + 1) * ((mu_effp / mu_tr) + 3 * A))

                # BUG FIX: `f` was undefined here too — self.sf holds [DC, AC]
                ac = maths.mu_eff(mu_a[i], mu_tr, self.sf[1])
                dc = maths.mu_eff(mu_a[i], mu_tr, self.sf[0])

                Reflectance_AC.append(g(ac))
                Reflectance_DC.append(g(dc))

                op_mua.append(mu_a[i])
                op_sp.append(mu_sp[j])

        # Model-generated (DC, AC) reflectance pairs and matching properties
        points_array = np.array([[dc_val, ac_val] for dc_val, ac_val in zip(Reflectance_DC, Reflectance_AC)])
        op_mua_array = np.array(op_mua)
        op_sp_array = np.array(op_sp)

        # Cubic interpolation from modelled reflectance pairs to optical properties
        interp_method = 'cubic'
        coeff_abs = griddata(points_array, op_mua_array, xi, method=interp_method) #mua
        coeff_sct = griddata(points_array, op_sp_array, xi, method=interp_method) #musp

        abs_plot = np.reshape(coeff_abs, R_d_AC2.shape)
        sct_plot = np.reshape(coeff_sct, R_d_AC2.shape)

        absorption = np.nanmean(abs_plot)
        absorption_std = np.std(abs_plot)

        scattering = np.nanmean(sct_plot)
        scattering_std = np.std(sct_plot)

        self.logger.info(f'Absorption: {absorption}')
        self.logger.info(f'Deviation std: {absorption_std}')

        self.logger.info(f'Scattering: {scattering}')
        self.logger.info(f'Scattering std: {scattering_std}')

        return absorption, scattering, absorption_std, scattering_std
162
+
163
class Experiment:
    """Wraps a test object (e.g. Photogrammetry / FringeProjection) and runs measurements."""

    def __init__(self, test):
        self.logger = logging.getLogger("opensfdi")
        self.test = test
        self.streaming = False     # stream() loops while this stays True
        self.save_results = False  # flag for subclasses; unused here

    def stream(self):
        """Yield measurements repeatedly until self.streaming is set False."""
        self.streaming = True
        while self.streaming:
            yield self.run()
        self.logger.info('Finished streaming')

    # Subclass Experiment to declare your own default run behaviour
    def run(self):
        self.logger.info(f'Taking a measurement')
        return self.test.run()
188
+
189
class FPExperiment(Experiment):
    """Experiment specialisation for fringe projection tests."""

    def __init__(self, test: FringeProjection):
        super().__init__(test)

    # Subclass Experiment to declare your own default run behaviour
    def run(self):
        self.logger.info(f'Taking a measurement')
        return self.test.run()
200
+
201
class NStepFPExperiment(FPExperiment):
    """N-step fringe projection experiment.

    Captures `steps` phase-shifted image sets for a reference plane, then
    another `steps` sets for the measured object, advancing the projector
    pattern between captures.
    """

    def __init__(self, test: FringeProjection, steps=3):
        super().__init__(test)

        # Callbacks run before / after the reference captures (e.g. so the
        # caller can place or remove the measurement target).
        self._pre_cbs = []
        self._post_cbs = []

        self.steps = steps

        proj_phases = len(test.projector.phases)

        if proj_phases < self.steps:
            raise Exception(f"You need {steps} phases to run a {steps}-step experiment ({proj_phases} provided)")

    def run(self):
        # Run the experiment n times for both reference and measurement images

        # Run pre-reference image callbacks
        for cb in self._pre_cbs: cb()

        ref_imgs = []
        for _ in range(self.steps):
            ref_imgs.append(self.test.run())
            self.test.next()

        # Run post-ref callbacks
        for cb in self._post_cbs: cb()

        imgs = []
        for _ in range(self.steps):
            imgs.append(self.test.run())
            self.test.next()

        self.logger.info(f'Measurement completed')

        # Reorder from (step, camera, ...) to (camera, step, ...).
        # NOTE(review): the (1, 0, 2, 3, 4) axes assume each capture is a 3D
        # image (h, w, channels) — confirm against the camera implementation.
        ref_imgs = np.transpose(ref_imgs, (1, 0, 2, 3, 4))
        imgs = np.transpose(imgs, (1, 0, 2, 3, 4))

        return ref_imgs, imgs

    def classic_ph(self, ref_imgs, imgs, sf, cam_plane_dists, cam_proj_dists):
        # Compute one heightmap per camera using the classic phase-height model
        cameras = imgs.shape[0]

        if len(cam_plane_dists) != cameras:
            raise Exception("You must provide a distance for all cameras to the ref plane")
            return None  # NOTE(review): unreachable (follows a raise)

        if len(cam_proj_dists) != cameras:
            raise Exception("You must provide a distance for all cameras to the projector")
            return None  # NOTE(review): unreachable (follows a raise)

        heightmaps = []

        for i in range(cameras):
            ph = ClassicPhaseHeight(sf, cam_plane_dists[i], cam_proj_dists[i])
            heightmap = ph.heightmap(ref_imgs[i], imgs[i], convert_grey=True, crop=None)

            heightmaps.append(heightmap)

        return heightmaps

    def add_pre_ref_callback(self, cb):
        # Register a callback to run before the reference images are captured
        self._pre_cbs.append(cb)

    def add_post_ref_callback(self, cb):
        # Register a callback to run after the reference images are captured
        self._post_cbs.append(cb)
@@ -0,0 +1,56 @@
1
+ import cv2
2
+
3
+ import numpy as np
4
+
5
class FringeFactory:
    """Generates stacks of phase-shifted fringe patterns (sinusoidal or binary)."""

    @staticmethod
    def MakeBinary(frequency, phase_count, orientation, width=1024, height=1024):
        # Maybe not a good idea to rely upon sinusoidal function but works for now :)
        imgs = FringeFactory.MakeSinusoidal(frequency, phase_count, orientation, width, height)

        # BUG FIX: imgs is a (phase_count, rows, cols) greyscale stack, so
        # imgs[0].shape has only 2 elements — the old 3-way unpack raised
        # ValueError. The per-pixel threshold loops are also vectorised.
        return np.where(imgs < 0.5, 0.0, 1.0)

    @staticmethod
    def MakeBinaryRGB(frequency, phase_count, orientation, width=1024, height=1024):
        """Binary fringe stack expanded to 3 identical RGB channels."""
        imgs = FringeFactory.MakeBinary(frequency, phase_count, orientation, width, height)

        return FringeFactory.GrayToRGB(imgs)

    @staticmethod
    def MakeSinusoidal(frequency, phase_count, orientation, width=1024, height=1024):
        """Generate phase_count sinusoidal fringes, each shifted by 2*pi/phase_count,
        normalised to [0, 1]."""
        # BUG FIX: buffer is (phase_count, height, width) — meshgrid below
        # produces (height, width) arrays, so the old (phase_count, width,
        # height) buffer broke for non-square sizes.
        imgs = np.empty((phase_count, height, width))

        for i in range(phase_count):
            x, y = np.meshgrid(np.arange(width, dtype=int), np.arange(height, dtype=int))

            # Direction of the fringes is set by the orientation angle
            gradient = np.sin(orientation) * x - np.cos(orientation) * y

            # BUG FIX: the phase offset was `+ phase_count` (a constant), which
            # made every image in the stack identical; an N-step sequence needs
            # a per-image shift of 2*pi*i/N.
            phase_shift = (2.0 * np.pi * i) / phase_count
            imgs[i] = np.sin(((2.0 * np.pi * gradient) / frequency) + phase_shift)

            imgs[i] = cv2.normalize(imgs[i], None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)

        return imgs

    @staticmethod
    def MakeSinusoidalRGB(frequency, phase_count, orientation, width=1024, height=1024):
        """Sinusoidal fringe stack expanded to 3 identical RGB channels."""
        imgs = FringeFactory.MakeSinusoidal(frequency, phase_count, orientation, width, height)

        return FringeFactory.GrayToRGB(imgs)

    @staticmethod
    def GrayToRGB(imgs):
        """Expand each greyscale image in the stack to 3 identical channels."""
        count, rows, cols = imgs.shape

        rgb_imgs = np.empty((count, rows, cols, 3))

        for i in range(count):
            # NOTE(review): cv2.cvtColor requires 8U/16U/32F input; imgs is
            # float64 here — confirm dtype handling upstream.
            rgb_imgs[i] = cv2.cvtColor(imgs[i], cv2.COLOR_GRAY2RGB)

        return rgb_imgs
File without changes
@@ -0,0 +1,175 @@
1
+ import cv2
2
+ import os
3
+ import pickle
4
+
5
+ from abc import ABC, abstractmethod
6
+
7
class Repo(ABC):
    """Minimal repository interface: queued changes are persisted by commit()."""

    @abstractmethod
    def commit(self):
        pass
11
+
12
class ImageRepo(Repo):
    """Repository interface for images, stored and retrieved by name."""

    @abstractmethod
    def add_image(self, img, name):
        raise NotImplementedError

    @abstractmethod
    def load_image(self, name):
        raise NotImplementedError
20
+
21
class ResultRepo(Repo):
    """Repository interface for experiment artefacts: fringes, images,
    reference images and heightmaps, each keyed by name."""

    @abstractmethod
    def add_fringe(self, imgs, name):
        raise NotImplementedError

    @abstractmethod
    def add_image(self, imgs, name):
        raise NotImplementedError

    @abstractmethod
    def add_ref_image(self, imgs, name):
        raise NotImplementedError

    @abstractmethod
    def add_heightmap(self, heightmap, name):
        raise NotImplementedError

    @abstractmethod
    def load_fringe(self, name):
        raise NotImplementedError

    @abstractmethod
    def load_image(self, name):
        raise NotImplementedError

    @abstractmethod
    def load_ref_image(self, name):
        raise NotImplementedError

    @abstractmethod
    def load_heightmap(self, name):
        raise NotImplementedError
53
+
54
class CalibrationRepo(Repo):
    """Repository interface for calibration data (gamma / lens / projector),
    keyed by camera or projector name."""

    @abstractmethod
    def add_gamma(self, data):
        raise NotImplementedError

    @abstractmethod
    def add_lens(self, data):
        raise NotImplementedError

    @abstractmethod
    def add_proj(self, data):
        raise NotImplementedError

    @abstractmethod
    def load_gamma(self, cam_name):
        raise NotImplementedError

    @abstractmethod
    def load_lens(self, cam_name):
        raise NotImplementedError

    @abstractmethod
    def load_proj(self, proj_name):
        raise NotImplementedError
78
+
79
+ ### CONCRETE IMPLEMENTATIONS ###
80
+
81
class BinRepo(Repo):
    """Pickle-backed repository: queued dicts are merged and written on commit."""

    def __init__(self, file):
        self._file = file

        self._outdated = True    # cache is stale until the next load
        self._load_cache = None  # last unpickled file contents

        self._changes = []       # dicts queued for the next commit

    def add_bin(self, data):
        """Queue a dict of data to be merged into the file on commit."""
        self._changes.append(data)

    def load_bin(self):
        """Return the pickled file contents, re-reading only when stale."""
        if self._outdated:
            with open(os.path.join(self._file), 'rb') as infile:
                self._load_cache = pickle.load(infile)

            self._outdated = False

        return self._load_cache

    def commit(self):
        """Merge the queued dicts (later entries win) and pickle them to the file."""
        merged = {}
        for change in self._changes:
            merged |= change

        with open(os.path.join(self._file), 'wb') as outfile:
            pickle.dump(merged, outfile, protocol=pickle.HIGHEST_PROTOCOL)

        # Force a re-read on the next load
        self._outdated = True
+
112
class FileImageRepo(ImageRepo):
    """Stores images as 8-bit files on disk via OpenCV."""

    def __init__(self, path):
        self._path = path
        self._changes = {}  # name -> image pending write

    def add_image(self, img, name):
        """Queue an image for writing, rescaled to the full 8-bit range."""
        self._changes[name] = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

    def load_image(self, name):
        # BGR Format
        return cv2.imread(os.path.join(self._path, name), cv2.IMREAD_COLOR)

    def commit(self):
        """Write every queued image to disk, then clear the queue."""
        for name, img in self._changes.items():
            cv2.imwrite(os.path.join(self._path, name), img)

        self._changes = dict()
130
+
131
class BinCalibrationRepo(CalibrationRepo):
    """Calibration repository backed by a single pickled file (via BinRepo).

    Data layout: {camera_name: {"gamma": ..., "lens": ...}, projector_name: ...}
    """

    def __init__(self, file):
        self._repo = BinRepo(file)

        self._data = dict()  # pending changes, keyed by device name

    def add_gamma(self, data):
        """Queue a gamma calibration under its camera's name."""
        cam_name = data.camera.name

        self.__if_ne(cam_name)

        self._data[cam_name]["gamma"] = data.serialize()

    def add_lens(self, data):
        """Queue a lens calibration under its camera's name."""
        cam_name = data.camera.name

        self.__if_ne(cam_name)

        self._data[cam_name]["lens"] = data.serialize()

    def add_proj(self, data):
        """Queue a projector calibration under its projector's name."""
        # BUG FIX: the key was stored in a variable named `projector`, but the
        # dict was indexed with the undefined name `proj_name` (NameError).
        proj_name = data.projector.name

        self._data[proj_name] = data.serialize()

    def load_gamma(self, cam_name):
        data = self._repo.load_bin()[cam_name]
        return data["gamma"]

    def load_lens(self, cam_name):
        data = self._repo.load_bin()[cam_name]
        return data["lens"]

    def load_proj(self, proj_name):
        # Projector calibration loading is not implemented yet
        #data = self._repo.load_bin()[proj_name]
        return None

    def commit(self):
        """Push pending changes to the backing repo and persist them."""
        self._repo.add_bin(self._data)

        self._repo.commit()

    def __if_ne(self, name):
        # Ensure a per-device dict exists before nesting keys under it
        if not (name in self._data):
            self._data[name] = dict()
@@ -0,0 +1,34 @@
1
+ import os
2
+ import sys
3
+
4
+ from contextlib import contextmanager
5
+
6
+ from abc import ABC, abstractmethod
7
+
8
+ # Serialise/Deserialize libraries
9
class Serializable(ABC):
    """Interface for objects that can round-trip themselves to plain data."""

    @abstractmethod
    def serialize(self):
        raise NotImplementedError

    @abstractmethod
    def deserialize(self):
        raise NotImplementedError
17
+
18
+ # Redirect stdout to /dev/null
19
@contextmanager
def stdout_redirected(to=os.devnull):
    """Context manager that redirects stdout at the file-descriptor level.

    Because fd 1 itself is swapped (not just sys.stdout), output written by C
    extensions and child processes inheriting the fd is redirected too.
    """
    fd = sys.stdout.fileno()

    def _redirect_stdout(to):
        sys.stdout.close()  # implicitly flushes before the fd swap
        os.dup2(to.fileno(), fd)  # fd now points at the target file
        sys.stdout = os.fdopen(fd, 'w')  # rebuild the Python-level wrapper

    # Duplicate the original fd so it can be restored afterwards
    with os.fdopen(os.dup(fd), 'w') as old_stdout:
        with open(to, 'w') as file:
            _redirect_stdout(to=file)
        try:
            yield
        finally:
            # Restore the original stdout even if the body raised
            _redirect_stdout(to=old_stdout)
@@ -0,0 +1,148 @@
1
+ import numpy as np
2
+
3
+ from matplotlib import pyplot as plt
4
+ from numpy.polynomial import polynomial as P
5
+
6
+ from abc import ABC
7
+ from stl import mesh
8
+
9
+ from opensfdi import wrapped_phase, unwrapped_phase, centre_crop_img, rgb2grey
10
+
11
def show_heightmap(heightmap, title='Heightmap'):
    """Display a heightmap as a 3D surface plot."""
    grid_x, grid_y = np.meshgrid(range(heightmap.shape[0]), range(heightmap.shape[1]))

    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    axes.plot_surface(grid_x, grid_y, heightmap.T)
    plt.title(title)
    plt.show()
19
+
20
class PhaseHeight(ABC):
    """Base class for phase-height conversion techniques."""

    def phasemap(self, imgs):
        """Return the unwrapped phase map of a stack of phase-shifted images."""
        w_phase = wrapped_phase(imgs)

        return unwrapped_phase(w_phase)

    def to_stl(self, heightmap):
        """Triangulate a heightmap grid into a mesh and save it as heightmap_mesh.stl."""
        # Create vertices from the heightmap
        vertices = []
        for y in range(heightmap.shape[0]):
            for x in range(heightmap.shape[1]):
                vertices.append([x, y, heightmap[y, x]])

        vertices = np.array(vertices)

        # Create faces for the mesh: two triangles per grid cell
        faces = []
        for y in range(heightmap.shape[0] - 1):
            for x in range(heightmap.shape[1] - 1):
                # Vertex indices of the cell's four corners (row-major layout)
                v1 = x + y * heightmap.shape[1]
                v2 = (x + 1) + y * heightmap.shape[1]
                v3 = x + (y + 1) * heightmap.shape[1]
                v4 = (x + 1) + (y + 1) * heightmap.shape[1]

                # First triangle
                faces.append([v1, v2, v3])
                # Second triangle
                faces.append([v2, v4, v3])

        # Create the mesh object
        mesh_data = mesh.Mesh(np.zeros(len(faces), dtype=mesh.Mesh.dtype))
        for i, f in enumerate(faces):
            for j in range(3):
                mesh_data.vectors[i][j] = vertices[f[j]]

        # NOTE(review): output filename is hard-coded
        mesh_data.save('heightmap_mesh.stl')
56
+
57
class ClassicPhaseHeight(PhaseHeight):
    # h = phi_DE * p * d / (phi_DE * p + 2*pi*l)
    # p = stripe width
    # d = distance between camera and reference plane
    # l = distance between camera and projector

    def __init__(self, p, d, l):
        super().__init__()

        self.p = p  # stripe width
        self.d = d  # camera-to-reference-plane distance
        self.l = l  # camera-to-projector distance

    def heightmap(self, ref_imgs, imgs, convert_grey=False, crop=None):
        """Compute a heightmap from reference and measurement fringe stacks.

        convert_grey: convert RGB image stacks to greyscale first.
        crop: None, (fx, fy) symmetric fractional crop, or (fx1, fy1, fx2, fy2).
        """
        if convert_grey:
            imgs = np.array([rgb2grey(img) for img in imgs])
            ref_imgs = np.array([rgb2grey(img) for img in ref_imgs])

        if crop is not None:
            h, w = imgs[0].shape[:2]
            if len(crop) == 2:
                # Symmetric crop: the same fraction is removed from both sides
                crop_x1 = int(crop[0] * w)
                crop_x2 = w - crop_x1 - 1
                crop_y1 = int(crop[1] * h)
                crop_y2 = h - crop_y1 - 1
            elif len(crop) == 4:
                # Independent fractions per edge
                crop_x1 = int(crop[0] * w)
                crop_y1 = int(crop[1] * h)
                crop_x2 = w - int(crop[2] * w) - 1
                crop_y2 = h - int(crop[3] * h) - 1
            else: raise Exception("Invalid crop tuple passed")

            imgs = np.array([centre_crop_img(img, crop_x1, crop_y1, crop_x2, crop_y2) for img in imgs])
            ref_imgs = np.array([centre_crop_img(img, crop_x1, crop_y1, crop_x2, crop_y2) for img in ref_imgs])

        ref_phase, measured_phase = self.phasemap(ref_imgs), self.phasemap(imgs)

        phase_diff = measured_phase - ref_phase

        # NOTE(review): this does not match the formula in the class comment
        # (h = phi*p*d / (phi*p + 2*pi*l)) — the numerator here uses l and the
        # denominator subtracts 2*pi*p*d. Confirm which form is intended.
        return np.divide(self.l * phase_diff, phase_diff - (2.0 * np.pi * self.p * self.d), dtype=np.float32)
97
+
98
class TriangularStereoHeight(PhaseHeight):
    """Triangular stereo phase-height model.

    NOTE(review): heightmap() is unfinished — the computation is commented out
    and it currently always returns None.
    """

    def __init__(self, ref_dist, sensor_dist, freq):
        super().__init__()

        self.ref_dist = ref_dist        # distance to the reference plane
        self.sensor_dist = sensor_dist  # sensor separation
        self.freq = freq                # fringe spatial frequency

    def heightmap(self, imgs):
        phase = self.phasemap(imgs)

        #heightmap = np.divide(self.ref_dist * phase_diff, 2.0 * np.pi * self.sensor_dist * self.freq)

        #heightmap[heightmap <= 0] = 0 # Remove negative values

        return None
114
+
115
class PolyPhaseHeight(PhaseHeight):
    """Phase-height model that fits height = polynomial(phase difference)."""

    def __init__(self, coeffs=None):
        super().__init__()

        self.coeffs = coeffs  # polynomial coefficients in ascending order

    def calibrate(self, heightmap, ref_imgs, imgs, deg=1):
        """Fit polynomial coefficients mapping phase difference to known heights.

        Returns (coeffs, residual); coefficients are in ascending order.
        """
        ref_phase, measured_phase = self.phasemap(ref_imgs), self.phasemap(imgs)

        diff = ref_phase - measured_phase

        # (Removed a dead loop that accumulated powers of `diff` into an
        # unused local `total` — it had no effect on the fit.)

        # Coefficients are in ascending order
        self.coeffs, stats = P.polyfit(diff.ravel(), heightmap.ravel(), deg=deg, full=True)

        # stats[0] holds the residual sum of squares from the least-squares fit
        return self.coeffs, stats[0][0]

    def heightmap(self, ref_imgs, imgs):
        """Evaluate the fitted polynomial on the phase difference of two stacks."""
        # BUG FIX: previously called the non-existent self.phasemaps(ref_imgs,
        # imgs); the base class provides phasemap(imgs) for a single stack.
        ref_phase = self.phasemap(ref_imgs)
        measured_phase = self.phasemap(imgs)

        diff = ref_phase - measured_phase

        result = np.zeros(ref_phase.shape, dtype=np.float64)

        for power, coeff in enumerate(self.coeffs):
            print(f'{round(coeff, ndigits=3)} X_{power}')  # debug trace of fitted terms
            result += (np.power(diff, power) * coeff)

        return result
@@ -0,0 +1,99 @@
1
+ import os
2
+ import logging
3
+ import numpy as np
4
+
5
+ from datetime import datetime
6
+
7
+ from opensfdi.io.repositories import ImageRepo, FileImageRepo, CalibrationRepo, BinRepo, BinCalibrationRepo
8
+ from opensfdi.definitions import RESULTS_DIR, CALIBRATION_DIR
9
+
10
class CalibrationService():
    """Facade over a CalibrationRepo for saving and loading calibration data."""

    def __init__(self, data_repo: CalibrationRepo = None):
        self._logger = logging.getLogger('opensfdi')

        self._data_repo = data_repo

        if self._data_repo is None:  # Default to bin repo
            output = os.path.join(CALIBRATION_DIR, 'calibration.json')
            self._data_repo = BinCalibrationRepo(output)

            # Fix: this log line must live inside the branch — `output` is only
            # defined when we construct the default repo (previously it raised
            # NameError whenever a repo was supplied by the caller).
            self._logger.debug(f'Using calibration data file {output}')

    def save_calibrations(self, gamma_calib=None, lens_calib=None, proj_calib=None):
        """Persist whichever calibrations are provided; commit once if anything changed."""
        updated = False

        if gamma_calib:
            self._data_repo.add_gamma(gamma_calib)
            updated = True

        if lens_calib:
            self._data_repo.add_lens(lens_calib)
            updated = True

        if proj_calib:
            self._data_repo.add_proj(proj_calib)
            updated = True

        if updated:
            self._data_repo.commit()

    def load_calibrations(self, cam_name, proj_name):
        """Return the (gamma, lens, projector) calibrations for the named devices."""
        return self._data_repo.load_gamma(cam_name), self._data_repo.load_lens(cam_name), self._data_repo.load_proj(proj_name)
41
+
42
class ResultService():
    """Persists and reloads experiment results: binary data plus captured images."""

    def __init__(self, data_repo: BinRepo, image_repo: ImageRepo):
        self._logger = logging.getLogger('opensfdi')

        self._data_repo = data_repo
        self._image_repo = image_repo

    @staticmethod
    def default(directory=None):
        """Build a ResultService rooted at RESULTS_DIR/<directory> (timestamped by default)."""
        if directory is None: directory = str(datetime.now().strftime("%Y%m%d_%H%M%S"))

        loc = os.path.join(RESULTS_DIR, directory)

        # Robustness: makedirs also creates RESULTS_DIR itself if missing
        # (os.mkdir would raise FileNotFoundError in that case).
        os.makedirs(loc, 0o770, exist_ok=True)

        data_out = os.path.join(loc, 'results.bin')

        return ResultService(BinRepo(data_out), FileImageRepo(loc))

    def save_data(self, data=None, fringes=None, imgs=None, ref_imgs=None):
        """Save whichever artifacts are provided; commit repos only when something was added.

        imgs / ref_imgs are indexed [camera][phase]; filenames encode both indices.
        """
        if data is not None:
            self._data_repo.add_bin(data)
            self._data_repo.commit()

        updated = False

        if fringes is not None:
            updated = True
            for i, img in enumerate(fringes):
                self._image_repo.add_image(img, f'fringes{i}.jpg')

        if imgs is not None:
            updated = True
            for cam_i, xs in enumerate(imgs):
                for i, img in enumerate(xs):
                    self._image_repo.add_image(img, f'cam{cam_i}_img{i}.jpg')

        if ref_imgs is not None:
            updated = True
            for cam_i, xs in enumerate(ref_imgs):
                for i, img in enumerate(xs):
                    self._image_repo.add_image(img, f'cam{cam_i}_refimg{i}.jpg')

        if updated: self._image_repo.commit()

    def load_data(self):
        """Reload measurement/reference images for every camera.

        Returns (ref_imgs, imgs, data) where the image arrays are object arrays
        indexed [camera][phase], mirroring the layout written by save_data.
        """
        data = self._data_repo.load_bin()
        cam_count = len(data["cameras"].keys())
        phases = data["phases"]

        imgs = np.empty((cam_count, phases), dtype=np.ndarray)
        ref_imgs = np.empty((cam_count, phases), dtype=np.ndarray)
        for cam_i in range(cam_count):
            for i in range(phases):
                imgs[cam_i][i] = self._image_repo.load_image(f'cam{cam_i}_img{i}.jpg')
                ref_imgs[cam_i][i] = self._image_repo.load_image(f'cam{cam_i}_refimg{i}.jpg')

        return ref_imgs, imgs, data
File without changes
@@ -0,0 +1,18 @@
1
+ import numpy as np
2
+
3
# Demodulation (array input)
def AC(imgs: list):
    """Demodulate the AC (modulation) amplitude from three phase-shifted images."""
    d01 = imgs[0] - imgs[1]
    d12 = imgs[1] - imgs[2]
    d20 = imgs[2] - imgs[0]
    return (2 ** 0.5 / 3) * ((d01 ** 2 + d12 ** 2 + d20 ** 2) ** 0.5)
6
+
7
def DC(imgs: list):
    """Average of the phase-shifted images (the DC / offset term)."""
    n = len(imgs)
    total = imgs[0]
    for img in imgs[1:]:
        total = total + img
    return total / n
9
+
10
def mu_eff(mu_a, mu_tr, f):
    """Effective attenuation coefficient at spatial frequency f.

    mu_eff = mu_tr * sqrt(3 * mu_a / mu_tr + (2*pi*f)^2 / mu_tr^2)
    """
    angular_sq = (2 * np.pi * f) ** 2
    absorption_term = 3 * (mu_a / mu_tr)
    return mu_tr * (absorption_term + angular_sq / mu_tr ** 2) ** 0.5
13
+
14
def diffusion_approximation(A, ap, mu_tr, f_ac):
    """Diffuse reflectance (AC and DC components) under the diffusion approximation.

    A is the proportionality constant, ap the reduced albedo, mu_tr the
    transport coefficient and f_ac the AC spatial frequency.
    """
    numerator = 3 * A * ap
    w = (2 * np.pi * f_ac) / mu_tr  # spatial frequency normalized by mu_tr
    r_ac = numerator / (w ** 2 + w * (1 + 3 * A) + 3 * A)

    root = np.sqrt(3 * (1 - ap))
    r_dc = numerator / (3 * (1 - ap) + (1 + 3 * A) * root + 3 * A)

    return r_ac, r_dc
@@ -0,0 +1,122 @@
1
+
2
+ from abc import ABC, abstractmethod
3
+
4
+ import logging
5
+ import cv2
6
+
7
class Projector(ABC):
    """Abstract base for anything capable of projecting output."""

    @abstractmethod
    def __init__(self, name):
        # Shared package logger plus a human-readable device name.
        self.logger = logging.getLogger('opensfdi')
        self.name = name

    @abstractmethod
    def display(self):
        """Show the projector's current output."""
        pass
17
+
18
class ImageProjector(Projector):
    """Projector that displays a single stored image."""

    @abstractmethod
    def __init__(self, name):
        super().__init__(name)
        self.img = None  # image to display; assigned via set_image

    @abstractmethod
    def set_image(self, img):
        """Store the image to be displayed."""
        self.img = img

    @abstractmethod
    def display(self):
        """Return the stored image."""
        return self.img
32
+
33
class FringeProjector(Projector):
    """Projector that cycles through a sequence of phase-shifted fringe patterns."""

    @abstractmethod
    def __init__(self, name, frequency, orientation, resolution, phases=None):
        super().__init__(name)

        self.frequency = frequency      # fringe spatial frequency
        self.orientation = orientation  # fringe orientation
        self.resolution = resolution    # projected image resolution

        # Fix: `phases=[]` was a shared mutable default — every instance
        # constructed without phases shared (and mutated) one list.
        self.phases = [] if phases is None else phases
        self.current = 0  # index of the current phase in self.phases

    @abstractmethod
    def display(self):
        pass

    def get_phase(self):
        """Return the current phase shift, or 0.0 when no phases are configured."""
        return 0.0 if len(self.phases) == 0 else self.phases[self.current]

    def set_phases(self, phases, reset=False):
        """Replace the phase sequence, optionally rewinding to the first phase."""
        self.phases = phases

        if reset: self.current = 0

    def next(self):
        """Advance to the next phase, wrapping around at the end.

        Fix: previously raised ZeroDivisionError (`% len([])`) when no
        phases were set; now a no-op in that case.
        """
        if self.phases:
            self.current = (self.current + 1) % len(self.phases)
61
+
62
class Camera(ABC):
    """Abstract camera with optional lens-distortion correction."""

    def __init__(self, resolution=(1280, 720), name='Camera1', cam_mat=None, dist_mat=None, optimal_mat=None):
        self.logger = logging.getLogger('opensfdi')
        self.resolution = resolution
        self.name = name

        # Lens calibration matrices — all three must be present for undistortion.
        self.cam_mat = cam_mat
        self.dist_mat = dist_mat
        self.optimal_mat = optimal_mat

    @abstractmethod
    def capture(self):
        """Acquire and return a single image."""
        pass

    def set_resolution(self, res):
        self.resolution = res

    def try_undistort_img(self, img):
        """Undistort img when full calibration is available; otherwise return it unchanged."""
        calibration = (self.cam_mat, self.dist_mat, self.optimal_mat)
        if any(mat is None for mat in calibration):
            return img

        self.logger.debug('Undistorting camera image...')
        return cv2.undistort(img, self.cam_mat, self.dist_mat, None, self.optimal_mat)
87
+
88
class FakeCamera(Camera):
    """Camera stand-in that serves images from an in-memory list (for testing)."""

    def __init__(self, imgs=None, name='Camera1', cam_mat=None, dist_mat=None, optimal_mat=None):
        # Fix: pass the caller's name through (it was hard-coded to 'Camera1').
        super().__init__(name=name, cam_mat=cam_mat, dist_mat=dist_mat, optimal_mat=optimal_mat)

        self.img_num = 0   # index of the next image to serve
        self.loop = False  # fix: was read in capture() but never defined; default off
        # Fix: `imgs=[]` was a shared mutable default across all instances.
        self.imgs = [] if imgs is None else imgs

    def capture(self):
        """Return the next image in sequence.

        Returns None once exhausted (and rewinds), unless self.loop is True,
        in which case it wraps around. Fix: the original called next() on a
        plain list (TypeError) and read the undefined self.loop attribute.
        """
        if self.img_num >= len(self.imgs):
            self.img_num = 0
            if not self.loop or not self.imgs:
                return None

        img = self.imgs[self.img_num]
        self.img_num += 1

        self.logger.info('Returning an image')

        return img

    def __iter__(self):
        return iter(self.imgs)

    def add_image(self, img):
        # Fix: appended to undefined self._images; the image list is self.imgs.
        self.imgs.append(img)
        return self
115
+
116
class FileCamera(FakeCamera):
    """FakeCamera whose images are loaded from disk paths at construction time."""

    def __init__(self, img_paths, name='Camera1', cam_mat=None, dist_mat=None, optimal_mat=None):
        # Load all images into memory up front.
        # Fix: previously appended into the class-level shared default list
        # (every FileCamera accumulated every other instance's images) and
        # discarded the caller's name; build a fresh list and pass both through.
        imgs = [cv2.imread(path, 1) for path in img_paths]

        super().__init__(imgs=imgs, name=name, cam_mat=cam_mat, dist_mat=dist_mat, optimal_mat=optimal_mat)