pyvale-2025.5.3-cp311-cp311-macosx_13_0_x86_64.whl
- pyvale/.dylibs/libomp.dylib +0 -0
- pyvale/__init__.py +89 -0
- pyvale/analyticmeshgen.py +102 -0
- pyvale/analyticsimdatafactory.py +91 -0
- pyvale/analyticsimdatagenerator.py +323 -0
- pyvale/blendercalibrationdata.py +15 -0
- pyvale/blenderlightdata.py +26 -0
- pyvale/blendermaterialdata.py +15 -0
- pyvale/blenderrenderdata.py +30 -0
- pyvale/blenderscene.py +488 -0
- pyvale/blendertools.py +420 -0
- pyvale/camera.py +146 -0
- pyvale/cameradata.py +69 -0
- pyvale/cameradata2d.py +84 -0
- pyvale/camerastereo.py +217 -0
- pyvale/cameratools.py +522 -0
- pyvale/cython/rastercyth.c +32211 -0
- pyvale/cython/rastercyth.cpython-311-darwin.so +0 -0
- pyvale/cython/rastercyth.py +640 -0
- pyvale/data/__init__.py +5 -0
- pyvale/data/cal_target.tiff +0 -0
- pyvale/data/case00_HEX20_out.e +0 -0
- pyvale/data/case00_HEX27_out.e +0 -0
- pyvale/data/case00_HEX8_out.e +0 -0
- pyvale/data/case00_TET10_out.e +0 -0
- pyvale/data/case00_TET14_out.e +0 -0
- pyvale/data/case00_TET4_out.e +0 -0
- pyvale/data/case13_out.e +0 -0
- pyvale/data/case16_out.e +0 -0
- pyvale/data/case17_out.e +0 -0
- pyvale/data/case18_1_out.e +0 -0
- pyvale/data/case18_2_out.e +0 -0
- pyvale/data/case18_3_out.e +0 -0
- pyvale/data/case25_out.e +0 -0
- pyvale/data/case26_out.e +0 -0
- pyvale/data/optspeckle_2464x2056px_spec5px_8bit_gblur1px.tiff +0 -0
- pyvale/dataset.py +325 -0
- pyvale/errorcalculator.py +109 -0
- pyvale/errordriftcalc.py +146 -0
- pyvale/errorintegrator.py +336 -0
- pyvale/errorrand.py +607 -0
- pyvale/errorsyscalib.py +134 -0
- pyvale/errorsysdep.py +327 -0
- pyvale/errorsysfield.py +414 -0
- pyvale/errorsysindep.py +808 -0
- pyvale/examples/__init__.py +5 -0
- pyvale/examples/basics/ex1_1_basicscalars_therm2d.py +131 -0
- pyvale/examples/basics/ex1_2_sensormodel_therm2d.py +158 -0
- pyvale/examples/basics/ex1_3_customsens_therm3d.py +216 -0
- pyvale/examples/basics/ex1_4_basicerrors_therm3d.py +153 -0
- pyvale/examples/basics/ex1_5_fielderrs_therm3d.py +168 -0
- pyvale/examples/basics/ex1_6_caliberrs_therm2d.py +133 -0
- pyvale/examples/basics/ex1_7_spatavg_therm2d.py +123 -0
- pyvale/examples/basics/ex2_1_basicvectors_disp2d.py +112 -0
- pyvale/examples/basics/ex2_2_vectorsens_disp2d.py +111 -0
- pyvale/examples/basics/ex2_3_sensangle_disp2d.py +139 -0
- pyvale/examples/basics/ex2_4_chainfielderrs_disp2d.py +196 -0
- pyvale/examples/basics/ex2_5_vectorfields3d_disp3d.py +109 -0
- pyvale/examples/basics/ex3_1_basictensors_strain2d.py +114 -0
- pyvale/examples/basics/ex3_2_tensorsens2d_strain2d.py +111 -0
- pyvale/examples/basics/ex3_3_tensorsens3d_strain3d.py +182 -0
- pyvale/examples/basics/ex4_1_expsim2d_thermmech2d.py +171 -0
- pyvale/examples/basics/ex4_2_expsim3d_thermmech3d.py +252 -0
- pyvale/examples/genanalyticdata/ex1_1_scalarvisualisation.py +35 -0
- pyvale/examples/genanalyticdata/ex1_2_scalarcasebuild.py +43 -0
- pyvale/examples/genanalyticdata/ex2_1_analyticsensors.py +80 -0
- pyvale/examples/imagedef2d/ex_imagedef2d_todisk.py +79 -0
- pyvale/examples/renderblender/ex1_1_blenderscene.py +121 -0
- pyvale/examples/renderblender/ex1_2_blenderdeformed.py +119 -0
- pyvale/examples/renderblender/ex2_1_stereoscene.py +128 -0
- pyvale/examples/renderblender/ex2_2_stereodeformed.py +131 -0
- pyvale/examples/renderblender/ex3_1_blendercalibration.py +120 -0
- pyvale/examples/renderrasterisation/ex_rastenp.py +153 -0
- pyvale/examples/renderrasterisation/ex_rastercyth_oneframe.py +218 -0
- pyvale/examples/renderrasterisation/ex_rastercyth_static_cypara.py +187 -0
- pyvale/examples/renderrasterisation/ex_rastercyth_static_pypara.py +190 -0
- pyvale/examples/visualisation/ex1_1_plot_traces.py +102 -0
- pyvale/examples/visualisation/ex2_1_animate_sim.py +89 -0
- pyvale/experimentsimulator.py +175 -0
- pyvale/field.py +128 -0
- pyvale/fieldconverter.py +351 -0
- pyvale/fieldsampler.py +111 -0
- pyvale/fieldscalar.py +166 -0
- pyvale/fieldtensor.py +218 -0
- pyvale/fieldtransform.py +388 -0
- pyvale/fieldvector.py +213 -0
- pyvale/generatorsrandom.py +505 -0
- pyvale/imagedef2d.py +569 -0
- pyvale/integratorfactory.py +240 -0
- pyvale/integratorquadrature.py +217 -0
- pyvale/integratorrectangle.py +165 -0
- pyvale/integratorspatial.py +89 -0
- pyvale/integratortype.py +43 -0
- pyvale/output.py +17 -0
- pyvale/pyvaleexceptions.py +11 -0
- pyvale/raster.py +31 -0
- pyvale/rastercy.py +77 -0
- pyvale/rasternp.py +603 -0
- pyvale/rendermesh.py +147 -0
- pyvale/sensorarray.py +178 -0
- pyvale/sensorarrayfactory.py +196 -0
- pyvale/sensorarraypoint.py +278 -0
- pyvale/sensordata.py +71 -0
- pyvale/sensordescriptor.py +213 -0
- pyvale/sensortools.py +142 -0
- pyvale/simcases/case00_HEX20.i +242 -0
- pyvale/simcases/case00_HEX27.i +242 -0
- pyvale/simcases/case00_HEX8.i +242 -0
- pyvale/simcases/case00_TET10.i +242 -0
- pyvale/simcases/case00_TET14.i +242 -0
- pyvale/simcases/case00_TET4.i +242 -0
- pyvale/simcases/case01.i +101 -0
- pyvale/simcases/case02.i +156 -0
- pyvale/simcases/case03.i +136 -0
- pyvale/simcases/case04.i +181 -0
- pyvale/simcases/case05.i +234 -0
- pyvale/simcases/case06.i +305 -0
- pyvale/simcases/case07.geo +135 -0
- pyvale/simcases/case07.i +87 -0
- pyvale/simcases/case08.geo +144 -0
- pyvale/simcases/case08.i +153 -0
- pyvale/simcases/case09.geo +204 -0
- pyvale/simcases/case09.i +87 -0
- pyvale/simcases/case10.geo +204 -0
- pyvale/simcases/case10.i +257 -0
- pyvale/simcases/case11.geo +337 -0
- pyvale/simcases/case11.i +147 -0
- pyvale/simcases/case12.geo +388 -0
- pyvale/simcases/case12.i +329 -0
- pyvale/simcases/case13.i +140 -0
- pyvale/simcases/case14.i +159 -0
- pyvale/simcases/case15.geo +337 -0
- pyvale/simcases/case15.i +150 -0
- pyvale/simcases/case16.geo +391 -0
- pyvale/simcases/case16.i +357 -0
- pyvale/simcases/case17.geo +135 -0
- pyvale/simcases/case17.i +144 -0
- pyvale/simcases/case18.i +254 -0
- pyvale/simcases/case18_1.i +254 -0
- pyvale/simcases/case18_2.i +254 -0
- pyvale/simcases/case18_3.i +254 -0
- pyvale/simcases/case19.geo +252 -0
- pyvale/simcases/case19.i +99 -0
- pyvale/simcases/case20.geo +252 -0
- pyvale/simcases/case20.i +250 -0
- pyvale/simcases/case21.geo +74 -0
- pyvale/simcases/case21.i +155 -0
- pyvale/simcases/case22.geo +82 -0
- pyvale/simcases/case22.i +140 -0
- pyvale/simcases/case23.geo +164 -0
- pyvale/simcases/case23.i +140 -0
- pyvale/simcases/case24.geo +79 -0
- pyvale/simcases/case24.i +123 -0
- pyvale/simcases/case25.geo +82 -0
- pyvale/simcases/case25.i +140 -0
- pyvale/simcases/case26.geo +166 -0
- pyvale/simcases/case26.i +140 -0
- pyvale/simcases/run_1case.py +61 -0
- pyvale/simcases/run_all_cases.py +69 -0
- pyvale/simcases/run_build_case.py +64 -0
- pyvale/simcases/run_example_cases.py +69 -0
- pyvale/simtools.py +67 -0
- pyvale/visualexpplotter.py +191 -0
- pyvale/visualimagedef.py +74 -0
- pyvale/visualimages.py +76 -0
- pyvale/visualopts.py +493 -0
- pyvale/visualsimanimator.py +111 -0
- pyvale/visualsimsensors.py +318 -0
- pyvale/visualtools.py +136 -0
- pyvale/visualtraceplotter.py +142 -0
- pyvale-2025.5.3.dist-info/METADATA +144 -0
- pyvale-2025.5.3.dist-info/RECORD +175 -0
- pyvale-2025.5.3.dist-info/WHEEL +6 -0
- pyvale-2025.5.3.dist-info/licenses/LICENSE +21 -0
- pyvale-2025.5.3.dist-info/top_level.txt +1 -0
pyvale/cameratools.py
ADDED
@@ -0,0 +1,522 @@
# ==============================================================================
# pyvale: the python validation engine
# License: MIT
# Copyright (C) 2025 The Computer Aided Validation Team
# ==============================================================================

"""
NOTE: This module is a feature under development.
"""

import warnings
import copy
from pathlib import Path
import numpy as np
from scipy.signal import convolve2d
from scipy.spatial.transform import Rotation
import matplotlib.image as mplim
from PIL import Image
from pyvale.cameradata2d import CameraData2D
from pyvale.sensordata import SensorData
from pyvale.cameradata import CameraData
from pyvale.camerastereo import CameraStereo


class CameraTools:
    @staticmethod
    def load_image(im_path: Path) -> np.ndarray:
        input_im = mplim.imread(im_path).astype(np.float64)
        # If we have RGB then get rid of it
        # TODO: make sure this is collapsing RGB to grey scale correctly
        if input_im.ndim > 2:
            input_im = input_im[:,:,0]

        return input_im

    @staticmethod
    def save_image(save_file: Path,
                   image: np.ndarray,
                   n_bits: int = 16) -> None:
        # Need to flip image so coords are top left with Y down
        # TODO check this
        image = image[::-1,:]

        if n_bits > 8:
            im = Image.fromarray(image.astype(np.uint16))
        else:
            im = Image.fromarray(image.astype(np.uint8))

        im.save(save_file)

    @staticmethod
    def image_num_str(im_num: int, width: int, cam_num: int = -1) -> str:
        num_str = str(im_num)
        num_str = num_str.zfill(width)

        if cam_num >= 0:
            num_str = num_str+'_'+str(cam_num)

        return num_str

    @staticmethod
    def pixel_vec_px(pixels_count: np.ndarray) -> tuple[np.ndarray,np.ndarray]:
        px_vec_x = np.arange(0,pixels_count[0],1)
        px_vec_y = np.arange(0,pixels_count[1],1)
        return (px_vec_x,px_vec_y)

    @staticmethod
    def pixel_grid_px(pixels_count: np.ndarray
                      ) -> tuple[np.ndarray,np.ndarray]:
        (px_vec_x,px_vec_y) = CameraTools.pixel_vec_px(pixels_count)
        return np.meshgrid(px_vec_x,px_vec_y)

    @staticmethod
    def vectorise_pixel_grid_px(pixels_count: np.ndarray) -> tuple[np.ndarray,np.ndarray]:
        (px_grid_x,px_grid_y) = CameraTools.pixel_grid_px(pixels_count)
        return (px_grid_x.flatten(),px_grid_y.flatten())

    @staticmethod
    def subpixel_vec_px(pixels_count: np.ndarray,
                        subsample: int = 2) -> tuple[np.ndarray,np.ndarray]:
        px_vec_x = np.arange(1/(2*subsample),pixels_count[0],1/subsample)
        px_vec_y = np.arange(1/(2*subsample),pixels_count[1],1/subsample)
        return (px_vec_x,px_vec_y)

    @staticmethod
    def subpixel_grid_px(pixels_count: np.ndarray,
                         subsample: int = 2) -> tuple[np.ndarray,np.ndarray]:
        (px_vec_x,px_vec_y) = CameraTools.subpixel_vec_px(pixels_count,subsample)
        return np.meshgrid(px_vec_x,px_vec_y)

    @staticmethod
    def vectorise_subpixel_grid_px(pixels_count: np.ndarray,
                                   subsample: int = 2) -> tuple[np.ndarray,np.ndarray]:
        (px_grid_x,px_grid_y) = CameraTools.subpixel_grid_px(pixels_count,subsample)
        return (px_grid_x.flatten(),px_grid_y.flatten())

    @staticmethod
    def pixel_vec_leng(field_of_view: np.ndarray,
                       leng_per_px: float) -> tuple[np.ndarray,np.ndarray]:
        px_vec_x = np.arange(leng_per_px/2,
                             field_of_view[0],
                             leng_per_px)
        px_vec_y = np.arange(leng_per_px/2,
                             field_of_view[1],
                             leng_per_px)
        return (px_vec_x,px_vec_y)

    @staticmethod
    def pixel_grid_leng(field_of_view: np.ndarray,
                        leng_per_px: float) -> tuple[np.ndarray,np.ndarray]:
        (px_vec_x,px_vec_y) = CameraTools.pixel_vec_leng(field_of_view,leng_per_px)
        return np.meshgrid(px_vec_x,px_vec_y)

    @staticmethod
    def vectorise_pixel_grid_leng(field_of_view: np.ndarray,
                                  leng_per_px: float) -> tuple[np.ndarray,np.ndarray]:
        (px_grid_x,px_grid_y) = CameraTools.pixel_grid_leng(field_of_view,leng_per_px)
        return (px_grid_x.flatten(),px_grid_y.flatten())

    @staticmethod
    def subpixel_vec_leng(field_of_view: np.ndarray,
                          leng_per_px: float,
                          subsample: int = 2) -> tuple[np.ndarray,np.ndarray]:
        px_vec_x = np.arange(leng_per_px/(2*subsample),
                             field_of_view[0],
                             leng_per_px/subsample)
        px_vec_y = np.arange(leng_per_px/(2*subsample),
                             field_of_view[1],
                             leng_per_px/subsample)
        return (px_vec_x,px_vec_y)

    @staticmethod
    def subpixel_grid_leng(field_of_view: np.ndarray,
                           leng_per_px: float,
                           subsample: int = 2) -> tuple[np.ndarray,np.ndarray]:
        (px_vec_x,px_vec_y) = CameraTools.subpixel_vec_leng(
            field_of_view,
            leng_per_px,
            subsample)
        return np.meshgrid(px_vec_x,px_vec_y)

    @staticmethod
    def vectorise_subpixel_grid_leng(field_of_view: np.ndarray,
                                     leng_per_px: float,
                                     subsample: int = 2) -> tuple[np.ndarray,np.ndarray]:
        (px_grid_x,px_grid_y) = CameraTools.subpixel_grid_leng(
            field_of_view,
            leng_per_px,
            subsample)
        return (px_grid_x.flatten(),px_grid_y.flatten())

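    # Illustration (editor's note, not in the released source): the *_px
    # helpers above return pixel-centre coordinates, and the subpixel variants
    # return the centres of subsample**2 sub-cells per pixel. For example,
    # subpixel_vec_px(np.array((2,2)),subsample=2) gives centres at
    # 0.25, 0.75, 1.25, 1.75 along each axis: four samples across a 2 px span.
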
    @staticmethod
    def calc_resolution_from_sim_2d(pixels_count: np.ndarray,
                                    coords: np.ndarray,
                                    pixels_border: int,
                                    view_plane_axes: tuple[int,int] = (0,1),
                                    ) -> float:

        coords_min = np.min(coords, axis=0)
        coords_max = np.max(coords, axis=0)
        field_of_view = np.abs(coords_max - coords_min)
        roi_px = np.array(pixels_count - 2*pixels_border,dtype=np.float64)

        resolution = np.zeros_like(view_plane_axes,dtype=np.float64)
        # Index the field of view by the world axis and the pixel counts by
        # the image axis so view planes other than (0,1) do not over-index
        for ii,ax in enumerate(view_plane_axes):
            resolution[ii] = field_of_view[ax] / roi_px[ii]

        return np.max(resolution)

    @staticmethod
    def calc_roi_cent_from_sim_2d(coords: np.ndarray) -> np.ndarray:
        return np.mean(coords,axis=0)

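    # Illustration (editor's note, not in the released source): for a
    # 1000x1000 px camera with a 10 px border the usable region is 980 px per
    # axis, so a part spanning 98 mm along its widest view-plane axis gives
    # calc_resolution_from_sim_2d = 98/980 = 0.1 mm/px.
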
    @staticmethod
    def crop_image_rectangle(image: np.ndarray,
                             pixels_count: np.ndarray,
                             corner: tuple[int,int] = (0,0)
                             ) -> np.ndarray:

        # The crop window runs from the corner to corner + the requested size
        crop_x = np.array((corner[0],corner[0]+pixels_count[0]),dtype=np.int32)
        crop_y = np.array((corner[1],corner[1]+pixels_count[1]),dtype=np.int32)

        if corner[0] < 0:
            crop_x[0] = 0
            warnings.warn("Crop edge outside image, setting to image edge.")

        if corner[1] < 0:
            crop_y[0] = 0
            warnings.warn("Crop edge outside image, setting to image edge.")

        # NOTE: images are indexed [y,x] so shape[1] is the x dimension
        if (corner[0]+pixels_count[0]) > image.shape[1]:
            crop_x[1] = image.shape[1]
            warnings.warn("Crop edge outside image, setting to image edge.")

        if (corner[1]+pixels_count[1]) > image.shape[0]:
            crop_y[1] = image.shape[0]
            warnings.warn("Crop edge outside image, setting to image edge.")

        return image[crop_y[0]:crop_y[1],crop_x[0]:crop_x[1]]

    @staticmethod
    def average_subpixel_image(subpx_image: np.ndarray,
                               subsample: int) -> np.ndarray:
        if subsample <= 1:
            return subpx_image

        conv_mask = np.ones((subsample,subsample))/(subsample**2)
        subpx_image_conv = convolve2d(subpx_image,conv_mask,mode='same')
        # scipy's 'same' mode keeps full[(subsample-1)//2:...], so the mean of
        # the subpixel block [n*subsample,(n+1)*subsample) lands at index
        # n*subsample + subsample//2 along each axis
        avg_image = subpx_image_conv[subsample//2::subsample,
                                     subsample//2::subsample]
        return avg_image

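    # Illustration (editor's note, not in the released source): an image
    # rendered on the subpixel grid with subsample=2 has 2x2 sub-cells per
    # physical pixel; average_subpixel_image collapses each block to its mean,
    # returning an image at the camera's native pixel resolution.
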
    @staticmethod
    def build_sensor_data_from_camera_2d(cam_data: CameraData2D) -> SensorData:
        pixels_vectorised = CameraTools.vectorise_pixel_grid_leng(
            cam_data.field_of_view,
            cam_data.leng_per_px)

        positions = np.zeros((pixels_vectorised[0].shape[0],3))
        for ii,vv in enumerate(cam_data.view_axes):
            positions[:,vv] = pixels_vectorised[ii] + cam_data.roi_shift_world[ii]

        if cam_data.angle is None:
            angle = None
        else:
            angle = (cam_data.angle,)

        sens_data = SensorData(positions=positions,
                               sample_times=cam_data.sample_times,
                               angles=angle)

        return sens_data

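    # Illustration (editor's note, not in the released source): this treats
    # every pixel centre as a point sensor, so a 2464x2056 px camera produces
    # roughly 5.1 million sensor positions in the returned SensorData.
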
    #---------------------------------------------------------------------------
    # NOTE: keep these functions!
    # These functions work for 3D cameras, calculating the imaging distance
    # and field of view while accounting for camera rotation by rotating the
    # bounding box of the sim into camera coords

    @staticmethod
    def fov_from_cam_rot_3d(cam_rot: Rotation,
                            coords_world: np.ndarray) -> np.ndarray:
        (xx,yy,zz) = (0,1,2)

        cam_to_world_mat = cam_rot.as_matrix()
        # Rotation matrices are orthogonal so the inverse is the transpose
        world_to_cam_mat = cam_to_world_mat.T

        bb_min = np.min(coords_world,axis=0)
        bb_max = np.max(coords_world,axis=0)

        bound_box_world_vecs = np.array([[bb_min[xx],bb_min[yy],bb_max[zz]],
                                         [bb_max[xx],bb_min[yy],bb_max[zz]],
                                         [bb_max[xx],bb_max[yy],bb_max[zz]],
                                         [bb_min[xx],bb_max[yy],bb_max[zz]],

                                         [bb_min[xx],bb_min[yy],bb_min[zz]],
                                         [bb_max[xx],bb_min[yy],bb_min[zz]],
                                         [bb_max[xx],bb_max[yy],bb_min[zz]],
                                         [bb_min[xx],bb_max[yy],bb_min[zz]]])

        bound_box_cam_vecs = np.matmul(world_to_cam_mat,bound_box_world_vecs.T)
        boundbox_cam_leng = (np.max(bound_box_cam_vecs,axis=1)
                             - np.min(bound_box_cam_vecs,axis=1))

        return np.array((boundbox_cam_leng[xx],boundbox_cam_leng[yy]))

    @staticmethod
    def image_dist_from_fov_3d(pixel_num: np.ndarray,
                               pixel_size: np.ndarray,
                               focal_leng: float,
                               fov_leng: np.ndarray) -> np.ndarray:

        sensor_dims = pixel_num * pixel_size
        fov_angle = 2*np.arctan(sensor_dims/(2*focal_leng))
        image_dist = fov_leng/(2*np.tan(fov_angle/2))
        return image_dist

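    # Illustration (editor's note, not in the released source): since
    # tan(fov_angle/2) = sensor_dims/(2*focal_leng), the expression above
    # reduces to image_dist = fov_leng*focal_leng/sensor_dims, i.e. the
    # similar-triangles relation sensor_dims/fov_leng = focal_leng/image_dist
    # applied per sensor axis.
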
    @staticmethod
    def pos_fill_frame_from_rotation(coords_world: np.ndarray,
                                     pixel_num: np.ndarray,
                                     pixel_size: np.ndarray,
                                     focal_leng: float,
                                     cam_rot: Rotation,
                                     frame_fill: float = 1.0,
                                     ) -> tuple[np.ndarray,
                                                np.ndarray]:
        fov_leng = CameraTools.fov_from_cam_rot_3d(
            cam_rot=cam_rot,
            coords_world=coords_world,
        )

        # Scale the FOV by the given factor: values greater than 1.0 zoom out,
        # ensuring the mesh is wholly within the image
        fov_leng = frame_fill*fov_leng

        image_dist = CameraTools.image_dist_from_fov_3d(
            pixel_num=pixel_num,
            pixel_size=pixel_size,
            focal_leng=focal_leng,
            fov_leng=fov_leng,
        )

        # The ROI centre is taken on the view plane with the out-of-plane
        # coordinate assumed to be zero so that the 3-vector addition below
        # along the camera stand-off direction is consistent
        roi_pos_world = np.zeros((3,))
        roi_pos_world[:-1] = (np.max(coords_world[:,:-1],axis=0)
                              + np.min(coords_world[:,:-1],axis=0))/2.0
        cam_z_dir_world = cam_rot.as_matrix()[:,-1]
        cam_pos_world = (roi_pos_world + np.max(image_dist)*cam_z_dir_world)

        return (roi_pos_world,cam_pos_world)

    #---------------------------------------------------------------------------
    # Blender camera tools

    @staticmethod
    def calculate_FOV(cam_data: CameraData) -> tuple[float, float]:
        """A method to calculate the camera's field of view in mm.

        Parameters
        ----------
        cam_data : CameraData
            A dataclass containing the camera parameters

        Returns
        -------
        tuple[float, float]
            A tuple containing the field of view in mm in both the x and y
            directions
        """
        FOV_x = (((cam_data.image_dist - cam_data.focal_length)
                  / cam_data.focal_length) *
                 (cam_data.pixels_size) *
                 cam_data.pixels_num[0])[0]
        FOV_y = (cam_data.pixels_num[1] / cam_data.pixels_num[0]) * FOV_x
        FOV_mm = (FOV_x, FOV_y)
        return FOV_mm

    @staticmethod
    def blender_FOV(cam_data: CameraData) -> tuple[float, float]:
        """A method to calculate the camera's field of view in mm using
        Blender's method. This differs from calculate_FOV by one
        simplification: the full image distance is used in place of
        (image_dist - focal_length).

        Parameters
        ----------
        cam_data : CameraData
            A dataclass containing the camera parameters

        Returns
        -------
        tuple[float, float]
            A tuple containing the FOV in mm in the x and y directions
        """
        FOV_x = (cam_data.pixels_num[0] * cam_data.pixels_size[0]
                 * cam_data.image_dist) / cam_data.focal_length
        FOV_y = (cam_data.pixels_num[1] / cam_data.pixels_num[0]) * FOV_x
        FOV_blender = (FOV_x, FOV_y)
        return FOV_blender

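    # Illustration (editor's note, not in the released source): for
    # focal_length=50mm, image_dist=550mm and 2000 px at 0.005 mm/px,
    # calculate_FOV gives ((550-50)/50)*0.005*2000 = 100 mm while blender_FOV
    # gives (2000*0.005*550)/50 = 110 mm; the two converge as image_dist
    # grows large relative to the focal length.
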
    @staticmethod
    def calculate_mm_px_resolution(cam_data: CameraData) -> float:
        """Function to calculate the mm/px resolution of a camera.

        Parameters
        ----------
        cam_data : CameraData
            A dataclass containing the camera parameters

        Returns
        -------
        float
            The mm/px resolution
        """
        FOV_mm = CameraTools.blender_FOV(cam_data)
        resolution = FOV_mm[0] / cam_data.pixels_num[0]
        return resolution

    @staticmethod
    def focal_length_from_resolution(pixels_size: np.ndarray,
                                     working_dist: float,
                                     resolution: float) -> float:
        """A method to calculate the focal length required to achieve a given
        resolution for a given pixel size and working distance. This can be
        used for a 2D setup, or for camera 0 of a stereo setup.

        Parameters
        ----------
        pixels_size : np.ndarray
            The camera pixel size in the x and y directions (in mm).
        working_dist : float
            The working distance from the camera to the sample.
        resolution : float
            The desired resolution in mm/px.

        Returns
        -------
        float
            The focal length required to obtain the desired image resolution.
        """
        focal_length = working_dist / (resolution / pixels_size[0])
        return focal_length

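    # Illustration (editor's note, not in the released source): with 0.005 mm
    # pixels, a 500 mm working distance and a target resolution of 0.05 mm/px,
    # the required focal length is 500/(0.05/0.005) = 50 mm.
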
    @staticmethod
    def blender_camera_from_resolution(pixels_num: np.ndarray,
                                       pixels_size: np.ndarray,
                                       working_dist: float,
                                       resolution: float) -> CameraData:
        """A convenience function to create a Blender camera from its pixel
        count, pixel size, working distance and desired resolution.

        Parameters
        ----------
        pixels_num : np.ndarray
            The number of pixels in the camera, in the x and y directions.
        pixels_size : np.ndarray
            The camera pixel size in mm, in the x and y directions.
        working_dist : float
            The working distance of the camera.
        resolution : float
            The desired mm/px resolution.

        Returns
        -------
        CameraData
            A dataclass containing the created camera's parameters.
        """
        focal_length = CameraTools.focal_length_from_resolution(pixels_size,
                                                                working_dist,
                                                                resolution)

        cam_data = CameraData(pixels_num=pixels_num,
                              pixels_size=pixels_size,
                              pos_world=(0, 0, working_dist),
                              rot_world=Rotation.from_euler("xyz", [0, 0, 0]),
                              roi_cent_world=(0, 0, 0),
                              focal_length=focal_length)
        return cam_data

    @staticmethod
    def symmetric_stereo_cameras(cam_data_0: CameraData,
                                 stereo_angle: float) -> CameraStereo:
        """A convenience function to set up a symmetric stereo camera system
        from an initial CameraData dataclass and a stereo angle (in degrees).
        Both cameras are assumed to share the same basic parameters.

        Parameters
        ----------
        cam_data_0 : CameraData
            A dataclass containing the camera parameters for a single camera,
            which will become camera 0.
        stereo_angle : float
            The stereo angle between the two cameras.

        Returns
        -------
        CameraStereo
            An instance of the CameraStereo class. This class contains
            information about each of the cameras, as well as the extrinsic
            parameters between them.
        """
        cam_data_1 = copy.deepcopy(cam_data_0)
        # Stereo baseline between the two camera positions
        base = 2 * cam_data_0.pos_world[2] * np.tan(np.radians(stereo_angle) / 2)

        cam_data_0.pos_world[0] -= base / 2
        cam_data_1.pos_world[0] += base / 2

        # Rotate each camera by half the stereo angle towards the centre line
        cam_0_rot = (0, -np.radians(stereo_angle / 2), 0)
        cam_0_rot = Rotation.from_euler("xyz", cam_0_rot, degrees=False)
        cam_data_0.rot_world = cam_0_rot

        cam_1_rot = (0, np.radians(stereo_angle / 2), 0)
        cam_1_rot = Rotation.from_euler("xyz", cam_1_rot, degrees=False)
        cam_data_1.rot_world = cam_1_rot

        stereo_system = CameraStereo(cam_data_0, cam_data_1)

        return stereo_system

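    # Illustration (editor's note, not in the released source): the two
    # cameras sit at x = -base/2 and x = +base/2 with stand-off z, each toed
    # in by stereo_angle/2, so tan(stereo_angle/2) = (base/2)/z and the two
    # optical axes intersect at the origin on the z=0 plane.
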
    @staticmethod
    def faceon_stereo_cameras(cam_data_0: CameraData,
                              stereo_angle: float) -> CameraStereo:
        """A convenience function to set up a face-on stereo camera system
        from an initial CameraData dataclass and a stereo angle (in degrees).
        Camera 0 views the sample face-on; camera 1 is offset and rotated by
        the full stereo angle. Both cameras are assumed to share the same
        basic parameters.

        Parameters
        ----------
        cam_data_0 : CameraData
            A dataclass containing the camera parameters for a single camera,
            which will become the face-on camera 0.
        stereo_angle : float
            The stereo angle between the two cameras.

        Returns
        -------
        CameraStereo
            An instance of the CameraStereo class. This class contains
            information about each of the cameras, as well as the extrinsic
            parameters between them.
        """
        cam_data_1 = copy.deepcopy(cam_data_0)
        # Offset camera 1 so that it converges on camera 0's view centre
        base = cam_data_0.pos_world[2] * np.tan(np.radians(stereo_angle))
        cam_data_1.pos_world[0] += base

        rotation_angle = (0, np.radians(stereo_angle), 0)
        rotation_angle = Rotation.from_euler("xyz", rotation_angle, degrees=False)
        cam_data_1.rot_world = rotation_angle

        stereo_system = CameraStereo(cam_data_0, cam_data_1)

        return stereo_system
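
A minimal usage sketch for the tools above (an editor's illustration, not shipped in the wheel): it builds a camera that achieves a target mm/px resolution and pairs it into a symmetric stereo rig. The import path is inferred from the wheel's RECORD layout, and CameraData is assumed to store pos_world as a mutable array since symmetric_stereo_cameras edits the positions in place; all parameter values are arbitrary examples.

    import numpy as np
    from pyvale.cameratools import CameraTools

    # A camera that achieves 0.05 mm/px at a 500 mm working distance
    cam_0 = CameraTools.blender_camera_from_resolution(
        pixels_num=np.array((2464,2056)),        # sensor size in px
        pixels_size=np.array((0.00345,0.00345)), # pixel pitch in mm
        working_dist=500.0,                      # mm
        resolution=0.05)                         # mm/px

    # Pair it into a symmetric stereo rig with a 15 degree included angle
    stereo = CameraTools.symmetric_stereo_cameras(cam_0, stereo_angle=15.0)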