pyvale 2025.5.3__cp311-cp311-macosx_13_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pyvale might be problematic.

Files changed (175)
  1. pyvale/.dylibs/libomp.dylib +0 -0
  2. pyvale/__init__.py +89 -0
  3. pyvale/analyticmeshgen.py +102 -0
  4. pyvale/analyticsimdatafactory.py +91 -0
  5. pyvale/analyticsimdatagenerator.py +323 -0
  6. pyvale/blendercalibrationdata.py +15 -0
  7. pyvale/blenderlightdata.py +26 -0
  8. pyvale/blendermaterialdata.py +15 -0
  9. pyvale/blenderrenderdata.py +30 -0
  10. pyvale/blenderscene.py +488 -0
  11. pyvale/blendertools.py +420 -0
  12. pyvale/camera.py +146 -0
  13. pyvale/cameradata.py +69 -0
  14. pyvale/cameradata2d.py +84 -0
  15. pyvale/camerastereo.py +217 -0
  16. pyvale/cameratools.py +522 -0
  17. pyvale/cython/rastercyth.c +32211 -0
  18. pyvale/cython/rastercyth.cpython-311-darwin.so +0 -0
  19. pyvale/cython/rastercyth.py +640 -0
  20. pyvale/data/__init__.py +5 -0
  21. pyvale/data/cal_target.tiff +0 -0
  22. pyvale/data/case00_HEX20_out.e +0 -0
  23. pyvale/data/case00_HEX27_out.e +0 -0
  24. pyvale/data/case00_HEX8_out.e +0 -0
  25. pyvale/data/case00_TET10_out.e +0 -0
  26. pyvale/data/case00_TET14_out.e +0 -0
  27. pyvale/data/case00_TET4_out.e +0 -0
  28. pyvale/data/case13_out.e +0 -0
  29. pyvale/data/case16_out.e +0 -0
  30. pyvale/data/case17_out.e +0 -0
  31. pyvale/data/case18_1_out.e +0 -0
  32. pyvale/data/case18_2_out.e +0 -0
  33. pyvale/data/case18_3_out.e +0 -0
  34. pyvale/data/case25_out.e +0 -0
  35. pyvale/data/case26_out.e +0 -0
  36. pyvale/data/optspeckle_2464x2056px_spec5px_8bit_gblur1px.tiff +0 -0
  37. pyvale/dataset.py +325 -0
  38. pyvale/errorcalculator.py +109 -0
  39. pyvale/errordriftcalc.py +146 -0
  40. pyvale/errorintegrator.py +336 -0
  41. pyvale/errorrand.py +607 -0
  42. pyvale/errorsyscalib.py +134 -0
  43. pyvale/errorsysdep.py +327 -0
  44. pyvale/errorsysfield.py +414 -0
  45. pyvale/errorsysindep.py +808 -0
  46. pyvale/examples/__init__.py +5 -0
  47. pyvale/examples/basics/ex1_1_basicscalars_therm2d.py +131 -0
  48. pyvale/examples/basics/ex1_2_sensormodel_therm2d.py +158 -0
  49. pyvale/examples/basics/ex1_3_customsens_therm3d.py +216 -0
  50. pyvale/examples/basics/ex1_4_basicerrors_therm3d.py +153 -0
  51. pyvale/examples/basics/ex1_5_fielderrs_therm3d.py +168 -0
  52. pyvale/examples/basics/ex1_6_caliberrs_therm2d.py +133 -0
  53. pyvale/examples/basics/ex1_7_spatavg_therm2d.py +123 -0
  54. pyvale/examples/basics/ex2_1_basicvectors_disp2d.py +112 -0
  55. pyvale/examples/basics/ex2_2_vectorsens_disp2d.py +111 -0
  56. pyvale/examples/basics/ex2_3_sensangle_disp2d.py +139 -0
  57. pyvale/examples/basics/ex2_4_chainfielderrs_disp2d.py +196 -0
  58. pyvale/examples/basics/ex2_5_vectorfields3d_disp3d.py +109 -0
  59. pyvale/examples/basics/ex3_1_basictensors_strain2d.py +114 -0
  60. pyvale/examples/basics/ex3_2_tensorsens2d_strain2d.py +111 -0
  61. pyvale/examples/basics/ex3_3_tensorsens3d_strain3d.py +182 -0
  62. pyvale/examples/basics/ex4_1_expsim2d_thermmech2d.py +171 -0
  63. pyvale/examples/basics/ex4_2_expsim3d_thermmech3d.py +252 -0
  64. pyvale/examples/genanalyticdata/ex1_1_scalarvisualisation.py +35 -0
  65. pyvale/examples/genanalyticdata/ex1_2_scalarcasebuild.py +43 -0
  66. pyvale/examples/genanalyticdata/ex2_1_analyticsensors.py +80 -0
  67. pyvale/examples/imagedef2d/ex_imagedef2d_todisk.py +79 -0
  68. pyvale/examples/renderblender/ex1_1_blenderscene.py +121 -0
  69. pyvale/examples/renderblender/ex1_2_blenderdeformed.py +119 -0
  70. pyvale/examples/renderblender/ex2_1_stereoscene.py +128 -0
  71. pyvale/examples/renderblender/ex2_2_stereodeformed.py +131 -0
  72. pyvale/examples/renderblender/ex3_1_blendercalibration.py +120 -0
  73. pyvale/examples/renderrasterisation/ex_rastenp.py +153 -0
  74. pyvale/examples/renderrasterisation/ex_rastercyth_oneframe.py +218 -0
  75. pyvale/examples/renderrasterisation/ex_rastercyth_static_cypara.py +187 -0
  76. pyvale/examples/renderrasterisation/ex_rastercyth_static_pypara.py +190 -0
  77. pyvale/examples/visualisation/ex1_1_plot_traces.py +102 -0
  78. pyvale/examples/visualisation/ex2_1_animate_sim.py +89 -0
  79. pyvale/experimentsimulator.py +175 -0
  80. pyvale/field.py +128 -0
  81. pyvale/fieldconverter.py +351 -0
  82. pyvale/fieldsampler.py +111 -0
  83. pyvale/fieldscalar.py +166 -0
  84. pyvale/fieldtensor.py +218 -0
  85. pyvale/fieldtransform.py +388 -0
  86. pyvale/fieldvector.py +213 -0
  87. pyvale/generatorsrandom.py +505 -0
  88. pyvale/imagedef2d.py +569 -0
  89. pyvale/integratorfactory.py +240 -0
  90. pyvale/integratorquadrature.py +217 -0
  91. pyvale/integratorrectangle.py +165 -0
  92. pyvale/integratorspatial.py +89 -0
  93. pyvale/integratortype.py +43 -0
  94. pyvale/output.py +17 -0
  95. pyvale/pyvaleexceptions.py +11 -0
  96. pyvale/raster.py +31 -0
  97. pyvale/rastercy.py +77 -0
  98. pyvale/rasternp.py +603 -0
  99. pyvale/rendermesh.py +147 -0
  100. pyvale/sensorarray.py +178 -0
  101. pyvale/sensorarrayfactory.py +196 -0
  102. pyvale/sensorarraypoint.py +278 -0
  103. pyvale/sensordata.py +71 -0
  104. pyvale/sensordescriptor.py +213 -0
  105. pyvale/sensortools.py +142 -0
  106. pyvale/simcases/case00_HEX20.i +242 -0
  107. pyvale/simcases/case00_HEX27.i +242 -0
  108. pyvale/simcases/case00_HEX8.i +242 -0
  109. pyvale/simcases/case00_TET10.i +242 -0
  110. pyvale/simcases/case00_TET14.i +242 -0
  111. pyvale/simcases/case00_TET4.i +242 -0
  112. pyvale/simcases/case01.i +101 -0
  113. pyvale/simcases/case02.i +156 -0
  114. pyvale/simcases/case03.i +136 -0
  115. pyvale/simcases/case04.i +181 -0
  116. pyvale/simcases/case05.i +234 -0
  117. pyvale/simcases/case06.i +305 -0
  118. pyvale/simcases/case07.geo +135 -0
  119. pyvale/simcases/case07.i +87 -0
  120. pyvale/simcases/case08.geo +144 -0
  121. pyvale/simcases/case08.i +153 -0
  122. pyvale/simcases/case09.geo +204 -0
  123. pyvale/simcases/case09.i +87 -0
  124. pyvale/simcases/case10.geo +204 -0
  125. pyvale/simcases/case10.i +257 -0
  126. pyvale/simcases/case11.geo +337 -0
  127. pyvale/simcases/case11.i +147 -0
  128. pyvale/simcases/case12.geo +388 -0
  129. pyvale/simcases/case12.i +329 -0
  130. pyvale/simcases/case13.i +140 -0
  131. pyvale/simcases/case14.i +159 -0
  132. pyvale/simcases/case15.geo +337 -0
  133. pyvale/simcases/case15.i +150 -0
  134. pyvale/simcases/case16.geo +391 -0
  135. pyvale/simcases/case16.i +357 -0
  136. pyvale/simcases/case17.geo +135 -0
  137. pyvale/simcases/case17.i +144 -0
  138. pyvale/simcases/case18.i +254 -0
  139. pyvale/simcases/case18_1.i +254 -0
  140. pyvale/simcases/case18_2.i +254 -0
  141. pyvale/simcases/case18_3.i +254 -0
  142. pyvale/simcases/case19.geo +252 -0
  143. pyvale/simcases/case19.i +99 -0
  144. pyvale/simcases/case20.geo +252 -0
  145. pyvale/simcases/case20.i +250 -0
  146. pyvale/simcases/case21.geo +74 -0
  147. pyvale/simcases/case21.i +155 -0
  148. pyvale/simcases/case22.geo +82 -0
  149. pyvale/simcases/case22.i +140 -0
  150. pyvale/simcases/case23.geo +164 -0
  151. pyvale/simcases/case23.i +140 -0
  152. pyvale/simcases/case24.geo +79 -0
  153. pyvale/simcases/case24.i +123 -0
  154. pyvale/simcases/case25.geo +82 -0
  155. pyvale/simcases/case25.i +140 -0
  156. pyvale/simcases/case26.geo +166 -0
  157. pyvale/simcases/case26.i +140 -0
  158. pyvale/simcases/run_1case.py +61 -0
  159. pyvale/simcases/run_all_cases.py +69 -0
  160. pyvale/simcases/run_build_case.py +64 -0
  161. pyvale/simcases/run_example_cases.py +69 -0
  162. pyvale/simtools.py +67 -0
  163. pyvale/visualexpplotter.py +191 -0
  164. pyvale/visualimagedef.py +74 -0
  165. pyvale/visualimages.py +76 -0
  166. pyvale/visualopts.py +493 -0
  167. pyvale/visualsimanimator.py +111 -0
  168. pyvale/visualsimsensors.py +318 -0
  169. pyvale/visualtools.py +136 -0
  170. pyvale/visualtraceplotter.py +142 -0
  171. pyvale-2025.5.3.dist-info/METADATA +144 -0
  172. pyvale-2025.5.3.dist-info/RECORD +175 -0
  173. pyvale-2025.5.3.dist-info/WHEEL +6 -0
  174. pyvale-2025.5.3.dist-info/licenses/LICENSE +21 -0
  175. pyvale-2025.5.3.dist-info/top_level.txt +1 -0
pyvale/imagedef2d.py ADDED
@@ -0,0 +1,569 @@
+ # ==============================================================================
+ # pyvale: the python validation engine
+ # License: MIT
+ # Copyright (C) 2025 The Computer Aided Validation Team
+ # ==============================================================================
+
+ """
+ NOTE: This module is a feature under development.
+ """
+
+ import time
+ import warnings
+ from dataclasses import dataclass
+ from pathlib import Path
+ import numpy as np
+ from scipy.interpolate import griddata
+ from scipy.interpolate import RectBivariateSpline
+ from scipy import ndimage
+
+ from pyvale.rasternp import edge_function, RasterNP
+ from pyvale.cameradata2d import CameraData2D
+ from pyvale.cameratools import CameraTools
+
+
+ @dataclass(slots=True)
+ class ImageDefOpts:
+     save_path: Path | None = None
+     save_tag: str = "defimage"
+
+     mask_input_image: bool = True
+
+     crop_on: bool = False
+     crop_px: np.ndarray | None = None  # only used to crop the input image if crop_on is True
+
+     calc_res_from_fe: bool = False
+     calc_res_border_px: int = 5
+
+     add_static_ref: bool = False
+
+     fe_interp: str = "linear"
+     fe_rescale: bool = True
+     fe_extrap_outside_fov: bool = True  # forces displacements outside the specimen to extrapolate outside the FOV
+     #subsample: int = 2 # MOVED TO CAMERA DATA
+
+     image_def_order: int = 3
+     image_def_extrap: str = "nearest"
+     image_def_extval: float = 0.0  # only used if image_def_extrap is "constant"
+
+     def_complex_geom: bool = True
+
+     def __post_init__(self) -> None:
+         if self.save_path is None:
+             self.save_path = Path.cwd() / "deformed_images"
+
+
+ class ImageDef2D:
+
+     @staticmethod
+     def image_mask_from_sim(cam_data: CameraData2D,
+                             image: np.ndarray,
+                             coords: np.ndarray,
+                             connectivity: np.ndarray
+                             ) -> tuple[np.ndarray,np.ndarray]:
+
+         # Here to allow for addition
+         #subsample: int = cam_data.subsample
+         subsample: int = 1
+
+         coords_raster = coords - cam_data.roi_cent_world
+         if coords_raster.shape[1] >= 3:
+             coords_raster = coords_raster[:,:-1]
+
+         # Coords NDC: Convert to normalised device coords in the range [-1,1]
+         coords_raster[:,0] = 2*coords_raster[:,0] / cam_data.field_of_view[0]
+         coords_raster[:,1] = 2*coords_raster[:,1] / cam_data.field_of_view[1]
+
+         # Coords Raster: Convert to pixel (raster) coords
+         # Shape = ([X,Y,Z],num_nodes)
+         coords_raster[:,0] = (coords_raster[:,0] + 1)/2 * cam_data.pixels_count[0]
+         coords_raster[:,1] = (1-coords_raster[:,1])/2 * cam_data.pixels_count[1]
+
+         # shape=(num_elems,node_per_elem,coord[x,y])
+         elem_coords = np.ascontiguousarray(coords_raster[connectivity,:])
+
+         # shape=(num_elems,coord[x,y,z])
+         elem_coord_min = np.min(elem_coords,axis=1)
+         elem_coord_max = np.max(elem_coords,axis=1)
+
+         # Check that min/max nodes are within the 4 edges of the camera image
+         # shape=(4_edges_to_check,num_elems)
+         crop_mask = np.zeros([elem_coords.shape[0],4],dtype=np.int8)
+         crop_mask[elem_coord_min[:,0] <= (cam_data.pixels_count[0]-1), 0] = 1
+         crop_mask[elem_coord_min[:,1] <= (cam_data.pixels_count[1]-1), 1] = 1
+         crop_mask[elem_coord_max[:,0] >= 0, 2] = 1
+         crop_mask[elem_coord_max[:,1] >= 0, 3] = 1
+         crop_mask = np.sum(crop_mask,axis=1) == 4
+
+         # Mask the element coords
+         elem_coords = np.ascontiguousarray(elem_coords[crop_mask,:,:])
+
+         # Get only the elements that are within the FOV
+         # Mask the elem coords and the max and min elem coords for processing
+         elem_coord_min = elem_coord_min[crop_mask,:]
+         elem_coord_max = elem_coord_max[crop_mask,:]
+         num_elems_in_image = elem_coord_min.shape[0]
+
+         # Find the indices of the bounding box that each element lies within on
+         # the image, bounded by the upper and lower edges of the image
+         elem_bound_boxes_inds = np.zeros([num_elems_in_image,4],dtype=np.int32)
+         elem_bound_boxes_inds[:,0] = RasterNP.elem_bound_box_low(
+             elem_coord_min[:,0])
+         elem_bound_boxes_inds[:,1] = RasterNP.elem_bound_box_high(
+             elem_coord_max[:,0],
+             cam_data.pixels_count[0]-1)
+         elem_bound_boxes_inds[:,2] = RasterNP.elem_bound_box_low(
+             elem_coord_min[:,1])
+         elem_bound_boxes_inds[:,3] = RasterNP.elem_bound_box_high(
+             elem_coord_max[:,1],
+             cam_data.pixels_count[1]-1)
+
+         num_edges: int = 3
+         if elem_coords.shape[1] > 3:
+             num_edges = 4
+
+         mask_subpixel_buffer = np.full(subsample*cam_data.pixels_count,0.0).T
+         # Raster Loop
+         for ee in range(elem_coords.shape[0]):
+             # Create the subpixel coords inside the bounding box to test with the
+             # edge function. Use the pixel indices of the bounding box.
+             bound_subpx_x = np.arange(elem_bound_boxes_inds[ee,0],
+                                       elem_bound_boxes_inds[ee,1],
+                                       1/subsample) + 1/(2*subsample)
+             bound_subpx_y = np.arange(elem_bound_boxes_inds[ee,2],
+                                       elem_bound_boxes_inds[ee,3],
+                                       1/subsample) + 1/(2*subsample)
+             (bound_subpx_grid_x,bound_subpx_grid_y) = np.meshgrid(bound_subpx_x,
+                                                                   bound_subpx_y)
+             bound_coords_grid_shape = bound_subpx_grid_x.shape
+             # shape=(coord[x,y],num_subpx_in_box)
+             bound_subpx_coords_flat = np.vstack((bound_subpx_grid_x.flatten(),
+                                                  bound_subpx_grid_y.flatten()))
+
+             # Create the subpixel indices for buffer slicing later
+             subpx_inds_x = np.arange(subsample*elem_bound_boxes_inds[ee,0],
+                                      subsample*elem_bound_boxes_inds[ee,1])
+             subpx_inds_y = np.arange(subsample*elem_bound_boxes_inds[ee,2],
+                                      subsample*elem_bound_boxes_inds[ee,3])
+             (subpx_inds_grid_x,subpx_inds_grid_y) = np.meshgrid(subpx_inds_x,
+                                                                 subpx_inds_y)
+
+             edge = np.zeros((num_edges,bound_subpx_coords_flat.shape[1]),dtype=np.float64)
+
+             if num_edges == 4:
+                 edge[0,:] = edge_function(elem_coords[ee,1,:],
+                                           elem_coords[ee,2,:],
+                                           bound_subpx_coords_flat)
+                 edge[1,:] = edge_function(elem_coords[ee,2,:],
+                                           elem_coords[ee,3,:],
+                                           bound_subpx_coords_flat)
+                 edge[2,:] = edge_function(elem_coords[ee,3,:],
+                                           elem_coords[ee,0,:],
+                                           bound_subpx_coords_flat)
+                 edge[3,:] = edge_function(elem_coords[ee,0,:],
+                                           elem_coords[ee,1,:],
+                                           bound_subpx_coords_flat)
+             else:
+                 edge[0,:] = edge_function(elem_coords[ee,1,:],
+                                           elem_coords[ee,2,:],
+                                           bound_subpx_coords_flat)
+                 edge[1,:] = edge_function(elem_coords[ee,2,:],
+                                           elem_coords[ee,0,:],
+                                           bound_subpx_coords_flat)
+                 edge[2,:] = edge_function(elem_coords[ee,0,:],
+                                           elem_coords[ee,1,:],
+                                           bound_subpx_coords_flat)
+
+
+             # Now we check where the edge function is non-negative for all edges
+             edge_check = np.zeros_like(edge,dtype=np.int8)
+             edge_check[edge >= 0.0] = 1
+             edge_check = np.sum(edge_check, axis=0)
+             # Create a mask with the check, TODO check the 3 here for non triangles
+             edge_mask_flat = edge_check == num_edges
+             edge_mask_grid = np.reshape(edge_mask_flat,bound_coords_grid_shape)
+
+             subpx_inds_grid_x = subpx_inds_grid_x[edge_mask_grid]
+             subpx_inds_grid_y = subpx_inds_grid_y[edge_mask_grid]
+             mask_subpixel_buffer[subpx_inds_grid_y,subpx_inds_grid_x] += 1.0
+
+         mask_subpixel_buffer[mask_subpixel_buffer>1.0] = 1.0
+
+         mask_buffer = CameraTools.average_subpixel_image(mask_subpixel_buffer,
+                                                          subsample)
+         image[mask_buffer<1.0] = cam_data.background
+         return (image,mask_subpixel_buffer)
+
+
+     @staticmethod
+     def upsample_image(cam_data: CameraData2D,
+                        input_im: np.ndarray) -> np.ndarray:
+         # Get grid of pixel centroid locations
+         (px_vec_xm,px_vec_ym) = CameraTools.pixel_vec_leng(cam_data.field_of_view,
+                                                            cam_data.leng_per_px)
+
+         # Get grid of sub-pixel centroid locations
+         (subpx_vec_xm,subpx_vec_ym) = CameraTools.subpixel_vec_leng(
+             cam_data.field_of_view,
+             cam_data.leng_per_px,
+             cam_data.subsample)
+
+         # NOTE: See Scipy transition from interp2d docs here:
+         # https://scipy.github.io/devdocs/tutorial/interpolate/interp_transition_guide.html
+         spline_interp = RectBivariateSpline(px_vec_xm,
+                                             px_vec_ym,
+                                             input_im.T)
+         upsampled_image_interp = lambda x_new, y_new: spline_interp(x_new, y_new).T
+
+         # This function will flip the image regardless of the y vector input so flip it
+         # back to FE coords
+         upsampled_image = upsampled_image_interp(subpx_vec_xm,subpx_vec_ym)
+
+         return upsampled_image
+
+
+     @staticmethod
+     def preprocess(cam_data: CameraData2D,
+                    image_input: np.ndarray,
+                    coords: np.ndarray,
+                    connectivity: np.ndarray,
+                    disp_x: np.ndarray,
+                    disp_y: np.ndarray,
+                    id_opts: ImageDefOpts,
+                    print_on: bool = False
+                    ) -> tuple[np.ndarray | None,
+                               np.ndarray | None,
+                               np.ndarray | None,
+                               np.ndarray | None,
+                               np.ndarray | None]:
+
+         if print_on:
+             print("\n"+"="*80)
+             print("IMAGE DEF PRE-PROCESSING\n")
+
+         if not id_opts.save_path.is_dir():
+             id_opts.save_path.mkdir()
+
+         # Make displacements 2D column vectors to allow addition of a static frame
+         if disp_x.ndim == 1:
+             disp_x = np.atleast_2d(disp_x).T
+         if disp_y.ndim == 1:
+             disp_y = np.atleast_2d(disp_y).T
+
+         if id_opts.add_static_ref:
+             num_nodes = coords.shape[0] # type: ignore
+             disp_x = np.hstack((np.zeros((num_nodes,1)),disp_x))
+             disp_y = np.hstack((np.zeros((num_nodes,1)),disp_y))
+
+         image_input = CameraTools.crop_image_rectangle(image_input,
+                                                        cam_data.pixels_count)
+
+         if id_opts.mask_input_image or id_opts.def_complex_geom:
+             if print_on:
+                 print('Image masking or complex geometry on, getting image mask.')
+                 tic = time.perf_counter()
+
+             (image_input,
+              image_mask) = ImageDef2D.image_mask_from_sim(cam_data,
+                                                           image_input,
+                                                           coords,
+                                                           connectivity)
+
+
+             if print_on:
+                 toc = time.perf_counter()
+                 print(f'Calculating image mask took {toc-tic:.4f} seconds')
+         else:
+             image_mask = None
+
+
+         # Image upsampling
+         if print_on:
+             print('\n'+'-'*80)
+             print('GENERATE UPSAMPLED IMAGE\n')
+             print(f'Upsampling input image with a {cam_data.subsample}x{cam_data.subsample} subpixel grid.')
+             tic = time.perf_counter()
+
+         upsampled_image = ImageDef2D.upsample_image(cam_data,image_input)
+
+         if print_on:
+             toc = time.perf_counter()
+             print(f'Upsampling image took {toc-tic:.4f} seconds')
+
+         return (upsampled_image,image_mask,image_input,disp_x,disp_y)
+
+     @staticmethod
+     def deform_one_image(upsampled_image: np.ndarray,
+                          cam_data: CameraData2D,
+                          id_opts: ImageDefOpts,
+                          coords: np.ndarray,
+                          disp: np.ndarray,
+                          image_mask: np.ndarray | None = None,
+                          print_on: bool = True
+                          ) -> tuple[np.ndarray,
+                                     np.ndarray,
+                                     np.ndarray,
+                                     np.ndarray,
+                                     np.ndarray | None]:
+
+         if image_mask is not None:
+             if (image_mask.shape[0] != cam_data.pixels_count[1]) or (image_mask.shape[1] != cam_data.pixels_count[0]):
+                 if image_mask.size == 0:
+                     warnings.warn('Image mask not specified, using default mask of whole image.')
+                 else:
+                     warnings.warn('Image mask size does not match camera, using default mask of whole image.')
+                 image_mask = np.ones([cam_data.pixels_count[1],cam_data.pixels_count[0]])
+
+         (px_grid_xm,
+          px_grid_ym) = CameraTools.pixel_grid_leng(cam_data.field_of_view,
+                                                    cam_data.leng_per_px)
+
+         (subpx_grid_xm,
+          subpx_grid_ym) = CameraTools.subpixel_grid_leng(cam_data.field_of_view,
+                                                          cam_data.leng_per_px,
+                                                          cam_data.subsample)
+
+         #--------------------------------------------------------------------------
+         # Interpolate FE displacements onto the sub-pixel grid
+         if print_on:
+             print('Interpolating displacement onto sub-pixel grid.')
+             tic = time.perf_counter()
+
+         (subpx_disp_x,subpx_disp_y) = _interp_sim_disp_to_subpx_grid(
+             coords,
+             disp,
+             cam_data,
+             id_opts,
+             subpx_grid_xm,
+             subpx_grid_ym)
+
+         if print_on:
+             toc = time.perf_counter()
+             print('Interpolating displacement with NaN extrap took {:.4f} seconds'.format(toc-tic))
+
+         #--------------------------------------------------------------------------
+         # Interpolate sub-pixel gray levels with ndimage toolbox
+         if print_on:
+             print('Deforming sub-pixel image.')
+             tic = time.perf_counter()
+
+         def_image_subpx = _interp_subpx_image(upsampled_image,
+                                               subpx_grid_xm-subpx_disp_x,
+                                               subpx_grid_ym-subpx_disp_y,
+                                               cam_data,
+                                               id_opts)
+
+         if print_on:
+             toc = time.perf_counter()
+             print('Deforming sub-pixel image with ndimage took {:.4f} seconds'.format(toc-tic))
+
+         #--------------------------------------------------------------------------
+         # Average subpixel image
+         if print_on:
+             tic = time.perf_counter()
+
+         def_image = CameraTools.average_subpixel_image(def_image_subpx,
+                                                        cam_data.subsample)
+
+         if print_on:
+             toc = time.perf_counter()
+             print('Averaging sub-pixel image took {:.4f} seconds'.format(toc-tic))
+
+         #--------------------------------------------------------------------------
+         # DEFORMING IMAGE MASK
+         if id_opts.def_complex_geom:
+             if print_on:
+                 print('Deforming image mask.')
+                 tic = time.perf_counter()
+
+             (def_image,def_mask) = _deform_image_mask(def_image,
+                                                       image_mask,
+                                                       px_grid_xm,
+                                                       px_grid_ym,
+                                                       subpx_disp_x,
+                                                       subpx_disp_y,
+                                                       cam_data)
+
+             if print_on:
+                 toc = time.perf_counter()
+                 print('Deforming image mask with ndimage took {:.4f} seconds'.format(toc-tic))
+
+         else:
+             def_mask = None
+
+         # Need to flip the image as all processing above is done with y axis down
+         # from the top left hand corner
+         def_image = def_image[::-1,:]
+
+         return (def_image,def_image_subpx,subpx_disp_x,subpx_disp_y,def_mask)
+
+     @staticmethod
+     def deform_images_to_disk(cam_data: CameraData2D,
+                               upsampled_image: np.ndarray,
+                               coords: np.ndarray,
+                               connectivity: np.ndarray,
+                               disp_x: np.ndarray,
+                               disp_y: np.ndarray,
+                               image_mask: np.ndarray | None,
+                               id_opts: ImageDefOpts,
+                               print_on: bool = False) -> None:
+
+         #---------------------------------------------------------------------------
+         # Image Deformation Loop
+         if print_on:
+             print('\n'+'='*80)
+             print('DEFORMING IMAGES')
+
+         num_frames = disp_x.shape[1]
+         ticl = time.perf_counter()
+
+         for ff in range(num_frames):
+             if print_on:
+                 ticf = time.perf_counter()
+                 print(f'\nDEFORMING FRAME: {ff}')
+
+             disp = np.array((disp_x[:,ff],disp_y[:,ff])).T
+             (def_image,
+              _,
+              _,
+              _,
+              _) = ImageDef2D.deform_one_image(upsampled_image,
+                                               cam_data,
+                                               id_opts,
+                                               coords,
+                                               disp,
+                                               image_mask,
+                                               print_on=print_on)
+
+             save_file = id_opts.save_path / str(f'{id_opts.save_tag}_'+
+                 f'{CameraTools.image_num_str(im_num=ff,width=4)}'+
+                 '.tiff')
+             CameraTools.save_image(save_file,def_image,cam_data.bits)
+
+             if print_on:
+                 tocf = time.perf_counter()
+                 print(f'DEFORMING FRAME: {ff} took {tocf-ticf:.4f} seconds')
+
+         if print_on:
+             tocl = time.perf_counter()
+             print('\n'+'-'*50)
+             print(f'Deforming all images took {tocl-ticl:.4f} seconds')
+             print('-'*50)
+
+             print('\n'+'='*80)
+             print('COMPLETE\n')
+
+
+
+ def _interp_sim_disp_to_subpx_grid(coords: np.ndarray,
+                                    disp: np.ndarray,
+                                    cam_data: CameraData2D,
+                                    id_opts: ImageDefOpts,
+                                    subpx_grid_xm: np.ndarray,
+                                    subpx_grid_ym: np.ndarray
+                                    ) -> tuple[np.ndarray,np.ndarray]:
+
+     # Interpolate displacements onto sub-pixel locations - nan extrapolation
+     subpx_disp_x = griddata((coords[:,0] + disp[:,0] + cam_data.world_to_cam[0],
+                              coords[:,1] + disp[:,1] + cam_data.world_to_cam[1]),
+                             disp[:,0],
+                             (subpx_grid_xm,subpx_grid_ym),
+                             method=id_opts.fe_interp,
+                             fill_value=np.nan,
+                             rescale=id_opts.fe_rescale)
+
+     subpx_disp_y = griddata((coords[:,0] + disp[:,0] + cam_data.world_to_cam[0],
+                              coords[:,1] + disp[:,1] + cam_data.world_to_cam[1]),
+                             disp[:,1],
+                             (subpx_grid_xm,subpx_grid_ym),
+                             method=id_opts.fe_interp,
+                             fill_value=np.nan,
+                             rescale=id_opts.fe_rescale)
+
+     # Ndimage interp can't handle nans so force everything outside the specimen
+     # to extrapolate outside the FOV - then use ndimage opts to control
+     if id_opts.fe_extrap_outside_fov:
+         subpx_disp_ext_vals = 2*cam_data.field_of_view
+     else:
+         subpx_disp_ext_vals = (0.0,0.0)
+
+     # Set the nans to the extrapolation value
+     subpx_disp_x[np.isnan(subpx_disp_x)] = subpx_disp_ext_vals[0]
+     subpx_disp_y[np.isnan(subpx_disp_y)] = subpx_disp_ext_vals[1]
+
+     return (subpx_disp_x,subpx_disp_y)
+
+
+ def _interp_subpx_image(upsampled_image: np.ndarray,
+                         def_subpx_x: np.ndarray,
+                         def_subpx_y: np.ndarray,
+                         cam_data: CameraData2D,
+                         id_opts: ImageDefOpts,
+                         ) -> np.ndarray:
+
+     # Flip needed to be consistent with pixel coords of ndimage
+     def_subpx_x = def_subpx_x[::-1,:]
+     def_subpx_y = def_subpx_y[::-1,:]
+
+     # NDIMAGE: IMAGE DEF
+     # NOTE: need to shift to pixel centroid co-ords from nodal so -0.5 makes the
+     # top left 0,0 in pixel co-ords
+     def_subpx_x_in_px = def_subpx_x*(cam_data.subsample/cam_data.leng_per_px)-0.5
+     def_subpx_y_in_px = def_subpx_y*(cam_data.subsample/cam_data.leng_per_px)-0.5
+     # NOTE: prefilter needs to be on to match griddata and interp2D!
+     # with prefilter on this exactly matches I2D but 10x faster!
+     def_image_subpx = ndimage.map_coordinates(upsampled_image,
+                                               [[def_subpx_y_in_px],
+                                                [def_subpx_x_in_px]],
+                                               prefilter=True,
+                                               order=id_opts.image_def_order,
+                                               mode=id_opts.image_def_extrap,
+                                               cval=id_opts.image_def_extval)
+
+     def_image_subpx = def_image_subpx[0,:,:].squeeze()
+
+     return def_image_subpx
+
+
+ def _deform_image_mask(def_image: np.ndarray,
+                        image_mask: np.ndarray,
+                        px_grid_xm: np.ndarray,
+                        px_grid_ym: np.ndarray,
+                        subpx_disp_x: np.ndarray,
+                        subpx_disp_y: np.ndarray,
+                        cam_data: CameraData2D,
+                        ) -> tuple[np.ndarray,np.ndarray]:
+
+     # This is slow - might be quicker to just deform an upsampled mask
+     px_disp_x = CameraTools.average_subpixel_image(subpx_disp_x,
+                                                    cam_data.subsample)
+     px_disp_y = CameraTools.average_subpixel_image(subpx_disp_y,
+                                                    cam_data.subsample)
+
+     def_px_x = px_grid_xm-px_disp_x
+     def_px_y = px_grid_ym-px_disp_y
+     # Flip needed to be consistent with pixel coords of ndimage
+     def_px_x = def_px_x[::-1,:]
+     def_px_y = def_px_y[::-1,:]
+
+     # NDIMAGE: DEFORM IMAGE MASK
+     # NOTE: need to shift to pixel centroid co-ords from nodal so -0.5 makes the
+     # top left 0,0 in pixel co-ords
+     def_px_x_in_px = def_px_x*(1/cam_data.leng_per_px)-0.5
+     def_px_y_in_px = def_px_y*(1/cam_data.leng_per_px)-0.5
+     # NOTE: prefilter needs to be on to match griddata and interp2D!
+     # with prefilter on this exactly matches I2D but 10x faster!
+     def_mask = ndimage.map_coordinates(image_mask,
+                                        [[def_px_y_in_px],
+                                         [def_px_x_in_px]],
+                                        prefilter=True,
+                                        order=2,
+                                        mode='constant',
+                                        cval=0)
+
+     def_mask = def_mask[0,:,:].squeeze()
+     # Use the deformed image mask to mask the deformed image
+     # Mask is 0-1 with 1 being definitely inside the sample 0 outside
+     def_image[def_mask<0.51] = cam_data.background # type: ignore
+
+     # Return the deformed mask so the caller receives the mask actually applied
+     return (def_image,def_mask)
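
For orientation, the sketch below shows how the pieces of this new module might be wired together. It is not taken from the package: building the CameraData2D instance and the simulation arrays (coords, connectivity, disp_x, disp_y and the input speckle image) is assumed to happen elsewhere (the bundled pyvale/examples/imagedef2d/ex_imagedef2d_todisk.py presumably demonstrates this end to end), and the variable names for those inputs are illustrative only. The calls and keyword arguments shown match the signatures added in this diff.

# Minimal usage sketch (assumes cam_data, image_input, coords, connectivity,
# disp_x and disp_y already exist; see the bundled imagedef2d example).
from pathlib import Path
from pyvale.imagedef2d import ImageDefOpts, ImageDef2D

id_opts = ImageDefOpts(
    save_path=Path.cwd() / "deformed_images",  # default location if omitted
    mask_input_image=True,   # mask pixels that fall outside the simulated part
    add_static_ref=True,     # prepend an undeformed reference frame
    def_complex_geom=True,   # also deform the geometry mask for each frame
)

# preprocess() crops/masks the input image, upsamples it onto the sub-pixel
# grid and reshapes the displacement arrays to shape (num_nodes, num_frames).
(upsampled_image,
 image_mask,
 image_input,
 disp_x,
 disp_y) = ImageDef2D.preprocess(cam_data,
                                 image_input,
                                 coords,
                                 connectivity,
                                 disp_x,
                                 disp_y,
                                 id_opts,
                                 print_on=True)

# deform_images_to_disk() calls deform_one_image() once per displacement frame
# and writes numbered TIFF files into id_opts.save_path.
ImageDef2D.deform_images_to_disk(cam_data,
                                 upsampled_image,
                                 coords,
                                 connectivity,
                                 disp_x,
                                 disp_y,
                                 image_mask,
                                 id_opts,
                                 print_on=True)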