pyvale 2025.5.3-cp311-cp311-musllinux_1_2_aarch64.whl → 2025.7.1-cp311-cp311-musllinux_1_2_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyvale/__init__.py +12 -0
- pyvale/blendercalibrationdata.py +3 -1
- pyvale/blenderscene.py +7 -5
- pyvale/blendertools.py +27 -5
- pyvale/camera.py +1 -0
- pyvale/cameradata.py +3 -0
- pyvale/camerasensor.py +147 -0
- pyvale/camerastereo.py +4 -4
- pyvale/cameratools.py +23 -61
- pyvale/cython/rastercyth.c +1657 -1352
- pyvale/cython/rastercyth.cpython-311-aarch64-linux-musl.so +0 -0
- pyvale/cython/rastercyth.py +71 -26
- pyvale/data/DIC_Challenge_Star_Noise_Def.tiff +0 -0
- pyvale/data/DIC_Challenge_Star_Noise_Ref.tiff +0 -0
- pyvale/data/plate_hole_def0000.tiff +0 -0
- pyvale/data/plate_hole_def0001.tiff +0 -0
- pyvale/data/plate_hole_ref0000.tiff +0 -0
- pyvale/data/plate_rigid_def0000.tiff +0 -0
- pyvale/data/plate_rigid_def0001.tiff +0 -0
- pyvale/data/plate_rigid_ref0000.tiff +0 -0
- pyvale/dataset.py +96 -6
- pyvale/dic/cpp/dicbruteforce.cpp +370 -0
- pyvale/dic/cpp/dicfourier.cpp +648 -0
- pyvale/dic/cpp/dicinterpolator.cpp +559 -0
- pyvale/dic/cpp/dicmain.cpp +215 -0
- pyvale/dic/cpp/dicoptimizer.cpp +675 -0
- pyvale/dic/cpp/dicrg.cpp +137 -0
- pyvale/dic/cpp/dicscanmethod.cpp +677 -0
- pyvale/dic/cpp/dicsmooth.cpp +138 -0
- pyvale/dic/cpp/dicstrain.cpp +383 -0
- pyvale/dic/cpp/dicutil.cpp +563 -0
- pyvale/dic2d.py +164 -0
- pyvale/dic2dcpp.cpython-311-aarch64-linux-musl.so +0 -0
- pyvale/dicchecks.py +476 -0
- pyvale/dicdataimport.py +247 -0
- pyvale/dicregionofinterest.py +887 -0
- pyvale/dicresults.py +55 -0
- pyvale/dicspecklegenerator.py +238 -0
- pyvale/dicspecklequality.py +305 -0
- pyvale/dicstrain.py +387 -0
- pyvale/dicstrainresults.py +37 -0
- pyvale/errorintegrator.py +10 -8
- pyvale/examples/basics/ex1_1_basicscalars_therm2d.py +124 -113
- pyvale/examples/basics/ex1_2_sensormodel_therm2d.py +124 -132
- pyvale/examples/basics/ex1_3_customsens_therm3d.py +199 -195
- pyvale/examples/basics/ex1_4_basicerrors_therm3d.py +125 -121
- pyvale/examples/basics/ex1_5_fielderrs_therm3d.py +145 -141
- pyvale/examples/basics/ex1_6_caliberrs_therm2d.py +96 -101
- pyvale/examples/basics/ex1_7_spatavg_therm2d.py +109 -105
- pyvale/examples/basics/ex2_1_basicvectors_disp2d.py +92 -91
- pyvale/examples/basics/ex2_2_vectorsens_disp2d.py +96 -90
- pyvale/examples/basics/ex2_3_sensangle_disp2d.py +88 -89
- pyvale/examples/basics/ex2_4_chainfielderrs_disp2d.py +172 -171
- pyvale/examples/basics/ex2_5_vectorfields3d_disp3d.py +88 -86
- pyvale/examples/basics/ex3_1_basictensors_strain2d.py +90 -90
- pyvale/examples/basics/ex3_2_tensorsens2d_strain2d.py +93 -91
- pyvale/examples/basics/ex3_3_tensorsens3d_strain3d.py +172 -160
- pyvale/examples/basics/ex4_1_expsim2d_thermmech2d.py +154 -148
- pyvale/examples/basics/ex4_2_expsim3d_thermmech3d.py +249 -231
- pyvale/examples/dic/ex1_region_of_interest.py +98 -0
- pyvale/examples/dic/ex2_plate_with_hole.py +149 -0
- pyvale/examples/dic/ex3_plate_with_hole_strain.py +93 -0
- pyvale/examples/dic/ex4_dic_blender.py +95 -0
- pyvale/examples/dic/ex5_dic_challenge.py +102 -0
- pyvale/examples/imagedef2d/ex_imagedef2d_todisk.py +4 -2
- pyvale/examples/renderblender/ex1_1_blenderscene.py +152 -105
- pyvale/examples/renderblender/ex1_2_blenderdeformed.py +151 -100
- pyvale/examples/renderblender/ex2_1_stereoscene.py +183 -116
- pyvale/examples/renderblender/ex2_2_stereodeformed.py +185 -112
- pyvale/examples/renderblender/ex3_1_blendercalibration.py +164 -109
- pyvale/examples/renderrasterisation/ex_rastenp.py +74 -35
- pyvale/examples/renderrasterisation/ex_rastercyth_oneframe.py +6 -13
- pyvale/examples/renderrasterisation/ex_rastercyth_static_cypara.py +2 -2
- pyvale/examples/renderrasterisation/ex_rastercyth_static_pypara.py +2 -4
- pyvale/imagedef2d.py +3 -2
- pyvale/imagetools.py +137 -0
- pyvale/rastercy.py +34 -4
- pyvale/rasternp.py +300 -276
- pyvale/rasteropts.py +58 -0
- pyvale/renderer.py +47 -0
- pyvale/rendermesh.py +52 -62
- pyvale/renderscene.py +51 -0
- pyvale/sensorarrayfactory.py +2 -2
- pyvale/sensortools.py +19 -35
- pyvale/simcases/case21.i +1 -1
- pyvale/simcases/run_1case.py +8 -0
- pyvale/simtools.py +2 -2
- pyvale/visualsimplotter.py +180 -0
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/METADATA +11 -57
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/RECORD +96 -56
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/WHEEL +1 -1
- pyvale.libs/libgcc_s-69c45f16.so.1 +0 -0
- pyvale.libs/libgomp-b626072d.so.1.0.0 +0 -0
- pyvale.libs/libstdc++-1f1a71be.so.6.0.33 +0 -0
- pyvale/examples/visualisation/ex1_1_plot_traces.py +0 -102
- pyvale/examples/visualisation/ex2_1_animate_sim.py +0 -89
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/licenses/LICENSE +0 -0
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/top_level.txt +0 -0
pyvale/__init__.py
CHANGED
@@ -46,6 +46,7 @@ from pyvale.camerastereo import *
 import pyvale.cython.rastercyth as rastercyth
 from pyvale.rastercy import *
 
+from pyvale.renderscene import *
 from pyvale.rendermesh import *
 from pyvale.rasternp import *
 
@@ -87,3 +88,14 @@ from pyvale.simtools import *
 from pyvale.output import *
 from pyvale.pyvaleexceptions import *
 
+from pyvale.experimentsimulator import *
+
+from pyvale.dicspecklegenerator import *
+from pyvale.dicspecklequality import *
+from pyvale.dicregionofinterest import *
+from pyvale.dic2d import *
+from pyvale.dicdataimport import *
+from pyvale.dicresults import *
+from pyvale.dic2dcpp import *
+from pyvale.dicstrain import *
+from pyvale.dicchecks import *
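The hunks above pull the new DIC and rendering modules into the top-level namespace with star imports, so their public names become available directly as pyvale.* attributes. A minimal sketch of checking this (assumes pyvale 2025.7.1 is installed; the exact names each dic* module exports are not visible in this diff):

# Sketch only: list the DIC-related names that the new star imports above
# re-export at the package top level. Which specific classes and functions
# appear depends on each dic* module's contents, which are not shown here.
import pyvale

dic_names = sorted(name for name in dir(pyvale) if name.lower().startswith("dic"))
print(dic_names)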
pyvale/blendercalibrationdata.py
CHANGED
pyvale/blenderscene.py
CHANGED
@@ -13,7 +13,7 @@ from pyvale.simtools import SimTools
 from pyvale.blendermaterialdata import BlenderMaterialData
 from pyvale.blenderrenderdata import RenderData, RenderEngine
 from pyvale.camerastereo import CameraStereo
-from pyvale.rendermesh import
+from pyvale.rendermesh import RenderMesh
 from pyvale.pyvaleexceptions import BlenderError
 
 
@@ -94,12 +94,14 @@ class BlenderScene():
         new_cam.dof.use_dof = True
         new_cam.dof.aperture_fstop = cam_data.fstop
 
+        new_cam.clip_end = ((cam_data.pos_world[2] - cam_data.roi_cent_world[2])
+                            + 100)
+
         bpy.context.scene.camera = camera
         return camera
 
     def add_stereo_system(self, stereo_system: CameraStereo) -> tuple[bpy.data.objects,
                                                                       bpy.data.objects]:
-        # TODO: Correct docstring
         """A method to add a stereo camera system within Blender, given an
         instance of the CameraStereo class (that describes a stereo system).
 
@@ -152,7 +154,7 @@ class BlenderScene():
         return light_ob
 
     def add_part(self,
-                 render_mesh:
+                 render_mesh: RenderMesh,
                  sim_spat_dim: int) -> bpy.data.objects:
         """A method to add a part mesh into Blender, given a RenderMeshData object.
         This is done by taking the mesh information from the RenderMeshData
@@ -258,7 +260,7 @@ class BlenderScene():
         BlenderTools.uv_unwrap_part(part, mm_px_resolution, cal)
 
     def _debug_deform(self,
-                      render_mesh:
+                      render_mesh: RenderMesh,
                       sim_spat_dim:int,
                       part: bpy.data.objects) -> None:
         """A method to deform the Blender mesh object using the simulation results.
@@ -371,7 +373,7 @@ class BlenderScene():
         bpy.ops.render.render(write_still=True)
 
     def render_deformed_images(self,
-                               render_mesh:
+                               render_mesh: RenderMesh,
                                sim_spat_dim: int,
                                render_data:RenderData,
                                part: bpy.data.objects,
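The clip_end addition in the second hunk above sets the camera's far clip distance to the camera-to-ROI standoff along world z plus a fixed margin of 100, so a part placed at the region of interest is never culled by the far plane. A quick illustration with made-up numbers (not taken from the diff):

# Illustration of the new clip_end calculation using hypothetical values.
pos_world_z = 500.0      # hypothetical camera z position in world coordinates
roi_cent_world_z = 0.0   # hypothetical region-of-interest centre z
clip_end = (pos_world_z - roi_cent_world_z) + 100
print(clip_end)  # 600.0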
pyvale/blendertools.py
CHANGED
@@ -332,7 +332,9 @@ class BlenderTools:
             A dataclass containing the parameters needed to render the images
         calibration_data : CalibrationData
             A dataclass containing the parameters by which to move the calibration
-            target. These inclcude the plungle depth and rotation angle.
+            target. These inclcude the plungle depth and rotation angle. It also
+            inlcludes optional x and y limits for the movement of the calibration
+            target (if None they will be initialised from the FOV).
         part : bpy.data.objects
             The Blender part object, in this instance the calibration target.
 
@@ -371,15 +373,17 @@ class BlenderTools:
             plunge = calibration_data.plunge_lims[0] + calibration_data.plunge_step * ii
             # Plunge
             (FOV_x, FOV_y) = CameraTools.blender_FOV(render_data.cam_data[0])
-            x_limit = int(round((FOV_x / 2) - (part.dimensions[0] / 2)))
 
-
+            if calibration_data.x_limit is None:
+                calibration_data.x_limit = int(round((FOV_x / 2) - (part.dimensions[0] / 2)))
+            if calibration_data.y_limit is None:
+                calibration_data.y_limit = int(round((FOV_y / 2) - (part.dimensions[1] / 2)))
 
             for x in np.arange(-1, 2):
-                x *= x_limit
+                x *= calibration_data.x_limit
                 # Move in x-dir
                 for y in np.arange(-1, 2):
-                    y *= y_limit
+                    y *= calibration_data.y_limit
                     # Move in y-dir
                     part.location = ((x, y, plunge))
                     part.location[2] = plunge
@@ -415,6 +419,24 @@ class BlenderTools:
         print('Total number of calibration images = ' + str(render_counter))
         return render_counter
 
+    def check_for_GPU() -> bool:
+        """A method to check whether the machine has a GPU or not.
+
+        Returns
+        -------
+        bool
+            Returns True if a GPU is present, returns False if only a CPU is
+            present.
+        """
+        accepted_gpus = ["CUDA", "OPTIX", "HIP", "METAL", "ONEAPI"]
+        cycles_prefs = bpy.context.preferences.addons["cycles"].preferences
+        cycles_prefs.refresh_devices()
+        for device in cycles_prefs.devices:
+            print(f"Name: {device.name}, Type: {device.type}, Use: {device.use}")
+            if device.type in accepted_gpus:
+                return True
+        return False
+
 
 
 
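A hedged sketch of how the new check_for_GPU() helper might be used to pick the Cycles render device. Only the helper itself comes from the diff; routing its result into scene.cycles.device is an illustrative assumption, and the snippet requires Blender's bundled Python with the Cycles add-on enabled:

# Usage sketch (assumption, not from the diff): choose the Cycles device based
# on the new BlenderTools.check_for_GPU() helper. Must run inside Blender (bpy).
import bpy
from pyvale.blendertools import BlenderTools

if BlenderTools.check_for_GPU():
    bpy.context.scene.cycles.device = "GPU"
else:
    bpy.context.scene.cycles.device = "CPU"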
pyvale/camera.py
CHANGED
pyvale/cameradata.py
CHANGED
@@ -25,6 +25,8 @@ class CameraData:
     focal_length: float | None = 50.0
     sub_samp: int = 2
 
+    bits: int = 16
+
     back_face_removal: bool = True
 
     k1: float = 0.0
@@ -40,6 +42,7 @@ class CameraData:
     sensor_size: np.ndarray = field(init=False)
     image_dims: np.ndarray = field(init=False)
     image_dist: float = field(init=False)
+
     cam_to_world_mat: np.ndarray = field(init=False)
     world_to_cam_mat: np.ndarray = field(init=False)
 
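The new bits field presumably encodes the camera's digital bit depth (16 by default); if so, the corresponding saturation level follows directly:

# Assumption: `bits` is the camera's digital bit depth, so the maximum digital
# level (saturation) is 2**bits - 1. Values here are illustrative only.
bits = 16
saturation_count = 2**bits - 1
print(saturation_count)  # 65535 for 16-bit, 255 for 8-bit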
pyvale/camerasensor.py
ADDED
@@ -0,0 +1,147 @@
+# ==============================================================================
+# pyvale: the python validation engine
+# License: MIT
+# Copyright (C) 2025 The Computer Aided Validation Team
+# ==============================================================================
+
+"""
+NOTE: This module is a feature under developement.
+"""
+
+import numpy as np
+from pyvale.field import IField
+from pyvale.sensorarray import ISensorArray
+from pyvale.errorintegrator import ErrIntegrator
+from pyvale.sensordescriptor import SensorDescriptor
+from pyvale.fieldsampler import sample_field_with_sensor_data
+from pyvale.cameradata2d import CameraData2D
+from pyvale.cameratools import CameraTools
+
+
+
+class CameraBasic2D(ISensorArray):
+    __slots__ = ("_cam_data","_field","_error_integrator","_descriptor",
+                 "_sensor_data","_truth","_measurements")
+
+    def __init__(self,
+                 cam_data: CameraData2D,
+                 field: IField,
+                 descriptor: SensorDescriptor | None = None,
+                 ) -> None:
+
+        self._cam_data = cam_data
+        self._field = field
+        self._error_integrator = None
+
+        self._descriptor = SensorDescriptor()
+        if descriptor is not None:
+            self._descriptor = descriptor
+
+        self._sensor_data = CameraTools.build_sensor_data_from_camera_2d(self._cam_data)
+
+        self._truth = None
+        self._measurements = None
+
+    #---------------------------------------------------------------------------
+    # Accessors
+    def get_sample_times(self) -> np.ndarray:
+        if self._sensor_data.sample_times is None:
+            #shape=(n_time_steps,)
+            return self._field.get_time_steps()
+
+        #shape=(n_time_steps,)
+        return self._sensor_data.sample_times
+
+    def get_measurement_shape(self) -> tuple[int,int,int]:
+        return (self._sensor_data.positions.shape[0],
+                len(self._field.get_all_components()),
+                self.get_sample_times().shape[0])
+
+    def get_image_measurements_shape(self) -> tuple[int,int,int,int]:
+        return (self._cam_data.num_pixels[1],
+                self._cam_data.num_pixels[0],
+                len(self._field.get_all_components()),
+                self.get_sample_times().shape[0])
+
+    def get_field(self) -> IField:
+        return self._field
+
+    def get_descriptor(self) -> SensorDescriptor:
+        return self._descriptor
+
+    #---------------------------------------------------------------------------
+    # Truth calculation from simulation
+    def calc_truth_values(self) -> np.ndarray:
+        self._truth = sample_field_with_sensor_data(self._field,
+                                                    self._sensor_data)
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._truth
+
+    def get_truth(self) -> np.ndarray:
+        if self._truth is None:
+            self._truth = self.calc_truth_values()
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._truth
+
+    #---------------------------------------------------------------------------
+    # Errors
+    def set_error_integrator(self, err_int: ErrIntegrator) -> None:
+        self._error_integrator = err_int
+
+    def get_errors_systematic(self) -> np.ndarray | None:
+        if self._error_integrator is None:
+            return None
+
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._error_integrator.get_errs_systematic()
+
+    def get_errors_random(self) -> np.ndarray | None:
+        if self._error_integrator is None:
+            return None
+
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._error_integrator.get_errs_random()
+
+    def get_errors_total(self) -> np.ndarray | None:
+        if self._error_integrator is None:
+            return None
+
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._error_integrator.get_errs_total()
+
+    #---------------------------------------------------------------------------
+    # Measurements
+    def calc_measurements(self) -> np.ndarray:
+        if self._error_integrator is None:
+            self._measurements = self.get_truth()
+        else:
+            self._measurements = self.get_truth() + \
+                self._error_integrator.calc_errors_from_chain(self.get_truth())
+
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._measurements
+
+    def get_measurements(self) -> np.ndarray:
+        if self._measurements is None:
+            self._measurements = self.calc_measurements()
+
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        return self._measurements
+
+    #---------------------------------------------------------------------------
+    # Images
+    def calc_measurement_images(self) -> np.ndarray:
+        #shape=(n_pixels,n_field_comps,n_time_steps)
+        self._measurements = self.calc_measurements()
+        image_shape = self.get_image_measurements_shape()
+        #shape=(n_pixels_y,n_pixels_x,n_field_comps,n_time_steps)
+        return np.reshape(self._measurements,image_shape)
+
+    def get_measurement_images(self) -> np.ndarray:
+        self._measurements = self.get_measurements()
+        image_shape = self.get_image_measurements_shape()
+        #shape=(n_pixels_y,n_pixels_x,n_field_comps,n_time_steps)
+        return np.reshape(self._measurements,image_shape)
+
+
+
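A hedged usage sketch for the new CameraBasic2D class, based only on the constructor and accessors shown above. How to build the CameraData2D and IField inputs is not part of this diff, so they are left as placeholders:

# Sketch only: exercising the CameraBasic2D API added above. Constructing
# `cam_data` and `field` is not shown in this diff, so placeholders are used.
from pyvale.camerasensor import CameraBasic2D

cam_data = ...   # a pyvale CameraData2D instance describing the 2D camera
field = ...      # a pyvale IField sampled from a simulation (e.g. temperature)

camera = CameraBasic2D(cam_data=cam_data, field=field)

measurements = camera.get_measurements()
# shape=(n_pixels, n_field_comps, n_time_steps)

images = camera.get_measurement_images()
# shape=(n_pixels_y, n_pixels_x, n_field_comps, n_time_steps)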
pyvale/camerastereo.py
CHANGED
@@ -62,10 +62,10 @@ class CameraStereo:
         An instance of the CameraStereo class, given the specified parameters.
         """
         calib_params = yaml.safe_load(calib_path.read_text())
-        pixels_num_cam0 = np.array([calib_params['Cam0_Cx [pixels]']*2,
-                                    calib_params['Cam0_Cy [pixels]']*2])
-        pixels_num_cam1 = np.array([calib_params['Cam1_Cx [pixels]']*2,
-                                    calib_params['Cam1_Cy [pixels]']*2])
+        pixels_num_cam0 = np.array([int(calib_params['Cam0_Cx [pixels]']*2),
+                                    int(calib_params['Cam0_Cy [pixels]']*2)])
+        pixels_num_cam1 = np.array([int(calib_params['Cam1_Cx [pixels]']*2),
+                                    int(calib_params['Cam1_Cy [pixels]']*2)])
         pixels_size = focal_length / calib_params["Cam0_Fx [pixels]"]
         stereo_rotation = Rotation.from_euler("xyz", ([calib_params['Theta [deg]'],
                                                        calib_params['Phi [deg]'],
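The only change here is wrapping the doubled principal-point values in int(). With illustrative numbers (not from a real calibration file), the effect is on the resulting array dtype:

# Why the int() casts matter: principal points read from YAML are floats, so
# doubling them yields a float64 array unless each entry is cast to int first,
# and pixel counts should be integers. Values below are hypothetical.
import numpy as np

cx, cy = 1228.16, 1026.42                          # hypothetical Cam0_Cx / Cam0_Cy [pixels]
print(np.array([cx * 2, cy * 2]).dtype)            # float64
print(np.array([int(cx * 2), int(cy * 2)]).dtype)  # integer dtype (e.g. int64)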
pyvale/cameratools.py
CHANGED
@@ -23,43 +23,6 @@ from pyvale.camerastereo import CameraStereo
 
 
 class CameraTools:
-    @staticmethod
-    def load_image(im_path: Path) -> np.ndarray:
-
-        input_im = mplim.imread(im_path).astype(np.float64)
-        # If we have RGB then get rid of it
-        # TODO: make sure this is collapsing RGB to grey scale coorectly
-        if input_im.ndim > 2:
-            input_im = input_im[:,:,0]
-
-        return input_im
-
-    @staticmethod
-    def save_image(save_file: Path,
-                   image: np.ndarray,
-                   n_bits: int = 16) -> None:
-
-        # Need to flip image so coords are top left with Y down
-        # TODO check this
-        image = image[::-1,:]
-
-        if n_bits > 8:
-            im = Image.fromarray(image.astype(np.uint16))
-        else:
-            im = Image.fromarray(image.astype(np.uint8))
-
-        im.save(save_file)
-
-    @staticmethod
-    def image_num_str(im_num: int, width: int , cam_num: int = -1) -> str:
-        num_str = str(im_num)
-        num_str = num_str.zfill(width)
-
-        if cam_num >= 0:
-            num_str = num_str+'_'+str(cam_num)
-
-        return num_str
-
     @staticmethod
     def pixel_vec_px(pixels_count: np.ndarray) -> tuple[np.ndarray,np.ndarray]:
         px_vec_x = np.arange(0,pixels_count[0],1)
@@ -258,21 +221,14 @@ class CameraTools:
                                             [bb_max[xx],bb_max[yy],bb_min[zz]],
                                             [bb_min[xx],bb_max[yy],bb_min[zz]],])
 
+        print(80*"-")
+        print(bound_box_world_vecs)
+        print(80*"-")
+
         bound_box_cam_vecs = np.matmul(world_to_cam_mat,bound_box_world_vecs.T)
         boundbox_cam_leng = (np.max(bound_box_cam_vecs,axis=1)
                              - np.min(bound_box_cam_vecs,axis=1))
 
-        # print(80*"-")
-        # print(f"{bb_min=}")
-        # print(f"{bb_max=}")
-        # print()
-        # print("Cam to world mat:")
-        # print(cam_to_world_mat)
-        # print()
-        # print("World to cam mat:")
-        # print(world_to_cam_mat)
-        # print(80*"-")
-
         return np.array((boundbox_cam_leng[xx],boundbox_cam_leng[yy]))
 
     @staticmethod
@@ -287,14 +243,14 @@ class CameraTools:
         return image_dist
 
     @staticmethod
-    def
-
-
-
-
-
-
-
+    def pos_fill_frame(coords_world: np.ndarray,
+                       pixel_num: np.ndarray,
+                       pixel_size: np.ndarray,
+                       focal_leng: float,
+                       cam_rot: Rotation,
+                       frame_fill: float = 1.0,
+                       ) -> tuple[np.ndarray,np.ndarray]:
+
         fov_leng = CameraTools.fov_from_cam_rot_3d(
             cam_rot=cam_rot,
             coords_world=coords_world,
@@ -316,13 +272,19 @@
         cam_z_dir_world = cam_rot.as_matrix()[:,-1]
         cam_pos_world = (roi_pos_world + np.max(image_dist)*cam_z_dir_world)
 
-        print(80*"-")
-        print(f"{fov_leng=}")
-        print(f"{image_dist=}")
-        print(80*"-")
-
         return (roi_pos_world,cam_pos_world)
 
+    @staticmethod
+    def pos_fill_frame_all(coords_world_list: list[np.ndarray],
+                           pixel_num: np.ndarray,
+                           pixel_size: np.ndarray,
+                           focal_leng: float,
+                           cam_rot: Rotation,
+                           frame_fill: float = 1.0,
+                           ) -> tuple[np.ndarray,np.ndarray]:
+        pass
+
+
 
     #---------------------------------------------------------------------------
     # Blender camera tools