pyvale 2025.5.3-cp311-cp311-macosx_14_0_arm64.whl → 2025.7.1-cp311-cp311-macosx_14_0_arm64.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two package versions.
Potentially problematic release.
This version of pyvale might be problematic.
- pyvale/.dylibs/libomp.dylib +0 -0
- pyvale/.dylibs/libunwind.1.0.dylib +0 -0
- pyvale/__init__.py +12 -0
- pyvale/blendercalibrationdata.py +3 -1
- pyvale/blenderscene.py +7 -5
- pyvale/blendertools.py +27 -5
- pyvale/camera.py +1 -0
- pyvale/cameradata.py +3 -0
- pyvale/camerasensor.py +147 -0
- pyvale/camerastereo.py +4 -4
- pyvale/cameratools.py +23 -61
- pyvale/cython/rastercyth.c +1657 -1352
- pyvale/cython/rastercyth.cpython-311-darwin.so +0 -0
- pyvale/cython/rastercyth.py +71 -26
- pyvale/data/DIC_Challenge_Star_Noise_Def.tiff +0 -0
- pyvale/data/DIC_Challenge_Star_Noise_Ref.tiff +0 -0
- pyvale/data/plate_hole_def0000.tiff +0 -0
- pyvale/data/plate_hole_def0001.tiff +0 -0
- pyvale/data/plate_hole_ref0000.tiff +0 -0
- pyvale/data/plate_rigid_def0000.tiff +0 -0
- pyvale/data/plate_rigid_def0001.tiff +0 -0
- pyvale/data/plate_rigid_ref0000.tiff +0 -0
- pyvale/dataset.py +96 -6
- pyvale/dic/cpp/dicbruteforce.cpp +370 -0
- pyvale/dic/cpp/dicfourier.cpp +648 -0
- pyvale/dic/cpp/dicinterpolator.cpp +559 -0
- pyvale/dic/cpp/dicmain.cpp +215 -0
- pyvale/dic/cpp/dicoptimizer.cpp +675 -0
- pyvale/dic/cpp/dicrg.cpp +137 -0
- pyvale/dic/cpp/dicscanmethod.cpp +677 -0
- pyvale/dic/cpp/dicsmooth.cpp +138 -0
- pyvale/dic/cpp/dicstrain.cpp +383 -0
- pyvale/dic/cpp/dicutil.cpp +563 -0
- pyvale/dic2d.py +164 -0
- pyvale/dic2dcpp.cpython-311-darwin.so +0 -0
- pyvale/dicchecks.py +476 -0
- pyvale/dicdataimport.py +247 -0
- pyvale/dicregionofinterest.py +887 -0
- pyvale/dicresults.py +55 -0
- pyvale/dicspecklegenerator.py +238 -0
- pyvale/dicspecklequality.py +305 -0
- pyvale/dicstrain.py +387 -0
- pyvale/dicstrainresults.py +37 -0
- pyvale/errorintegrator.py +10 -8
- pyvale/examples/basics/ex1_1_basicscalars_therm2d.py +124 -113
- pyvale/examples/basics/ex1_2_sensormodel_therm2d.py +124 -132
- pyvale/examples/basics/ex1_3_customsens_therm3d.py +199 -195
- pyvale/examples/basics/ex1_4_basicerrors_therm3d.py +125 -121
- pyvale/examples/basics/ex1_5_fielderrs_therm3d.py +145 -141
- pyvale/examples/basics/ex1_6_caliberrs_therm2d.py +96 -101
- pyvale/examples/basics/ex1_7_spatavg_therm2d.py +109 -105
- pyvale/examples/basics/ex2_1_basicvectors_disp2d.py +92 -91
- pyvale/examples/basics/ex2_2_vectorsens_disp2d.py +96 -90
- pyvale/examples/basics/ex2_3_sensangle_disp2d.py +88 -89
- pyvale/examples/basics/ex2_4_chainfielderrs_disp2d.py +172 -171
- pyvale/examples/basics/ex2_5_vectorfields3d_disp3d.py +88 -86
- pyvale/examples/basics/ex3_1_basictensors_strain2d.py +90 -90
- pyvale/examples/basics/ex3_2_tensorsens2d_strain2d.py +93 -91
- pyvale/examples/basics/ex3_3_tensorsens3d_strain3d.py +172 -160
- pyvale/examples/basics/ex4_1_expsim2d_thermmech2d.py +154 -148
- pyvale/examples/basics/ex4_2_expsim3d_thermmech3d.py +249 -231
- pyvale/examples/dic/ex1_region_of_interest.py +98 -0
- pyvale/examples/dic/ex2_plate_with_hole.py +149 -0
- pyvale/examples/dic/ex3_plate_with_hole_strain.py +93 -0
- pyvale/examples/dic/ex4_dic_blender.py +95 -0
- pyvale/examples/dic/ex5_dic_challenge.py +102 -0
- pyvale/examples/imagedef2d/ex_imagedef2d_todisk.py +4 -2
- pyvale/examples/renderblender/ex1_1_blenderscene.py +152 -105
- pyvale/examples/renderblender/ex1_2_blenderdeformed.py +151 -100
- pyvale/examples/renderblender/ex2_1_stereoscene.py +183 -116
- pyvale/examples/renderblender/ex2_2_stereodeformed.py +185 -112
- pyvale/examples/renderblender/ex3_1_blendercalibration.py +164 -109
- pyvale/examples/renderrasterisation/ex_rastenp.py +74 -35
- pyvale/examples/renderrasterisation/ex_rastercyth_oneframe.py +6 -13
- pyvale/examples/renderrasterisation/ex_rastercyth_static_cypara.py +2 -2
- pyvale/examples/renderrasterisation/ex_rastercyth_static_pypara.py +2 -4
- pyvale/imagedef2d.py +3 -2
- pyvale/imagetools.py +137 -0
- pyvale/rastercy.py +34 -4
- pyvale/rasternp.py +300 -276
- pyvale/rasteropts.py +58 -0
- pyvale/renderer.py +47 -0
- pyvale/rendermesh.py +52 -62
- pyvale/renderscene.py +51 -0
- pyvale/sensorarrayfactory.py +2 -2
- pyvale/sensortools.py +19 -35
- pyvale/simcases/case21.i +1 -1
- pyvale/simcases/run_1case.py +8 -0
- pyvale/simtools.py +2 -2
- pyvale/visualsimplotter.py +180 -0
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/METADATA +11 -57
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/RECORD +95 -57
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/WHEEL +1 -1
- pyvale/examples/visualisation/ex1_1_plot_traces.py +0 -102
- pyvale/examples/visualisation/ex2_1_animate_sim.py +0 -89
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/licenses/LICENSE +0 -0
- {pyvale-2025.5.3.dist-info → pyvale-2025.7.1.dist-info}/top_level.txt +0 -0
pyvale/rasternp.py
CHANGED
@@ -1,31 +1,121 @@
+#===============================================================================
 # pyvale: the python validation engine
 # License: MIT
 # Copyright (C) 2025 The Computer Aided Validation Team
+#===============================================================================

 """
-NOTE: this module is a feature under developement
+NOTE: this module is a feature under developement
 """

 from pathlib import Path
+import time
 from multiprocessing.pool import Pool
 import numpy as np
 import numba
+#import matplotlib.pyplot as plt
+from pyvale.dataset import DataSet
 from pyvale.cameradata import CameraData
 from pyvale.cameratools import CameraTools
-from pyvale.rendermesh import
+from pyvale.rendermesh import RenderMesh
+from pyvale.renderer import IRenderer, RenderScene
+from pyvale.rasteropts import RasterOpts, save_raster
+from pyvale.imagetools import ImageTools
+#from pyvale.visualimages import plot_field_image
 import pyvale.cython.rastercyth as rastercyth

+
+# NOTE: This module is a feature under developement.
+
+#===============================================================================
+class RasterNumpy(IRenderer):
+    __slots__ = ("scene","opts",)
+
+    def __init__(self, opts: RasterOpts) -> None:
+        self.opts = opts
+
+
+    def render(self,
+               scene: RenderScene,
+               cam_ind: int = 0,
+               frame_ind: int = 0,
+               field_ind: int = 0) -> np.ndarray:
+
+        image = RasterNP.raster_frame(
+            cam_ind=cam_ind,
+            frame_ind=frame_ind,
+            field_ind=field_ind,
+            cam_data=scene.cameras[cam_ind],
+            meshes=scene.meshes,
+            opts=self.opts,
+            save_path=None,
+        )
+
+        return image
+
+
+    def render_to_disk(self,
+                       scene: RenderScene,
+                       cam_ind: int = 0,
+                       frame_ind: int = 0,
+                       field_ind: int = 0,
+                       save_path: Path | None = None) -> None:
+
+        if save_path is None:
+            save_path = DataSet.create_output_path()
+
+        if not save_path.is_dir():
+            raise FileExistsError(f"Save path for render images does not exist:\n{save_path}")
+
+        RasterNP.raster_frame(
+            cam_ind=cam_ind,
+            frame_ind=frame_ind,
+            field_ind=field_ind,
+            cam_data=scene.cameras[cam_ind],
+            meshes=scene.meshes,
+            opts=self.opts,
+            save_path=save_path,
+        )
+
+
+    def render_all(self, scene: RenderScene) -> list[np.ndarray]:
+
+        images = RasterNP.raster_scene(
+            scene=scene,
+            opts=self.opts,
+            save_path=None,
+        )
+
+        return images
+
+
+    def render_all_to_disk(self,
+                           scene: RenderScene,
+                           save_path: Path | None = None) -> None:
+
+        if save_path is None:
+            save_path = DataSet.create_output_path()
+
+        if not save_path.is_dir():
+            raise FileExistsError(f"Save path for render images does not exist:\n{save_path}")
+
+        RasterNP.raster_scene(scene=scene,opts=self.opts,save_path=save_path)
+
+
+
+#===============================================================================
 class RasterNP:
     @staticmethod
-    def world_to_raster_coords(
+    def world_to_raster_coords(world_to_cam_mat: np.ndarray,
+                               pixels_num: np.ndarray,
+                               image_dims: np.ndarray,
+                               image_dist: float,
                                coords_world: np.ndarray) -> np.ndarray:
         # coords_world.shape=(num_nodes,coord[X,Y,Z,W])

         # Project onto camera coords
         # coords_raster.shape=(num_nodes,coord[X,Y,Z,W])
-        coords_raster = np.matmul(coords_world,
+        coords_raster = np.matmul(coords_world,world_to_cam_mat.T)

         # NOTE: w is not 1 when the matrix is a perspective projection! It is only 1
         # here when we have an affine transformation

@@ -34,30 +124,30 @@ class RasterNP:
         coords_raster[:,2] = coords_raster[:,2] / coords_raster[:,3]

         # Coords Image: Perspective divide
-        coords_raster[:,0] = (
+        coords_raster[:,0] = (image_dist * coords_raster[:,0]
                               / -coords_raster[:,2])
-        coords_raster[:,1] = (
+        coords_raster[:,1] = (image_dist * coords_raster[:,1]
                               / -coords_raster[:,2])

         # Coords NDC: Convert to normalised device coords in the range [-1,1]
-        coords_raster[:,0] = 2*coords_raster[:,0] /
-        coords_raster[:,1] = 2*coords_raster[:,1] /
+        coords_raster[:,0] = 2*coords_raster[:,0] / image_dims[0]
+        coords_raster[:,1] = 2*coords_raster[:,1] / image_dims[1]

         # Coords Raster: Covert to pixel (raster) coords
         # Shape = ([X,Y,Z],num_nodes)
-        coords_raster[:,0] = (coords_raster[:,0] + 1)/2 *
-        coords_raster[:,1] = (1-coords_raster[:,1])/2 *
+        coords_raster[:,0] = (coords_raster[:,0] + 1)/2 * pixels_num[0]
+        coords_raster[:,1] = (1-coords_raster[:,1])/2 * pixels_num[1]
         coords_raster[:,2] = -coords_raster[:,2]

         return coords_raster


     @staticmethod
-    def back_face_removal_mask(
-        coords_cam = np.matmul(coords_world,
+    def back_face_removal_mask(world_to_cam_mat: np.ndarray,
+                               coords_world: np.ndarray,
+                               connect: np.ndarray
+                               ) -> np.ndarray:
+        coords_cam = np.matmul(coords_world,world_to_cam_mat.T)

         # shape=(num_elems,nodes_per_elem,coord[x,y,z,w])
         elem_cam_coords = coords_cam[connect,:]

@@ -75,12 +165,12 @@ class RasterNP:
         proj_elem_to_cam = np.dot(cam_normal,elem_cam_normals)

         # NOTE this should be a numerical precision tolerance (epsilon)
-        back_face_mask = proj_elem_to_cam > 1e-
+        back_face_mask = proj_elem_to_cam > 1e-12

         return back_face_mask

     @staticmethod
-    def crop_and_bound_by_connect(
+    def crop_and_bound_by_connect(pixels_num: np.ndarray,
                                   coords_raster: np.ndarray,
                                   connectivity: np.ndarray,
                                   ) -> tuple[np.ndarray,np.ndarray]:

@@ -97,8 +187,8 @@ class RasterNP:
         # Check that min/max nodes are within the 4 edges of the camera image
         #shape=(4_edges_to_check,num_elems)
         crop_mask = np.zeros([connectivity.shape[0],4],dtype=np.int8)
-        crop_mask[elem_raster_coord_min[:,0] <= (
-        crop_mask[elem_raster_coord_min[:,1] <= (
+        crop_mask[elem_raster_coord_min[:,0] <= (pixels_num[0]-1), 0] = 1
+        crop_mask[elem_raster_coord_min[:,1] <= (pixels_num[1]-1), 1] = 1
         crop_mask[elem_raster_coord_max[:,0] >= 0, 2] = 1
         crop_mask[elem_raster_coord_max[:,1] >= 0, 3] = 1
         crop_mask = np.sum(crop_mask,axis=1) == 4

@@ -117,12 +207,12 @@ class RasterNP:
             elem_raster_coord_min[:,0])
         elem_bound_boxes_inds[:,1] = RasterNP.elem_bound_box_high(
             elem_raster_coord_max[:,0],
+            pixels_num[0]-1)
         elem_bound_boxes_inds[:,2] = RasterNP.elem_bound_box_low(
             elem_raster_coord_min[:,1])
         elem_bound_boxes_inds[:,3] = RasterNP.elem_bound_box_high(
             elem_raster_coord_max[:,1],
+            pixels_num[1]-1)

         return (crop_mask,elem_bound_boxes_inds)


@@ -143,28 +233,61 @@ class RasterNP:
         bound = np.min(bound_mat,axis=0)
         return bound

+    @staticmethod
+    def average_buffers(cam_data: CameraData,
+                        image_buff_subpx: np.ndarray,
+                        depth_buff_subpx: np.ndarray) -> tuple[np.ndarray,np.ndarray]:
+
+        depth_buff_avg = np.empty((cam_data.pixels_num[1],cam_data.pixels_num[0]),
+                                  dtype=np.float64)
+        image_buff_avg = np.empty((cam_data.pixels_num[1],cam_data.pixels_num[0]),
+                                  dtype=np.float64)
+
+        if Path(rastercyth.__file__).suffix in (".so",".dll",".dylib"):
+            depth_buff_avg = np.array(
+                rastercyth.average_image(depth_buff_subpx,cam_data.sub_samp))
+
+            image_buff_avg = np.array(
+                rastercyth.average_image(image_buff_subpx,cam_data.sub_samp))
+
+        else:
+            depth_buff_avg = CameraTools.average_subpixel_image(
+                depth_buff_subpx,cam_data.sub_samp)
+            image_buff_avg = CameraTools.average_subpixel_image(
+                image_buff_subpx,cam_data.sub_samp)
+
+        return (image_buff_avg,depth_buff_avg)
+

     @staticmethod
-    def setup_frame(
-                    disp_field_frame: np.ndarray | None = None,
+    def setup_frame(camera: CameraData,
+                    mesh: RenderMesh,
+                    frame_ind: int = 0,
                     ) -> tuple[np.ndarray,np.ndarray,np.ndarray]:

-        connect_in_frame = np.copy(connectivity)
-        coords_deform = np.copy(
+        connect_in_frame = np.copy(mesh.connectivity)
+        coords_deform = np.copy(mesh.coords)

         #-----------------------------------------------------------------------
         # DEFORM MESH WITH DISPLACEMENT
-        if
+        if mesh.fields_disp is not None:
             # Exclude w coord from mesh deformation
-            coords_deform[:,:-1] = coords_deform[:,:-1]
+            coords_deform[:,:-1] = (coords_deform[:,:-1]
+                                    + mesh.fields_disp[:,frame_ind,:])
+
+        #-----------------------------------------------------------------------
+        # Convert all meshes from local to world coords
+        coords_deform = np.matmul(coords_deform,mesh.mesh_to_world_mat.T)

         #-----------------------------------------------------------------------
         # Convert world coords of all elements in the scene
         # shape=(num_nodes,coord[x,y,z,w])
-        coords_raster = RasterNP.world_to_raster_coords(
+        coords_raster = RasterNP.world_to_raster_coords(camera.world_to_cam_mat,
+                                                        camera.pixels_num,
+                                                        camera.image_dims,
+                                                        camera.image_dist,
+                                                        coords_deform)

         # Convert to perspective correct hyperbolic interpolation for z interp
         # shape=(num_nodes,coord[x,y,z,w])

@@ -175,16 +298,16 @@ class RasterNP:
         #-----------------------------------------------------------------------
         # BACKFACE REMOVAL
         # shape=(num_elems,)
-        back_face_mask = RasterNP.back_face_removal_mask(
+        back_face_mask = RasterNP.back_face_removal_mask(camera.world_to_cam_mat,
+                                                         coords_deform,
+                                                         connect_in_frame)
         connect_in_frame = connect_in_frame[back_face_mask,:]

         #-----------------------------------------------------------------------
         # CROPPING & BOUNDING BOX OPERATIONS
         (crop_mask,
          elem_bound_box_inds) = RasterNP.crop_and_bound_by_connect(
+            camera.pixels_num,
             coords_raster,
             connect_in_frame,
         )

@@ -201,21 +324,24 @@ class RasterNP:


     @staticmethod
-    def raster_elem(
-        elem_raster_coords: np.ndarray,
+    def raster_elem(elem_raster_coords: np.ndarray,
                     elem_bound_box_inds: np.ndarray,
                     elem_area: float,
-                    field_divide_z: np.ndarray
+                    field_divide_z: np.ndarray,
+                    sub_samp: int,
                     ) -> tuple[np.ndarray,np.ndarray,np.ndarray,np.ndarray]:
+        # elem_raster_coords.shape=()
+        # elem_bound_box_inds.shape=()
+        # field_divide_z.shape=()

         # Create the subpixel coords inside the bounding box to test with the
         # edge function. Use the pixel indices of the bounding box.
         bound_subpx_x = np.arange(elem_bound_box_inds[0],
                                   elem_bound_box_inds[1],
-                                  1/
+                                  1/sub_samp) + 1/(2*sub_samp)
         bound_subpx_y = np.arange(elem_bound_box_inds[2],
                                   elem_bound_box_inds[3],
-                                  1/
+                                  1/sub_samp) + 1/(2*sub_samp)
         (bound_subpx_grid_x,bound_subpx_grid_y) = np.meshgrid(bound_subpx_x,
                                                               bound_subpx_y)
         bound_coords_grid_shape = bound_subpx_grid_x.shape

@@ -224,10 +350,10 @@ class RasterNP:
                                             bound_subpx_grid_y.flatten()))

         # Create the subpixel indices for buffer slicing later
-        subpx_inds_x = np.arange(
-        subpx_inds_y = np.arange(
+        subpx_inds_x = np.arange(sub_samp*elem_bound_box_inds[0],
+                                 sub_samp*elem_bound_box_inds[1])
+        subpx_inds_y = np.arange(sub_samp*elem_bound_box_inds[2],
+                                 sub_samp*elem_bound_box_inds[3])
         (subpx_inds_grid_x,subpx_inds_grid_y) = np.meshgrid(subpx_inds_x,
                                                             subpx_inds_y)

@@ -275,225 +401,203 @@ class RasterNP:
                 subpx_inds_grid_x[edge_mask_grid],
                 subpx_inds_grid_y[edge_mask_grid])

+
     @staticmethod
-    def
+    def raster_one_mesh(connect_in_frame: np.ndarray,
+                        coords_raster: np.ndarray,
+                        elem_bound_box_inds: np.ndarray,
+                        elem_areas: np.ndarray,
+                        field_frame_div_z: np.ndarray,
+                        sub_samp: int,
+                        image_buff_subpx: np.ndarray,
+                        depth_buff_subpx: np.ndarray,
+                        ) -> tuple[np.ndarray,np.ndarray]:
         #connect_in_frame.shape=(num_elems,nodes_per_elem)
         #coords_raster.shape=(num_coords,coord[x,y,z,w])
         #elem_bound_box_inds.shape=(num_elems,[min_x,max_x,min_y,max_y])
         #elem_areas.shape=(num_elems,)
         #field_frame_divide_z=(num_coords,)

-        depth_buffer = 1e5*cam_data.image_dist*np.ones(
-            cam_data.sub_samp*cam_data.pixels_num).T
-        image_buffer = np.full(cam_data.sub_samp*cam_data.pixels_num,0.0).T
-        # elem_raster_coords.shape=(num_elems,nodes_per_elem,coord[x,y,z,w])
-        # field_divide_z.shape=(num_elems,nodes_per_elem,num_time_steps)
-        # elem_raster_coords.shape=(nodes_per_elem,coord[x,y,z,w])
         for ee in range(connect_in_frame.shape[0]):
             cc = connect_in_frame[ee,:]

             (px_coord_z,
              field_interp,
              subpx_inds_x_in,
-             subpx_inds_y_in) = RasterNP.raster_elem(
-                field_frame_div_z[cc])
+             subpx_inds_y_in) = RasterNP.raster_elem(coords_raster[cc,:],
+                                                     elem_bound_box_inds[ee,:],
+                                                     elem_areas[ee],
+                                                     field_frame_div_z[cc],
+                                                     sub_samp)


             # Build a mask to replace the depth information if there is already an
             # element in front of the one we are rendering
             px_coord_z_depth_mask = (px_coord_z
-                <
+                < depth_buff_subpx[subpx_inds_y_in,subpx_inds_x_in])

             # Initialise the z coord to the value in the depth buffer
-            px_coord_z_masked =
+            px_coord_z_masked = depth_buff_subpx[subpx_inds_y_in,subpx_inds_x_in]
             # Use the depth mask to overwrite the depth buffer values if points are in
             # front of the values in the depth buffer
             px_coord_z_masked[px_coord_z_depth_mask] = px_coord_z[px_coord_z_depth_mask]

             # Push the masked values into the depth buffer
+            depth_buff_subpx[subpx_inds_y_in,subpx_inds_x_in] = px_coord_z_masked

             # Mask the image buffer using the depth mask
-            image_buffer_depth_masked =
+            image_buffer_depth_masked = image_buff_subpx[subpx_inds_y_in,subpx_inds_x_in]
             image_buffer_depth_masked[px_coord_z_depth_mask] = field_interp[px_coord_z_depth_mask]

             # Push the masked values into the image buffer
+            image_buff_subpx[subpx_inds_y_in,subpx_inds_x_in] = image_buffer_depth_masked

         #---------------------------------------------------------------------------
         # END RASTER LOOP
-        if Path(rastercyth.__file__).suffix == ".so":
-            depth_buff = np.empty((cam_data.pixels_num[1],cam_data.pixels_num[0]),dtype=np.float64)
-            depth_buff = np.array(rastercyth.average_image(depth_buffer,cam_data.sub_samp))
-            image_buff = np.empty((cam_data.pixels_num[1],cam_data.pixels_num[0]),dtype=np.float64)
-            image_buff = np.array(rastercyth.average_image(image_buffer,cam_data.sub_samp))
-        else:
-            depth_buff = CameraTools.average_subpixel_image(depth_buffer,cam_data.sub_samp)
-            image_buff = CameraTools.average_subpixel_image(image_buffer,cam_data.sub_samp)
-
-        return (image_buff,depth_buff)
+        return (image_buff_subpx,depth_buff_subpx)

     @staticmethod
-    def
-            coords_raster,
-            connect_in_frame,
-            elem_bound_box_inds,
-            elem_areas,
-            render_mesh.fields_render[:,frames[ff],fields[ff]],
-            save_path
-        )
+    def raster_frame(cam_ind: int,
+                     frame_ind: int,
+                     field_ind: int,
+                     cam_data: CameraData,
+                     meshes: list[RenderMesh],
+                     opts: RasterOpts,
+                     save_path: Path | None
+                     ) -> np.ndarray | None:
+
+        depth_buff_subpx = 1e5*cam_data.image_dist*np.ones(cam_data.sub_samp*cam_data.pixels_num).T
+        image_buff_subpx = np.full(cam_data.sub_samp*cam_data.pixels_num,0.0).T
+
+        for mm in meshes:
+            # coords_raster.shape=(num_coords,coord[x,y,z,w])
+            # connect_in_frame.shape=(num_elems_in_scene,nodes_per_elem)
+            # elem_bound_box_inds.shape=(num_elems_in_scene,4[x_min,x_max,y_min,y_max])
+            # elem_areas.shape=(num_elems,)
+            (coords_raster,
+             connect_in_frame,
+             elem_bound_box_inds,
+             elem_areas) = RasterNP.setup_frame(
+                cam_data,
+                mm,
+                frame_ind,
+            )
+
+            # NOTE: the z coord has already been inverted in setup so we multiply here
+            render_field_div_z = (mm.fields_render[:,frame_ind,field_ind]
+                                  *coords_raster[:,2])
+
+            # image_buffer.shape=(num_px_y,num_px_x)
+            # depth_buffer.shape=(num_px_y,num_px_x)
+            (image_buff_subpx,
+             depth_buff_subpx) = RasterNP.raster_one_mesh(
+                connect_in_frame,
+                coords_raster,
+                elem_bound_box_inds,
+                elem_areas,
+                render_field_div_z,
+                cam_data.sub_samp,
+                image_buff_subpx,
+                depth_buff_subpx
+            )

-        if images is not None:
-            images[:,:,frames[ff],fields[ff]] = image
-        else:
-            with Pool(threads_num) as pool:
-                processes_with_id = []

-                        cam_data,
-                        coords_raster,
-                        connect_in_frame,
-                        elem_bound_box_inds,
-                        elem_areas,
-                        render_mesh.fields_render[:,frames[ff],fields[ff]],
-                        save_path)
+        # TODO: make this configurable
+        image_buff_subpx[depth_buff_subpx > 1000*cam_data.image_dist] = np.nan
+        depth_buff_subpx[depth_buff_subpx > 1000*cam_data.image_dist] = np.nan

-                        "field": fields[ff]})
+        # Average buffers
+        (image_buff_avg,
+         depth_buff_avg) = RasterNP.average_buffers(cam_data,
+                                                    image_buff_subpx,
+                                                    depth_buff_subpx)

-                    images[:,:,pp["frame"],pp["field"]] = image
-        if images is not None:
-            return images
+        if save_path is None:
+            return image_buff_avg

+        save_name = ImageTools.get_save_name(cam_ind,frame_ind,field_ind)
+        image_save_file = save_path/save_name
+        save_raster(image_save_file,image_buff_avg,depth_buff_avg,opts)
         return None



     @staticmethod
-    def
-        ) -> np.ndarray | None:
+    def raster_scene(scene: RenderScene,
+                     opts: RasterOpts,
+                     save_path: Path | None = None,
+                     ) -> list[np.ndarray] | None:

+        # TODO: we assume the number of frames and fields is the same per camera
+        # Fix this
+        frames_num = scene.meshes[0].fields_render.shape[1]
+        field_num = scene.meshes[0].fields_render.shape[2]
+
+        (cam_inds,
+         frame_inds,
+         field_inds) = np.meshgrid(np.arange(0,len(scene.cameras)),
+                                   np.arange(0,frames_num),
+                                   np.arange(0,field_num))
+
+        cam_inds = cam_inds.flatten()
+        frame_inds = frame_inds.flatten()
+        field_inds = field_inds.flatten()
+        frames_total = cam_inds.shape[0]


         if save_path is None:
-            images =
+            images = []
+            for cc in scene.cameras:
+                images.append(np.empty((cc.pixels_num[1],
+                                        cc.pixels_num[0],
+                                        frames_num,
+                                        field_num)))
         else:
             images = None
             if not save_path.is_dir():
                 save_path.mkdir()


-        if parallel is None:
-            for ff in range(0,
-                image = RasterNP.
+        if opts.parallel is None:
+            for ff in range(0,frames_total):
+                image = RasterNP.raster_frame(
+                    cam_inds[ff],
+                    frame_inds[ff],
+                    field_inds[ff],
+                    scene.cameras[cam_inds[ff]],
+                    scene.meshes,
+                    opts,
                     save_path,
                 )

                 if images is not None:
-                    images[:,:,
+                    images[cam_inds[ff]][:,:,frame_inds[ff],field_inds[ff]] = image
+
         else:
-            with Pool(parallel) as pool:
+            with Pool(opts.parallel) as pool:
                 processes_with_id = []

-                for ff in range(0,
-                    args = (
+                for ff in range(0,frames_total):
+                    args = (cam_inds[ff],
+                            frame_inds[ff],
+                            field_inds[ff],
+                            scene.cameras[cam_inds[ff]],
+                            scene.meshes,
+                            opts,
                             save_path)

                     process = pool.apply_async(
-                        RasterNP.
+                        RasterNP.raster_frame, args=args
                     )
                     processes_with_id.append({"process": process,
-                                              "
-                                              "
+                                              "camera": cam_inds[ff],
+                                              "frame": frame_inds[ff],
+                                              "field": field_inds[ff]})

                 for pp in processes_with_id:
                     image = pp["process"].get()
-                    images
+                    if images is not None:
+                        images[cam_inds[ff]][:,:,pp["frame"],pp["field"]] = image

         if images is not None:
             return images

@@ -501,87 +605,6 @@ class RasterNP:
         return None


-    @staticmethod
-    def _static_mesh_frame_loop(frame_ind: int,
-                                field_ind: int,
-                                cam_data: CameraData,
-                                coords_raster: np.ndarray,
-                                connect_in_frame: np.ndarray,
-                                elem_bound_box_inds: np.ndarray,
-                                elem_areas: np.ndarray,
-                                field_to_render: np.ndarray,
-                                save_path: Path | None,
-                                ) -> np.ndarray | None:
-
-        # NOTE: the z coord has already been inverted in setup so we multiply here
-        render_field_div_z = field_to_render*coords_raster[:,2]
-
-        (image_buffer,
-         depth_buffer) = RasterNP.raster_frame(
-            cam_data,
-            connect_in_frame,
-            coords_raster,
-            elem_bound_box_inds,
-            elem_areas,
-            render_field_div_z)
-
-        # TODO: make this configurable
-        image_buffer[depth_buffer > 1000*cam_data.image_dist] = np.nan
-
-        if save_path is None:
-            return image_buffer
-
-        image_file = save_path/f"image_frame{frame_ind}_field{field_ind}"
-        np.save(image_file,image_buffer)
-        return None
-
-
-    @staticmethod
-    def _deformed_mesh_frame_loop(frame_ind: int,
-                                  field_ind: int,
-                                  cam_data: CameraData,
-                                  render_mesh: RenderMeshData,
-                                  save_path: Path | None
-                                  ) -> np.ndarray | None:
-        # coords_raster.shape=(num_coords,coord[x,y,z,w])
-        # connect_in_frame.shape=(num_elems_in_scene,nodes_per_elem)
-        # elem_bound_box_inds.shape=(num_elems_in_scene,4[x_min,x_max,y_min,y_max])
-        # elem_areas.shape=(num_elems,)
-        (coords_raster,
-         connect_in_frame,
-         elem_bound_box_inds,
-         elem_areas) = RasterNP.setup_frame(
-            cam_data,
-            render_mesh.coords,
-            render_mesh.connectivity,
-            render_mesh.fields_disp[:,frame_ind,:],
-        )
-
-        # NOTE: the z coord has already been inverted in setup so we multiply here
-        render_field_div_z = (render_mesh.fields_render[:,frame_ind,field_ind]
-                              *coords_raster[:,2])
-
-        # image_buffer.shape=(num_px_y,num_px_x)
-        # depth_buffer.shape=(num_px_y,num_px_x)
-        (image_buffer,
-         depth_buffer) = RasterNP.raster_frame(
-            cam_data,
-            connect_in_frame,
-            coords_raster,
-            elem_bound_box_inds,
-            elem_areas,
-            render_field_div_z)
-
-        # TODO: make this configurable
-        image_buffer[depth_buffer > 1000*cam_data.image_dist] = np.nan
-
-        if save_path is None:
-            return image_buffer
-
-        image_file = save_path/f"image_frame{frame_ind}_field{field_ind}"
-        np.save(image_file.with_suffix(".npy"),image_buffer)
-        return None
-

 #-------------------------------------------------------------------------------
 @numba.jit(nopython=True)

@@ -599,5 +622,6 @@ def edge_function_slice(vert_a: np.ndarray,

     return ((vert_c[:,0] - vert_a[:,0]) * (vert_b[:,1] - vert_a[:,1])
             - (vert_c[:,1] - vert_a[:,1]) * (vert_b[:,0] - vert_a[:,0]))
+#-------------------------------------------------------------------------------

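The net effect of the rasternp.py changes is that the NumPy rasteriser is now driven through a RasterNumpy renderer (implementing IRenderer) that consumes a RenderScene of cameras and meshes plus a RasterOpts configuration, replacing the old per-frame loop helpers that were removed. A minimal usage sketch of that interface, inferred only from the signatures visible in this diff, is below; how the scene, camera data, and render meshes are constructed is not shown in the diff, and importing these classes from the top-level pyvale package is an assumption based on the pyvale/__init__.py change.

# Sketch only: assumes RasterNumpy, RasterOpts, and RenderScene are re-exported
# from the top-level pyvale package; otherwise import them from pyvale.rasternp,
# pyvale.rasteropts, and pyvale.renderer. Building `scene` is left to the caller.
import numpy as np
import pyvale

def render_with_numpy_rasteriser(scene: "pyvale.RenderScene") -> np.ndarray:
    # opts.parallel = None selects the serial frame loop in RasterNP.raster_scene;
    # an integer would be passed to multiprocessing.Pool (assumed default shown).
    opts = pyvale.RasterOpts()
    renderer = pyvale.RasterNumpy(opts)

    # Render one (camera, frame, field) combination in memory ...
    image = renderer.render(scene, cam_ind=0, frame_ind=0, field_ind=0)

    # ... or rasterise every camera/frame/field combination straight to disk,
    # letting DataSet.create_output_path() pick the output directory.
    renderer.render_all_to_disk(scene)
    return image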