resqpy 4.16.11__py3-none-any.whl → 4.17.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- resqpy/__init__.py +1 -1
- resqpy/fault/_grid_connection_set.py +75 -47
- resqpy/grid/_grid.py +4 -0
- resqpy/grid_surface/_blocked_well_populate.py +5 -5
- resqpy/grid_surface/_find_faces.py +4 -3
- resqpy/model/_hdf5.py +3 -3
- resqpy/olio/triangulation.py +17 -13
- resqpy/olio/wellspec_keywords.py +16 -10
- resqpy/property/grid_property_collection.py +10 -10
- resqpy/rq_import/_import_vdb_ensemble.py +12 -13
- resqpy/surface/_mesh.py +4 -0
- resqpy/surface/_surface.py +15 -10
- resqpy/surface/_tri_mesh.py +8 -7
- resqpy/surface/_triangulated_patch.py +27 -15
- resqpy/well/_blocked_well.py +28 -25
- resqpy/well/_trajectory.py +2 -2
- resqpy/well/blocked_well_frame.py +1 -1
- resqpy/well/well_object_funcs.py +5 -5
- {resqpy-4.16.11.dist-info → resqpy-4.17.0.dist-info}/METADATA +1 -1
- {resqpy-4.16.11.dist-info → resqpy-4.17.0.dist-info}/RECORD +22 -22
- {resqpy-4.16.11.dist-info → resqpy-4.17.0.dist-info}/LICENSE +0 -0
- {resqpy-4.16.11.dist-info → resqpy-4.17.0.dist-info}/WHEEL +0 -0
resqpy/rq_import/_import_vdb_ensemble.py
CHANGED
@@ -225,11 +225,11 @@ def import_vdb_ensemble(
        vdb_file = ensemble_list[realisation]
        log.info('processing realisation ' + str(realisation) + ' from: ' + str(vdb_file))
        vdbase = vdb.VDB(vdb_file)
-       #
-       #
-       #
-       #
-       #
+       # case_list = vdbase.cases()
+       # assert len(case_list) > 0, 'no cases found in vdb: ' + str(vdb_file)
+       # if len(case_list) > 1: log.warning('more than one case found in vdb (using first): ' + str(vdb_file))
+       # vdb_case = case_list[0]
+       # vdbase.set_use_case(vdb_case)
        vdbase.set_extent_kji(grid.extent_kji)

        prop_import_collection = rp.GridPropertyCollection(realization = realisation)
@@ -243,9 +243,8 @@ def import_vdb_ensemble(
            if keyword_list is not None and keyword not in keyword_list:
                continue
            prop_kind, facet_type, facet = rp.property_kind_and_facet_from_keyword(keyword)
-           if property_kind_list is not None and prop_kind not in property_kind_list and
-
-           ]:
+           if property_kind_list is not None and prop_kind not in property_kind_list and \
+                   prop_kind not in ['active', 'region initialization']:
                continue
            prop_import_collection.import_vdb_static_property_to_cache(vdbase,
                                                                       keyword,
@@ -312,9 +311,9 @@ def import_vdb_ensemble(
            if decoarsen_array is not None:
                step_import_collection.decoarsen_imported_list(decoarsen_array = decoarsen_array)
            # extend hdf5 with cached arrays for this timestep
-           #
-           #
-           #
+           # log.info('number of recurrent grid property arrays for timestep: ' + str(timestep_number) +
+           #          ' is: ' + str(step_import_collection.number_of_imports()))
+           # log.info('extending hdf5 file with recurrent properties for timestep: ' + str(timestep_number))
            grid.write_hdf5_from_caches(hdf5_file,
                                        mode = 'a',
                                        geometry = False,
@@ -322,8 +321,8 @@ def import_vdb_ensemble(
                                        write_active = False)
            # add imported list for this timestep to full imported list
            prop_import_collection.inherit_imported_list_from_other_collection(step_import_collection)
-           #
-           #
+           # log.debug('total number of property arrays after timestep: ' + str(timestep_number) +
+           #           ' is: ' + str(prop_import_collection.number_of_imports()))
            # remove cached copies of arrays
            step_import_collection.remove_all_cached_arrays()

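The rebuilt condition in the `@@ -243` hunk is easier to read as a standalone predicate. The sketch below is illustrative only, with simplified names rather than identifiers from `_import_vdb_ensemble.py`: a keyword is skipped when a property kind filter is supplied, the kind is not in that filter, and the kind is not one of the always-imported kinds.

```python
# illustrative sketch of the filtering condition reassembled in the @@ -243 hunk;
# names are simplified stand-ins, not code from the package
def skip_keyword(prop_kind, property_kind_list):
    return (property_kind_list is not None and prop_kind not in property_kind_list and
            prop_kind not in ['active', 'region initialization'])

assert skip_keyword('porosity', ['permeability'])        # filtered out
assert not skip_keyword('active', ['permeability'])      # 'active' is always kept
assert not skip_keyword('porosity', None)                # no filter supplied
```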
resqpy/surface/_mesh.py
CHANGED
@@ -207,6 +207,10 @@ class Mesh(rqsb.BaseSurface):
                            extra_metadata = em)
        return mesh

+   def is_big(self):
+       """Returns True if the number of nodes exceeds 2^31 - 1, False otherwise."""
+       return (self.ni * self.nj >= 2_147_483_648)
+
    def set_represented_interpretation_root(self, interp_root):
        """Makes a note of the xml root of the represented interpretation."""

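The new `Mesh.is_big()` helper is the hinge for the int32/int64 switching seen throughout this release. A minimal sketch of the same threshold test on plain integers follows; the helper function name below is hypothetical, not part of resqpy.

```python
import numpy as np

# 2_147_483_648 is 2**31, one more than np.iinfo(np.int32).max; a mesh with at least
# this many nodes cannot be indexed safely with signed 32 bit integers
def needs_64_bit_indexing(ni: int, nj: int) -> bool:
    return ni * nj >= 2_147_483_648

assert not needs_64_bit_indexing(1_000, 1_000)    # 1e6 nodes: int32 is fine
assert needs_64_bit_indexing(50_000, 50_000)      # 2.5e9 nodes: int64 required
assert np.iinfo(np.int32).max == 2_147_483_647
```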
resqpy/surface/_surface.py
CHANGED
@@ -438,10 +438,11 @@ class Surface(rqsb.BaseSurface):
        t, p = large_surface.triangles_and_points()
        assert p.ndim == 2 and p.shape[1] == 3
        pp = np.concatenate((p, line), axis = 0)
-
+       t_type = np.int32 if len(pp) <= 2_147_483_647 else np.int64
+       tp = np.empty(p.shape, dtype = t_type)
        tp[:, 0] = len(p)
        tp[:, 1] = len(p) + 1
-       tp[:, 2] = np.arange(len(p), dtype =
+       tp[:, 2] = np.arange(len(p), dtype = t_type)
        cw = vec.clockwise_triangles(pp, tp)
        pai = (cw >= 0.0) # bool mask over p
        pbi = (cw <= 0.0) # bool mask over p
@@ -453,11 +454,11 @@ class Surface(rqsb.BaseSurface):
        # here we stick the two halves together into a single patch
        # todo: keep as two patches as required by RESQML business rules
        p_combo = np.empty((0, 3))
-       t_combo = np.empty((0, 3), dtype =
+       t_combo = np.empty((0, 3), dtype = t_type)
        for i, tab in enumerate((ta, tb)):
            p_keep = np.unique(t[tab])
            # note new point index for each old point that is being kept
-           p_map = np.full(len(p), -1, dtype =
+           p_map = np.full(len(p), -1, dtype = t_type)
            p_map[p_keep] = np.arange(len(p_keep))
            # copy those unique points into a trimmed points array
            points_trimmed = p[p_keep].copy()
@@ -950,7 +951,8 @@ class Surface(rqsb.BaseSurface):
                    triangles.append(line.rstrip().split(" ")[1:4])
        assert len(vertices) >= 3, 'vertices missing'
        assert len(triangles) > 0, 'triangles missing'
-
+       t_type = np.int32 if len(vertices) <= 2_147_483_647 else np.int64
+       triangles = np.array(triangles, dtype = t_type) - index_offset
        vertices = np.array(vertices, dtype = float)
        assert np.all(triangles >= 0) and np.all(triangles < len(vertices)), 'triangle vertex indices out of range'
        self.set_from_triangles_and_points(triangles = triangles, points = vertices)
@@ -1139,8 +1141,9 @@ class Surface(rqsb.BaseSurface):

        # TODO: implement alternate solution using edge functions in olio triangulation to optimise
        points_unique, inverse = np.unique(allpoints, axis = 0, return_inverse = True)
-
-
+       t_type = np.int32 if len(allpoints) <= 2_147_483_647 else np.int64
+       tris = np.array(tris, dtype = t_type)
+       tris_unique = np.empty(shape = tris.shape, dtype = t_type)
        tris_unique[:, 0] = inverse[tris[:, 0]]
        tris_unique[:, 1] = inverse[tris[:, 1]]
        tris_unique[:, 2] = inverse[tris[:, 2]]
@@ -1333,7 +1336,8 @@ def distill_triangle_points(t, p):
    # find unique points used by triangles
    p_keep = np.unique(t)
    # note new point index for each old point that is being kept
-
+   t_type = np.int32 if len(p) <= 2_147_483_647 else np.int64
+   p_map = np.full(len(p), -1, dtype = t_type)
    p_map[p_keep] = np.arange(len(p_keep))
    # copy those unique points into a trimmed points array
    points_distilled = p[p_keep]
@@ -1360,8 +1364,9 @@ def nan_removed_triangles_and_points(t, p):
    expanded_mask[:] = np.expand_dims(np.logical_not(t_nan_mask), axis = -1)
    t_filtered = t[expanded_mask].reshape((-1, 3))
    # modified the filtered t values to adjust for the compression of filtered p
-
-   p_map
+   t_type = np.int32 if len(p) <= 2_147_483_647 else np.int64
+   p_map = np.full(len(p), -1, dtype = t_type)
+   p_map[p_non_nan_mask] = np.arange(len(p_filtered), dtype = t_type)
    t_filtered = p_map[t_filtered]
    assert t_filtered.ndim == 2 and t_filtered.shape[1] == 3
    assert not np.any(t_filtered < 0) and not np.any(t_filtered >= len(p_filtered))
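Every hunk above applies the same pattern: pick a triangle-index dtype from the size of the point set, then build index arrays with that dtype. A small self-contained sketch of the pattern (the helper name is hypothetical, not an API of `Surface`):

```python
import numpy as np

def triangle_index_dtype(points):
    # same literal as the diff: np.iinfo(np.int32).max == 2_147_483_647
    return np.int32 if len(points) <= 2_147_483_647 else np.int64

points = np.random.rand(100, 3)                 # toy point set
t_type = triangle_index_dtype(points)           # np.int32 for anything this small
triangles = np.zeros((50, 3), dtype = t_type)   # triangle node indices share the dtype
assert triangles.dtype == np.int32
```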
resqpy/surface/_tri_mesh.py
CHANGED
@@ -120,6 +120,7 @@ class TriMesh(rqs.Mesh):
            self.origin = None
        else:
            self.origin = origin
+       self.t_type = np.int32 if self.is_big() else np.int64

    @classmethod
    def from_tri_mesh_and_z_values(cls,
@@ -302,7 +303,7 @@ class TriMesh(rqs.Mesh):
    def tri_nodes_for_tji(self, tji):
        """Return mesh node indices, shape (3, 2), for triangle tji (tj, ti)."""
        j, i = tji
-       tn = np.zeros((3, 2), dtype =
+       tn = np.zeros((3, 2), dtype = self.t_type)
        j_odd = j % 2
        i2, i_odd = divmod(i, 2)
        assert 0 <= j < self.nj - 1 and 0 <= i < 2 * (self.ni - 1)
@@ -325,7 +326,7 @@ class TriMesh(rqs.Mesh):
        j = tji_array[..., 0]
        i = tji_array[..., 1]
        tn_shape = tuple(list(tji_array.shape[:-1]) + [3, 2])
-       tn = np.zeros(tn_shape, dtype =
+       tn = np.zeros(tn_shape, dtype = self.t_type)
        j_odd = j % 2
        i2, i_odd = np.divmod(i, 2)
        mask = np.logical_or(np.logical_or(j < 0, j >= self.nj - 1), np.logical_or(i < 0, i >= 2 * (self.ni - 1)))
@@ -342,9 +343,9 @@ class TriMesh(rqs.Mesh):

    def all_tri_nodes(self):
        """Returns array of mesh node indices for all triangles, shape (nj - 1, 2 * (ni - 1), 3, 2)."""
-       tna = np.zeros((self.nj - 1, 2 * (self.ni - 1), 3, 2), dtype =
+       tna = np.zeros((self.nj - 1, 2 * (self.ni - 1), 3, 2), dtype = self.t_type)
        # set mesh j indices
-       tna[:, :, 0, 0] = np.expand_dims(np.arange(self.nj - 1, dtype =
+       tna[:, :, 0, 0] = np.expand_dims(np.arange(self.nj - 1, dtype = self.t_type), axis = -1)
        tna[1::2, ::2, 0, 0] += 1
        tna[::2, 1::2, 0, 0] += 1
        tna[:, :, 1, 0] = tna[:, :, 0, 0]
@@ -352,7 +353,7 @@ class TriMesh(rqs.Mesh):
        tna[1::2, ::2, 2, 0] -= 2
        tna[::2, 1::2, 2, 0] -= 2
        # set mesh i indices
-       tna[:, ::2, 0, 1] = np.expand_dims(np.arange(self.ni - 1, dtype =
+       tna[:, ::2, 0, 1] = np.expand_dims(np.arange(self.ni - 1, dtype = self.t_type), axis = 0)
        tna[:, 1::2, 0, 1] = tna[:, ::2, 0, 1]
        tna[:, :, 1, 1] = tna[:, :, 0, 1] + 1
        tna[:, :, 2, 1] = tna[:, :, 0, 1]
@@ -362,7 +363,7 @@ class TriMesh(rqs.Mesh):
    def triangles_and_points(self):
        """Returns node indices and xyz points in form suitable for a Surface (triangulated set)."""
        tna = self.all_tri_nodes()
-       composite_ji = tna[:, :, :, 0] * self.ni + tna[:, :, :, 1]
+       composite_ji = (tna[:, :, :, 0] * self.ni + tna[:, :, :, 1]).astype(self.t_type)
        return (composite_ji.reshape((-1, 3)), self.full_array_ref().reshape((-1, 3)))

    def tji_for_triangle_index(self, ti):
@@ -410,7 +411,7 @@ class TriMesh(rqs.Mesh):
                                tp)
        tn_a[:, 1] *= 2 # node j

-       return np.concatenate((tn_a, tn_b), axis = 0)
+       return np.concatenate((tn_a, tn_b), axis = 0).astype(self.t_type)

    def edge_zero_crossings(self, z_values = None):
        """Returns numpy list of points from edges where z values cross zero (or given value).
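The added `.astype(self.t_type)` casts guarantee that derived index arrays come back in the chosen dtype even when the intermediate arithmetic produced a wider integer type. A minimal illustration with stand-in arrays (not resqpy code):

```python
import numpy as np

t_type = np.int32
tna = np.zeros((4, 6, 3, 2), dtype = np.int64)   # stand-in tri-node array, deliberately wide
ni = 7
# composite node index j * ni + i, cast so the result always carries the triangle dtype
composite_ji = (tna[..., 0] * ni + tna[..., 1]).astype(t_type)
assert composite_ji.dtype == np.int32
```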
resqpy/surface/_triangulated_patch.py
CHANGED
@@ -31,6 +31,7 @@ class TriangulatedPatch:
        self.ni = None # used to convert a triangle index back into a (j, i) pair when freshly built from mesh
        self.points = None
        self.crs_uuid = crs_uuid
+       self.t_type = np.int32 # gets set to int64 if number of points requires it
        if patch_node is not None:
            xml_patch_index = rqet.find_tag_int(patch_node, 'PatchIndex')
            assert xml_patch_index is not None
@@ -87,6 +88,7 @@ class TriangulatedPatch:
        except Exception:
            log.error('hdf5 points failure for triangle patch ' + str(self.patch_index))
            raise
+       self._set_t_type()
        triangles_node = rqet.find_tag(self.node, 'Triangles')
        h5_key_pair = self.model.h5_uuid_and_path_for_node(triangles_node)
        if h5_key_pair is None:
@@ -97,7 +99,7 @@ class TriangulatedPatch:
                                            cache_array = True,
                                            object = self,
                                            array_attribute = 'triangles',
-                                           dtype =
+                                           dtype = self.t_type)
        except Exception:
            log.error('hdf5 triangles failure for triangle patch ' + str(self.patch_index))
            raise
@@ -147,7 +149,7 @@ class TriangulatedPatch:
        # find unique points used by those triangles
        p_keep = np.unique(large_t[t_in])
        # note new point index for each old point that is being kept
-       p_map = np.full(len(points_in), -1, dtype =
+       p_map = np.full(len(points_in), -1, dtype = large_t.dtype)
        p_map[p_keep] = np.arange(len(p_keep))
        # copy those unique points into a trimmed points array
        points_trimmed = large_p[p_keep]
@@ -190,10 +192,10 @@ class TriangulatedPatch:
        # create pair of triangles
        if quad_triangles:
            self.triangle_count = 4
-           self.triangles = np.array([[0, 2, 4], [2, 1, 4], [1, 3, 4], [3, 0, 4]], dtype =
+           self.triangles = np.array([[0, 2, 4], [2, 1, 4], [1, 3, 4], [3, 0, 4]], dtype = self.t_type)
        else:
            self.triangle_count = 2
-           self.triangles = np.array([[0, 1, 2], [0, 3, 1]], dtype =
+           self.triangles = np.array([[0, 1, 2], [0, 3, 1]], dtype = self.t_type)

    def set_to_triangle(self, corners):
        """Populate this (empty) patch with a single triangle."""
@@ -202,12 +204,12 @@ class TriangulatedPatch:
        self.node_count = 3
        self.points = corners.copy()
        self.triangle_count = 1
-       self.triangles = np.array([[0, 1, 2]], dtype =
+       self.triangles = np.array([[0, 1, 2]], dtype = self.t_type)

    def set_to_triangle_pair(self, corners):
        """Populate this (empty) patch with a pair of triangles."""

-       self.set_from_triangles_and_points(np.array([[0, 1, 3], [0, 3, 2]], dtype =
+       self.set_from_triangles_and_points(np.array([[0, 1, 3], [0, 3, 2]], dtype = self.t_type), corners)

    def set_from_triangles_and_points(self, triangles, points):
        """Populate this (empty) patch from triangle node indices and points from elsewhere."""
@@ -240,7 +242,7 @@ class TriangulatedPatch:
        self.node_count = (n + 1) * (n + 2) // 2
        self.points = np.empty((self.node_count, 3))
        self.triangle_count = n * n
-       self.triangles = np.empty((self.triangle_count, 3), dtype =
+       self.triangles = np.empty((self.triangle_count, 3), dtype = self.t_type)
        self.points[0] = sail_point(centre, radius, azimuth, 0.0).copy()
        p = 0
        t = 0
@@ -282,11 +284,12 @@ class TriangulatedPatch:
            quad_centres[:, :] = 0.25 * (mesh_xyz[:-1, :-1, :] + mesh_xyz[:-1, 1:, :] + mesh_xyz[1:, :-1, :] +
                                         mesh_xyz[1:, 1:, :]).reshape((-1, 3))
            self.points = np.concatenate((mesh_xyz.copy().reshape((-1, 3)), quad_centres))
+           self._set_t_type()
            mesh_size = mesh_xyz.size // 3
            self.node_count = self.points.size // 3
            self.triangle_count = 4 * (mesh_shape[0] - 1) * (mesh_shape[1] - 1)
            self.quad_triangles = True
-           triangles = np.empty((mesh_shape[0] - 1, mesh_shape[1] - 1, 4, 3), dtype =
+           triangles = np.empty((mesh_shape[0] - 1, mesh_shape[1] - 1, 4, 3), dtype = self.t_type) # flatten later
            nic = ni - 1
            for j in range(mesh_shape[0] - 1):
                for i in range(nic):
@@ -298,10 +301,11 @@ class TriangulatedPatch:
                    triangles[j, i, 3, 2] = j * ni + i
        else:
            self.points = mesh_xyz.copy().reshape((-1, 3))
+           self._set_t_type()
            self.node_count = mesh_shape[0] * mesh_shape[1]
            self.triangle_count = 2 * (mesh_shape[0] - 1) * (mesh_shape[1] - 1)
            self.quad_triangles = False
-           triangles = np.empty((mesh_shape[0] - 1, mesh_shape[1] - 1, 2, 3), dtype =
+           triangles = np.empty((mesh_shape[0] - 1, mesh_shape[1] - 1, 2, 3), dtype = self.t_type) # flatten later
            for j in range(mesh_shape[0] - 1):
                for i in range(mesh_shape[1] - 1):
                    triangles[j, i, 0, 0] = j * ni + i
@@ -321,7 +325,7 @@ class TriangulatedPatch:

        indices = self.get_indices_from_sparse_meshxyz(mesh_xyz)

-       triangles = np.zeros((2 * (mesh_shape[0] - 1) * (mesh_shape[1] - 1), 3), dtype =
+       triangles = np.zeros((2 * (mesh_shape[0] - 1) * (mesh_shape[1] - 1), 3), dtype = self.t_type) # truncate later
        nt = 0
        for j in range(mesh_shape[0] - 1):
            for i in range(mesh_shape[1] - 1):
@@ -357,7 +361,7 @@ class TriangulatedPatch:
            else:
                raise Exception('code failure in sparse mesh processing')
        self.ni = None
-       self.triangles = triangles[:nt, :]
+       self.triangles = triangles[:nt, :].copy()
        self.triangle_count = nt

    def get_indices_from_sparse_meshxyz(self, mesh_xyz):
@@ -373,6 +377,7 @@ class TriangulatedPatch:
            points[i] = mesh_xyz[non_nans[0][i], non_nans[1][i]]
            indices[non_nans[0][i], non_nans[1][i]] = i
        self.points = points[:len(non_nans[0]), :]
+       self._set_t_type()
        self.node_count = len(non_nans[0])

        return indices
@@ -389,11 +394,12 @@ class TriangulatedPatch:
            quad_centres = np.empty((nj, ni, 3))
            quad_centres[:, :, :] = 0.25 * np.sum(mesh_xyz, axis = (2, 3))
            self.points = np.concatenate((mesh_xyz.copy().reshape((-1, 3)), quad_centres.reshape((-1, 3))))
+           self._set_t_type()
            mesh_size = mesh_xyz.size // 3
            self.node_count = 5 * nj * ni
            self.triangle_count = 4 * nj * ni
            self.quad_triangles = True
-           triangles = np.empty((nj, ni, 4, 3), dtype =
+           triangles = np.empty((nj, ni, 4, 3), dtype = self.t_type) # flatten later
            for j in range(nj):
                for i in range(ni):
                    base_p = 4 * (j * ni + i)
@@ -405,10 +411,11 @@ class TriangulatedPatch:
                    triangles[j, i, 3, 2] = base_p
        else:
            self.points = mesh_xyz.copy().reshape((-1, 3))
+           self._set_t_type()
            self.node_count = 4 * nj * ni
            self.triangle_count = 2 * nj * ni
            self.quad_triangles = False
-           triangles = np.empty((nj, ni, 2, 3), dtype =
+           triangles = np.empty((nj, ni, 2, 3), dtype = self.t_type) # flatten later
            for j in range(nj):
                for i in range(ni):
                    base_p = 4 * (j * ni + i)
@@ -469,7 +476,8 @@ class TriangulatedPatch:
        self.triangle_count = 12
        self.node_count = 8
        self.points = cp.copy().reshape((-1, 3))
-
+       self._set_t_type()
+       triangles = np.empty((3, 2, 2, 3), dtype = self.t_type) # flatten later
        for axis in range(3):
            if axis == 0:
                ip1, ip2 = 2, 1
@@ -500,7 +508,8 @@ class TriangulatedPatch:
        quad_centres[2, 1, :] = 0.25 * np.sum(cp[:, :, 1, :], axis = (0, 1)) # I+
        self.node_count = 14
        self.points = np.concatenate((cp.copy().reshape((-1, 3)), quad_centres.reshape((-1, 3))))
-
+       self._set_t_type()
+       triangles = np.empty((3, 2, 4, 3), dtype = self.t_type) # flatten later
        for axis in range(3):
            if axis == 0:
                ip1, ip2 = 2, 1
@@ -544,3 +553,6 @@ class TriangulatedPatch:
        _, _ = self.triangles_and_points() # ensure points are loaded
        z_values = self.points[:, 2].copy()
        self.points[:, 2] = ref_depth + scaling_factor * (z_values - ref_depth)
+
+   def _set_t_type(self):
+       self.t_type = np.int64 if len(self.points) > 2_147_483_648 else np.int32
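The `_set_t_type()` calls are placed immediately after `self.points` is populated, so the dtype decision always reflects the final point count. A rough sketch of that ordering, using a hypothetical stand-in class rather than `TriangulatedPatch` itself:

```python
import numpy as np

class PatchLike:
    """Hypothetical stand-in showing the deferred dtype decision."""

    def __init__(self):
        self.points = None
        self.t_type = np.int32   # default; revised once points are known

    def set_points(self, points):
        self.points = points
        self._set_t_type()       # decide dtype only after points exist

    def _set_t_type(self):
        # same threshold expression as the diff
        self.t_type = np.int64 if len(self.points) > 2_147_483_648 else np.int32

patch = PatchLike()
patch.set_points(np.random.rand(10, 3))
triangles = np.array([[0, 1, 2]], dtype = patch.t_type)   # int32 for a small patch
assert triangles.dtype == np.int32
```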
resqpy/well/_blocked_well.py
CHANGED
@@ -136,6 +136,7 @@ class BlockedWell(BaseResqpy):
        self.wellbore_interpretation = None #: associated wellbore interpretation object
        self.wellbore_feature = None #: associated wellbore feature object
        self.well_name = None #: name of well to import from ascii file formats
+       self.cell_index_dtype = np.int32 #: set to int64 if any grid has more than 2^31 - 1 cells, otherwise int32

        self.cell_interval_map = None # maps from cell index to interval (ie. node) index; populated on demand

@@ -150,11 +151,11 @@ class BlockedWell(BaseResqpy):
        # this is the default as indicated on page 139 (but not p. 180) of the RESQML Usage Gude v2.0.1
        # also assumes K is generally increasing downwards
        # see DevOps backlog item 269001 discussion for more information
-       # self.face_index_map = np.array([[0, 1], [4, 2], [5, 3]], dtype =
-       self.face_index_map = np.array([[0, 1], [2, 4], [5, 3]], dtype =
+       # self.face_index_map = np.array([[0, 1], [4, 2], [5, 3]], dtype = np.int8)
+       self.face_index_map = np.array([[0, 1], [2, 4], [5, 3]], dtype = np.int8) # order: top, base, J-, I+, J+, I-
        # and the inverse, maps from 0..5 to (axis, p01)
-       # self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 1], [2, 1], [1, 0], [2, 0]], dtype =
-       self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]], dtype =
+       # self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 1], [2, 1], [1, 0], [2, 0]], dtype = np.int8)
+       self.face_index_inverse_map = np.array([[0, 0], [0, 1], [1, 0], [2, 1], [1, 1], [2, 0]], dtype = np.int8)
        # note: the rework_face_pairs() method, below, overwrites the face indices based on I, J cell indices

        super().__init__(model = parent_model,
@@ -238,14 +239,14 @@ class BlockedWell(BaseResqpy):

        assert self.cell_count < self.node_count

-       self.__find_ci_node_and_load_hdf5_array(node = node)
-
-       self.__find_fi_node_and_load_hdf5_array(node)
-
        unique_grid_indices = self.__find_gi_node_and_load_hdf5_array(node = node)

        self.__find_grid_node(node = node, unique_grid_indices = unique_grid_indices)

+       self.__find_ci_node_and_load_hdf5_array(node = node)
+
+       self.__find_fi_node_and_load_hdf5_array(node)
+
        interp_uuid = rqet.find_nested_tags_text(node, ['RepresentedInterpretation', 'UUID'])
        if interp_uuid is None:
            self.wellbore_interpretation = None
@@ -273,7 +274,7 @@ class BlockedWell(BaseResqpy):

        ci_node = rqet.find_tag(node, 'CellIndices')
        assert ci_node is not None, 'blocked well cell indices hdf5 reference not found in xml'
-       rqwu.load_hdf5_array(self, ci_node, 'cell_indices', dtype =
+       rqwu.load_hdf5_array(self, ci_node, 'cell_indices', dtype = self.cell_index_dtype)
        assert (self.cell_indices is not None and self.cell_indices.ndim == 1 and
                self.cell_indices.size == self.cell_count), 'mismatch in number of cell indices for blocked well'
        self.cellind_null = rqet.find_tag_int(ci_node, 'NullValue')
@@ -285,7 +286,7 @@ class BlockedWell(BaseResqpy):

        fi_node = rqet.find_tag(node, 'LocalFacePairPerCellIndices')
        assert fi_node is not None, 'blocked well face indices hdf5 reference not found in xml'
-       rqwu.load_hdf5_array(self, fi_node, 'raw_face_indices', dtype =
+       rqwu.load_hdf5_array(self, fi_node, 'raw_face_indices', dtype = np.int8)
        assert self.raw_face_indices is not None, 'failed to load face indices for blocked well'
        assert self.raw_face_indices.size == 2 * self.cell_count, 'mismatch in number of cell faces for blocked well'
        if self.raw_face_indices.ndim > 1:
@@ -305,15 +306,14 @@ class BlockedWell(BaseResqpy):

        gi_node = rqet.find_tag(node, 'GridIndices')
        assert gi_node is not None, 'blocked well grid indices hdf5 reference not found in xml'
-       rqwu.load_hdf5_array(self, gi_node, 'grid_indices', dtype =
+       rqwu.load_hdf5_array(self, gi_node, 'grid_indices', dtype = np.int32)
        # assert self.grid_indices is not None and self.grid_indices.ndim == 1 and self.grid_indices.size == self.node_count - 1
        # temporary code to handle blocked wells with incorrectly shaped grid indices wrt. nodes
        assert self.grid_indices is not None and self.grid_indices.ndim == 1
        if self.grid_indices.size != self.node_count - 1:
            if self.grid_indices.size == self.cell_count and self.node_count == 2 * self.cell_count:
                log.warning(f'handling node duplication or missing unblocked intervals in blocked well: {self.title}')
-
-               expanded_grid_indices = np.full(self.node_count - 1, -1, dtype = int)
+               expanded_grid_indices = np.full(self.node_count - 1, -1, dtype = np.int32)
                expanded_grid_indices[::2] = self.grid_indices
                self.grid_indices = expanded_grid_indices
            else:
@@ -342,6 +342,8 @@ class BlockedWell(BaseResqpy):
            grid_uuid = rqet.uuid_for_part_root(grid_node)
            grid_obj = self.model.grid(uuid = grid_uuid, find_properties = False)
            self.grid_list.append(grid_obj)
+           if grid_obj.is_big():
+               self.cell_index_dtype = np.int64

    def extract_property_collection(self, refresh = False):
        """Returns a property collection for the blocked well."""
@@ -434,7 +436,7 @@ class BlockedWell(BaseResqpy):
    def _set_cell_interval_map(self):
        """Sets up an index mapping from blocked cell index to interval index, accounting for unblocked intervals."""

-       self.cell_interval_map = np.zeros(self.cell_count, dtype =
+       self.cell_interval_map = np.zeros(self.cell_count, dtype = np.int32)
        ci = 0
        for ii in range(self.node_count - 1):
            if self.grid_indices[ii] < 0:
@@ -461,7 +463,7 @@ class BlockedWell(BaseResqpy):
        grid_for_cell_list = []
        grid_indices = self.compressed_grid_indices()
        assert len(grid_indices) == self.cell_count
-       cell_indices = np.empty((self.cell_count, 3), dtype =
+       cell_indices = np.empty((self.cell_count, 3), dtype = np.int32)
        for cell_number in range(self.cell_count):
            grid = self.grid_list[grid_indices[cell_number]]
            grid_for_cell_list.append(grid)
@@ -493,7 +495,7 @@ class BlockedWell(BaseResqpy):

        if cells_kji0 is None or len(cells_kji0) == 0:
            return None
-       well_box = np.empty((2, 3), dtype =
+       well_box = np.empty((2, 3), dtype = np.int32)
        well_box[0] = np.min(cells_kji0, axis = 0)
        well_box[1] = np.max(cells_kji0, axis = 0)
        return well_box
@@ -853,9 +855,9 @@ class BlockedWell(BaseResqpy):
        self.node_count = len(trajectory_mds)
        self.node_mds = np.array(trajectory_mds)
        self.cell_count = len(blocked_cells_kji0)
-       self.grid_indices = np.array(blocked_intervals, dtype =
-       self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0))
-       self.face_pair_indices = np.array(blocked_face_pairs, dtype =
+       self.grid_indices = np.array(blocked_intervals, dtype = np.int32) # NB. only supporting one grid at the moment
+       self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0)).astype(self.cell_index_dtype)
+       self.face_pair_indices = np.array(blocked_face_pairs, dtype = np.int8)
        self.grid_list = [grid]

        trajectory_points, trajectory_mds = BlockedWell.__add_tail_to_trajectory_if_necessary(
@@ -877,7 +879,7 @@ class BlockedWell(BaseResqpy):
        row = df.iloc[df_row]
        if pd.isna(row[0]) or pd.isna(row[1]) or pd.isna(row[2]):
            return None
-       cell_kji0 = np.empty((3,), dtype =
+       cell_kji0 = np.empty((3,), dtype = np.int32)
        cell_kji0[:] = row[2], row[1], row[0]
        cell_kji0[:] -= 1
        return cell_kji0
@@ -1201,9 +1203,10 @@ class BlockedWell(BaseResqpy):
        self.node_count = len(trajectory_mds)
        self.node_mds = np.array(trajectory_mds)
        self.cell_count = len(blocked_cells_kji0)
-
-
-       self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0))
+       # NB. only supporting one grid at the moment
+       self.grid_indices = np.array(blocked_intervals, dtype = np.int32)
+       self.cell_indices = grid.natural_cell_indices(np.array(blocked_cells_kji0)).astype(
+           self.cell_index_dtype)
        self.face_pair_indices = np.array(blocked_face_pairs)
        self.grid_list = [grid]

@@ -1240,7 +1243,7 @@ class BlockedWell(BaseResqpy):
        words = line.split()
        assert len(words) >= 9, 'not enough items on data line in cell I/O file, minimum 9 expected'
        i1, j1, k1 = int(words[0]), int(words[1]), int(words[2])
-       cell_kji0 = np.array((k1 - 1, j1 - 1, i1 - 1), dtype =
+       cell_kji0 = np.array((k1 - 1, j1 - 1, i1 - 1), dtype = np.int32)
        assert np.all(0 <= cell_kji0) and np.all(
            cell_kji0 < grid.extent_kji), 'cell I/O cell index not within grid extent'
        entry_xyz = np.array((float(words[3]), float(words[4]), float(words[5])))
@@ -1511,7 +1514,7 @@ class BlockedWell(BaseResqpy):
        for grid in self.grid_list:
            grid.cache_all_geometry_arrays()

-       k_face_check = np.zeros((2, 2), dtype =
+       k_face_check = np.zeros((2, 2), dtype = np.int8)
        k_face_check[1, 1] = 1 # now represents entry, exit of K-, K+
        k_face_check_end = k_face_check.copy()
        k_face_check_end[1] = -1 # entry through K-, terminating (TD) within cell
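The new `cell_index_dtype` starts at int32 and widens to int64 as soon as any grid attached to the blocked well reports `is_big()`. A hedged sketch of that widening loop, with simple stand-in grid objects rather than resqpy classes:

```python
import numpy as np

class StubGrid:
    """Stand-in with just enough behaviour to mimic Grid.is_big()."""

    def __init__(self, nk, nj, ni):
        self.extent_kji = (nk, nj, ni)

    def is_big(self):
        nk, nj, ni = self.extent_kji
        return nk * nj * ni >= 2_147_483_648   # 2**31 cells or more

cell_index_dtype = np.int32
for grid in [StubGrid(100, 200, 300), StubGrid(1_000, 2_000, 2_000)]:
    if grid.is_big():
        cell_index_dtype = np.int64            # widen once, never narrow back
assert cell_index_dtype == np.int64            # the second grid holds 4e9 cells
```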
resqpy/well/_trajectory.py
CHANGED
@@ -448,7 +448,7 @@ class Trajectory(BaseResqpy):
        """Loads the trajectory object based on the centre points of a list of cells."""

        assert grid is not None, 'grid argument missing for trajectory initislisation from cell list'
-       cell_kji0_list = np.array(cell_kji0_list, dtype =
+       cell_kji0_list = np.array(cell_kji0_list, dtype = np.int32)
        assert cell_kji0_list.ndim == 2 and cell_kji0_list.shape[1] == 3
        assert spline_mode in ['none', 'linear', 'square', 'cube']

@@ -483,7 +483,7 @@ class Trajectory(BaseResqpy):
        df = wellspec_dict[well_name]
        assert len(df) > 0, 'no rows of perforation data found in wellspec for well ' + well_name

-       cell_kji0_list = np.empty((len(df), 3), dtype =
+       cell_kji0_list = np.empty((len(df), 3), dtype = np.int32)
        cell_kji0_list[:, 0] = df['L']
        cell_kji0_list[:, 1] = df['JW']
        cell_kji0_list[:, 2] = df['IW']
resqpy/well/blocked_well_frame.py
CHANGED
@@ -191,7 +191,7 @@ def add_blocked_well_properties_from_wellbore_frame(bw,
    wb_a = np.zeros(bw.cell_count, dtype = bool)
    length = np.zeros(bw.cell_count, dtype = float)
    pperf = np.zeros(bw.cell_count, dtype = float)
-   dominant_wbf_interval = np.full(bw.cell_count, -1, dtype =
+   dominant_wbf_interval = np.full(bw.cell_count, -1, dtype = np.int32)
    ci = 0
    for wb_ii in range(bw.node_count - 1):
        if bw.grid_indices[wb_ii] < 0:
resqpy/well/well_object_funcs.py
CHANGED
@@ -442,10 +442,10 @@ def add_logs_from_cellio(blockedwell, cellio):
        df = df.apply(pd.to_numeric)
        # Get the cell_indices from the grid for the given i/j/k
        df['cell_indices'] = grid.natural_cell_indices(
-           np.array((df['k_index'] - 1, df['j_index'] - 1, df['i_index'] - 1), dtype =
+           np.array((df['k_index'] - 1, df['j_index'] - 1, df['i_index'] - 1), dtype = np.int32).T)
        df = df.drop(['i_index', 'j_index', 'k_index', 'x_in', 'y_in', 'z_in', 'x_out', 'y_out', 'z_out'], axis = 1)
-       assert (df['cell_indices'] == blockedwell.cell_indices
-
+       assert (df['cell_indices'] == blockedwell.cell_indices).all(), \
+           'Cell indices do not match between blocked well and log inputs'

        # Work out if the data columns are continuous, categorical or discrete
        type_dict = {}
@@ -468,11 +468,11 @@ def add_logs_from_cellio(blockedwell, cellio):
            if log not in ['cell_indices']:
                data_type = type_dict[log]
                if log == 'ZONES':
-                   data_type, dtype, null, discrete = 'discrete',
+                   data_type, dtype, null, discrete = 'discrete', np.int32, -1, True
                elif data_type == 'continuous':
                    dtype, null, discrete = float, np.nan, False
                else:
-                   dtype, null, discrete =
+                   dtype, null, discrete = np.int32, -1, True
                if data_type == 'categorical':
                    lookup_uuid = lookup_dict[log] # For categorical data, find or generate a StringLookupTable
                else:
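The tightened assertion in the first hunk reduces the elementwise pandas comparison with `.all()` before asserting. A tiny synthetic example of the same check, with data invented purely for illustration:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'cell_indices': np.array([3, 7, 11], dtype = np.int32)})
blocked_well_cell_indices = np.array([3, 7, 11], dtype = np.int32)
# the comparison yields a boolean Series; .all() collapses it to a single truth value
assert (df['cell_indices'] == blocked_well_cell_indices).all(), \
    'Cell indices do not match between blocked well and log inputs'
```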