xtgeo: 4.12.1-cp313-cp313-macosx_11_0_arm64.whl → 4.13.1-cp313-cp313-macosx_11_0_arm64.whl

This diff shows the content of publicly released package versions as published to a supported public registry, and is provided for informational purposes only.


Two binary files changed (contents not shown).
xtgeo/common/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
 
- __version__ = version = '4.12.1'
- __version_tuple__ = version_tuple = (4, 12, 1)
+ __version__ = version = '4.13.1'
+ __version_tuple__ = version_tuple = (4, 13, 1)
 
- __commit_id__ = commit_id = 'g6f276d4df'
+ __commit_id__ = commit_id = 'gdae14dc56'
@@ -7,7 +7,6 @@ from dataclasses import dataclass, field
  from typing import TYPE_CHECKING, Final
 
  import numpy as np
- from scipy.interpolate import make_interp_spline
 
  import xtgeo._internal as _internal  # type: ignore
  from xtgeo.common.log import null_logger
@@ -42,7 +41,13 @@ SUM_ATTRS: Final = [
 
  @dataclass
  class CubeAttrs:
-     """Internal class for computing attributes in window between two surfaces."""
+     """Internal class for computing attributes in window between two surfaces.
+
+     Compared with the former implementation (mid September 2025), more logic is moved
+     to the C++ routine, ensuring:
+     - Significantly smaller memory overhead (e.g. 0.1 GB vs 20 GB)
+     - Much faster execution, in particular when using multiple processes (5-10 x)
+     """
 
      cube: Cube
      upper_surface: RegularSurface | float | int
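The new docstring summarizes the intent: attribute maps are computed from cube samples that fall in a depth window bounded by an upper and a lower surface. The idea for a single trace can be sketched in plain NumPy; the depths, window limits and statistics below are illustrative values, not xtgeo internals.

    import numpy as np

    # One synthetic seismic trace with known sample depths (illustrative values)
    depths = np.arange(1000.0, 1100.0, 4.0)
    trace = np.sin(depths / 15.0)

    # Window limits picked from an upper and a lower surface at this (i, j) location
    upper, lower = 1020.0, 1068.0

    inside = (depths >= upper) & (depths <= lower)  # samples inside the window
    window = trace[inside]

    stats = {
        "mean": window.mean(),
        "max": window.max(),
        "rms": float(np.sqrt(np.mean(window**2))),
    }
    print(stats)

In 4.13 this per-column work happens inside the C++ routine instead of materializing reduced and refined copies of the whole cube in Python, which is where the memory and speed gains quoted in the docstring come from.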
@@ -55,6 +60,8 @@ class CubeAttrs:
      _template_surface: RegularSurface | None = None
      _depth_array: np.ndarray | None = None
      _outside_depth: float | None = None  # detected and updated from the depth cube
+     _min_indices: int = 0  # minimum Z index for cube slicing
+     _max_indices: int = 0  # maximum Z index for cube slicing
      _reduced_cube: Cube = None
      _reduced_depth_array: np.ndarray | None = None
      _refined_cube: Cube | None = None
@@ -70,9 +77,7 @@ class CubeAttrs:
      def __post_init__(self) -> None:
          self._process_upper_lower_surface()
          self._create_depth_array()
-         self._create_reduced_cube()
-         self._refine_interpolate()
-         self._depth_mask()
+         self._determine_slice_indices()
          self._compute_statistical_attribute_surfaces()
 
      def result(self) -> dict[RegularSurface]:
@@ -84,6 +89,8 @@ class CubeAttrs:
 
          from xtgeo import surface_from_cube  # avoid circular import by having this here
 
+         logger.debug("Process upper and lower surface...")
+
          upper = (
              surface_from_cube(self.cube, self.upper_surface)
              if isinstance(self.upper_surface, (float, int))
@@ -132,6 +139,7 @@ class CubeAttrs:
                  "The minimum thickness is too large, no valid data in the interval. "
                  "Perhaps surfaces are overlapping?"
              )
+         logger.debug("Process upper and lower surface... done")
 
      def _create_depth_array(self) -> None:
          """Create a 1D array where values are cube depths; to be used as filter.
@@ -142,6 +150,7 @@ class CubeAttrs:
          Will also issue warnings or errors if the surfaces are outside the cube,
          depending on severity.
          """
+         logger.debug("Create depth array...")
 
          self._depth_array = np.array(
              [
@@ -175,18 +184,16 @@ class CubeAttrs:
              self._outside_depth,
              self._depth_array,
          )
+         logger.debug("Create depth array... done")
 
-     def _create_reduced_cube(self) -> None:
-         """Create a smaller cube based on the depth cube filter.
+     def _determine_slice_indices(self) -> None:
+         """Create parameters for cube slicing.
 
          The purpose is to limit the computation to the relevant volume, to save
          CPU time. I.e. cube values above the upper surface and below the lower are
          now excluded.
          """
-         from xtgeo import Cube  # avoid circular import by having this here
-
-         cubev = self.cube.values.copy()  # copy, so we don't change the input instance
-         cubev[self.cube.traceidcodes == 2] = 0.0  # set traceidcode 2 to zero
+         logger.debug("Determine cube slice indices...")
 
          # Create a boolean mask based on the threshold
          mask = self._depth_array < self._outside_depth
@@ -200,124 +207,21 @@ class CubeAttrs:
              "outside the cube?"
          )
 
-         min_indices = np.min(non_zero_indices)
-         max_indices = np.max(non_zero_indices) + 1  # Add 1 to include the upper bound
-
-         # Extract the reduced cube using slicing
-         reduced = cubev[:, :, min_indices:max_indices]
-
-         zori = float(self._depth_array.min())
-
-         self._reduced_cube = Cube(
-             ncol=reduced.shape[0],
-             nrow=reduced.shape[1],
-             nlay=reduced.shape[2],
-             xinc=self.cube.xinc,
-             yinc=self.cube.yinc,
-             zinc=self.cube.zinc,
-             xori=self.cube.xori,
-             yori=self.cube.yori,
-             zori=zori,
-             rotation=self.cube.rotation,
-             yflip=self.cube.yflip,
-             values=reduced.astype(np.float32),
-         )
-
-         self._reduced_depth_array = self._depth_array[min_indices:max_indices]
-
-         logger.debug("Reduced cubes created %s", self._reduced_cube.values.shape)
-
-     def _refine_interpolate(self) -> None:
-         """Apply reduced cubes and interpolate to a finer grid vertically.
+         self._min_indices = int(np.min(non_zero_indices))
+         # Add 1 to include the upper bound
+         self._max_indices = int(np.max(non_zero_indices) + 1)
 
-         This is done to get a more accurate representation of the cube values.
-         """
-         from xtgeo import Cube
-
-         logger.debug("Refine cubes and interpolate...")
-         arr = self._reduced_cube.values
-         ndiv = self.ndiv
-
-         # Create linear interpolation function along the last axis
-         fdepth = make_interp_spline(
-             np.arange(arr.shape[2]),
-             self._reduced_depth_array,
-             axis=0,
-             k=1,
-         )
-
-         # Create interpolation function along the last axis
-         if self.interpolation not in ["cubic", "linear"]:
-             raise ValueError("Interpolation must be either 'cubic' or 'linear'")
-
-         fcube = make_interp_spline(
-             np.arange(arr.shape[2]),
-             arr,
-             axis=2,
-             k=3 if self.interpolation == "cubic" else 1,
+         logger.debug("Determine cube slice indices... done")
+         logger.debug(
+             "Cube slice indices: %d to %d", self._min_indices, self._max_indices
          )
-         # Define new sampling points along the last axis
-         new_z = np.linspace(0, arr.shape[2] - 1, arr.shape[2] * ndiv)
-
-         # Resample the cube array
-         resampled_arr = fcube(new_z)
-
-         # Resample the depth array (always linear)
-         self._refined_depth_array = new_depth = fdepth(new_z)
-         new_zinc = (new_depth.max() - new_depth.min()) / (new_depth.shape[0] - 1)
-
-         self._refined_cube = Cube(
-             ncol=resampled_arr.shape[0],
-             nrow=resampled_arr.shape[1],
-             nlay=resampled_arr.shape[2],
-             xinc=self.cube.xinc,
-             yinc=self.cube.yinc,
-             zinc=new_zinc,
-             xori=self.cube.xori,
-             yori=self.cube.yori,
-             zori=self._refined_depth_array.min(),
-             rotation=self._reduced_cube.rotation,
-             yflip=self._reduced_cube.yflip,
-             values=resampled_arr.astype(np.float32),
-         )
-
-     def _depth_mask(self) -> None:
-         """Set nan values outside the interval defined by the upper + lower surface.
-
-         In addition, set nan values where the thickness is less than the minimum.
-
-         """
-
-         darry = np.expand_dims(self._refined_depth_array, axis=(0, 1))
-         upper_exp = np.expand_dims(self._upper.values, 2)
-         lower_exp = np.expand_dims(self._lower.values, 2)
-         mask_2d_exp = np.expand_dims(self._min_thickness_mask.values, 2)
-
-         self._refined_cube.values = np.where(
-             (darry < upper_exp) | (darry > lower_exp) | (mask_2d_exp == 0),
-             np.nan,
-             self._refined_cube.values,
-         ).astype(np.float32)
-
-         # similar for reduced cubes with original resolution
-         darry = np.expand_dims(self._reduced_depth_array, axis=(0, 1))
-
-         self._reduced_cube.values = np.where(
-             (darry < upper_exp) | (darry > lower_exp) | (mask_2d_exp == 0),
-             np.nan,
-             self._reduced_cube.values,
-         ).astype(np.float32)
 
      def _add_to_attribute_map(self, attr_name: str, values: np.ndarray) -> None:
          """Compute the attribute map and add to result dictionary."""
+         logger.debug("Add to attribute map...")
          attr_map = self._upper.copy()
          attr_map.values = np.ma.masked_invalid(values)
 
-         # apply mask for the cube's dead traces (traceidcode 2)
-         attr_map.values = np.ma.masked_where(
-             self.cube.traceidcodes == 2, attr_map.values
-         )
-
          # now resample to the original input map
          attr_map_resampled = self._template_surface.copy()
          attr_map_resampled.resample(attr_map)
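The slice-index logic above replaces the old copy-and-slice approach: instead of building a reduced cube, only the first and last usable Z indices are recorded and passed on. A minimal NumPy sketch of the same pattern, with an illustrative sentinel value standing in for _outside_depth:

    import numpy as np

    # Depth samples along Z; entries equal to the sentinel are outside the window
    depth_array = np.array([9999.0, 9999.0, 1008.0, 1012.0, 1016.0, 9999.0])
    outside_depth = 9999.0

    mask = depth_array < outside_depth         # boolean filter, as in the diff
    non_zero_indices = np.nonzero(mask)[0]     # -> array([2, 3, 4])

    min_idx = int(non_zero_indices.min())      # 2
    max_idx = int(non_zero_indices.max() + 1)  # 5; +1 so slicing includes index 4

    # Downstream code only needs to look at this Z sub-range:
    print(depth_array[min_idx:max_idx])        # [1008. 1012. 1016.]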
@@ -329,21 +233,41 @@ class CubeAttrs:
          )
 
          self._result_attr_maps[attr_name] = attr_map_resampled
+         logger.debug("Add to attribute map... done")
 
      def _compute_statistical_attribute_surfaces(self) -> None:
          """Compute stats very fast by using internal C++ bindings."""
-
-         # compute statistics for vertically refined cube
-         cubecpp = _internal.cube.Cube(self._refined_cube)
-         all_attrs = cubecpp.cube_stats_along_z()
+         logger.debug("Compute statistical attribute surfaces...")
+
+         # compute statistics for vertically refined cube using original cube
+         cubecpp = _internal.cube.Cube(self.cube)
+         all_attrs = cubecpp.cube_stats_along_z(
+             self._upper.values,
+             self._lower.values,
+             self._depth_array,  # use original depth array
+             self.ndiv,
+             self.interpolation,
+             self.minimum_thickness,
+             self._min_indices,  # pass slice indices
+             self._max_indices,
+         )
 
          for attr in STAT_ATTRS:
              self._add_to_attribute_map(attr, all_attrs[attr])
 
-         # compute statistics for reduced cube (for sum attributes)
-         cubecpp = _internal.cube.Cube(self._reduced_cube)
-         all_attrs = cubecpp.cube_stats_along_z()
+         # compute statistics with ndiv=1 (for sum attributes)
+         all_attrs = cubecpp.cube_stats_along_z(
+             self._upper.values,
+             self._lower.values,
+             self._depth_array,  # use original depth array
+             1,
+             self.interpolation,
+             self.minimum_thickness,
+             self._min_indices,  # pass slice indices
+             self._max_indices,
+         )
 
-         # add sum attributes which are the last 3 in the list
          for attr in SUM_ATTRS:
              self._add_to_attribute_map(attr, all_attrs[attr])
+
+         logger.debug("Compute statistical attribute surfaces... done")
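The vertical refinement that used to be done in Python with scipy's make_interp_spline is now delegated to cube_stats_along_z through the ndiv and interpolation arguments. What ndiv means for a single trace can be sketched with linear interpolation in NumPy (the cubic case would need a spline); the trace values here are made up:

    import numpy as np

    trace = np.array([0.0, 1.0, 0.5, -0.2])
    ndiv = 4  # number of sub-samples per original sample interval

    z_old = np.arange(trace.size)
    z_new = np.linspace(0, trace.size - 1, trace.size * ndiv)  # same pattern as the old code

    refined = np.interp(z_new, z_old, trace)  # linear refinement of the trace
    print(refined.shape)                      # (16,)

Calling the routine a second time with ndiv=1, as the diff does for the sum attributes, effectively skips the vertical refinement.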
@@ -1,13 +1,16 @@
  from __future__ import annotations
 
- import warnings
  from typing import Any, Literal, cast
 
  import numpy as np
  import numpy.typing as npt
 
+ from xtgeo.common.log import null_logger
+
  from ._ecl_output_file import Phases, Simulator, TypeOfGrid, UnitSystem
 
+ _logger = null_logger(__name__)
+
 
  class InteHead:
      """Contains the values for the INTEHEAD array in ecl restart
@@ -134,7 +137,8 @@ class InteHead:
          try:
              return Simulator(s_code)
          except ValueError:
-             warnings.warn(f"Unknown simulator code {s_code}")
+             # changed from a UserWarning to a logging message
+             _logger.warning("Unknown simulator code %s", s_code)
              return s_code
 
      @property
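Replacing warnings.warn with a logger call changes both the delivery mechanism and the formatting style: the %s placeholder is only interpolated if the record is actually emitted, and the message is handled by normal logging configuration instead of the warnings machinery. A minimal standard-library comparison (the logger name is illustrative):

    import logging
    import warnings

    logger = logging.getLogger("intehead_example")  # illustrative name
    s_code = 42

    # Old behaviour: a UserWarning that callers must silence via the warnings module
    warnings.warn(f"Unknown simulator code {s_code}")

    # New behaviour: a log record with lazy %-formatting
    logger.warning("Unknown simulator code %s", s_code)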
@@ -193,7 +193,8 @@ def get_dz(
      if not flip:
          result *= -1
 
-     result = np.ma.masked_array(result, self._actnumsv == 0 if asmasked else False)
+     if asmasked:
+         result = np.ma.masked_array(result, self._actnumsv == 0)
 
      return GridProperty(
          ncol=self._ncol,
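The rewritten guard matters because np.ma.masked_array(values, False) still returns a masked array, just with an all-False mask, so the old one-liner produced a MaskedArray even when asmasked was False. A quick demonstration of the pitfall:

    import numpy as np

    values = np.array([1.0, 2.0, 3.0])

    always_masked = np.ma.masked_array(values, False)
    print(isinstance(always_masked, np.ma.MaskedArray))   # True, despite mask=False

    asmasked = False
    result = np.ma.masked_array(values, values > 2.0) if asmasked else values
    print(isinstance(result, np.ma.MaskedArray))          # False: plain ndarray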
@@ -1258,85 +1259,86 @@ def reduce_to_one_layer(self: Grid) -> None:
      self._subgrids = None
 
 
- def translate_coordinates(
-     self: Grid,
-     translate: tuple[float, float, float] = (0.0, 0.0, 0.0),
-     flip: tuple[int, int, int] = (1, 1, 1),
+ def reverse_row_axis(
+     self: Grid, ijk_handedness: Literal["left", "right"] | None = None
  ) -> None:
-     """Translate grid coordinates."""
-     self._set_xtgformat1()
+     """Flip the row-axis for xtgformat=2 grid arrays.
 
-     tx, ty, tz = translate
-     fx, fy, fz = flip
+     This reverses the J-direction (rows) by flipping along axis 1 for both
+     coordsv and zcornsv arrays, and also handles the corner ordering within
+     each pillar to maintain proper geometry.
+     """
+     if ijk_handedness == self.ijk_handedness:
+         return
 
-     ier = _cxtgeo.grd3d_translate(
-         self._ncol,
-         self._nrow,
-         self._nlay,
-         fx,
-         fy,
-         fz,
-         tx,
-         ty,
-         tz,
-         self._coordsv,
-         self._zcornsv,
-     )
-     if ier != 0:
-         raise RuntimeError(f"Something went wrong in translate, code: {ier}")
+     self._set_xtgformat2()
 
-     logger.info("Translation of coords done")
+     # Flip coordsv along the row axis (axis 1) and make contiguous with copy()
+     self._coordsv = np.flip(self._coordsv, axis=1).copy()
 
+     # For zcornsv, we need to flip along row axis and also swap corner ordering
+     # Original corner order: SW, SE, NW, NE (indices 0,1,2,3)
+     # After row flip: NW, NE, SW, SE (should become indices 0,1,2,3)
+     # So we need to rearrange: [2,3,0,1]
+     zcorns_flipped = np.flip(self._zcornsv, axis=1)  # Flip along row axis
+     self._zcornsv = zcorns_flipped[:, :, :, [2, 3, 0, 1]].copy()  # Reorder corners
 
- def reverse_row_axis(
-     self: Grid, ijk_handedness: Literal["left", "right"] | None = None
- ) -> None:
-     """Reverse rows (aka flip) for geometry and assosiated properties."""
-     if ijk_handedness == self.ijk_handedness:
-         return
+     # Also flip actnum along row axis
+     self._actnumsv = np.flip(self._actnumsv, axis=1).copy()
 
-     # update the handedness
-     if ijk_handedness is None:
-         self._ijk_handedness = estimate_handedness(self)
+     # Update handedness
+     if self._ijk_handedness == "left":
+         self._ijk_handedness = "right"
+     else:
+         self._ijk_handedness = "left"
 
-     original_handedness = self._ijk_handedness
-     original_xtgformat = self._xtgformat
+     # Handle properties if they exist
+     if self._props and self._props.props:
+         for prop in self._props.props:
+             prop.values = np.flip(prop.values, axis=1).copy()
 
-     self._set_xtgformat1()
+     logger.info("Reversing of row axis done")
 
-     ier = _cxtgeo.grd3d_reverse_jrows(
-         self._ncol,
-         self._nrow,
-         self._nlay,
-         self._coordsv.ravel(),
-         self._zcornsv.ravel(),
-         self._actnumsv.ravel(),
-     )
 
-     if ier != 0:
-         raise RuntimeError(f"Something went wrong in jswapping, code: {ier}")
+ def reverse_column_axis(
+     self: Grid, ijk_handedness: Literal["left", "right"] | None = None
+ ) -> None:
+     """Flip the column-axis for xtgformat=2 grid arrays.
 
-     if self._props is None:
+     This reverses the I-direction (columns) by flipping along axis 0 for both
+     coordsv and zcornsv arrays, and also handles the corner ordering within
+     each pillar to maintain proper geometry.
+     """
+     if ijk_handedness == self.ijk_handedness:
          return
 
-     # do it for properties
-     if self._props.props:
-         for prp in self._props.props:
-             prp.values = prp.values[:, ::-1, :]
+     self._set_xtgformat2()
+
+     # Flip coordsv along the column axis (axis 0) and make contiguous with copy()
+     self._coordsv = np.flip(self._coordsv, axis=0).copy()
 
-     # update the handedness
-     if ijk_handedness is None:
-         self._ijk_handedness = estimate_handedness(self)
+     # For zcornsv, we need to flip along column axis and also swap corner ordering
+     # Original corner order: SW, SE, NW, NE (indices 0,1,2,3)
+     # After column flip: SE, SW, NE, NW (should become indices 0,1,2,3)
+     # So we need to rearrange: [1,0,3,2]
+     zcorns_flipped = np.flip(self._zcornsv, axis=0)  # Flip along column axis
+     self._zcornsv = zcorns_flipped[:, :, :, [1, 0, 3, 2]].copy()  # Reorder corners
 
-     if original_handedness == "left":
+     # Also flip actnum along column axis
+     self._actnumsv = np.flip(self._actnumsv, axis=0).copy()
+
+     # Update handedness
+     if self._ijk_handedness == "left":
          self._ijk_handedness = "right"
      else:
          self._ijk_handedness = "left"
 
-     if original_xtgformat == 2:
-         self._set_xtgformat2()
+     # Handle properties if they exist
+     if self._props and self._props.props:
+         for prop in self._props.props:
+             prop.values = np.flip(prop.values, axis=0).copy()
 
-     logger.info("Reversing of rows done")
+     logger.info("Reversing of column axis done")
 
 
  def get_adjacent_cells(
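The corner reordering in the new reverse_row_axis can be verified on a tiny synthetic array whose last axis plays the role of the four corner slots per pillar; the shape and values are illustrative, not the real zcornsv layout:

    import numpy as np

    nrow = 3
    zcorn = np.zeros((2, nrow, 1, 4))
    for j in range(nrow):
        zcorn[:, j, :, :] = [j * 10 + k for k in range(4)]  # corner k in row j -> j*10 + k

    flipped = np.flip(zcorn, axis=1)                   # reverse the J (row) direction
    reordered = flipped[:, :, :, [2, 3, 0, 1]].copy()  # swap the SW/SE and NW/NE slots

    # The pillar that was in row 0 ends up in the last row with its corners reordered:
    print(zcorn[0, 0, 0])        # [0. 1. 2. 3.]
    print(reordered[0, -1, 0])   # [2. 3. 0. 1.]

reverse_column_axis follows the same pattern along axis 0, using the [1, 0, 3, 2] permutation.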
@@ -1399,7 +1401,10 @@ def estimate_design(
      """Estimate (guess) (sub)grid design by examining DZ in median thickness column."""
      actv = self.get_actnum().values
 
-     dzv = self.get_dz(asmasked=False).values
+     dzv_raw = self.get_dz(asmasked=False).values
+     # Although asmasked is False, the array values may still be a masked numpy array.
+     # Need to convert to an ordinary numpy array to avoid warnings later
+     dzv = np.ma.filled(dzv_raw, fill_value=0.0)
 
      # treat inactive thicknesses as zero
      dzv[actv == 0] = 0.0
@@ -1416,6 +1421,11 @@
      # find cumulative thickness as a 2D array
      dzcum: np.ndarray = np.sum(dzv, axis=2, keepdims=False)
 
+     # Ensure dzcum is a regular numpy array to avoid warnings
+     if isinstance(dzcum, np.ma.MaskedArray):
+         dzcum = np.ma.filled(dzcum, fill_value=0.0)
+     dzcum = np.asarray(dzcum)
+
      # find the average thickness for nonzero thicknesses
      dzcum2 = dzcum.copy()
      dzcum2[dzcum == 0.0] = np.nan
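Both estimate_design changes deal with the same masked-array leakage: a sum over a masked array stays masked, and np.ma.filled converts it back to a plain ndarray with a chosen fill value before further arithmetic. A quick illustration:

    import numpy as np

    dz = np.ma.masked_array(
        [[1.0, 2.0], [3.0, 4.0]],
        mask=[[False, True], [False, False]],
    )

    dzcum = np.sum(dz, axis=1)                   # still a MaskedArray
    plain = np.ma.filled(dzcum, fill_value=0.0)  # ordinary ndarray

    print(type(dzcum).__name__, type(plain).__name__)  # MaskedArray ndarray
    print(plain)                                        # [1. 7.]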
@@ -4,9 +4,8 @@ from typing import TYPE_CHECKING
 
  import numpy as np
 
- from xtgeo import _cxtgeo
+ import xtgeo._internal as _internal  # type: ignore
  from xtgeo.common import null_logger
- from xtgeo.common.constants import UNDEF_INT
 
  logger = null_logger(__name__)
 
@@ -23,44 +22,29 @@ def make_hybridgrid(
      region_number: int | None = None,
  ) -> None:
      """Make hybrid grid."""
-     self._set_xtgformat1()
+     self._set_xtgformat2()
 
      newnlay = self.nlay * 2 + nhdiv
-     newnzcorn = self.ncol * self.nrow * (newnlay + 1) * 4
-     newnactnum = self.ncol * self.nrow * newnlay
 
-     # initialize
-     hyb_zcornsv = np.zeros(newnzcorn, dtype=np.float64)
-     hyb_actnumsv = np.zeros(newnactnum, dtype=np.int32)
-
-     if region is None:
-         region_number = -1
-         rvalues = np.ones(1, dtype=np.int32)
-     else:
-         rvalues = np.ma.filled(region.values, fill_value=UNDEF_INT)
-         rvalues = rvalues.ravel()
-
-     _cxtgeo.grd3d_convert_hybrid(
-         self.ncol,
-         self.nrow,
-         self.nlay,
-         self._coordsv,
-         self._zcornsv,
-         self._actnumsv,
-         newnlay,
-         hyb_zcornsv,
-         hyb_actnumsv,
+     grid3d_cpp = _internal.grid3d.Grid(self)
+     region_array = (
+         region.values.astype(np.int32)
+         if region
+         else np.empty((0, 0, 0), dtype=np.int32)
+     )
+     hyb_zcornsv, hyb_actnumsv = grid3d_cpp.convert_to_hybrid_grid(
          toplevel,
          bottomlevel,
          nhdiv,
-         rvalues,
-         region_number,
+         region_array,
+         int(region_number) if region_number is not None else -1,
      )
 
      # when a hybridgrid is made, the current subgrid settings lose relevance, hence
      # it is forced set to None
      self.subgrids = None
 
+     # update the grid in place
      self._nlay = newnlay
      self._zcornsv = hyb_zcornsv
-     self._actnumsv = hyb_actnumsv
+     self._actnumsv = hyb_actnumsv.astype(np.int32)
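In the new call, "no region" is encoded as an empty (0, 0, 0) int32 array together with a region number of -1, instead of the old sentinel-filled rvalues array. A small sketch of that calling convention; the function below is an illustrative stand-in, not the actual C++ binding:

    import numpy as np

    def convert_with_optional_region(region_array: np.ndarray, region_number: int) -> str:
        # Illustrative: an empty array / negative number means "no region filter"
        if region_array.size == 0 or region_number < 0:
            return "hybrid conversion applied to the whole grid"
        return f"hybrid conversion limited to region {region_number}"

    no_region = np.empty((0, 0, 0), dtype=np.int32)
    one_region = np.ones((2, 2, 2), dtype=np.int32)

    print(convert_with_optional_region(no_region, -1))
    print(convert_with_optional_region(one_region, 1))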