xtgeo 4.12.0__cp313-cp313-macosx_11_0_arm64.whl → 4.13.0__cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Binary file
Binary file
xtgeo/common/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
 
- __version__ = version = '4.12.0'
- __version_tuple__ = version_tuple = (4, 12, 0)
+ __version__ = version = '4.13.0'
+ __version_tuple__ = version_tuple = (4, 13, 0)
 
- __commit_id__ = commit_id = 'g4f136d221'
+ __commit_id__ = commit_id = 'g766805bb7'
@@ -7,7 +7,6 @@ from dataclasses import dataclass, field
  from typing import TYPE_CHECKING, Final
 
  import numpy as np
- from scipy.interpolate import make_interp_spline
 
  import xtgeo._internal as _internal # type: ignore
  from xtgeo.common.log import null_logger
@@ -42,7 +41,13 @@ SUM_ATTRS: Final = [
 
  @dataclass
  class CubeAttrs:
-     """Internal class for computing attributes in window between two surfaces."""
+     """Internal class for computing attributes in window between two surfaces.
+
+     Compared with the former implementation (mid September 2025), more logic is moved
+     to the C++ routine, ensuring:
+     - Significantly smaller memory overhead (e.g. 0.1 GB vs 20 GB)
+     - Much faster execution, in particularly when using multiple processers. (5-10 x)
+     """
 
      cube: Cube
      upper_surface: RegularSurface | float | int
@@ -55,6 +60,8 @@ class CubeAttrs:
      _template_surface: RegularSurface | None = None
      _depth_array: np.ndarray | None = None
      _outside_depth: float | None = None # detected and updated from the depth cube
+     _min_indices: int = 0 # minimum Z index for cube slicing
+     _max_indices: int = 0 # maximum Z index for cube slicing
      _reduced_cube: Cube = None
      _reduced_depth_array: np.ndarray | None = None
      _refined_cube: Cube | None = None
@@ -70,9 +77,7 @@ class CubeAttrs:
      def __post_init__(self) -> None:
          self._process_upper_lower_surface()
          self._create_depth_array()
-         self._create_reduced_cube()
-         self._refine_interpolate()
-         self._depth_mask()
+         self._determine_slice_indices()
          self._compute_statistical_attribute_surfaces()
 
      def result(self) -> dict[RegularSurface]:
@@ -84,6 +89,8 @@ class CubeAttrs:
 
          from xtgeo import surface_from_cube # avoid circular import by having this here
 
+         logger.debug("Process upper and lower surface...")
+
          upper = (
              surface_from_cube(self.cube, self.upper_surface)
              if isinstance(self.upper_surface, (float, int))
@@ -132,6 +139,7 @@
                  "The minimum thickness is too large, no valid data in the interval. "
                  "Perhaps surfaces are overlapping?"
              )
+         logger.debug("Process upper and lower surface... done")
 
      def _create_depth_array(self) -> None:
          """Create a 1D array where values are cube depths; to be used as filter.
@@ -142,6 +150,7 @@
          Will also issue warnings or errors if the surfaces are outside the cube,
          depending on severity.
          """
+         logger.debug("Create depth array...")
 
          self._depth_array = np.array(
              [
@@ -175,18 +184,16 @@
              self._outside_depth,
              self._depth_array,
          )
+         logger.debug("Create depth array... done")
 
-     def _create_reduced_cube(self) -> None:
-         """Create a smaller cube based on the depth cube filter.
+     def _determine_slice_indices(self) -> None:
+         """Create parameters for cube slicing.
 
          The purpose is to limit the computation to the relevant volume, to save
          CPU time. I.e. cube values above the upper surface and below the lower are
          now excluded.
          """
-         from xtgeo import Cube # avoid circular import by having this here
-
-         cubev = self.cube.values.copy() # copy, so we don't change the input instance
-         cubev[self.cube.traceidcodes == 2] = 0.0 # set traceidcode 2 to zero
+         logger.debug("Determine cube slice indices...")
 
          # Create a boolean mask based on the threshold
          mask = self._depth_array < self._outside_depth
@@ -200,124 +207,21 @@
                  "outside the cube?"
              )
 
-         min_indices = np.min(non_zero_indices)
-         max_indices = np.max(non_zero_indices) + 1 # Add 1 to include the upper bound
-
-         # Extract the reduced cube using slicing
-         reduced = cubev[:, :, min_indices:max_indices]
-
-         zori = float(self._depth_array.min())
-
-         self._reduced_cube = Cube(
-             ncol=reduced.shape[0],
-             nrow=reduced.shape[1],
-             nlay=reduced.shape[2],
-             xinc=self.cube.xinc,
-             yinc=self.cube.yinc,
-             zinc=self.cube.zinc,
-             xori=self.cube.xori,
-             yori=self.cube.yori,
-             zori=zori,
-             rotation=self.cube.rotation,
-             yflip=self.cube.yflip,
-             values=reduced.astype(np.float32),
-         )
-
-         self._reduced_depth_array = self._depth_array[min_indices:max_indices]
-
-         logger.debug("Reduced cubes created %s", self._reduced_cube.values.shape)
-
-     def _refine_interpolate(self) -> None:
-         """Apply reduced cubes and interpolate to a finer grid vertically.
+         self._min_indices = int(np.min(non_zero_indices))
+         # Add 1 to include the upper bound
+         self._max_indices = int(np.max(non_zero_indices) + 1)
 
-         This is done to get a more accurate representation of the cube values.
-         """
-         from xtgeo import Cube
-
-         logger.debug("Refine cubes and interpolate...")
-         arr = self._reduced_cube.values
-         ndiv = self.ndiv
-
-         # Create linear interpolation function along the last axis
-         fdepth = make_interp_spline(
-             np.arange(arr.shape[2]),
-             self._reduced_depth_array,
-             axis=0,
-             k=1,
-         )
-
-         # Create interpolation function along the last axis
-         if self.interpolation not in ["cubic", "linear"]:
-             raise ValueError("Interpolation must be either 'cubic' or 'linear'")
-
-         fcube = make_interp_spline(
-             np.arange(arr.shape[2]),
-             arr,
-             axis=2,
-             k=3 if self.interpolation == "cubic" else 1,
+         logger.debug("Determine cube slice indices... done")
+         logger.debug(
+             "Cube slice indices: %d to %d", self._min_indices, self._max_indices
          )
-         # Define new sampling points along the last axis
-         new_z = np.linspace(0, arr.shape[2] - 1, arr.shape[2] * ndiv)
-
-         # Resample the cube array
-         resampled_arr = fcube(new_z)
-
-         # Resample the depth array (always linear)
-         self._refined_depth_array = new_depth = fdepth(new_z)
-         new_zinc = (new_depth.max() - new_depth.min()) / (new_depth.shape[0] - 1)
-
-         self._refined_cube = Cube(
-             ncol=resampled_arr.shape[0],
-             nrow=resampled_arr.shape[1],
-             nlay=resampled_arr.shape[2],
-             xinc=self.cube.xinc,
-             yinc=self.cube.yinc,
-             zinc=new_zinc,
-             xori=self.cube.xori,
-             yori=self.cube.yori,
-             zori=self._refined_depth_array.min(),
-             rotation=self._reduced_cube.rotation,
-             yflip=self._reduced_cube.yflip,
-             values=resampled_arr.astype(np.float32),
-         )
-
-     def _depth_mask(self) -> None:
-         """Set nan values outside the interval defined by the upper + lower surface.
-
-         In addition, set nan values where the thickness is less than the minimum.
-
-         """
-
-         darry = np.expand_dims(self._refined_depth_array, axis=(0, 1))
-         upper_exp = np.expand_dims(self._upper.values, 2)
-         lower_exp = np.expand_dims(self._lower.values, 2)
-         mask_2d_exp = np.expand_dims(self._min_thickness_mask.values, 2)
-
-         self._refined_cube.values = np.where(
-             (darry < upper_exp) | (darry > lower_exp) | (mask_2d_exp == 0),
-             np.nan,
-             self._refined_cube.values,
-         ).astype(np.float32)
-
-         # similar for reduced cubes with original resolution
-         darry = np.expand_dims(self._reduced_depth_array, axis=(0, 1))
-
-         self._reduced_cube.values = np.where(
-             (darry < upper_exp) | (darry > lower_exp) | (mask_2d_exp == 0),
-             np.nan,
-             self._reduced_cube.values,
-         ).astype(np.float32)
 
      def _add_to_attribute_map(self, attr_name: str, values: np.ndarray) -> None:
          """Compute the attribute map and add to result dictionary."""
+         logger.debug("Add to attribute map...")
          attr_map = self._upper.copy()
          attr_map.values = np.ma.masked_invalid(values)
 
-         # apply mask for the cube's dead traces (traceidcode 2)
-         attr_map.values = np.ma.masked_where(
-             self.cube.traceidcodes == 2, attr_map.values
-         )
-
          # now resample to the original input map
          attr_map_resampled = self._template_surface.copy()
          attr_map_resampled.resample(attr_map)
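
Note: the new _determine_slice_indices replaces the three Python-side steps removed above (reduce, refine/interpolate, depth-mask) with two integer Z bounds that are later handed to the C++ routine, which then does the slicing, refinement and masking itself. A minimal numpy sketch of how such Z-slice bounds fall out of a depth filter (array values here are purely illustrative):

    import numpy as np

    # 1D array of cube sample depths; "outside" samples are flagged by a large value
    outside_depth = 9999.0
    depth_array = np.array([9999.0, 9999.0, 1500.0, 1520.0, 1540.0, 9999.0])

    # Boolean mask of samples inside the zone of interest
    mask = depth_array < outside_depth
    non_zero_indices = np.nonzero(mask)[0]

    min_index = int(np.min(non_zero_indices))
    max_index = int(np.max(non_zero_indices) + 1)  # +1 to include the upper bound
    print(min_index, max_index)  # 2 5

    # Only this Z range needs to be touched when computing attributes
    cube_values = np.random.rand(3, 4, depth_array.size).astype(np.float32)
    relevant = cube_values[:, :, min_index:max_index]
    print(relevant.shape)  # (3, 4, 3)
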
@@ -329,21 +233,41 @@
          )
 
          self._result_attr_maps[attr_name] = attr_map_resampled
+         logger.debug("Add to attribute map... done")
 
      def _compute_statistical_attribute_surfaces(self) -> None:
          """Compute stats very fast by using internal C++ bindings."""
-
-         # compute statistics for vertically refined cube
-         cubecpp = _internal.cube.Cube(self._refined_cube)
-         all_attrs = cubecpp.cube_stats_along_z()
+         logger.debug("Compute statistical attribute surfaces...")
+
+         # compute statistics for vertically refined cube using original cube
+         cubecpp = _internal.cube.Cube(self.cube)
+         all_attrs = cubecpp.cube_stats_along_z(
+             self._upper.values,
+             self._lower.values,
+             self._depth_array, # use original depth array
+             self.ndiv,
+             self.interpolation,
+             self.minimum_thickness,
+             self._min_indices, # pass slice indices
+             self._max_indices,
+         )
 
          for attr in STAT_ATTRS:
              self._add_to_attribute_map(attr, all_attrs[attr])
 
-         # compute statistics for reduced cube (for sum attributes)
-         cubecpp = _internal.cube.Cube(self._reduced_cube)
-         all_attrs = cubecpp.cube_stats_along_z()
+         # compute statistics with ndiv=1 (for sum attributes)
+         all_attrs = cubecpp.cube_stats_along_z(
+             self._upper.values,
+             self._lower.values,
+             self._depth_array, # use original depth array
+             1,
+             self.interpolation,
+             self.minimum_thickness,
+             self._min_indices, # pass slice indices
+             self._max_indices,
+         )
 
-         # add sum attributes which are the last 3 in the list
          for attr in SUM_ATTRS:
              self._add_to_attribute_map(attr, all_attrs[attr])
+
+         logger.debug("Compute statistical attribute surfaces... done")
@@ -1,13 +1,16 @@
  from __future__ import annotations
 
- import warnings
  from typing import Any, Literal, cast
 
  import numpy as np
  import numpy.typing as npt
 
+ from xtgeo.common.log import null_logger
+
  from ._ecl_output_file import Phases, Simulator, TypeOfGrid, UnitSystem
 
+ _logger = null_logger(__name__)
+
 
  class InteHead:
      """Contains the values for the INTEHEAD array in ecl restart
@@ -134,7 +137,8 @@ class InteHead:
          try:
              return Simulator(s_code)
          except ValueError:
-             warnings.warn(f"Unknown simulator code {s_code}")
+             # changed from a UserWarning to a logging message
+             _logger.warning("Unknown simulator code %s", s_code)
              return s_code
 
      @property
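
Note: the warnings-to-logging switch above follows the common pattern of routing a non-fatal condition through a module-level logger instead of emitting a UserWarning, so callers opt in via their logging configuration. A minimal stdlib sketch of the same pattern (the null_logger stand-in below is illustrative; xtgeo's own xtgeo.common.log.null_logger presumably wraps logging in a similar way):

    import logging

    # Illustrative stand-in: a module logger that stays silent unless
    # the application configures logging handlers.
    def null_logger(name: str) -> logging.Logger:
        logger = logging.getLogger(name)
        logger.addHandler(logging.NullHandler())
        return logger

    _logger = null_logger(__name__)

    def simulator_from_code(s_code: int):
        # Before: warnings.warn(f"Unknown simulator code {s_code}")
        # After: report through the logger instead of raising a UserWarning.
        _logger.warning("Unknown simulator code %s", s_code)
        return s_code
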
@@ -193,7 +193,8 @@ def get_dz(
      if not flip:
          result *= -1
 
-     result = np.ma.masked_array(result, self._actnumsv == 0 if asmasked else False)
+     if asmasked:
+         result = np.ma.masked_array(result, self._actnumsv == 0)
 
      return GridProperty(
          ncol=self._ncol,
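
Note: the rewritten asmasked handling above sidesteps a numpy masked-array subtlety: np.ma.masked_array(values, False) still returns a MaskedArray instance (just with nothing masked), which is why the updated docstrings and the estimate_design change below note that asmasked=False can still hand back a MaskedArray that later needs np.ma.filled. A small sketch of the underlying numpy behaviour (values are illustrative):

    import numpy as np

    data = np.array([1.0, 2.0, 3.0])
    actnum = np.array([1, 0, 1])

    # Passing mask=False still produces a MaskedArray instance (nothing masked)
    always_masked = np.ma.masked_array(data, False)
    print(isinstance(always_masked, np.ma.MaskedArray))  # True

    # Wrapping only when masking is requested keeps a plain ndarray at this point
    asmasked = False
    result = np.ma.masked_array(data, actnum == 0) if asmasked else data
    print(isinstance(result, np.ma.MaskedArray))  # False

    # np.ma.filled() is the way back to an ordinary ndarray
    plain = np.ma.filled(always_masked, fill_value=0.0)
    print(isinstance(plain, np.ma.MaskedArray))  # False
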
@@ -1399,7 +1400,10 @@ def estimate_design(
      """Estimate (guess) (sub)grid design by examing DZ in median thickness column."""
      actv = self.get_actnum().values
 
-     dzv = self.get_dz(asmasked=False).values
+     dzv_raw = self.get_dz(asmasked=False).values
+     # Although asmasked is False the array values will still be a masked numpy
+     # Need to convert to an ordinary numpy to avoid warnings later
+     dzv = np.ma.filled(dzv_raw, fill_value=0.0)
 
      # treat inactive thicknesses as zero
      dzv[actv == 0] = 0.0
@@ -1416,6 +1420,11 @@
      # find cumulative thickness as a 2D array
      dzcum: np.ndarray = np.sum(dzv, axis=2, keepdims=False)
 
+     # Ensure dzcum is a regular numpy array to avoid warnings
+     if isinstance(dzcum, np.ma.MaskedArray):
+         dzcum = np.ma.filled(dzcum, fill_value=0.0)
+     dzcum = np.asarray(dzcum)
+
      # find the average thickness for nonzero thicknesses
      dzcum2 = dzcum.copy()
      dzcum2[dzcum == 0.0] = np.nan
@@ -4,9 +4,8 @@ from typing import TYPE_CHECKING
 
  import numpy as np
 
- from xtgeo import _cxtgeo
+ import xtgeo._internal as _internal # type: ignore
  from xtgeo.common import null_logger
- from xtgeo.common.constants import UNDEF_INT
 
  logger = null_logger(__name__)
 
@@ -23,44 +22,29 @@ def make_hybridgrid(
      region_number: int | None = None,
  ) -> None:
      """Make hybrid grid."""
-     self._set_xtgformat1()
+     self._set_xtgformat2()
 
      newnlay = self.nlay * 2 + nhdiv
-     newnzcorn = self.ncol * self.nrow * (newnlay + 1) * 4
-     newnactnum = self.ncol * self.nrow * newnlay
 
-     # initialize
-     hyb_zcornsv = np.zeros(newnzcorn, dtype=np.float64)
-     hyb_actnumsv = np.zeros(newnactnum, dtype=np.int32)
-
-     if region is None:
-         region_number = -1
-         rvalues = np.ones(1, dtype=np.int32)
-     else:
-         rvalues = np.ma.filled(region.values, fill_value=UNDEF_INT)
-         rvalues = rvalues.ravel()
-
-     _cxtgeo.grd3d_convert_hybrid(
-         self.ncol,
-         self.nrow,
-         self.nlay,
-         self._coordsv,
-         self._zcornsv,
-         self._actnumsv,
-         newnlay,
-         hyb_zcornsv,
-         hyb_actnumsv,
+     grid3d_cpp = _internal.grid3d.Grid(self)
+     region_array = (
+         region.values.astype(np.int32)
+         if region
+         else np.empty((0, 0, 0), dtype=np.int32)
+     )
+     hyb_zcornsv, hyb_actnumsv = grid3d_cpp.convert_to_hybrid_grid(
          toplevel,
          bottomlevel,
          nhdiv,
-         rvalues,
-         region_number,
+         region_array,
+         int(region_number) if region_number is not None else -1,
      )
 
      # when a hybridgrid is made, the current subrid settings lose relevance, hence
      # it is forced set to None
      self.subgrids = None
 
+     # update the grid in place
      self._nlay = newnlay
      self._zcornsv = hyb_zcornsv
-     self._actnumsv = hyb_actnumsv
+     self._actnumsv = hyb_actnumsv.astype(np.int32)
@@ -243,7 +243,7 @@ def refine_vertically(
      # update instance:
      self._nlay = newnlay
      self._zcornsv = ref_zcornsv
-     self._actnumsv = ref_actnumsv
+     self._actnumsv = ref_actnumsv.astype(np.int32)
 
      if self.subgrids is None or len(self.subgrids) <= 1:
          self.subgrids = None
@@ -150,12 +150,18 @@ def _validate_dtype_in_roxar(
      )
 
      try:
+         # for mypy
+         min_val: int | float
+         max_val: int | float
+
          if np.issubdtype(dtype, np.integer):
              # Integer type
-             min_val, max_val = np.iinfo(dtype).min, np.iinfo(dtype).max
+             min_val = int(np.iinfo(dtype).min)
+             max_val = int(np.iinfo(dtype).max)
          elif np.issubdtype(dtype, np.floating):
              # Float type
-             min_val, max_val = np.finfo(dtype).min, np.finfo(dtype).max
+             min_val = float(np.finfo(dtype).min)
+             max_val = float(np.finfo(dtype).max)
          else:
              # Unknown type
              raise RuntimeError("Probable bug, values array not integer or float")
xtgeo/grid3d/grid.py CHANGED
@@ -1818,7 +1818,9 @@ class Grid(_Grid3D):
              name (str): name of property
              flip (bool): Use False for Petrel grids were Z is negative down
                  (experimental)
-             asmasked (bool): True if only for active cells, False for all cells
+             asmasked (bool): True if only for active cells, False for all cells.
+                 With `False` the inactive cells are included, but the numpy
+                 array is still a MaskedArray instance.
              metric (str): One of the following metrics:
                  * "euclid": sqrt(dx^2 + dy^2 + dz^2)
                  * "horizontal": sqrt(dx^2 + dy^2)
@@ -1854,7 +1856,8 @@
          Args:
              name (str): names of properties
              asmasked (bool). If True, make a np.ma array where inactive cells
-                 are masked.
+                 are masked. Otherwise the inactive cells are included, but the numpy
+                 array is still a MaskedArray instance.
              metric (str): One of the following metrics:
                  * "euclid": sqrt(dx^2 + dy^2 + dz^2)
                  * "horizontal": sqrt(dx^2 + dy^2)
@@ -1884,7 +1887,8 @@
          Args:
              name (str): names of properties
              asmasked (bool). If True, make a np.ma array where inactive cells
-                 are masked.
+                 are masked. Otherwise the inactive cells are included, but the numpy
+                 array is still a MaskedArray instance.
              metric (str): One of the following metrics:
                  * "euclid": sqrt(dx^2 + dy^2 + dz^2)
                  * "horizontal": sqrt(dx^2 + dy^2)
@@ -2556,11 +2560,13 @@
          :align: center
 
          Args:
-             nhdiv (int): Number of hybrid layers.
-             toplevel (float): Top of hybrid grid.
-             bottomlevel (float): Base of hybrid grid.
-             region (GridProperty, optional): Region property (if needed).
-             region_number (int): Which region to apply hybrid grid in if region.
+             nhdiv: Number of hybrid layers.
+             toplevel: Top of hybrid grid.
+             bottomlevel: Base of hybrid grid.
+             region: Region property (if needed). Note that the region will only be
+                 applied in a lateral sense: i.e. if a column is in the region, then
+                 the full column will be converted to hybrid.
+             region_number: Which region to apply hybrid grid in if region.
 
          Example:
              Create a hybridgrid from file, based on a GRDECL file (no region)::
@@ -2577,6 +2583,7 @@
          .. _usage in the Troll field: https://doi.org/10.2118/148023-MS
 
          """
+
          _grid_hybrid.make_hybridgrid(
              self,
              nhdiv=nhdiv,
xtgeo/lib/libfmt.a CHANGED
Binary file
@@ -1,5 +1,5 @@
- prefix=/var/folders/sw/lqy3v8g55m76fhwckg439jjm0000gn/T/tmpn62oi5uc/wheel/platlib/xtgeo
- exec_prefix=/var/folders/sw/lqy3v8g55m76fhwckg439jjm0000gn/T/tmpn62oi5uc/wheel/platlib/xtgeo
+ prefix=/var/folders/q0/wmf37v850txck86cpnvwm_zw0000gn/T/tmph927ysk1/wheel/platlib/xtgeo
+ exec_prefix=/var/folders/q0/wmf37v850txck86cpnvwm_zw0000gn/T/tmph927ysk1/wheel/platlib/xtgeo
  libdir=${exec_prefix}/lib
  includedir=${prefix}/include
 
@@ -448,99 +448,6 @@ def _get_randomline_fence(self, fencespec, hincrement, atleast, nextend):
      return fspec
 
 
- def operation_polygons(self, poly, value, opname="add", inside=True):
-     """Operations restricted to polygons"""
-
-     # keep this for a while (e.g. mid 2024), and then replace it with _v2 below.
-     if not isinstance(poly, Polygons):
-         raise ValueError("The poly input is not a Polygons instance")
-     if opname not in VALID_OPER_POLYS:
-         raise ValueError(f"Operation key opname has invalid value: {opname}")
-
-     # make a copy of the RegularSurface which is used a "filter" or "proxy"
-     # value will be 1 inside polygons, 0 outside. Undef cells are kept as is
-
-     proxy = self.copy()
-     proxy.values *= 0.0
-     vals = proxy.get_values1d(fill_value=UNDEF)
-
-     # value could be a scalar or another surface; if another surface,
-     # must ensure same topology
-
-     if isinstance(value, type(self)):
-         if not self.compare_topology(value):
-             raise ValueError("Input is RegularSurface, but not same map topology")
-         value = value.values.copy()
-     else:
-         # turn scalar value into numpy array
-         value = self.values.copy() * 0 + value
-
-     idgroups = poly.get_dataframe(copy=False).groupby(poly.pname)
-
-     for _, grp in idgroups:
-         xcor = grp[poly.xname].values
-         ycor = grp[poly.yname].values
-
-         ier = _cxtgeo.surf_setval_poly(
-             proxy.xori,
-             proxy.xinc,
-             proxy.yori,
-             proxy.yinc,
-             proxy.ncol,
-             proxy.nrow,
-             proxy.yflip,
-             proxy.rotation,
-             vals,
-             xcor,
-             ycor,
-             1.0,
-             0,
-         )
-         if ier == -9:
-             xtg.warn("Polygon is not closed")
-
-     proxy.set_values1d(vals)
-     proxyv = proxy.values.astype(np.int8)
-
-     proxytarget = 1
-     if not inside:
-         proxytarget = 0
-
-     tmp = None
-     if opname == "add":
-         tmp = self.values.copy() + value
-     elif opname == "sub":
-         tmp = self.values.copy() - value
-     elif opname == "mul":
-         tmp = self.values.copy() * value
-     elif opname == "div":
-         # Dividing a map of zero is always a hazzle; try to obtain 0.0
-         # as result in these cases
-         if 0.0 in value:
-             xtg.warn(
-                 "Dividing a surface with value=0.0 or surface with zero "
-                 "elements; may get unexpected results, try to "
-                 "achieve zero values as result!"
-             )
-         with np.errstate(divide="ignore", invalid="ignore"):
-             this = ma.filled(self.values, fill_value=1.0)
-             that = ma.filled(value, fill_value=1.0)
-             mask = ma.getmaskarray(self.values)
-             tmp = np.true_divide(this, that)
-             tmp = np.where(np.isinf(tmp), 0, tmp)
-             tmp = np.nan_to_num(tmp)
-             tmp = ma.array(tmp, mask=mask)
-
-     elif opname == "set":
-         tmp = value
-     elif opname == "eli":
-         tmp = value * 0 + UNDEF
-         tmp = ma.masked_greater(tmp, UNDEF_LIMIT)
-
-     self.values[proxyv == proxytarget] = tmp[proxyv == proxytarget]
-     del tmp
-
-
  def _proxy_map_polygons(surf, poly, inside=True):
      """Return a proxy map where on one to do operations, as 0 and 1."""
      inside_value = 1 if inside else 0
@@ -574,7 +481,7 @@ def _proxy_map_polygons(surf, poly, inside=True):
      return proxy
 
 
- def operation_polygons_v2(self, poly, value: float | Any, opname="add", inside=True):
+ def operation_polygons(self, poly, value: float | Any, opname="add", inside=True):
      """Operations restricted to polygons, using matplotlib (much faster).
 
      The 'value' can be a number or another regular surface (with same design)
@@ -1883,7 +1883,7 @@ class RegularSurface:
      # Operations restricted to inside/outside polygons
      # ==================================================================================
 
-     def operation_polygons(self, poly, value, opname="add", inside=True, _version=2):
+     def operation_polygons(self, poly, value, opname="add", inside=True):
          """A generic function for map operations inside or outside polygon(s).
 
          Args:
@@ -1895,14 +1895,9 @@
              on polygons (this key will be removed in later versions and shall not
              be applied)
          """
-         if _version == 2:
-             _regsurf_oper.operation_polygons_v2(
-                 self, poly, value, opname=opname, inside=inside
-             )
-         else:
-             _regsurf_oper.operation_polygons(
-                 self, poly, value, opname=opname, inside=inside
-             )
+         _regsurf_oper.operation_polygons(
+             self, poly, value, opname=opname, inside=inside
+         )
 
      # shortforms
      def add_inside(self, poly, value):
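
Note: with the legacy C-based operation_polygons removed and the matplotlib-based _v2 variant promoted to the only implementation, the public RegularSurface API is unchanged; callers keep using operation_polygons or the shortform helpers such as add_inside shown above. A rough usage sketch (file names are placeholders, and xtgeo.surface_from_file / xtgeo.polygons_from_file are assumed to be the usual readers):

    import xtgeo

    # Placeholder input files
    surf = xtgeo.surface_from_file("topreek.gri")
    poly = xtgeo.polygons_from_file("closure.pol")

    # Generic form: add 100 to all map nodes inside the polygons
    surf.operation_polygons(poly, 100, opname="add", inside=True)

    # Shortform equivalent referenced in the diff
    surf.add_inside(poly, 100)
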