fmu-pem 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fmu/pem/__main__.py +32 -16
- fmu/pem/forward_models/pem_model.py +19 -27
- fmu/pem/pem_functions/__init__.py +2 -2
- fmu/pem/pem_functions/density.py +32 -38
- fmu/pem/pem_functions/effective_pressure.py +153 -49
- fmu/pem/pem_functions/estimate_saturated_rock.py +244 -52
- fmu/pem/pem_functions/fluid_properties.py +447 -245
- fmu/pem/pem_functions/mineral_properties.py +77 -74
- fmu/pem/pem_functions/pressure_sensitivity.py +430 -0
- fmu/pem/pem_functions/regression_models.py +129 -97
- fmu/pem/pem_functions/run_friable_model.py +106 -37
- fmu/pem/pem_functions/run_patchy_cement_model.py +107 -45
- fmu/pem/pem_functions/{run_t_matrix_and_pressure.py → run_t_matrix_model.py} +48 -27
- fmu/pem/pem_utilities/__init__.py +31 -9
- fmu/pem/pem_utilities/cumsum_properties.py +29 -37
- fmu/pem/pem_utilities/delta_cumsum_time.py +8 -13
- fmu/pem/pem_utilities/enum_defs.py +65 -8
- fmu/pem/pem_utilities/export_routines.py +84 -72
- fmu/pem/pem_utilities/fipnum_pvtnum_utilities.py +217 -0
- fmu/pem/pem_utilities/import_config.py +64 -46
- fmu/pem/pem_utilities/import_routines.py +57 -69
- fmu/pem/pem_utilities/pem_class_definitions.py +81 -23
- fmu/pem/pem_utilities/pem_config_validation.py +331 -139
- fmu/pem/pem_utilities/rpm_models.py +473 -100
- fmu/pem/pem_utilities/update_grid.py +3 -2
- fmu/pem/pem_utilities/utils.py +90 -38
- fmu/pem/run_pem.py +70 -39
- fmu/pem/version.py +16 -3
- {fmu_pem-0.0.2.dist-info → fmu_pem-0.0.3.dist-info}/METADATA +18 -11
- fmu_pem-0.0.3.dist-info/RECORD +39 -0
- fmu_pem-0.0.2.dist-info/RECORD +0 -37
- {fmu_pem-0.0.2.dist-info → fmu_pem-0.0.3.dist-info}/WHEEL +0 -0
- {fmu_pem-0.0.2.dist-info → fmu_pem-0.0.3.dist-info}/entry_points.txt +0 -0
- {fmu_pem-0.0.2.dist-info → fmu_pem-0.0.3.dist-info}/licenses/LICENSE +0 -0
- {fmu_pem-0.0.2.dist-info → fmu_pem-0.0.3.dist-info}/top_level.txt +0 -0
--- fmu/pem/pem_utilities/enum_defs.py (0.0.2)
+++ fmu/pem/pem_utilities/enum_defs.py (0.0.3)
@@ -3,6 +3,7 @@ Define enumerated strings
 """
 
 from enum import Enum
+from typing import Literal
 
 
 class OverburdenPressureTypes(str, Enum):
@@ -49,22 +50,78 @@ class RPMType(str, Enum):
     REGRESSION = "regression"
 
 
-class VolumeFractions(str, Enum):
-    NTG_SIM = "ntg_sim"
-    VOL_FRAC = "fraction_files"
-
-
 class GasModels(str, Enum):
     GLOBAL = "Global"
     LIGHT = "Light"
     HC2016 = "HC2016"
 
 
-class CoordinationNumberFunction(str, Enum):
-    PORBASED = "PorBased"
-    CONSTANT = "ConstVal"
+# class CoordinationNumberFunction(str, Enum):
+#     PORBASED = "PorBased"
+#     CONSTANT = "ConstVal"
+CoordinationNumberFunction = Literal["PorBased", "ConstVal"]
 
 
 class TemperatureMethod(str, Enum):
     CONSTANT = "constant"
     FROMSIM = "from_sim"
+
+
+class DifferenceMethod(str, Enum):
+    DIFF = "diff"
+    DIFFPERCENT = "diffpercent"
+    RATIO = "ratio"
+
+
+class DifferenceAttribute(str, Enum):
+    AI = "ai"
+    VPVS = "vpvs"
+    SI = "si"
+    VP = "vp"
+    VS = "vs"
+    DENS = "dens"
+    TWT = "twt"
+    SGAS = "sgas"
+    SWAT = "swat"
+    SOIL = "soil"
+    RS = "rs"
+    RV = "rv"
+    PRESSURE = "pressure"
+    SALT = "salt"
+    TEMP = "temp"
+    TWTPP = "twtpp"
+    TWTSS = "twtss"
+    TWTPS = "twtps"
+    FORMATION_PRESSURE = "formation_pressure"
+    EFFECTIVE_PRESSURE = "effective_pressure"
+    OVERBURDEN_PRESSURE = "overburden_pressure"
+
+
+class RegressionPressureModelTypes(str, Enum):
+    EXPONENTIAL = "exponential"
+    POLYNOMIAL = "polynomial"
+
+
+class PhysicsPressureModelTypes(str, Enum):
+    FRIABLE = "friable"
+    PATCHY_CEMENT = "patchy_cement"
+
+
+class RegressionPressureParameterTypes(str, Enum):
+    VP_VS = "vp_vs"
+    K_MU = "k_mu"
+
+
+class ParameterTypes(str, Enum):
+    VP = "vp"
+    VS = "vs"
+    K = "k"
+    MU = "mu"
+    RHO = "rho"
+    POROSITY = "poro"
+
+
+class Sim2SeisRequiredParams(str, Enum):
+    VP = "vp"
+    VS = "vs"
+    DENSITY = "density"
--- fmu/pem/pem_utilities/export_routines.py (0.0.2)
+++ fmu/pem/pem_utilities/export_routines.py (0.0.3)
@@ -1,42 +1,57 @@
 import warnings
 from dataclasses import asdict
 from pathlib import Path
-from typing import List, Union
 
+import numpy as np
 import xtgeo
 
+from .enum_defs import Sim2SeisRequiredParams
 from .pem_class_definitions import (
+    DryRockProperties,
     EffectiveFluidProperties,
-
+    EffectiveMineralProperties,
     PressureProperties,
     SaturatedRockProperties,
 )
-from .pem_config_validation import FromGlobal, PemConfig
 from .utils import _verify_export_inputs, restore_dir
 
 
 def save_results(
-
+    config_dir: Path,
     run_from_rms_flag: bool,
-    config_settings: PemConfig,
     rms_project: object,
     sim_grid: xtgeo.grid3d.Grid,
+    grid_name: str,
+    seis_dates: list[str],
+    save_to_rms: bool,
+    save_to_disk: bool,
+    save_intermediate: bool,
+    mandatory_path: Path,
+    pem_output_path: Path,
     eff_pres_props: list[PressureProperties],
     sat_rock_props: list[SaturatedRockProperties],
-    difference_props:
-    difference_date_strs:
-    matrix_props:
+    difference_props: list[dict],
+    difference_date_strs: list[str],
+    matrix_props: EffectiveMineralProperties,
     fluid_props: list[EffectiveFluidProperties],
+    bubble_point_grids: list[dict[str, np.ma.MaskedArray]],
+    dry_rock_props: list[DryRockProperties],
 ) -> None:
     """Saves all intermediate and final results according to the settings in the PEM
     and global config files
 
     Args:
-
+        config_dir: initial directory setting
        run_from_rms_flag: call to PEM from RMS
-        config_settings: PEM and global settings
        rms_project: RMS project
        sim_grid: grid definition
+        grid_name: stem of output grid name
+        seis_dates: list of dates for simulation runs
+        save_to_rms: save results to RMS project
+        save_to_disk: save non-mandatory results to disk
+        save_intermediate: save intermediate calculations to disk
+        mandatory_path: path for mandatory output
+        pem_output_path: path for non-mandatory PEM output
        eff_pres_props: effective, overburden and formation pressure per time step
        sat_rock_props: elastic properties of saturated rock
        difference_props: differences in elastic properties between selected restart
@@ -49,61 +64,51 @@ def save_results(
         None, warning or KeyError
     """
     # Saving results:
-    # 1. Mandatory part: Save Vp, Vs, Density to disk for seismic forward modelling.
-    # Use FMU standard term "DENS" for density
-
-    # mypy needs an assert in the same function as the usage - it did not pick up a
-    # verification in a separate function without it, mypy reports errors, assuming
-    # global_params is None
 
-
-
-
-        config_settings.paths.rel_path_mandatory_output
-    )
-    output_path = start_dir.joinpath(config_settings.paths.rel_path_output)
+    # 1. Mandatory part: Save Vp, Vs, Density to disk for seismic forward modelling.
+    full_mandatory_path = config_dir / mandatory_path
+    full_output_path = config_dir / pem_output_path
     output_set = [
         {
             k: v
-            for (k, v) in asdict(sat_prop).items()
-            if k
+            for (k, v) in asdict(sat_prop).items()
+            if k in list(Sim2SeisRequiredParams)
         }
         for sat_prop in sat_rock_props
     ]
     export_results_disk(
-        output_set,
-        sim_grid,
-
-
-        time_steps=
+        result_props=output_set,
+        grid=sim_grid,
+        grid_name=grid_name,
+        results_dir=full_mandatory_path,
+        time_steps=seis_dates,
         export_format="grdecl",
     )
 
     # 2. Save results to rms and/or disk according to config file
 
     # create list of dict from list of pressure and saturated rock objects
-    eff_pres_dict_list = [asdict(obj) for obj in eff_pres_props]
-    sat_prop_dict_list = [asdict(obj) for obj in sat_rock_props]
+    eff_pres_dict_list = [asdict(obj) for obj in eff_pres_props]
+    sat_prop_dict_list = [asdict(obj) for obj in sat_rock_props]
 
     try:
-        if
-            grid_model = config_settings.global_params.grid_model
+        if save_to_rms and run_from_rms_flag:
             # Time dependent absolute properties
             for props in [eff_pres_dict_list, sat_prop_dict_list]:
                 prop_dict = list(props)
                 export_results_roxar(
-                    rms_project,
-                    prop_dict,
-                    sim_grid,
-
-                    time_steps=
+                    prj=rms_project,
+                    result_props=prop_dict,
+                    grid=sim_grid,
+                    rms_grid_name=grid_name,
+                    time_steps=seis_dates,
                 )
             # Difference properties
             export_results_roxar(
-                rms_project,
-                difference_props,
-                sim_grid,
-
+                prj=rms_project,
+                result_props=difference_props,
+                grid=sim_grid,
+                rms_grid_name=grid_name,
                 time_steps=difference_date_strs,
             )
     except KeyError: # warn user that results are not saved
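
The `if k in list(Sim2SeisRequiredParams)` filter above works because `Sim2SeisRequiredParams` is a `str` Enum, so its members compare equal to plain dictionary keys. A self-contained sketch of the idiom (the dataclass fields here are illustrative, not the real `SaturatedRockProperties` definition):

```python
from dataclasses import dataclass, asdict
from enum import Enum


class Sim2SeisRequiredParams(str, Enum):
    VP = "vp"
    VS = "vs"
    DENSITY = "density"


@dataclass
class SaturatedRockProperties:
    vp: float
    vs: float
    density: float
    ai: float  # illustrative extra field that should not go to the mandatory export


sat_rock_props = [SaturatedRockProperties(vp=3500.0, vs=1900.0, density=2.45, ai=8575.0)]

# Keep only the keys required by sim2seis; "vp" == Sim2SeisRequiredParams.VP is True
# because the enum inherits from str.
output_set = [
    {k: v for (k, v) in asdict(sat_prop).items() if k in list(Sim2SeisRequiredParams)}
    for sat_prop in sat_rock_props
]
print(output_set)  # [{'vp': 3500.0, 'vs': 1900.0, 'density': 2.45}]
```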
@@ -112,21 +117,21 @@ def save_results(
             f"config file"
         )
     try:
-        if
+        if save_to_disk:
             for props in [eff_pres_dict_list, sat_prop_dict_list]:
                 prop_dict = list(props)
                 export_results_disk(
-                    prop_dict,
-                    sim_grid,
-                    sim_grid.name,
-
-                    time_steps=
+                    result_props=prop_dict,
+                    grid=sim_grid,
+                    grid_name=sim_grid.name,
+                    results_dir=full_output_path,
+                    time_steps=seis_dates,
                 )
             export_results_disk(
-                difference_props,
-                sim_grid,
-
-
+                result_props=difference_props,
+                grid=sim_grid,
+                grid_name=grid_name,
+                results_dir=full_output_path,
                 time_steps=difference_date_strs,
             )
     except KeyError: # warn user that results are not saved
@@ -137,22 +142,29 @@ def save_results(
 
     # 3. Save intermediate results only if specified in the config file
     try:
-        if
-
-            [asdict(fl_props) for fl_props in fluid_props],
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if save_intermediate:
+            export_dicts = [
+                [asdict(fl_props) for fl_props in fluid_props],
+                asdict(matrix_props),
+                bubble_point_grids,
+                [asdict(dry_props) for dry_props in dry_rock_props],
+            ]
+            suffices = [
+                "_FLUID",
+                "_MINERAL",
+                "",
+                "_DRY_ROCK",
+            ]
+            dates = [seis_dates, None, seis_dates, seis_dates]
+            for props, date_info, suffix in zip(export_dicts, dates, suffices):
+                export_results_disk(
+                    result_props=props,
+                    grid=sim_grid,
+                    grid_name=grid_name,
+                    results_dir=full_output_path,
+                    time_steps=date_info,
+                    name_suffix=suffix,
+                )
     except KeyError:
         # just skip silently if save_intermediate_results is not present in the
         # pem_config
@@ -162,10 +174,10 @@ def save_results(
 
 def export_results_roxar(
     prj: object,
-    result_props:
+    result_props: list[dict] | dict,
     grid: xtgeo.grid3d.Grid,
     rms_grid_name: str,
-    time_steps:
+    time_steps: list[str] | None = None,
     name_suffix: str = "",
     force_write_grid: bool = False,
 ) -> None:
@@ -218,11 +230,11 @@ def _verify_gridmodel(prj: object, rms_grid_model_name: str, grid: xtgeo.grid3d.
 
 
 def export_results_disk(
-    result_props:
+    result_props: list[dict] | dict,
     grid: xtgeo.grid3d.Grid,
     grid_name: str,
     results_dir: Path,
-    time_steps:
+    time_steps: list[str] | None = None,
     name_suffix: str = "",
     export_format: str = "roff",
 ) -> None:
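
Both signatures above move from `typing.List`/`Union` annotations to built-in generics and PEP 604 unions (`list[dict] | dict`, `list[str] | None`). Those evaluate at runtime only on Python 3.10+, unless annotations are deferred with `from __future__ import annotations` (which the new `fipnum_pvtnum_utilities` module below does). A small sketch with a hypothetical `export_stub` function:

```python
from __future__ import annotations  # lets list[str] | None parse on Python versions before 3.10


def export_stub(
    result_props: list[dict] | dict,
    time_steps: list[str] | None = None,
) -> None:
    # hypothetical stand-in for export_results_disk / export_results_roxar
    print(type(result_props).__name__, time_steps)


export_stub({"vp": 3500.0})                   # dict None
export_stub([{"vp": 3500.0}], ["20200101"])   # list ['20200101']
```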
--- /dev/null
+++ fmu/pem/pem_utilities/fipnum_pvtnum_utilities.py (0.0.3)
@@ -0,0 +1,217 @@
+from __future__ import annotations
+
+import numpy as np
+
+
+def input_num_string_to_list(
+    input_string: str,
+    num_array: list[int],
+) -> list[int]:
+    """Input is a string of comma-separated ranges like
+    10-15, 20-25 and outputs the expanded verbose list of individual integers.
+    """
+
+    parts = [part.strip() for part in input_string.split(",")]
+
+    integers = []
+
+    for part in parts:
+        if "-" in part:
+            [start, end] = [int(integer) for integer in part.split("-")]
+            integers += list(range(start, end + 1))
+        elif "*" in part:
+            return num_array
+        else:
+            try:
+                integers.append(int(part))
+            except ValueError as e:
+                raise ValueError(f"unable to convert string '{part}' to integers: {e}")
+
+    unique_integer_list = list(set(integers))
+    unique_integer_list.sort()
+
+    return unique_integer_list
+
+
+def num_boolean_array(
+    input_string: str,
+    num_array: list[int],
+) -> np.ndarray:
+    """Returns a boolean array where a given element is True if the corresponding
+    element in num_array (representing PVTNUM/FIPNUM) is part of the input_string
+    definition, which is of format e.g. "10-20, 25"
+    """
+
+    if input_string.strip() == "*":
+        return np.ones(np.shape(num_array), dtype=bool)
+
+    _validate_input_strings(
+        [
+            input_string,
+        ],
+        num_array,
+    )
+
+    return np.isin(num_array, input_num_string_to_list(input_string, num_array))
+
+
+def missing_num_areas(
+    input_strings: list[str],
+    num_array: list[int],
+) -> list[int]:
+    """Returns a list of all FIPNUM/PVTNUM integers not covered by the user input.
+    If all integers are covered, the return value is an empty list. This function
+    can be used to get a list of FIPNUM or PVTNUM areas the user has not given
+    a value for.
+    """
+    _validate_input_strings(input_strings, num_array)
+
+    if any(input_string.strip() == "*" for input_string in input_strings):
+        return []
+
+    unique_integers_grid = set(num_array)
+
+    unique_integers_input_config = {
+        num
+        for input_string in input_strings
+        for num in input_num_string_to_list(input_string, num_array)
+    }
+
+    missing_areas = list(unique_integers_grid - unique_integers_input_config)
+    missing_areas.sort()
+
+    return missing_areas
+
+
+def detect_overlaps(
+    input_strings: list[str],
+    num_array: list[int],
+) -> bool:
+    """If there are any overlapping groups in the input strings, returns True."""
+    _validate_input_strings(input_strings, num_array)
+
+    def _map_values(inp_array: list[int], all_num_array: list[int]) -> list[int]:
+        return [all_num_array.index(num) for num in inp_array if num in all_num_array]
+
+    is_already_taken = np.zeros_like(num_array, dtype=bool)
+    for string in input_strings:
+        string_nums = input_num_string_to_list(string, num_array)
+        position_list = _map_values(string_nums, num_array)
+        if np.any(is_already_taken[position_list]):
+            return True
+        is_already_taken[position_list] = True
+    return False
+
+
+def _validate_input_strings(
+    input_strings: list[str],
+    num_array: list[int],
+) -> None:
+    """Make sure that there are no numbers in the input strings that are not part
+    of num_array, but allow ranges to cover missing numbers in num_array"""
+    num_set = set(num_array)
+    min_num = min(num_array)
+    max_num = max(num_array)
+
+    for input_string in input_strings:
+        parts = [part.strip() for part in input_string.split(",")]
+
+        for part in parts:
+            if "*" in part:
+                continue
+            if "-" in part:
+                # Ranges are allowed to span missing numbers in num_array
+                # But range endpoints must be within min/max bounds
+                try:
+                    start, end = [int(integer) for integer in part.split("-")]
+                    if start > end:
+                        raise ValueError(f"Invalid range '{part}': start > end")
+                    if start < min_num or start > max_num:
+                        raise ValueError(
+                            f"Range start {start} in '{part}' is outside "
+                            f"num_array bounds [{min_num}, {max_num}]"
+                        )
+                    if end > max_num:
+                        raise ValueError(
+                            f"Range end {end} in '{part}' is outside "
+                            f"num_array bounds [{min_num}, {max_num}]"
+                        )
+                except ValueError as e:
+                    if "outside" in str(e) or "Invalid range" in str(e):
+                        raise
+                    raise ValueError(f"Invalid range format '{part}': {e}")
+            else:
+                # Individual numbers must exist in num_array
+                try:
+                    num = int(part)
+                    if num not in num_set:
+                        raise ValueError(
+                            f"Individual number {num} from input '{input_string}' "
+                            f"not found in num_array"
+                        )
+                except ValueError as e:
+                    if "not found in num_array" in str(e):
+                        raise
+                    raise ValueError(f"Unable to parse '{part}' as integer: {e}")
+
+
+def validate_zone_coverage(
+    zone_strings: list[str],
+    grid_values: np.ma.MaskedArray,
+    zone_name: str = "zone",
+) -> None:
+    """
+    Validate that all grid values have corresponding zone definitions.
+
+    Enforces:
+    - Single wildcard '*' cannot appear with other groups
+    - No overlaps among explicit definitions
+    - All grid values covered by definitions
+
+    Args:
+        zone_strings: List of zone definition strings (e.g., ["*"] or ["1-5", "6-10"])
+        grid_values: Masked array of zone integers from simulator grid
+        zone_name: Name of zone type for error messages (e.g., "PVTNUM", "FIPNUM")
+
+    Raises:
+        ValueError: If wildcard misused, overlaps detected, or grid values lack
+            definitions
+    """
+    # Extract unique values from grid
+    grid_data = grid_values.data
+    grid_mask = (
+        grid_values.mask
+        if hasattr(grid_values, "mask")
+        else np.zeros_like(grid_data, dtype=bool)
+    )
+    actual_values = set(np.unique(grid_data[~grid_mask]).astype(int))
+
+    if not actual_values:
+        raise ValueError(f"No valid {zone_name} values found in grid")
+
+    # Check for wildcard-only definition
+    if "*" in zone_strings:
+        if len(zone_strings) > 1:
+            raise ValueError(
+                f"Wildcard '*' cannot be combined with explicit {zone_name} "
+                "definitions. Either use '*' alone or list all zones explicitly."
+            )
+        return  # Wildcard covers everything
+
+    # Check for overlaps in explicit definitions
+    max_val = max(actual_values)
+    tmp_array = list(range(1, max_val + 1))
+    if detect_overlaps(zone_strings, tmp_array):
+        raise ValueError(f"Overlapping {zone_name} definitions found: {zone_strings}")
+
+    # Check coverage: all grid values must have definitions
+    defined_values = set()
+    for zone_str in zone_strings:
+        defined_values.update(input_num_string_to_list(zone_str, tmp_array))
+
+    missing = actual_values - defined_values
+    if missing:
+        raise ValueError(
+            f"{zone_name} values {sorted(missing)} are present in grid but have no "
+            f"zone definition. Add explicit definitions for these values."
+        )