res2df 1.3.7__py3-none-any.whl → 1.3.9__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
- res2df/__init__.py +2 -3
- res2df/common.py +79 -75
- res2df/compdat.py +27 -32
- res2df/csv2res.py +5 -9
- res2df/equil.py +24 -29
- res2df/faults.py +2 -7
- res2df/fipreports.py +10 -14
- res2df/grid.py +61 -66
- res2df/gruptree.py +33 -35
- res2df/inferdims.py +6 -9
- res2df/nnc.py +5 -10
- res2df/opmkeywords/__init__.py +0 -0
- res2df/parameters.py +12 -12
- res2df/pillars.py +24 -31
- res2df/pvt.py +29 -34
- res2df/res2csv.py +10 -15
- res2df/res2csvlogger.py +1 -3
- res2df/resdatafiles.py +8 -8
- res2df/rft.py +37 -44
- res2df/satfunc.py +22 -28
- res2df/summary.py +57 -60
- res2df/trans.py +16 -38
- res2df/version.py +2 -2
- res2df/vfp/__init__.py +1 -1
- res2df/vfp/_vfp.py +28 -33
- res2df/vfp/_vfpcommon.py +18 -19
- res2df/vfp/_vfpdefs.py +2 -3
- res2df/vfp/_vfpinj.py +23 -58
- res2df/vfp/_vfpprod.py +28 -64
- res2df/wcon.py +4 -11
- res2df/wellcompletiondata.py +26 -26
- res2df/wellconnstatus.py +4 -5
- {res2df-1.3.7.dist-info → res2df-1.3.9.dist-info}/METADATA +6 -3
- {res2df-1.3.7.dist-info → res2df-1.3.9.dist-info}/RECORD +38 -37
- {res2df-1.3.7.dist-info → res2df-1.3.9.dist-info}/WHEEL +0 -0
- {res2df-1.3.7.dist-info → res2df-1.3.9.dist-info}/entry_points.txt +0 -0
- {res2df-1.3.7.dist-info → res2df-1.3.9.dist-info}/licenses/LICENSE +0 -0
- {res2df-1.3.7.dist-info → res2df-1.3.9.dist-info}/top_level.txt +0 -0
res2df/gruptree.py
CHANGED
@@ -7,7 +7,7 @@ import datetime
 import logging
 import sys
 import warnings
-from typing import Any
+from typing import Any
 
 import numpy as np
 import pandas as pd
@@ -15,8 +15,6 @@ import treelib
 
 with contextlib.suppress(ImportError):
     # Needed for mypy
-
-    # pylint: disable=unused-import
     import opm.io
 
 from .common import (
@@ -33,8 +31,8 @@ logger = logging.getLogger(__name__)
 
 
 def df(
-    deck:
-    startdate:
+    deck: "ResdataFiles | opm.opmcommon_python.Deck",
+    startdate: datetime.date | None = None,
     welspecs: bool = True,
 ) -> pd.DataFrame:
     """Extract all group information from a :term:`deck`
@@ -64,7 +62,7 @@ def df(
         information is found in :term:`deck`.
     """
 
-    date:
+    date: datetime.date | None
     date = startdate if startdate is not None else None
 
     if isinstance(deck, ResdataFiles):
@@ -76,16 +74,16 @@ def df(
     # In order for the GRUPTREE/BRANPROP keywords to accumulate, we
     # store the edges as dictionaries indexed by the edge
     # (which is a tuple of child and parent).
-    currentedges:
+    currentedges: dict[str, dict[tuple[str, str], dict[str, Any]]] = {
         "GRUPTREE": {},
         "BRANPROP": {},
     }
     # Same approach for the welspecs keywords
-    wellspecsedges:
+    wellspecsedges: dict[tuple[str, str], str] = {}
     # Node properties from GRUPNET/NODEPROP is stored in a dataframe
     # Note that it's not allowed to mix GRUPNET and NODEPROP in eclipse
     # so the datframe will only contain columns from one of them
-    nodedata:
+    nodedata: dict[str, pd.DataFrame] = {
         "GRUPNET": pd.DataFrame(),
         "NODEPROP": pd.DataFrame(),
     }
@@ -115,7 +113,7 @@ def df(
         if kword.name in ["DATES", "START"]:
             for rec in kword:
                 date = parse_opmio_date_rec(rec)
-                logger.debug("Parsing at date %s",
+                logger.debug("Parsing at date %s", date)
         elif kword.name == "TSTEP":
             assert date is not None
             for rec in kword:
@@ -123,9 +121,7 @@ def df(
                 # Assuming not LAB units, then the unit is days.
                 days = sum(steplist)
                 date += datetime.timedelta(days=days)
-                logger.info(
-                    "Advancing %s days to %s through TSTEP", str(days), str(date)
-                )
+                logger.info("Advancing %s days to %s through TSTEP", days, date)
         if kword.name in ["GRUPTREE", "BRANPROP"]:
             found_keywords[kword.name] = True
             renamer = (
@@ -137,13 +133,13 @@ def df(
                 edge_dict = parse_opmio_deckrecord(edgerec, kword.name, renamer=renamer)
                 child = edge_dict.pop("CHILD_GROUP")
                 parent = edge_dict.pop("PARENT_GROUP")
-                currentedges[kword.name][
+                currentedges[kword.name][child, parent] = edge_dict
 
         if kword.name == "WELSPECS" and welspecs:
             found_keywords["WELSPECS"] = True
             for wellrec in kword:
                 wspc_dict = parse_opmio_deckrecord(wellrec, "WELSPECS")
-                wellspecsedges[
+                wellspecsedges[wspc_dict["WELL"], wspc_dict["GROUP"]] = "WELSPECS"
 
         if kword.name in ["GRUPNET", "NODEPROP"]:
             found_keywords[kword.name] = True
@@ -189,12 +185,12 @@ def df(
 
 
 def _write_edgerecords(
-    currentedges:
-    nodedata:
-    wellspecsedges:
+    currentedges: dict[str, dict[tuple[str, str], dict[str, Any]]],
+    nodedata: dict[str, pd.DataFrame],
+    wellspecsedges: dict[tuple[str, str], str],
     found_keywords: dict,
-    date:
-) ->
+    date: datetime.date | None,
+) -> list[dict]:
     """Writes a new GRUPTREE tree if there are new instances of
     GRUPTREE, GRUPNET or WELSPECS and writes a new BRANPROP tree
     if there are new instances of BRANPROP, NODEPROP or WELSPECS.
@@ -219,12 +215,12 @@ def _write_edgerecords(
 
 
 def _merge_edges_and_nodeinfo(
-    currentedges:
+    currentedges: dict[tuple[str, str], dict[str, Any]],
     nodedata_df: pd.DataFrame,
-    wellspecsedges:
-    date:
+    wellspecsedges: dict[tuple[str, str], str],
+    date: datetime.date | None,
     treetype: str,
-) ->
+) -> list[dict[str, Any]]:
     """Merge a list of edges with information from the nodedata dataframe.
 
     Edges where there is no parent (root nodes) are identified and added
@@ -241,7 +237,7 @@ def _merge_edges_and_nodeinfo(
     Returns:
         List of dictionaries (that can be made into a dataframe)
     """
-    edgerecords = []
+    edgerecords: list[dict[str, Any]] = []
    childs = set()
    parents = set()
    # Write GRUPTREE/BRANPROP edges
@@ -253,7 +249,9 @@
         rec_dict.update(edge_dict)
         # Add node data
         if child in nodedata_df.index:
-            rec_dict.update(
+            rec_dict.update(
+                {str(k): v for k, v in nodedata_df.loc[child].to_dict().items()}
+            )
         edgerecords.append(rec_dict)
 
     # Write WELSPECS edges
@@ -280,16 +278,18 @@
         parents |= {"FIELD"}
 
     roots = parents - childs
-    rootrecords = []
+    rootrecords: list[dict[str, Any]] = []
     for root in roots:
         rec_dict = {"DATE": date, "CHILD": root, "KEYWORD": treetype}
         if root in nodedata_df.index:
-            rec_dict.update(
+            rec_dict.update(
+                {str(k): v for k, v in nodedata_df.loc[root].to_dict().items()}
+            )
         rootrecords.append(rec_dict)
     return rootrecords + edgerecords
 
 
-def edge_dataframe2dict(dframe: pd.DataFrame) ->
+def edge_dataframe2dict(dframe: pd.DataFrame) -> list[dict]:
     """Convert list of edges in a dataframe into a
     nested dictionary (tree).
 
@@ -321,7 +321,7 @@ def edge_dataframe2dict(dframe: pd.DataFrame) -> List[dict]:
     subtrees: dict = collections.defaultdict(dict)
     edges = []  # List of tuples
     for _, row in dframe.iterrows():
-        if not pd.
+        if not pd.isna(row["PARENT"]):
             edges.append((row["CHILD"], row["PARENT"]))
     for child, parent in edges:
         subtrees[parent][child] = subtrees[child]
@@ -332,7 +332,7 @@ def edge_dataframe2dict(dframe: pd.DataFrame) -> List[dict]:
 
 
 def _add_to_tree_from_dict(
-    nested_dict: dict, name: str, tree: treelib.Tree, parent:
+    nested_dict: dict, name: str, tree: treelib.Tree, parent: str | None = None
 ) -> None:
     assert isinstance(nested_dict, dict)
     tree.create_node(name, name, parent=parent)
@@ -361,7 +361,7 @@ def tree_from_dict(nested_dict: dict) -> treelib.Tree | str:
             "The dict given to tree_from_dict() must have "
             "exactly one top level key, representing a single tree."
         )
-    root_name =
+    root_name = next(iter(nested_dict.keys()))
     tree = treelib.Tree()
     _add_to_tree_from_dict(nested_dict[root_name], root_name, tree)
     return tree
@@ -459,9 +459,7 @@ def prettyprint(dframe: pd.DataFrame) -> str:
 
 def gruptree_main(args) -> None:
     """Entry-point for module, for command line utility."""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
     if not args.output and not args.prettyprint:
         print("Nothing to do. Set --output or --prettyprint")
         sys.exit(0)
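The dominant change in this release, visible in gruptree.py above and repeated in every module below, is replacing `typing.Dict`/`List`/`Optional`/`Union` with builtin generics (PEP 585) and `X | Y` unions (PEP 604). The removed annotations are truncated in this rendering, so the sketch below is illustrative of the pattern rather than copied from res2df:

```python
import datetime

# Old spelling (typing module), presumably what the removed lines contained:
#   from typing import Dict, List, Optional
#   def df(startdate: Optional[datetime.date] = None) -> List[Dict[str, int]]: ...

# New spelling, matching the added lines in the diff:
def df(startdate: datetime.date | None = None) -> list[dict[str, int]]:
    ...
```

Builtin generics (`list`, `dict`, `tuple`) in annotations work from Python 3.9, while bare `X | Y` annotations require Python 3.10 unless `from __future__ import annotations` is in effect.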
res2df/inferdims.py
CHANGED
@@ -5,7 +5,6 @@ reservoir simulator decks (typically single include-files)
 
 import contextlib
 import logging
-from typing import Dict, Optional, Union
 
 with contextlib.suppress(ImportError):
     import opm.io
@@ -16,7 +15,7 @@ from .resdatafiles import ResdataFiles
 logger = logging.getLogger(__name__)
 
 # Constants to use for pointing to positions in the xxxDIMS keyword
-DIMS_POS:
+DIMS_POS: dict[str, int] = {"NTPVT": 1, "NTSFUN": 0, "NTEQUL": 0}
 
 
 def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int:
@@ -138,9 +137,9 @@ def inject_dimcount(
 def inject_xxxdims_ntxxx(
     xxxdims: str,
     ntxxx_name: str,
-    deck:
-    ntxxx_value:
-) -> "opm.
+    deck: "str | opm.opmcommon_python.Deck",
+    ntxxx_value: int | None = None,
+) -> "opm.opmcommon_python.Deck":
     """Ensures TABDIMS/EQLDIMS is present in a :term:`deck`.
 
     If ntxxx_value=None and ntxxx_name not in the :term:`deck`, ntxxx_name will
@@ -169,9 +168,7 @@ def inject_xxxdims_ntxxx(
         return deck
 
     if xxxdims in deck and ntxxx_value is not None:
-        logger.warning(
-            "Ignoring %s argument, it is already in the deck", str(ntxxx_name)
-        )
+        logger.warning("Ignoring %s argument, it is already in the deck", ntxxx_name)
         return deck
 
     if not isinstance(deck, str):
@@ -182,7 +179,7 @@ def inject_xxxdims_ntxxx(
     # Estimate if ntxxx_value is not provided:
     if ntxxx_value is None:
         ntxxx_estimate = guess_dim(deck, xxxdims, DIMS_POS[ntxxx_name])
-        logger.warning("Estimated %s=%s", ntxxx_name,
+        logger.warning("Estimated %s=%s", ntxxx_name, ntxxx_estimate)
     else:
         ntxxx_estimate = ntxxx_value
 
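Alongside the typing cleanup, inferdims.py drops the `str(...)` wrappers around logging arguments. That is safe because `%s`-style logging stringifies its arguments itself, and only when a record is actually emitted. A minimal sketch with hypothetical values (not res2df code):

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

ntxxx_name, ntxxx_estimate = "NTPVT", 2  # hypothetical values

# %s already applies str() to each argument, and formatting is deferred
# until a handler accepts the record, so str() around arguments is redundant:
logger.warning("Estimated %s=%s", ntxxx_name, ntxxx_estimate)
```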
res2df/nnc.py
CHANGED
@@ -8,7 +8,6 @@ import datetime
 import logging
 import os
 from pathlib import Path
-from typing import Optional
 
 import pandas as pd
 
@@ -124,15 +123,13 @@ def add_nnc_coords(nncdf: pd.DataFrame, resdatafiles: ResdataFiles) -> pd.DataFr
         Incoming dataframe augmented with the columns X, Y and Z.
     """
     gridgeometry = gridgeometry2df(resdatafiles)
-    gnncdf =
-        nncdf,
+    gnncdf = nncdf.merge(
         gridgeometry,
         how="left",
         left_on=["I1", "J1", "K1"],
         right_on=["I", "J", "K"],
     )
-    gnncdf =
-        gnncdf,
+    gnncdf = gnncdf.merge(
         gridgeometry,
         how="left",
         left_on=["I2", "J2", "K2"],
@@ -147,7 +144,7 @@ def add_nnc_coords(nncdf: pd.DataFrame, resdatafiles: ResdataFiles) -> pd.DataFr
     gnncdf["Z"] = gnncdf[["Z", "Z_2"]].mean(axis=1)
 
     # Let go of the temporary columns we have in gnncdf
-    return gnncdf[list(nncdf.columns)
+    return gnncdf[[*list(nncdf.columns), "X", "Y", "Z"]]
 
 
 def filter_vertical(nncdf: pd.DataFrame) -> pd.DataFrame:
@@ -207,7 +204,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
 
 def df2res_editnnc(
-    nnc_df: pd.DataFrame, filename:
+    nnc_df: pd.DataFrame, filename: str | None = None, nocomments: bool = False
 ) -> str:
     """Write an EDITNNC keyword
 
@@ -285,9 +282,7 @@ def df2res_editnnc(
 
 def nnc_main(args) -> None:
     """Command line access point from main() or from res2csv via subparser"""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
     nncdf = df(resdatafiles, coords=args.coords, pillars=args.pillars)
     write_dframe_stdout_file(
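Two idioms in the nnc.py hunks above are worth spelling out: the function form `pd.merge(df1, df2, ...)` is replaced by the equivalent method form `df1.merge(df2, ...)`, and list concatenation is replaced by iterable unpacking. A toy sketch under assumed column names (not the real res2df frames):

```python
import pandas as pd

nncdf = pd.DataFrame({"I1": [1], "J1": [2], "K1": [3], "TRAN": [0.5]})
gridgeometry = pd.DataFrame({"I": [1], "J": [2], "K": [3], "X": [100.0]})

# Method form of the same left join pd.merge(nncdf, gridgeometry, ...) performs:
gnncdf = nncdf.merge(
    gridgeometry, how="left", left_on=["I1", "J1", "K1"], right_on=["I", "J", "K"]
)

# [*list(...), ...] unpacks the original columns and appends new ones,
# equivalent to list(nncdf.columns) + ["X"]:
print(gnncdf[[*list(nncdf.columns), "X"]])
```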
res2df/opmkeywords/__init__.py
File without changes
res2df/parameters.py
CHANGED
@@ -5,7 +5,7 @@ import json
 import logging
 import warnings
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 import pandas as pd
 import yaml
@@ -16,8 +16,8 @@ logger = logging.getLogger(__name__)
 
 
 def find_parameter_files(
-    deckpath:
-) ->
+    deckpath: ResdataFiles | str | Path, filebase: str = "parameters"
+) -> list[Path]:
     """Locate a default prioritized list of files to try to read as key-value
 
     File extensions .yml, .json and .txt are recognized and will be found in
@@ -41,13 +41,13 @@ def find_parameter_files(
         eclbasepath = Path(deckpath).parent.absolute()
     else:
         raise TypeError
-    files_to_lookfor:
+    files_to_lookfor: list[str] = [
         filebase + ".json",
         filebase + ".yml",
         filebase + ".txt",
         filebase,
     ]
-    paths_to_check:
+    paths_to_check: list[Path] = [Path("."), Path(".."), Path("..") / Path("..")]
     foundfiles = []
     for path in paths_to_check:
         for fname in files_to_lookfor:
@@ -57,7 +57,7 @@ def find_parameter_files(
     return foundfiles
 
 
-def load_parameterstxt(filename:
+def load_parameterstxt(filename: str | Path) -> dict[str, Any]:
     """Read parameters.txt into a dictionary
 
     Lines starting with a hash will be ignored.
@@ -87,8 +87,8 @@ def load_parameterstxt(filename: Union[str, Path]) -> Dict[str, Any]:
 
 
 def load_all(
-    filenames:
-) ->
+    filenames: list[str] | list[Path], warnduplicates: bool = True
+) -> dict[str, Any]:
     """Reads a list of parameter filenames
 
     Dictionaries for all files will be merged into one.
@@ -101,19 +101,19 @@ def load_all(
         filenames: Order matters.
         warnduplicates: If True (default), overlapping keys will be warned.
     """
-    keyvalues:
+    keyvalues: dict[str, Any] = {}
     for fname in filenames:
         new_params = load(fname)
         if warnduplicates and keyvalues:
             duplicates = set(keyvalues.keys()).intersection(set(new_params.keys()))
             if duplicates:
-                logger.debug("Duplicates keys %s",
+                logger.debug("Duplicates keys %s", duplicates)
         new_params.update(keyvalues)
         keyvalues = new_params
     return keyvalues
 
 
-def load(filename:
+def load(filename: str | Path) -> dict[str, Any]:
     """Read a parameter file as txt, yaml or json
 
     Returns:
@@ -168,7 +168,7 @@ def load(filename: Union[str, Path]) -> Dict[str, Any]:
 
     if not params_dict:
         logger.warning("%s could not be parsed as yaml, json or txt", filename)
-        logger.warning("%s%s%s",
+        logger.warning("%s%s%s", yaml_error, json_error, txt_error)
         raise ValueError(f"Could not parse {filename}")
 
     # Filter to values that are NOT dict's. We can have dict as value when
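The annotations added to `load_all()` above leave its merge loop untouched, but the loop's precedence rule is easy to misread: updating the *new* dict with the accumulated one means earlier files win on duplicate keys. A standalone sketch of the idiom (`merge_params` is a hypothetical helper, not part of res2df):

```python
from typing import Any

def merge_params(dicts: list[dict[str, Any]]) -> dict[str, Any]:
    """Merge dicts so that earlier entries take precedence on duplicate keys."""
    keyvalues: dict[str, Any] = {}
    for params in dicts:
        new_params = dict(params)     # copy, to avoid mutating the caller's dict
        new_params.update(keyvalues)  # accumulated (older) values overwrite new ones
        keyvalues = new_params
    return keyvalues

assert merge_params([{"a": 1}, {"a": 2, "b": 3}]) == {"a": 1, "b": 3}
```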
res2df/pillars.py
CHANGED
@@ -3,7 +3,6 @@
 import argparse
 import datetime
 import logging
-from typing import Dict, List, Optional, Union
 
 import dateutil.parser
 import pandas as pd
@@ -16,7 +15,7 @@ from .resdatafiles import ResdataFiles
 
 logger: logging.Logger = logging.getLogger(__name__)
 
-AGGREGATORS:
+AGGREGATORS: dict[str, str] = {
     "VOLUME": "sum",
     "PORV": "sum",
     "PERMX": "mean",
@@ -38,8 +37,8 @@ AGGREGATORS: Dict[str, str] = {
 
 def df(
     resdatafiles: ResdataFiles,
-    region:
-    rstdates:
+    region: str | None = None,
+    rstdates: str | datetime.date | list[datetime.date] | None = None,
     soilcutoff: float = 0.2,
     sgascutoff: float = 0.7,
     swatcutoff: float = 0.7,
@@ -135,16 +134,14 @@ def df(
         datestr=datestr,
     )
     if not contacts.empty:
-        grouped =
+        grouped = grouped.merge(contacts, how="left")
 
     if stackdates:
         return stack_on_colnames(grouped, sep="@", stackcolname="DATE", inplace=True)
     return grouped
 
 
-def compute_volumes(
-    grid_df: pd.DataFrame, datestr: Optional[str] = None
-) -> pd.DataFrame:
+def compute_volumes(grid_df: pd.DataFrame, datestr: str | None = None) -> pd.DataFrame:
     """Compute "dynamic" volumes, volumes for data coming from the
     UNRST file (SWAT+SGAS)
 
@@ -194,11 +191,11 @@ def compute_volumes(
 
 def compute_pillar_contacts(
     grid_df: pd.DataFrame,
-    region:
+    region: str | None = None,
     soilcutoff: float = 0.2,
     sgascutoff: float = 0.7,
     swatcutoff: float = 0.7,
-    datestr:
+    datestr: str | None = None,
 ) -> pd.DataFrame:
     """Compute contacts pr. pillar in a grid dataframe.
 
@@ -268,21 +265,21 @@ def compute_pillar_contacts(
         .reset_index()
     )
     if soilcutoff and "SOIL" + atdatestr in grid_df:
-        logger.info(
-            "Calculating oil-water-contacts based on SOILcutoff %s", str(soilcutoff)
-        )
+        logger.info("Calculating oil-water-contacts based on SOILcutoff %s", soilcutoff)
         owc = (
-            grid_df[grid_df["SOIL" + atdatestr] > soilcutoff]
-            .groupby(groupbies)
-            .agg({"Z": "max"})
+            (
+                grid_df[grid_df["SOIL" + atdatestr] > soilcutoff]
+                .groupby(groupbies)
+                .agg({"Z": "max"})
+            )
+            .rename(columns={"Z": "OWC" + atdatestr})
+            .reset_index()
         )
-        owc.rename(columns={"Z": "OWC" + atdatestr}, inplace=True)
-        owc.reset_index(inplace=True)
         # Filter the owc frame to only those pillars that also has water:
-        owc =
+        owc = waterpillars.merge(owc, how="inner").drop("Z", axis="columns")
 
     if sgascutoff and "SGAS" + atdatestr in grid_df:
-        logger.info("Calculating gas-contacts based on gas cutoff %s",
+        logger.info("Calculating gas-contacts based on gas cutoff %s", sgascutoff)
         if "SOIL" + atdatestr in grid_df and "SGAS" + atdatestr in grid_df:
             # Pillars to be used for GOC computation
             gocpillars = (
@@ -298,8 +295,7 @@ def compute_pillar_contacts(
             ]
             .groupby(groupbies)
             .agg({"Z": "max"})
-            )
-            goc.rename(columns={"Z": "GOC" + atdatestr}, inplace=True)
+            ).rename(columns={"Z": "GOC" + atdatestr})
         else:
             # Two-phase gas-water: GWC computation
             gocpillars = waterpillars  # In case of gas-water
@@ -307,11 +303,10 @@ def compute_pillar_contacts(
             grid_df[grid_df["SGAS" + atdatestr] > sgascutoff]
             .groupby(groupbies)
            .agg({"Z": "max"})
-            )
-            goc.rename(columns={"Z": "GWC" + atdatestr}, inplace=True)
-        goc.reset_index(inplace=True)
+            ).rename(columns={"Z": "GWC" + atdatestr})
+        goc = goc.reset_index()
         # Filter the goc frame to only those with oil or water:
-        goc =
+        goc = gocpillars.merge(goc, how="inner").drop("Z", axis="columns")
 
     # We need to avoid merging with potentially empty DataFrames
     if owc.empty and goc.empty:
@@ -320,7 +315,7 @@ def compute_pillar_contacts(
         return owc
     if owc.empty and not goc.empty:
         return goc
-    return
+    return owc.merge(goc)
 
 
 def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
@@ -413,9 +408,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
 def pillars_main(args) -> None:
     """This is the command line API"""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
 
     resdatafiles = ResdataFiles(args.DATAFILE)
     dframe = df(
@@ -430,7 +423,7 @@ def pillars_main(args) -> None:
         groupbies = []
     aggregators = {
         key: AGGREGATORS[key.split("@")[0]]
-        for key in dframe
+        for key in dframe.columns
         if key.split("@")[0] in AGGREGATORS
     }
    if args.region and args.group: