res2df 1.3.9__py3-none-any.whl → 1.3.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- res2df/__init__.py +43 -4
- res2df/__version__.py +1 -1
- res2df/common.py +23 -26
- res2df/compdat.py +7 -8
- res2df/csv2res.py +1 -1
- res2df/equil.py +60 -60
- res2df/faults.py +12 -12
- res2df/fipreports.py +3 -2
- res2df/grid.py +11 -14
- res2df/gruptree.py +10 -12
- res2df/hook_implementations/forward_model_steps.py +11 -8
- res2df/inferdims.py +3 -6
- res2df/nnc.py +3 -7
- res2df/parameters.py +2 -2
- res2df/pillars.py +3 -3
- res2df/pvt.py +9 -12
- res2df/py.typed +0 -0
- res2df/res2csv.py +3 -5
- res2df/resdatafiles.py +20 -27
- res2df/rft.py +1 -1
- res2df/satfunc.py +12 -16
- res2df/summary.py +11 -16
- res2df/trans.py +5 -5
- res2df/version.py +2 -2
- res2df/vfp/__init__.py +18 -1
- res2df/vfp/_vfp.py +10 -14
- res2df/vfp/_vfpcommon.py +27 -29
- res2df/vfp/_vfpinj.py +4 -3
- res2df/wcon.py +3 -6
- res2df/wellcompletiondata.py +8 -3
- res2df/wellconnstatus.py +2 -2
- res2df-1.3.11.dist-info/METADATA +95 -0
- {res2df-1.3.9.dist-info → res2df-1.3.11.dist-info}/RECORD +37 -36
- res2df-1.3.9.dist-info/METADATA +0 -769
- {res2df-1.3.9.dist-info → res2df-1.3.11.dist-info}/WHEEL +0 -0
- {res2df-1.3.9.dist-info → res2df-1.3.11.dist-info}/entry_points.txt +0 -0
- {res2df-1.3.9.dist-info → res2df-1.3.11.dist-info}/licenses/LICENSE +0 -0
- {res2df-1.3.9.dist-info → res2df-1.3.11.dist-info}/top_level.txt +0 -0
res2df/gruptree.py
CHANGED

@@ -2,7 +2,6 @@
 
 import argparse
 import collections
-import contextlib
 import datetime
 import logging
 import sys
@@ -10,13 +9,12 @@ import warnings
 from typing import Any
 
 import numpy as np
+
+# Needed for mypy
+import opm.io
 import pandas as pd
 import treelib
 
-with contextlib.suppress(ImportError):
-    # Needed for mypy
-    import opm.io
-
 from .common import (
     OPMKEYWORDS,
     parse_opmio_date_rec,
@@ -69,7 +67,7 @@ def df(
     deck = deck.get_deck()
 
     edgerecords = []  # list of dict of rows containing an edge.
-    nodedatarecords = []
+    nodedatarecords: list[dict[str, Any]] = []
 
     # In order for the GRUPTREE/BRANPROP keywords to accumulate, we
     # store the edges as dictionaries indexed by the edge
@@ -146,10 +144,10 @@ def df(
             renamer = (
                 {"PRESSURE": "TERMINAL_PRESSURE"} if kword.name == "NODEPROP" else None
             )
-            for rec in kword:
-                nodedatarecords.append(
-                    parse_opmio_deckrecord(rec, kword.name, renamer=renamer)
-                )
+            nodedatarecords.extend(
+                parse_opmio_deckrecord(rec, kword.name, renamer=renamer)
+                for rec in kword
+            )
             nodedata[kword.name] = (
                 pd.DataFrame(nodedatarecords)
                 .drop_duplicates(subset="NAME", keep="last")
@@ -256,7 +254,7 @@ def _merge_edges_and_nodeinfo(
 
     # Write WELSPECS edges
     welspecs_parents = set()
-    for child, parent in wellspecsedges:
+    for child, parent in wellspecsedges:  # noqa: PLE1141
         # For BRANPROP trees, only wells with a parent in the tree are added
         if (treetype == "BRANPROP" and parent in childs) or (treetype == "GRUPTREE"):
             rec_dict = {
@@ -457,7 +455,7 @@ def prettyprint(dframe: pd.DataFrame) -> str:
     return output
 
 
-def gruptree_main(args) -> None:
+def gruptree_main(args: argparse.Namespace) -> None:
    """Entry-point for module, for command line utility."""
     logger = getLogger_res2csv(__name__, vars(args))
     if not args.output and not args.prettyprint:
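The `nodedatarecords` hunk above replaces a per-record `append()` loop with a single `extend()` over a generator. A minimal self-contained sketch of the same refactor; `parse` and the record strings are hypothetical stand-ins for `parse_opmio_deckrecord` and deck records:

```python
from typing import Any


def parse(rec: str) -> dict[str, Any]:
    # Hypothetical stand-in for parse_opmio_deckrecord()
    return {"NAME": rec}


records: list[dict[str, Any]] = []

# Before: one append per iteration
for rec in ["OP1", "OP2"]:
    records.append(parse(rec))

# After: a single extend() consuming a generator, as in the hunk above
records = []
records.extend(parse(rec) for rec in ["OP1", "OP2"])
```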
res2df/hook_implementations/forward_model_steps.py
CHANGED

@@ -1,31 +1,34 @@
 import shutil
+from collections.abc import Callable
+from typing import Any, ParamSpec
 
+P = ParamSpec("P")
 try:
-    from ert import (
+    from ert import (
         ForwardModelStepDocumentation,
         ForwardModelStepJSON,
         ForwardModelStepPlugin,
         ForwardModelStepValidationError,
     )
-    from ert import plugin as ert_plugin
+    from ert import plugin as ert_plugin
 except ModuleNotFoundError:
     # ert is not installed, use dummy/transparent function decorator:
-    def ert_plugin(name: str = ""):
-        def decorator(func):
+    def ert_plugin(name: str = "") -> Callable[[Callable[P, Any]], Callable[P, Any]]:
+        def decorator(func: Callable[P, Any]) -> Callable[P, Any]:
             return func
 
         return decorator
 
-    class ForwardModelStepDocumentation:  # type: ignore
+    class ForwardModelStepDocumentation:  # type: ignore[no-redef]
         pass
 
-    class ForwardModelStepJSON:  # type: ignore
+    class ForwardModelStepJSON:  # type: ignore[no-redef]
         pass
 
-    class ForwardModelStepPlugin:  # type: ignore
+    class ForwardModelStepPlugin:  # type: ignore[no-redef]
         pass
 
-    class ForwardModelStepValidationError:  # type: ignore
+    class ForwardModelStepValidationError:  # type: ignore[no-redef]
         pass
 
 
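The fallback branch above now types the dummy decorator with `ParamSpec`, so decorated functions keep their signatures for type checkers even when `ert` is absent. A standalone sketch of the pattern (requires Python 3.10+ for `typing.ParamSpec`; the plugin name and decorated function are illustrative):

```python
from collections.abc import Callable
from typing import Any, ParamSpec

P = ParamSpec("P")


def ert_plugin(name: str = "") -> Callable[[Callable[P, Any]], Callable[P, Any]]:
    """No-op decorator standing in for ert.plugin when ert is not installed."""

    def decorator(func: Callable[P, Any]) -> Callable[P, Any]:
        return func  # transparent: behaviour is unchanged

    return decorator


@ert_plugin(name="example")  # hypothetical plugin name
def installable_jobs() -> dict[str, str]:
    return {}
```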
res2df/inferdims.py
CHANGED

@@ -3,12 +3,9 @@ Support module for inferring EQLDIMS and TABDIMS from incomplete
 reservoir simulator decks (typically single include-files)
 """
 
-import contextlib
 import logging
 
-with contextlib.suppress(ImportError):
-    import opm.io
-# Let parts of res2df work without OPM:
+import opm.io
 
 from .resdatafiles import ResdataFiles
 
@@ -41,7 +38,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int:
         raise ValueError("Only supports TABDIMS and EQLDIMS")
     if dimkeyword == "TABDIMS" and dimitem not in [0, 1]:
         raise ValueError("Only support item 0 and 1 in TABDIMS")
-    if dimkeyword == "EQLDIMS" and dimitem not in [0]:
+    if dimkeyword == "EQLDIMS" and dimitem != 0:
         raise ValueError("Only item 0 in EQLDIMS can be estimated")
 
     # A less than res2df-standard permissive opm.io, when using
@@ -115,7 +112,7 @@ def inject_dimcount(
         raise ValueError("Only supports TABDIMS and EQLDIMS")
     if dimkeyword == "TABDIMS" and dimitem not in [0, 1]:
         raise ValueError("Only support item 0 and 1 in TABDIMS")
-    if dimkeyword == "EQLDIMS" and dimitem not in [0]:
+    if dimkeyword == "EQLDIMS" and dimitem != 0:
         raise ValueError("Only item 0 in EQLDIMS can be injected")
 
     if dimkeyword in deckstr:
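For context, `guess_dim()` (whose guard clauses are simplified above) infers dimension counts from incomplete decks by trial parsing. A hedged usage sketch, assuming `opm.io` is installed; the two SWOF tables below are made-up data that should yield an NTSFUN guess of 2:

```python
from res2df import inferdims

# Two saturation tables, but no TABDIMS keyword in the include file:
swof = """
SWOF
  0.1 0.0 1.0 0.0
  1.0 1.0 0.0 0.0 /
  0.2 0.0 1.0 0.0
  1.0 1.0 0.0 0.0 /
"""
# Item 0 of TABDIMS is the number of saturation tables (NTSFUN):
print(inferdims.guess_dim(swof, "TABDIMS", 0))  # expected: 2
```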
res2df/nnc.py
CHANGED

@@ -181,7 +181,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist .INIT and .EGRID files with the same path and basename.",
+        " There must exist .INIT and .EGRID files with the same path and basename.",
     )
     parser.add_argument(
         "-c",
@@ -242,11 +242,7 @@ def df2res_editnnc(
     string = ""
     res2df_header = (
         "Output file printed by res2df.nnc"
-        + " "
-        + __version__
-        + "\n"
-        + " at "
-        + str(datetime.datetime.now())
+        " " + __version__ + "\n" + " at " + str(datetime.datetime.now())
     )
     if not nocomments:
         string += comment_formatter(res2df_header)
@@ -280,7 +276,7 @@ def df2res_editnnc(
     return string
 
 
-def nnc_main(args) -> None:
+def nnc_main(args: argparse.Namespace) -> None:
     """Command line access point from main() or from res2csv via subparser"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
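The two hunks above drop explicit `+` between adjacent string literals; Python concatenates adjacent literals at compile time, so behaviour is unchanged. A minimal demonstration:

```python
old_style = (
    "Name of the .DATA input file for the reservoir simulator."
    + " There must exist .INIT and .EGRID files with the same path and basename."
)
new_style = (
    "Name of the .DATA input file for the reservoir simulator."
    " There must exist .INIT and .EGRID files with the same path and basename."
)
assert old_style == new_style
```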
res2df/parameters.py
CHANGED

@@ -47,7 +47,7 @@ def find_parameter_files(
         filebase + ".txt",
         filebase,
     ]
-    paths_to_check: list[Path] = [Path("."), Path(".."), Path("..") / Path("..")]
+    paths_to_check: list[Path] = [Path(), Path(".."), Path("..") / Path("..")]
     foundfiles = []
     for path in paths_to_check:
         for fname in files_to_lookfor:
@@ -147,7 +147,7 @@ def load(filename: str | Path) -> dict[str, Any]:
     if not params_dict:
         try:
             logger.debug("Trying to parse %s with json.load()", filename)
-            with open(filename, encoding="utf-8") as f_handle:
+            with Path(filename).open(encoding="utf-8") as f_handle:
                 params_dict = json.load(f_handle)
                 assert isinstance(params_dict, dict)
             logger.debug(" - ok, parsed as yaml")
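Both hunks are pathlib migrations: `Path()` already denotes the current directory, and `Path.open()` replaces the `open()` builtin. A small sketch; `parameters.json` is a made-up filename that must exist for the `open` call to succeed:

```python
import json
from pathlib import Path

# Path() is the idiomatic spelling of the current directory:
assert Path() == Path(".")
paths_to_check = [Path(), Path(".."), Path("..") / Path("..")]

# Path.open() instead of the open() builtin, with explicit encoding:
with Path("parameters.json").open(encoding="utf-8") as f_handle:
    params = json.load(f_handle)
```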
res2df/pillars.py
CHANGED

@@ -109,7 +109,7 @@ def df(
 
     aggregators = {
         key: AGGREGATORS[key.split("@")[0]]
-        for key in grid_df
+        for key in grid_df.columns
         if key.split("@")[0] in AGGREGATORS
     }
 
@@ -327,7 +327,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist .INIT and .EGRID files with the same path and basename.",
+        " There must exist .INIT and .EGRID files with the same path and basename.",
     )
     parser.add_argument(
         "--region",
@@ -406,7 +406,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def pillars_main(args) -> None:
+def pillars_main(args: argparse.Namespace) -> None:
     """This is the command line API"""
     logger = getLogger_res2csv(__name__, vars(args))
 
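The first hunk makes the dict comprehension iterate `grid_df.columns` explicitly; iterating a DataFrame directly yields the same column labels, but the intent is clearer. In miniature, with made-up column names:

```python
import pandas as pd

grid_df = pd.DataFrame({"PORV@2000-01-01": [1.0], "X": [0.0]})

implicit = [key for key in grid_df]          # iterates column labels
explicit = [key for key in grid_df.columns]  # same result, clearer intent
assert implicit == explicit
```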
res2df/pvt.py
CHANGED

@@ -5,11 +5,12 @@ Data can be extracted from a complete deck or from individual files.
 """
 
 import argparse
-import contextlib
 import logging
 from pathlib import Path
 from typing import cast
 
+# Needed for mypy
+import opm.io
 import pandas as pd
 
 from .common import (
@@ -25,11 +26,6 @@ from .inferdims import DIMS_POS, inject_xxxdims_ntxxx
 from .res2csvlogger import getLogger_res2csv
 from .resdatafiles import ResdataFiles
 
-with contextlib.suppress(ImportError):
-    # Needed for mypy
-    import opm.io
-
-
 logger: logging.Logger = logging.getLogger(__name__)
 
 SUPPORTED_KEYWORDS: list[str] = [
@@ -256,7 +252,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator,"
-        + " or file with PVT keywords.",
+        " or file with PVT keywords.",
     )
     parser.add_argument(
         "-o",
@@ -288,7 +284,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return common_fill_reverse_parser(parser, "PVT", "pvt.inc")
 
 
-def pvt_main(args) -> None:
+def pvt_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility for Eclipse to CSV"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
@@ -320,7 +316,7 @@ def pvt_main(args) -> None:
     )
 
 
-def pvt_reverse_main(args) -> None:
+def pvt_reverse_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility for CSV to simulator
     :term:`deck`"""
     logger = getLogger_res2csv(__name__, vars(args))
@@ -469,7 +465,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
     subset["PVTNUM"] = 1
     subset = subset.set_index("PVTNUM").sort_index()
 
-    def _pvtg_pvtnum(dframe):
+    def _pvtg_pvtnum(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents for
         PVTG-data with a specific PVTNUM"""
         string = ""
@@ -478,7 +474,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
             string += _pvtg_pvtnum_pg(dframe[dframe.index == p_gas])
         return string + "/\n"
 
-    def _pvtg_pvtnum_pg(dframe):
+    def _pvtg_pvtnum_pg(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents for
         PVTG-data with a particular gas phase pressure"""
         string = ""
@@ -486,6 +482,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
         p_gas = dframe.index.to_numpy()[0]
         string += f"{p_gas:20.7f} "
         for rowidx, row in dframe.reset_index().iterrows():
+            rowidx = cast(int, rowidx)
             indent = "\n" + " " * 22 if rowidx > 0 else ""
             string += (
                 indent
@@ -523,7 +520,7 @@ def df2res_pvdg(dframe: pd.DataFrame, comment: str | None = None) -> str:
         return ""
     subset["PVTNUM"] = 1
 
-    def _pvdg_pvtnum(dframe):
+    def _pvdg_pvtnum(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents for
         PVDG-data with a specific PVTNUM
 
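The `cast(int, rowidx)` line added above exists only for the type checker: `DataFrame.iterrows()` is annotated to yield a `Hashable` index, while the indentation logic needs an `int`. `cast()` is a no-op at runtime. A self-contained sketch with made-up data:

```python
from typing import cast

import pandas as pd

dframe = pd.DataFrame({"RS": [100.0, 120.0]})
for rowidx, row in dframe.reset_index().iterrows():
    rowidx = cast(int, rowidx)  # narrow Hashable -> int for the type checker
    indent = "\n" + " " * 22 if rowidx > 0 else ""
```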
res2df/py.typed
ADDED

(empty file)
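`py.typed` is an empty PEP 561 marker file; shipping it in the wheel tells type checkers to trust res2df's inline annotations. A downstream consumer then gets checked types from calls like the one below; `MYCASE.DATA` is a placeholder path:

```python
from res2df.resdatafiles import ResdataFiles

# With py.typed present, mypy resolves this to dict[int, str] per the
# get_zonemap() annotation added in this release:
zonemap: dict[int, str] = ResdataFiles("MYCASE.DATA").get_zonemap()
```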
res2df/res2csv.py
CHANGED

@@ -24,7 +24,7 @@ def get_parser() -> argparse.ArgumentParser:
         "--version", action="version", version=f"%(prog)s {__version__}"
     )
 
-    subparsers = parser.add_subparsers(
+    subparsers = parser.add_subparsers(
         required=True,
         dest="subcommand",
         parser_class=argparse.ArgumentParser,
@@ -203,9 +203,7 @@ def get_parser() -> argparse.ArgumentParser:
     for submodule, subparser in subparsers_dict.items():
         # Use the submodule's fill_parser() to add the submodule specific
         # arguments:
-        importlib.import_module("res2df." + submodule).fill_parser(
-            subparser
-        )
+        importlib.import_module("res2df." + submodule).fill_parser(subparser)
 
     # Add empty placeholders, this looks strange but is needed for the
     # ERT forward model frontend, where non-used options must be supplied
@@ -226,7 +224,7 @@ def get_parser() -> argparse.ArgumentParser:
 
 
 def run_subparser_main(
-    args,
+    args: argparse.Namespace,
     submodule: str,
     parser: argparse.ArgumentParser | None = None,
 ) -> None:
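This release annotates all the `*_main` entry points with `argparse.Namespace`, matching how `run_subparser_main` receives parsed arguments. The pattern in isolation; the program name and argument value are illustrative only:

```python
import argparse


def example_main(args: argparse.Namespace) -> None:
    print(args.DATAFILE)


parser = argparse.ArgumentParser(prog="res2csv-example")
parser.add_argument("DATAFILE")
example_main(parser.parse_args(["MYCASE.DATA"]))
```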
res2df/resdatafiles.py
CHANGED

@@ -6,13 +6,7 @@ import os
 from pathlib import Path
 from typing import Any
 
-try:
-    import opm.io
-
-    HAVE_OPM = True
-except ImportError:
-    HAVE_OPM = False
-
+import opm.io
 from resdata.grid import Grid
 from resdata.rd_util import FileMode
 from resdata.resfile import ResdataFile
@@ -22,21 +16,20 @@ from .common import convert_lyrlist_to_zonemap, parse_lyrfile
 
 logger = logging.getLogger(__name__)
 
-if HAVE_OPM:
-    # Default parse option to opm.io for a very permissive parsing
-    OPMIOPARSER_RECOVERY: list[tuple[str, Any]] = [
-        ("PARSE_EXTRA_DATA", opm.io.action.ignore),
-        ("PARSE_EXTRA_RECORDS", opm.io.action.ignore),
-        ("PARSE_INVALID_KEYWORD_COMBINATION", opm.io.action.ignore),
-        ("PARSE_MISSING_DIMS_KEYWORD", opm.io.action.ignore),
-        ("PARSE_MISSING_INCLUDE", opm.io.action.ignore),
-        ("PARSE_MISSING_SECTIONS", opm.io.action.ignore),
-        ("PARSE_RANDOM_SLASH", opm.io.action.ignore),
-        ("PARSE_RANDOM_TEXT", opm.io.action.ignore),
-        ("PARSE_UNKNOWN_KEYWORD", opm.io.action.ignore),
-        ("SUMMARY_UNKNOWN_GROUP", opm.io.action.ignore),
-        ("UNSUPPORTED_*", opm.io.action.ignore),
-    ]
+# Default parse option to opm.io for a very permissive parsing
+OPMIOPARSER_RECOVERY: list[tuple[str, Any]] = [
+    ("PARSE_EXTRA_DATA", opm.io.action.ignore),
+    ("PARSE_EXTRA_RECORDS", opm.io.action.ignore),
+    ("PARSE_INVALID_KEYWORD_COMBINATION", opm.io.action.ignore),
+    ("PARSE_MISSING_DIMS_KEYWORD", opm.io.action.ignore),
+    ("PARSE_MISSING_INCLUDE", opm.io.action.ignore),
+    ("PARSE_MISSING_SECTIONS", opm.io.action.ignore),
+    ("PARSE_RANDOM_SLASH", opm.io.action.ignore),
+    ("PARSE_RANDOM_TEXT", opm.io.action.ignore),
+    ("PARSE_UNKNOWN_KEYWORD", opm.io.action.ignore),
+    ("SUMMARY_UNKNOWN_GROUP", opm.io.action.ignore),
+    ("UNSUPPORTED_*", opm.io.action.ignore),
+]
 
 
 class ResdataFiles:
@@ -52,7 +45,7 @@ class ResdataFiles:
     it should be loaded or served from cache.
     """
 
-    def __init__(self, eclbase):
+    def __init__(self, eclbase: str | Path) -> None:
         # eclbase might be a a Posix path object
         eclbase = str(eclbase)
 
@@ -211,7 +204,7 @@ class ResdataFiles:
         self._rstfile = None
         self._rftfile = None
 
-    def get_zonemap(self, filename=None):
+    def get_zonemap(self, filename: str | None = None) -> dict[int, str]:
         """Return a dictionary from (int) K layers in the simgrid to strings
 
         Typical usage is to map from grid layer to zone names.
@@ -243,15 +236,15 @@
         if not Path(filename).is_absolute():
             fullpath = Path(self.get_path()) / filename
         else:
-            fullpath = filename
-        if not os.path.isfile(fullpath):
+            fullpath = Path(filename)
+        if not fullpath.is_file():
             if filename_defaulted:
                 # No warnings when the default filename is not there.
                 return {}
             logger.warning("Zonefile %s not found, ignoring", fullpath)
             return {}
         lyrlist = parse_lyrfile(fullpath)
-        return convert_lyrlist_to_zonemap(lyrlist)
+        return convert_lyrlist_to_zonemap(lyrlist) or {}
 
 
 def rreplace(pat: str, sub: str, string: str) -> str:
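For context, a recovery list like `OPMIOPARSER_RECOVERY` is handed to opm.io's parser so parsing continues past deck errors. A hedged sketch of typical usage; the one-keyword deck string is made up, and exact opm.io signatures may vary between versions:

```python
import opm.io

# One permissive rule, same shape as the OPMIOPARSER_RECOVERY entries above:
recovery = [("PARSE_RANDOM_TEXT", opm.io.action.ignore)]
parsecontext = opm.io.ParseContext(recovery)
deck = opm.io.Parser().parse_string("RUNSPEC\n", parsecontext)
```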
res2df/rft.py
CHANGED

@@ -667,7 +667,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def rft_main(args) -> None:
+def rft_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility"""
     logger = getLogger_res2csv(__name__, vars(args))
     if args.DATAFILE.endswith(".RFT"):
res2df/satfunc.py
CHANGED

@@ -14,15 +14,12 @@ TABDIMS or to supply the satnumcount directly to avoid possible bugs.
 """
 
 import argparse
-import contextlib
 import logging
 from pathlib import Path
 
+import opm.io
 import pandas as pd
 
-with contextlib.suppress(ImportError):
-    import opm.io
-
 from .common import (
     comment_formatter,
     handle_wanted_keywords,
@@ -106,15 +103,14 @@ def df(
 
     wanted_keywords = handle_wanted_keywords(keywords, deck, SUPPORTED_KEYWORDS)
 
-    frames = []
-    for keyword in wanted_keywords:
-        frames.append(
-            interpolate_defaults(
-                keyworddata_to_df(
-                    deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM"
-                ).assign(KEYWORD=keyword)
-            )
+    frames = [
+        interpolate_defaults(
+            keyworddata_to_df(
+                deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM"
+            ).assign(KEYWORD=keyword)
         )
+        for keyword in wanted_keywords
+    ]
     nonempty_frames = [frame for frame in frames if not frame.empty]
     if nonempty_frames:
         dframe = pd.concat(nonempty_frames, axis=0, sort=False, ignore_index=True)
@@ -170,7 +166,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of .DATA input file for the reservoir simulator,"
-        + " or file with saturation functions.",
+        " or file with saturation functions.",
     )
     parser.add_argument(
         "-o",
@@ -197,7 +193,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return common_fill_reverse_parser(parser, "SWOF, SGOF++", "relperm.inc")
 
 
-def satfunc_main(args) -> None:
+def satfunc_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
@@ -228,7 +224,7 @@ def satfunc_main(args) -> None:
     )
 
 
-def satfunc_reverse_main(args) -> None:
+def satfunc_reverse_main(args: argparse.Namespace) -> None:
     """For command line utility for CSV to resdata"""
     logger = getLogger_res2csv(__name__, vars(args))
     satfunc_df = pd.read_csv(args.csvfile)
@@ -368,7 +364,7 @@ def _df2res_satfuncs(
     subset = subset.set_index("SATNUM").sort_index()
 
     # Make a function that is to be called for each SATNUM
-    def _df2res_satfuncs_satnum(keyword, dframe):
+    def _df2res_satfuncs_satnum(keyword: str, dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents
         for one saturation function for one specific SATNUM"""
         col_headers = RENAMERS[keyword]["DATA"]
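The `df()` hunk rewrites an append loop as a list comprehension; the existing skip-empty-frames logic is untouched. The same shape in miniature, with made-up frames:

```python
import pandas as pd

frames = [pd.DataFrame({"SW": [0.1]}), pd.DataFrame()]  # one empty frame
nonempty_frames = [frame for frame in frames if not frame.empty]
if nonempty_frames:
    dframe = pd.concat(nonempty_frames, axis=0, sort=False, ignore_index=True)
```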
res2df/summary.py
CHANGED

@@ -80,7 +80,7 @@ def _ensure_date_or_none(some_date: str | dt.date | None) -> dt.date | None:
     if not some_date:
         return None
     if isinstance(some_date, str):
-        return dateutil.parser.parse(some_date).date()
+        return dateutil.parser.parse(some_date).date()
     raise TypeError(f"some_date must be a string or a date, got {some_date}")
 
 
@@ -104,7 +104,7 @@ def _crop_datelist(
     Returns:
         list of datetimes.
     """
-    datetimes: list[dt.date] | list[dt.datetime] = []
+    datetimes: list[dt.date] | list[dt.datetime] = []
     if freq == FREQ_RAW:
         datetimes = summarydates
         datetimes.sort()
@@ -153,9 +153,7 @@ def _fallback_date_roll(rollme: dt.datetime, direction: str, freq: str) -> dt.datetime:
             return dt.datetime(year=rollme.year, month=rollme.month, day=1)
         return dt.datetime(
             year=rollme.year, month=rollme.month, day=1
-        ) + dateutil.relativedelta.relativedelta(
-            months=1
-        )
+        ) + dateutil.relativedelta.relativedelta(months=1)
         return dt.datetime(year=rollme.year, month=rollme.month, day=1)
 
     raise ValueError(
@@ -188,7 +186,7 @@ def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> list[dt.date]:
         enddatetime = dt.datetime.combine(end, dt.datetime.min.time())
         while date <= enddatetime:
             dates.append(date)
-            date += dateutil.relativedelta.relativedelta(months=1)
+            date += dateutil.relativedelta.relativedelta(months=1)
         return dates
     raise ValueError("Unsupported frequency for datetimes beyond year 2262")
 
@@ -238,7 +236,7 @@ def resample_smry_dates(
 
     # In case freq is an ISO-date(time)-string, interpret as such:
     try:
-        parseddate = dateutil.parser.isoparse(freq)
+        parseddate = dateutil.parser.isoparse(freq)
        return [parseddate]
     except ValueError:
         # freq is a frequency string or datetime.date (or similar)
@@ -301,7 +299,7 @@ def df(
     params: bool = False,
     paramfile: str | None = None,
     datetime: bool = False,  # A very poor choice of argument name [pylint]
-):
+) -> pd.DataFrame:
     """
     Extract data from UNSMRY as Pandas dataframes.
 
@@ -623,10 +621,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
         # Do not use pd.Series.apply() here, Pandas would try to convert it to
         # datetime64[ns] which is limited at year 2262.
         dframe["DATE"] = pd.Series(
-            [
-                dateutil.parser.parse(datestr)  # type: ignore
-                for datestr in dframe["DATE"]
-            ],
+            [dateutil.parser.parse(datestr) for datestr in dframe["DATE"]],
             dtype="object",
             index=dframe.index,
         )
@@ -702,7 +697,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist a UNSMRY file with the same path and basename.",
+        " There must exist a UNSMRY file with the same path and basename.",
     )
     parser.add_argument(
         "--time_index",
@@ -794,7 +789,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def summary_main(args) -> None:
+def summary_main(args: argparse.Namespace) -> None:
     """Read summary data from disk and write CSV back to disk"""
     logger = getLogger_res2csv(__name__, vars(args))
     eclbase = (
@@ -824,7 +819,7 @@ def summary_main(args) -> None:
     write_dframe_stdout_file(sum_df, args.output, index=True, caller_logger=logger)
 
 
-def summary_reverse_main(args) -> None:
+def summary_reverse_main(args: argparse.Namespace) -> None:
     """Entry point for usage with "csv2res summary" on the command line"""
     logger = getLogger_res2csv(__name__, vars(args))
 
@@ -835,7 +830,7 @@ def summary_reverse_main(args) -> None:
     eclbase = Path(args.output).name
 
     # Summary.fwrite() can only write to current directory:
-    cwd = os.getcwd()
+    cwd = Path.cwd()
     summary = df2ressum(summary_df, eclbase)
     try:
         os.chdir(outputdir)
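The `_fix_dframe_for_resdata` hunk keeps dates in an object-dtype Series because `datetime64[ns]` overflows after year 2262, as the surrounding comment notes. A small demonstration with a made-up far-future date:

```python
import dateutil.parser
import pandas as pd

dates = pd.Series(
    [dateutil.parser.parse(d) for d in ["2001-05-01", "2263-01-01"]],
    dtype="object",  # plain datetime objects, no datetime64[ns] limit
)
print(dates.iloc[1].year)  # 2263
```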
res2df/trans.py
CHANGED

@@ -6,7 +6,7 @@ Extract transmissibility information from output files as Dataframes.
 import argparse
 import logging
 
-import networkx
+import networkx as nx
 import pandas as pd
 
 from .common import write_dframe_stdout_file
@@ -221,12 +221,12 @@ def df(
     return trans_df
 
 
-def make_nx_graph(resdatafiles: ResdataFiles, region: str = "FIPNUM") -> networkx.Graph:
+def make_nx_graph(resdatafiles: ResdataFiles, region: str = "FIPNUM") -> nx.Graph:
     """Construct a networkx graph for the transmissibilities."""
     trans_df = df(resdatafiles, vectors=[region], coords=True, group=True)
     reg1 = region + "1"
     reg2 = region + "2"
-    graph: networkx.Graph = networkx.Graph()
+    graph: nx.Graph = nx.Graph()
     graph.add_weighted_edges_from(
         [tuple(row) for row in trans_df[[reg1, reg2, "TRAN"]].to_numpy()]
     )
@@ -242,7 +242,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist INIT and EGRID files with the same path and basename.",
+        " There must exist INIT and EGRID files with the same path and basename.",
     )
     parser.add_argument("--vectors", nargs="+", help="Extra INIT vectors to be added")
     parser.add_argument(
@@ -284,7 +284,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def trans_main(args):
+def trans_main(args: argparse.Namespace) -> None:
     """This is the command line API"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
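The trans.py changes only alias `networkx` as `nx` and annotate the graph; the construction itself is unchanged. An equivalent standalone sketch with made-up region pairs and transmissibilities:

```python
import networkx as nx

graph: nx.Graph = nx.Graph()
graph.add_weighted_edges_from([(1, 2, 500.0), (2, 3, 120.0)])
print(list(graph.edges(data=True)))
```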
res2df/version.py
CHANGED

@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '1.3.9'
-__version_tuple__ = version_tuple = (1, 3, 9)
+__version__ = version = '1.3.11'
+__version_tuple__ = version_tuple = (1, 3, 11)
 
 __commit_id__ = commit_id = None