res2df 1.3.7 → 1.3.8 (py3-none-any.whl)
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- res2df/__init__.py +2 -3
- res2df/common.py +79 -75
- res2df/compdat.py +27 -32
- res2df/csv2res.py +5 -9
- res2df/equil.py +24 -29
- res2df/faults.py +2 -7
- res2df/fipreports.py +10 -14
- res2df/grid.py +58 -63
- res2df/gruptree.py +33 -35
- res2df/inferdims.py +6 -9
- res2df/nnc.py +5 -10
- res2df/opmkeywords/__init__.py +0 -0
- res2df/parameters.py +12 -12
- res2df/pillars.py +24 -31
- res2df/pvt.py +29 -34
- res2df/res2csv.py +10 -15
- res2df/res2csvlogger.py +1 -3
- res2df/resdatafiles.py +8 -8
- res2df/rft.py +36 -42
- res2df/satfunc.py +22 -28
- res2df/summary.py +57 -60
- res2df/trans.py +16 -38
- res2df/version.py +2 -2
- res2df/vfp/__init__.py +1 -1
- res2df/vfp/_vfp.py +28 -33
- res2df/vfp/_vfpcommon.py +18 -19
- res2df/vfp/_vfpdefs.py +2 -3
- res2df/vfp/_vfpinj.py +23 -58
- res2df/vfp/_vfpprod.py +28 -64
- res2df/wcon.py +4 -11
- res2df/wellcompletiondata.py +26 -26
- res2df/wellconnstatus.py +4 -5
- {res2df-1.3.7.dist-info → res2df-1.3.8.dist-info}/METADATA +4 -2
- {res2df-1.3.7.dist-info → res2df-1.3.8.dist-info}/RECORD +38 -37
- {res2df-1.3.7.dist-info → res2df-1.3.8.dist-info}/WHEEL +0 -0
- {res2df-1.3.7.dist-info → res2df-1.3.8.dist-info}/entry_points.txt +0 -0
- {res2df-1.3.7.dist-info → res2df-1.3.8.dist-info}/licenses/LICENSE +0 -0
- {res2df-1.3.7.dist-info → res2df-1.3.8.dist-info}/top_level.txt +0 -0
res2df/pvt.py
CHANGED
```diff
@@ -8,7 +8,7 @@ import argparse
 import contextlib
 import logging
 from pathlib import Path
-from typing import Dict, List, Optional, Union
+from typing import cast
 
 import pandas as pd
 
@@ -27,14 +27,12 @@ from .resdatafiles import ResdataFiles
 
 with contextlib.suppress(ImportError):
     # Needed for mypy
-
-    # pylint: disable=unused-import
     import opm.io
 
 
 logger: logging.Logger = logging.getLogger(__name__)
 
-SUPPORTED_KEYWORDS: List[str] = [
+SUPPORTED_KEYWORDS: list[str] = [
     "PVTO",
     "PVDO",
     "PVTG",
@@ -48,7 +46,7 @@ SUPPORTED_KEYWORDS: List[str] = [
 # desired column names in produced dataframes. They also to a certain
 # extent determine the structure of the dataframe, in particular
 # for keywords with arbitrary data amount pr. record (PVTO f.ex)
-RENAMERS: Dict[str, Dict[str, Union[str, List[str]]]] = {}
+RENAMERS: dict[str, dict[str, str | list[str]]] = {}
 
 # P_bub (bubble point pressure) is called PRESSURE for ability to merge with
 # other pressure data from other frames.
@@ -80,7 +78,7 @@ RENAMERS["ROCK"] = {"PREF": "PRESSURE", "COMPRESSIBILITY": "COMPRESSIBILITY"}
 
 
 def pvtw_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract PVTW from a :term:`deck`
 
@@ -97,7 +95,7 @@ def pvtw_fromdeck(
 
 
 def density_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract DENSITY from a :term:`deck`
 
@@ -114,7 +112,7 @@ def density_fromdeck(
 
 
 def rock_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract ROCK from a :term:`deck`
 
@@ -131,7 +129,7 @@ def rock_fromdeck(
 
 
 def pvto_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract PVTO from a :term:`deck`
 
@@ -149,7 +147,7 @@ def pvto_fromdeck(
 
 
 def pvdo_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract PVDO from a :term:`deck`
 
@@ -167,7 +165,7 @@ def pvdo_fromdeck(
 
 
 def pvdg_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract PVDG from a :term:`deck`
 
@@ -185,7 +183,7 @@ def pvdg_fromdeck(
 
 
 def pvtg_fromdeck(
-    deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None
+    deck: "str | opm.opmcommon_python.Deck", ntpvt: int | None = None
 ) -> pd.DataFrame:
     """Extract PVTG from a :term:`deck`
 
@@ -203,9 +201,9 @@ def pvtg_fromdeck(
 
 
 def df(
-    deck: Union[str, "opm.libopmcommon_python.Deck"],
-    keywords: Optional[List[str]] = None,
-    ntpvt: Optional[int] = None,
+    deck: "str | opm.opmcommon_python.Deck",
+    keywords: list[str] | None = None,
+    ntpvt: int | None = None,
 ) -> pd.DataFrame:
     """Extract all (most) PVT data from a :term:`deck`.
 
@@ -292,9 +290,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
 def pvt_main(args) -> None:
     """Entry-point for module, for command line utility for Eclipse to CSV"""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
     logger.info("Parsed %s", args.DATAFILE)
     if resdatafiles:
@@ -327,9 +323,7 @@ def pvt_main(args) -> None:
 def pvt_reverse_main(args) -> None:
     """Entry-point for module, for command line utility for CSV to simulator
     :term:`deck`"""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
     pvt_df = pd.read_csv(args.csvfile)
     logger.info("Parsed %s", args.csvfile)
     inc_string = df2res(pvt_df, keywords=args.keywords)
@@ -338,9 +332,9 @@ def pvt_reverse_main(args) -> None:
 
 def df2res(
     pvt_df: pd.DataFrame,
-    keywords: Optional[Union[str, List[str]]] = None,
-    comments: Optional[Dict[str, str]] = None,
-    filename: Optional[str] = None,
+    keywords: str | list[str] | None = None,
+    comments: dict[str, str] | None = None,
+    filename: str | None = None,
 ) -> str:
     """Generate resdata :term:`include file` content from PVT dataframes
 
@@ -364,7 +358,7 @@ def df2res(
     )
 
 
-def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_rock(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for ROCK keyword
 
     Args:
@@ -389,7 +383,7 @@ def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     return string + "\n"
 
 
-def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_density(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for DENSITY keyword
 
     Args:
@@ -418,7 +412,7 @@ def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     return string + "\n"
 
 
-def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_pvtw(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for PVTW keyword
 
     PVTW is one line/record with data for a reference pressure
@@ -451,7 +445,7 @@ def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     return string + "\n"
 
 
-def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for PVTG keyword
 
     Args:
@@ -489,7 +483,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     PVTG-data with a particular gas phase pressure"""
     string = ""
     assert len(dframe.index.unique()) == 1
-    p_gas = dframe.index.values[0]
+    p_gas = dframe.index.to_numpy()[0]
     string += f"{p_gas:20.7f} "
     for rowidx, row in dframe.reset_index().iterrows():
         indent = "\n" + " " * 22 if rowidx > 0 else ""
@@ -506,7 +500,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     return string + "\n"
 
 
-def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_pvdg(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for PVDG keyword
 
     This data consists of one table (volumefactor and visosity
@@ -548,13 +542,13 @@ def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
 
     subset = subset.set_index("PVTNUM").sort_index()
     for pvtnum in subset.index.unique():
-        string += "-- PVTNUM: {pvtnum}\n"
+        string += f"-- PVTNUM: {pvtnum}\n"
         string += _pvdg_pvtnum(subset[subset.index == pvtnum])
 
     return string + "\n"
 
 
-def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_pvdo(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for PVDO keyword
 
     Args:
@@ -599,7 +593,7 @@ def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     return string + "\n"
 
 
-def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+def df2res_pvto(dframe: pd.DataFrame, comment: str | None = None) -> str:
     """Create string with :term:`include file` contents for PVTO-data from a dataframe
 
     Args:
@@ -636,9 +630,10 @@ def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
     for PVTO-data for a particular RS"""
     string = ""
     assert len(dframe.index.unique()) == 1
-    rs = dframe.index.values[0]
+    rs = dframe.index.to_numpy()[0]
     string += f"{rs:20.7f} "
     for rowidx, row in dframe.reset_index().iterrows():
+        rowidx = cast(int, rowidx)
         indent = "\n" + " " * 22 if rowidx > 0 else ""
         string += (
             indent
```
res2df/res2csv.py
CHANGED
```diff
@@ -7,8 +7,6 @@ in res2df
 import argparse
 import functools
 import importlib
-import sys
-from typing import Optional
 
 from .__version__ import __version__
 
@@ -26,14 +24,11 @@ def get_parser() -> argparse.ArgumentParser:
         "--version", action="version", version=f"%(prog)s {__version__}"
     )
 
-    if sys.version_info.major == 3 and sys.version_info.minor >= 7:
-        subparsers = parser.add_subparsers(
-            required=True,
-            dest="subcommand",
-            parser_class=argparse.ArgumentParser,
-        )
-    else:
-        subparsers = parser.add_subparsers(parser_class=argparse.ArgumentParser)
+    subparsers = parser.add_subparsers(  # type: ignore
+        required=True,
+        dest="subcommand",
+        parser_class=argparse.ArgumentParser,
+    )
 
     subparsers_dict = {}
     subparsers_dict["grid"] = subparsers.add_parser(
@@ -233,7 +228,7 @@ def get_parser() -> argparse.ArgumentParser:
 def run_subparser_main(
     args,
     submodule: str,
-    parser: Optional[argparse.ArgumentParser] = None,
+    parser: argparse.ArgumentParser | None = None,
 ) -> None:
     """Wrapper for running the subparsers main() function, with
     custom argument handling.
@@ -255,12 +250,12 @@ def run_subparser_main(
         parser: Used for raising errors.
     """
     if "DATAFILE" in args:
-        positionals = list(filter(len, [args.DATAFILE] + args.hiddenemptyplaceholders))
-        args.DATAFILE = "".join([args.DATAFILE] + args.hiddenemptyplaceholders)
+        positionals = list(filter(len, [args.DATAFILE, *args.hiddenemptyplaceholders]))
+        args.DATAFILE = "".join([args.DATAFILE, *args.hiddenemptyplaceholders])
     elif "PRTFILE" in args:
         # Special treatment for the fipreports submodule
-        positionals = list(filter(len, [args.PRTFILE] + args.hiddenemptyplaceholders))
-        args.PRTFILE = "".join([args.PRTFILE] + args.hiddenemptyplaceholders)
+        positionals = list(filter(len, [args.PRTFILE, *args.hiddenemptyplaceholders]))
+        args.PRTFILE = "".join([args.PRTFILE, *args.hiddenemptyplaceholders])
     if len(positionals) > 1 and parser is not None:
         parser.error(f"Unknown argument in {positionals}")
 
```
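The subparser rewrite drops a runtime version gate: `add_subparsers()` has accepted `required=` directly since Python 3.7, so the `sys.version_info` branch and the `import sys` it needed are gone. A standalone sketch of the resulting pattern, with a hypothetical subcommand:

```python
import argparse

parser = argparse.ArgumentParser(prog="demo")
# required=True makes argparse error out when no subcommand is given;
# dest="subcommand" records which one was chosen.
subparsers = parser.add_subparsers(required=True, dest="subcommand")

grid = subparsers.add_parser("grid", help="Extract grid data")
grid.add_argument("DATAFILE")

args = parser.parse_args(["grid", "MYCASE.DATA"])
print(args.subcommand, args.DATAFILE)  # grid MYCASE.DATA
```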
res2df/res2csvlogger.py
CHANGED
```diff
@@ -1,14 +1,12 @@
 import logging
 import sys
-from typing import Dict, Optional, Union
 
 from .constants import MAGIC_STDOUT
 
 
 def getLogger_res2csv(
-    module_name: str = "res2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None
+    module_name: str = "res2df", args_dict: dict[str, str | bool] | None = None
 ) -> logging.Logger:
-    # pylint: disable=invalid-name
     """Provide a custom logger for res2csv and csv2res
 
     Logging output is by default split by logging levels (split between WARNING and
```
res2df/resdatafiles.py
CHANGED
```diff
@@ -4,7 +4,7 @@ import errno
 import logging
 import os
 from pathlib import Path
-from typing import Any, List, Optional, Tuple, Union
+from typing import Any
 
 try:
     import opm.io
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)
 
 if HAVE_OPM:
     # Default parse option to opm.io for a very permissive parsing
-    OPMIOPARSER_RECOVERY: List[Tuple[str, Any]] = [
+    OPMIOPARSER_RECOVERY: list[tuple[str, Any]] = [
         ("PARSE_EXTRA_DATA", opm.io.action.ignore),
         ("PARSE_EXTRA_RECORDS", opm.io.action.ignore),
         ("PARSE_INVALID_KEYWORD_COMBINATION", opm.io.action.ignore),
@@ -39,7 +39,7 @@ if HAVE_OPM:
     ]
 
 
-class ResdataFiles(object):
+class ResdataFiles:
     """
     Class for holding reservoir simulator :term:`output files <output file>`
 
@@ -83,7 +83,7 @@ class ResdataFiles(object):
         """Return the full path to the directory with the .DATA file"""
         return Path(self._eclbase).absolute().parent
 
-    def get_deck(self) -> "opm.libopmcommon_python.Deck":
+    def get_deck(self) -> "opm.opmcommon_python.Deck":
         """Return a opm.io :term:`deck` of the .DATA file"""
         if not self._deck:
             if Path(self._eclbase + ".DATA").is_file():
@@ -98,8 +98,8 @@ class ResdataFiles(object):
 
     @staticmethod
     def str2deck(
-        string: str, parsecontext: Optional[List[Tuple[str, Any]]] = None
-    ) -> "opm.libopmcommon_python.Deck":
+        string: str, parsecontext: list[tuple[str, Any]] | None = None
+    ) -> "opm.opmcommon_python.Deck":
         """Produce a opm.io :term:`deck` from a string, using permissive
         parsing by default"""
         if parsecontext is None:
@@ -107,7 +107,7 @@ class ResdataFiles(object):
         return opm.io.Parser().parse_string(string, parsecontext)
 
     @staticmethod
-    def file2deck(filename: Union[str, Path]) -> "opm.libopmcommon_python.Deck":
+    def file2deck(filename: str | Path) -> "opm.opmcommon_python.Deck":
         """Try to convert standalone files into opm.io Deck objects"""
         return ResdataFiles.str2deck(Path(filename).read_text(encoding="utf-8"))
 
@@ -256,4 +256,4 @@ class ResdataFiles(object):
 
 def rreplace(pat: str, sub: str, string: str) -> str:
     """Variant of str.replace() that only replaces at the end of the string"""
-    return string[0 : -len(pat)] + sub if string.endswith(pat) else string
\ No newline at end of file
+    return string[0 : -len(pat)] + sub if string.endswith(pat) else string
```
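Only the typing spellings and the `opm.opmcommon_python` module path change here; `rreplace` itself is untouched apart from a newline at end of file. Since the helper is shown in full above, a usage sketch (file names invented for illustration) clarifies why it exists: unlike `str.replace()`, it only substitutes a matching suffix.

```python
def rreplace(pat: str, sub: str, string: str) -> str:
    """Variant of str.replace() that only replaces at the end of the string"""
    return string[0 : -len(pat)] + sub if string.endswith(pat) else string


# Only the suffix is replaced; the embedded ".DATA" is left alone:
print(rreplace(".DATA", ".UNSMRY", "MY.DATA.CASE.DATA"))  # MY.DATA.CASE.UNSMRY
# str.replace(".DATA", ".UNSMRY") would also have hit the first occurrence.
```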
res2df/rft.py
CHANGED
```diff
@@ -17,7 +17,8 @@ import argparse
 import collections
 import datetime
 import logging
-from typing import Any, Dict, Iterable, Optional, Set
+from collections.abc import Iterable
+from typing import Any
 
 import numpy as np
 import pandas as pd
@@ -31,8 +32,8 @@ from .resdatafiles import ResdataFiles
 logger: logging.Logger = logging.getLogger(__name__)
 
 # In debug mode, these columns will be exported to three csv files.
-CON_TOPOLOGY_COLS: Set = {"CONIDX", "CONBRNO", "CONSEGNO", "CONNXT", "DEPTH"}
-SEG_TOPOLOGY_COLS: Set = {
+CON_TOPOLOGY_COLS: set = {"CONIDX", "CONBRNO", "CONSEGNO", "CONNXT", "DEPTH"}
+SEG_TOPOLOGY_COLS: set = {
     "SEGIDX",
     "SEGIDX_upstream",
     "SEGBRNO",
@@ -45,7 +46,7 @@ SEG_TOPOLOGY_COLS: Set = {
     "LEAF",
     "SEGDEPTH",
 }
-ICD_TOPOLOGY_COLS: Set = {
+ICD_TOPOLOGY_COLS: set = {
     "ICD_SEGBRNO_upstream",
     "ICD_SEGIDX_upstream",
     "ICD_LEAF",
@@ -83,27 +84,25 @@ def _rftrecords2df(rftfile: ResdataFile) -> pd.DataFrame:
     """
     nav_df = pd.DataFrame(rftfile.headers)
     nav_df.columns = ["recordname", "recordlength", "recordtype"]
-    nav_df["timeindex"] = np.nan
+    mask = nav_df["recordname"].eq("TIME")
+    nav_df["timeindex"] = np.where(mask, nav_df.index, np.nan)
     # the TIME record (in recordname) signifies that the forthcoming records
     # belong to this TIME value, and we make a new column in the header data that
     # tells us the row number for the associated TIME record
-    nav_df.loc[nav_df["recordname"] == "TIME", "timeindex"] = nav_df[
-        nav_df["recordname"] == "TIME"
-    ].index
-    nav_df.ffill(
-        inplace=True
+    nav_df = (
+        nav_df.ffill()
     )  # forward fill (because any record is associated to the previous TIME record)
     nav_df["timeindex"] = nav_df["timeindex"].astype(int)
     logger.info(
         "Located %s RFT headers at %s distinct dates",
-        str(len(nav_df)),
-        str(len(nav_df["timeindex"].unique())),
+        len(nav_df),
+        len(nav_df["timeindex"].unique()),
     )
     nav_df.index.name = "recordidx"
     return nav_df.reset_index()
 
 
-def rftrecords(rftfile: ResdataFile) -> Iterable[Dict[str, Any]]:
+def rftrecords(rftfile: ResdataFile) -> Iterable[dict[str, Any]]:
     """Generator for looping over RFT records in a ResdataFile object.
 
     Each returned RFT record is represented as a dict with the keys:
@@ -115,7 +114,7 @@ def rftrecords(rftfile: ResdataFile) -> Iterable[Dict[str, Any]]:
     navigation_frame = _rftrecords2df(rftfile)
     for timeindex, headers in navigation_frame.groupby("timeindex"):
         headers = headers.set_index("recordname")
-        rftrecord = {}
+        rftrecord: dict[str, Any] = {}
         rftrecord["headers"] = headers
         # All rows in nav_record_df represents the data in the current
         # RFT record
@@ -137,7 +136,7 @@ def rftrecords(rftfile: ResdataFile) -> Iterable[Dict[str, Any]]:
 
 
 def get_con_seg_data(
-    rftrecord: Dict[str, Any], rftfile: ResdataFile, datatype: str
+    rftrecord: dict[str, Any], rftfile: ResdataFile, datatype: str
 ) -> pd.DataFrame:
     """
     Build a dataframe of CON* or SEG* data for a specific RFT record,
@@ -185,9 +184,9 @@ def get_con_seg_data(
     )
 
     # Ensure integer headers are integers:
-    integer_columns = headers[headers["recordtype"] == "INTE"].index.values
+    integer_columns = headers[headers["recordtype"] == "INTE"].index.to_numpy()
     for col in (set(integer_columns) - {"DATE"}).intersection(
-        set(data_headers["recordname"].values)
+        set(data_headers["recordname"].to_numpy())
     ):
         data[col] = data[col].astype(int)
     data[datatype + "IDX"] = data.index + 1  # Add an index that starts with 1
@@ -245,8 +244,7 @@ def process_seg_topology(seg_data: pd.DataFrame) -> pd.DataFrame:
     seg_data["SEGNXT"] = seg_data["SEGNXT"].fillna(value="0").astype(int)
 
     # Outer merge first to add the upstream segment information to every row.
-    merged = pd.merge(
-        seg_data,
+    merged = seg_data.merge(
         seg_data,
         how="left",
         left_on="SEGIDX",
@@ -261,7 +259,7 @@ def process_seg_topology(seg_data: pd.DataFrame) -> pd.DataFrame:
 
     # Now we can determine leaf segments by those with no extra information, since
     # we did an outer merge:
-    merged["LEAF"] = merged["SEGIDX_upstream"].replace(0, np.nan).isnull()
+    merged["LEAF"] = merged["SEGIDX_upstream"].replace(0, np.nan).isna()
 
     # Flag segments that have multiple upstream segments as junctions
     merged["JUNCTION"] = merged["SEGIDX"].duplicated(keep=False)
@@ -300,7 +298,7 @@ def seg2dicttree(seg_data: pd.DataFrame) -> dict:
         if "SEGIDX_upstream" in row and row["SEGIDX_upstream"] > 0:
             edges.append((row["SEGIDX_upstream"], row["SEGIDX"]))
     if not edges:
-        return {seg_data["SEGIDX"].values[0]: {}}
+        return {seg_data["SEGIDX"].to_numpy()[0]: {}}
     for child, parent in edges:
         subtrees[parent][child] = subtrees[child]
 
@@ -325,7 +323,7 @@ def pretty_print_well(seg_data: pd.DataFrame) -> str:
     return str(tree_from_dict(dicttree))
 
 
-def split_seg_icd(seg_data: pd.DataFrame) -> pd.DataFrame:
+def split_seg_icd(seg_data: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]:
     """Split a segment dataframe into a dataframe
     with non-ICD segments and one with.
 
@@ -354,10 +352,12 @@ def split_seg_icd(seg_data: pd.DataFrame) -> pd.DataFrame:
     # STOP: Cannot use this criteria, because junctions due to ICDs
     # are legit.
     # * The segment must be on a branch with only one segment
-    icd_seg_indices = seg_data[seg_data["LEAF"] & seg_data["LONELYSEG"]].index.values
+    icd_seg_indices = seg_data[
+        seg_data["LEAF"] & seg_data["LONELYSEG"]
+    ].index.to_numpy()
     non_icd_seg_indices = seg_data[
         ~(seg_data["LEAF"] & seg_data["LONELYSEG"])
-    ].index.values
+    ].index.to_numpy()
 
     icd_seg_data = seg_data.reindex(icd_seg_indices)
     seg_data = seg_data.reindex(non_icd_seg_indices)
@@ -367,7 +367,7 @@ def split_seg_icd(seg_data: pd.DataFrame) -> pd.DataFrame:
     logger.debug(
         "Found %d ICD segments, indices %s",
         len(icd_seg_data),
-        icd_seg_data["ICD_SEGIDX"].values,
+        icd_seg_data["ICD_SEGIDX"].to_numpy(),
     )
 
     return (seg_data, icd_seg_data)
@@ -375,8 +375,8 @@ def split_seg_icd(seg_data: pd.DataFrame) -> pd.DataFrame:
 
 def merge_icd_seg_conseg(
     con_data: pd.DataFrame,
-    seg_data: Optional[pd.DataFrame] = None,
-    icd_data: Optional[pd.DataFrame] = None,
+    seg_data: pd.DataFrame | None = None,
+    icd_data: pd.DataFrame | None = None,
 ) -> pd.DataFrame:
     """
     Merge ICD segments to the CONxxxxx data. We will be
@@ -425,14 +425,12 @@ def merge_icd_seg_conseg(
     if not icd_data.empty:
         # Merge ICD_* columns onto the dataframe representing reservoir
         # connections.
-        data = pd.merge(con_data, icd_data, left_on="CONSEGNO", right_on="ICD_SEGIDX")
+        data = con_data.merge(icd_data, left_on="CONSEGNO", right_on="ICD_SEGIDX")
 
         # Merge SEGxxxxx to the dataframe with icd's and reservoir connections.
         assert not seg_data.empty
 
-        data = pd.merge(
-            data, seg_data, how="left", left_on="ICD_SEGNXT", right_on="SEGIDX"
-        )
+        data = data.merge(seg_data, how="left", left_on="ICD_SEGNXT", right_on="SEGIDX")
 
     # The merge has potentially included extra rows due to junctions.
     # After ICD merge, we can require that SEGIDX_upstream equals CONSEGNO
@@ -453,9 +451,7 @@ def merge_icd_seg_conseg(
         data = pd.concat(
             [
                 data,
-                pd.merge(
-                    con_data_no_icd, seg_data, left_on="CONSEGNO", right_on="SEGIDX"
-                ),
+                con_data_no_icd.merge(seg_data, left_on="CONSEGNO", right_on="SEGIDX"),
             ],
             sort=False,
         )
@@ -515,8 +511,8 @@ def add_extras(dframe: pd.DataFrame, inplace: bool = True) -> pd.DataFrame:
 
 def df(
     resdatafiles: ResdataFiles,
-    wellname: Optional[str] = None,
-    date: Optional[str] = None,
+    wellname: str | None = None,
+    date: str | None = None,
 ) -> pd.DataFrame:
     """Loop over an RFT file and construct a dataframe representation
     of the data, ordered by well and date.
@@ -549,8 +545,8 @@ def df(
         if "DEPTH" not in headers.index:
             logger.debug(
                 "Well %s has no data to extract at %s",
-                str(rftrecord["wellname"]),
-                str(rftrecord["date"]),
+                rftrecord["wellname"],
+                rftrecord["date"],
             )
             continue
 
@@ -625,7 +621,7 @@ def df(
     # interpreting columns with numbers as strings. An alternative
     # solution that keeps NaN would be to add a second row in the
    # output containing the datatype
-    rftdata_df.fillna(0, inplace=True)
+    rftdata_df = rftdata_df.fillna(0)
 
     # The HOSTGRID data seems often to be empty, check if it is and delete if so:
     if (
@@ -674,9 +670,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
 def rft_main(args) -> None:
     """Entry-point for module, for command line utility"""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
     if args.DATAFILE.endswith(".RFT"):
         # Support the RFT file as an argument also:
         resdatafiles = ResdataFiles(args.DATAFILE.replace(".RFT", "") + ".DATA")
```