res2df 1.3.9-py3-none-any.whl → 1.3.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
res2df/pvt.py CHANGED
@@ -5,11 +5,12 @@ Data can be extracted from a complete deck or from individual files.
 """
 
 import argparse
-import contextlib
 import logging
 from pathlib import Path
 from typing import cast
 
+# Needed for mypy
+import opm.io
 import pandas as pd
 
 from .common import (
@@ -25,11 +26,6 @@ from .inferdims import DIMS_POS, inject_xxxdims_ntxxx
 from .res2csvlogger import getLogger_res2csv
 from .resdatafiles import ResdataFiles
 
-with contextlib.suppress(ImportError):
-    # Needed for mypy
-    import opm.io
-
-
 logger: logging.Logger = logging.getLogger(__name__)
 
 SUPPORTED_KEYWORDS: list[str] = [
@@ -288,7 +284,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar
     return common_fill_reverse_parser(parser, "PVT", "pvt.inc")
 
 
-def pvt_main(args) -> None:
+def pvt_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility for Eclipse to CSV"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
@@ -320,7 +316,7 @@ def pvt_main(args) -> None:
     )
 
 
-def pvt_reverse_main(args) -> None:
+def pvt_reverse_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility for CSV to simulator
    :term:`deck`"""
     logger = getLogger_res2csv(__name__, vars(args))
@@ -469,7 +465,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
     subset["PVTNUM"] = 1
     subset = subset.set_index("PVTNUM").sort_index()
 
-    def _pvtg_pvtnum(dframe):
+    def _pvtg_pvtnum(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents for
         PVTG-data with a specific PVTNUM"""
         string = ""
@@ -478,7 +474,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
         string += _pvtg_pvtnum_pg(dframe[dframe.index == p_gas])
         return string + "/\n"
 
-    def _pvtg_pvtnum_pg(dframe):
+    def _pvtg_pvtnum_pg(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents for
         PVTG-data with a particular gas phase pressure"""
         string = ""
@@ -486,6 +482,7 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: str | None = None) -> str:
         p_gas = dframe.index.to_numpy()[0]
         string += f"{p_gas:20.7f} "
         for rowidx, row in dframe.reset_index().iterrows():
+            rowidx = cast(int, rowidx)
             indent = "\n" + " " * 22 if rowidx > 0 else ""
             string += (
                 indent
@@ -523,7 +520,7 @@ def df2res_pvdg(dframe: pd.DataFrame, comment: str | None = None) -> str:
         return ""
     subset["PVTNUM"] = 1
 
-    def _pvdg_pvtnum(dframe):
+    def _pvdg_pvtnum(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents for
         PVDG-data with a specific PVTNUM
 
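The new `cast(int, rowidx)` line exists because, under the pandas-stubs type annotations, `DataFrame.iterrows()` yields a `Hashable` index, so `rowidx > 0` does not type-check without narrowing. A minimal standalone sketch of the pattern (illustrative data, not res2df code):

```python
# Sketch: narrow the Hashable index from iterrows() to int for comparison.
from typing import cast

import pandas as pd

dframe = pd.DataFrame({"PG": [1.0, 2.0], "BG": [0.9, 0.8]})  # illustrative data
for rowidx, row in dframe.reset_index().iterrows():
    rowidx = cast(int, rowidx)  # Hashable -> int, as in the diff above
    indent = "\n" + " " * 22 if rowidx > 0 else ""
    print(repr(indent), row["PG"])
```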
res2df/py.typed ADDED
Empty file (marker file, no content).
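`py.typed` is a PEP 561 marker: an empty file whose presence tells type checkers that res2df ships inline type annotations. A hedged downstream sketch, assuming res2df 1.3.10 is installed:

```python
# With py.typed present, mypy reads res2df's inline annotations instead of
# treating the package as untyped (PEP 561). This uses only signatures
# visible elsewhere in this diff.
from res2df.resdatafiles import ResdataFiles

def zonemap_for(eclbase: str) -> dict[int, str]:
    # get_zonemap() is annotated "-> dict[int, str]" in 1.3.10
    return ResdataFiles(eclbase).get_zonemap()
```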
res2df/res2csv.py CHANGED
@@ -24,7 +24,7 @@ def get_parser() -> argparse.ArgumentParser:
         "--version", action="version", version=f"%(prog)s {__version__}"
     )
 
-    subparsers = parser.add_subparsers(  # type: ignore
+    subparsers = parser.add_subparsers(
         required=True,
         dest="subcommand",
         parser_class=argparse.ArgumentParser,
@@ -203,9 +203,7 @@ def get_parser() -> argparse.ArgumentParser:
     for submodule, subparser in subparsers_dict.items():
         # Use the submodule's fill_parser() to add the submodule specific
         # arguments:
-        importlib.import_module("res2df." + submodule).fill_parser(  # type: ignore
-            subparser
-        )
+        importlib.import_module("res2df." + submodule).fill_parser(subparser)
 
     # Add empty placeholders, this looks strange but is needed for the
     # ERT forward model frontend, where non-used options must be supplied
@@ -226,7 +224,7 @@ def get_parser() -> argparse.ArgumentParser:
 
 
 def run_subparser_main(
-    args,
+    args: argparse.Namespace,
     submodule: str,
     parser: argparse.ArgumentParser | None = None,
 ) -> None:
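Both removed `# type: ignore` comments in this file sat on `argparse` calls; current typeshed stubs accept `required=` and `dest=` on `add_subparsers()`, so the suppressions are no longer needed. A standalone sketch of the same pattern (the subcommand and argument below are illustrative, not the full res2csv CLI):

```python
# Sketch of the subparser pattern used in get_parser().
import argparse

parser = argparse.ArgumentParser(prog="res2csv")
subparsers = parser.add_subparsers(
    required=True,  # accepted by current typeshed stubs, no ignore needed
    dest="subcommand",
    parser_class=argparse.ArgumentParser,
)
sub = subparsers.add_parser("summary", help="Extract summary data")
sub.add_argument("DATAFILE")
args = parser.parse_args(["summary", "MYCASE.DATA"])
print(args.subcommand, args.DATAFILE)
```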
res2df/resdatafiles.py CHANGED
@@ -6,13 +6,7 @@ import os
 from pathlib import Path
 from typing import Any
 
-try:
-    import opm.io
-
-    HAVE_OPM = True
-except ImportError:
-    HAVE_OPM = False
-
+import opm.io
 from resdata.grid import Grid
 from resdata.rd_util import FileMode
 from resdata.resfile import ResdataFile
@@ -22,21 +16,20 @@ from .common import convert_lyrlist_to_zonemap, parse_lyrfile
 
 logger = logging.getLogger(__name__)
 
-if HAVE_OPM:
-    # Default parse option to opm.io for a very permissive parsing
-    OPMIOPARSER_RECOVERY: list[tuple[str, Any]] = [
-        ("PARSE_EXTRA_DATA", opm.io.action.ignore),
-        ("PARSE_EXTRA_RECORDS", opm.io.action.ignore),
-        ("PARSE_INVALID_KEYWORD_COMBINATION", opm.io.action.ignore),
-        ("PARSE_MISSING_DIMS_KEYWORD", opm.io.action.ignore),
-        ("PARSE_MISSING_INCLUDE", opm.io.action.ignore),
-        ("PARSE_MISSING_SECTIONS", opm.io.action.ignore),
-        ("PARSE_RANDOM_SLASH", opm.io.action.ignore),
-        ("PARSE_RANDOM_TEXT", opm.io.action.ignore),
-        ("PARSE_UNKNOWN_KEYWORD", opm.io.action.ignore),
-        ("SUMMARY_UNKNOWN_GROUP", opm.io.action.ignore),
-        ("UNSUPPORTED_*", opm.io.action.ignore),
-    ]
+# Default parse option to opm.io for a very permissive parsing
+OPMIOPARSER_RECOVERY: list[tuple[str, Any]] = [
+    ("PARSE_EXTRA_DATA", opm.io.action.ignore),
+    ("PARSE_EXTRA_RECORDS", opm.io.action.ignore),
+    ("PARSE_INVALID_KEYWORD_COMBINATION", opm.io.action.ignore),
+    ("PARSE_MISSING_DIMS_KEYWORD", opm.io.action.ignore),
+    ("PARSE_MISSING_INCLUDE", opm.io.action.ignore),
+    ("PARSE_MISSING_SECTIONS", opm.io.action.ignore),
+    ("PARSE_RANDOM_SLASH", opm.io.action.ignore),
+    ("PARSE_RANDOM_TEXT", opm.io.action.ignore),
+    ("PARSE_UNKNOWN_KEYWORD", opm.io.action.ignore),
+    ("SUMMARY_UNKNOWN_GROUP", opm.io.action.ignore),
+    ("UNSUPPORTED_*", opm.io.action.ignore),
+]
 
 
 class ResdataFiles:
@@ -52,7 +45,7 @@ class ResdataFiles:
     it should be loaded or served from cache.
     """
 
-    def __init__(self, eclbase):
+    def __init__(self, eclbase: str | Path) -> None:
         # eclbase might be a a Posix path object
         eclbase = str(eclbase)
 
@@ -211,7 +204,7 @@
         self._rstfile = None
         self._rftfile = None
 
-    def get_zonemap(self, filename=None):
+    def get_zonemap(self, filename: str | None = None) -> dict[int, str]:
         """Return a dictionary from (int) K layers in the simgrid to strings
 
         Typical usage is to map from grid layer to zone names.
@@ -243,15 +236,15 @@
         if not Path(filename).is_absolute():
             fullpath = Path(self.get_path()) / filename
         else:
-            fullpath = filename
-        if not Path(fullpath).is_file():
+            fullpath = Path(filename)
+        if not fullpath.is_file():
             if filename_defaulted:
                 # No warnings when the default filename is not there.
                 return {}
             logger.warning("Zonefile %s not found, ignoring", fullpath)
             return {}
         lyrlist = parse_lyrfile(fullpath)
-        return convert_lyrlist_to_zonemap(lyrlist)
+        return convert_lyrlist_to_zonemap(lyrlist) or {}
 
 
 def rreplace(pat: str, sub: str, string: str) -> str:
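Two small correctness fixes ride along with the annotations in `get_zonemap()`: `fullpath` is now a `Path` in both branches, and a possible falsy return from `convert_lyrlist_to_zonemap()` is normalized so the annotated `dict[int, str]` return always holds. A standalone sketch (helper name is illustrative):

```python
# Sketch of both get_zonemap() fixes.
from pathlib import Path

def resolve(basedir: str, filename: str) -> Path:
    if not Path(filename).is_absolute():
        return Path(basedir) / filename
    return Path(filename)  # previously left as str, needing re-wrapping for .is_file()

maybe_zonemap: dict[int, str] | None = None  # e.g. the converter returned nothing
zonemap: dict[int, str] = maybe_zonemap or {}  # "or {}" normalizes None to empty dict
print(resolve("/tmp", "zones.lyr"), zonemap)
```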
res2df/rft.py CHANGED
@@ -667,7 +667,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def rft_main(args) -> None:
+def rft_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility"""
     logger = getLogger_res2csv(__name__, vars(args))
     if args.DATAFILE.endswith(".RFT"):
res2df/satfunc.py CHANGED
@@ -14,15 +14,12 @@ TABDIMS or to supply the satnumcount directly to avoid possible bugs.
 """
 
 import argparse
-import contextlib
 import logging
 from pathlib import Path
 
+import opm.io
 import pandas as pd
 
-with contextlib.suppress(ImportError):
-    import opm.io
-
 from .common import (
     comment_formatter,
     handle_wanted_keywords,
@@ -197,7 +194,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar
     return common_fill_reverse_parser(parser, "SWOF, SGOF++", "relperm.inc")
 
 
-def satfunc_main(args) -> None:
+def satfunc_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
@@ -228,7 +225,7 @@ def satfunc_main(args) -> None:
     )
 
 
-def satfunc_reverse_main(args) -> None:
+def satfunc_reverse_main(args: argparse.Namespace) -> None:
     """For command line utility for CSV to resdata"""
     logger = getLogger_res2csv(__name__, vars(args))
     satfunc_df = pd.read_csv(args.csvfile)
@@ -368,7 +365,7 @@ def _df2res_satfuncs(
     subset = subset.set_index("SATNUM").sort_index()
 
     # Make a function that is to be called for each SATNUM
-    def _df2res_satfuncs_satnum(keyword, dframe):
+    def _df2res_satfuncs_satnum(keyword: str, dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents
         for one saturation function for one specific SATNUM"""
         col_headers = RENAMERS[keyword]["DATA"]
res2df/summary.py CHANGED
@@ -80,7 +80,7 @@ def _ensure_date_or_none(some_date: str | dt.date | None) -> dt.date | None:
     if not some_date:
         return None
     if isinstance(some_date, str):
-        return dateutil.parser.parse(some_date).date()  # type: ignore
+        return dateutil.parser.parse(some_date).date()
     raise TypeError(f"some_date must be a string or a date, got {some_date}")
 
 
@@ -104,7 +104,7 @@ def _crop_datelist(
     Returns:
         list of datetimes.
     """
-    datetimes: list[dt.date] | list[dt.datetime] = []  # type: ignore
+    datetimes: list[dt.date] | list[dt.datetime] = []
     if freq == FREQ_RAW:
         datetimes = summarydates
         datetimes.sort()
@@ -153,9 +153,7 @@ def _fallback_date_roll(rollme: dt.datetime, direction: str, freq: str) -> dt.da
             return dt.datetime(year=rollme.year, month=rollme.month, day=1)
         return dt.datetime(
             year=rollme.year, month=rollme.month, day=1
-        ) + dateutil.relativedelta.relativedelta(  # type: ignore
-            months=1
-        )
+        ) + dateutil.relativedelta.relativedelta(months=1)
     return dt.datetime(year=rollme.year, month=rollme.month, day=1)
 
     raise ValueError(
@@ -188,7 +186,7 @@ def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> list[dt.dat
         enddatetime = dt.datetime.combine(end, dt.datetime.min.time())
         while date <= enddatetime:
             dates.append(date)
-            date += dateutil.relativedelta.relativedelta(months=1)  # type: ignore
+            date += dateutil.relativedelta.relativedelta(months=1)
         return dates
     raise ValueError("Unsupported frequency for datetimes beyond year 2262")
 
@@ -238,7 +236,7 @@ def resample_smry_dates(
 
     # In case freq is an ISO-date(time)-string, interpret as such:
     try:
-        parseddate = dateutil.parser.isoparse(freq)  # type: ignore
+        parseddate = dateutil.parser.isoparse(freq)
         return [parseddate]
     except ValueError:
         # freq is a frequency string or datetime.date (or similar)
@@ -301,7 +299,7 @@ def df(
     params: bool = False,
     paramfile: str | None = None,
     datetime: bool = False,  # A very poor choice of argument name [pylint]
-):
+) -> pd.DataFrame:
     """
     Extract data from UNSMRY as Pandas dataframes.
 
@@ -623,10 +621,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
     # Do not use pd.Series.apply() here, Pandas would try to convert it to
     # datetime64[ns] which is limited at year 2262.
     dframe["DATE"] = pd.Series(
-        [
-            dateutil.parser.parse(datestr)  # type: ignore
-            for datestr in dframe["DATE"]
-        ],
+        [dateutil.parser.parse(datestr) for datestr in dframe["DATE"]],
         dtype="object",
         index=dframe.index,
     )
@@ -794,7 +789,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar
     return parser
 
 
-def summary_main(args) -> None:
+def summary_main(args: argparse.Namespace) -> None:
     """Read summary data from disk and write CSV back to disk"""
     logger = getLogger_res2csv(__name__, vars(args))
     eclbase = (
@@ -824,7 +819,7 @@ def summary_main(args) -> None:
     write_dframe_stdout_file(sum_df, args.output, index=True, caller_logger=logger)
 
 
-def summary_reverse_main(args) -> None:
+def summary_reverse_main(args: argparse.Namespace) -> None:
     """Entry point for usage with "csv2res summary" on the command line"""
     logger = getLogger_res2csv(__name__, vars(args))
 
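Every `# type: ignore` dropped in this file sits on a `dateutil` call; the new METADATA below lists `types-python-dateutil` in the `types` extra, which supplies the stubs that let these calls type-check. A small self-contained sketch:

```python
# With types-python-dateutil installed, these calls type-check cleanly,
# matching the ignores removed above.
import datetime as dt

import dateutil.parser
import dateutil.relativedelta

parsed: dt.date = dateutil.parser.parse("2020-06-01").date()
rolled = dt.datetime(2020, 6, 1) + dateutil.relativedelta.relativedelta(months=1)
print(parsed, rolled)  # 2020-06-01 2020-07-01 00:00:00
```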
res2df/trans.py CHANGED
@@ -284,7 +284,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def trans_main(args):
+def trans_main(args: argparse.Namespace) -> None:
     """This is the command line API"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
res2df/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '1.3.9'
-__version_tuple__ = version_tuple = (1, 3, 9)
+__version__ = version = '1.3.10'
+__version_tuple__ = version_tuple = (1, 3, 10)
 
 __commit_id__ = commit_id = None
res2df/vfp/__init__.py CHANGED
@@ -2,7 +2,7 @@
 keywords in Eclipse.
 """
 
-from ._vfp import (  # noqa: F401
+from ._vfp import (
     basic_data,
     basic_data2df,
     basic_data2pyarrow,
@@ -18,3 +18,20 @@ from ._vfp import (  # noqa: F401
     vfp_main,
     vfp_reverse_main,
 )
+
+__all__ = [
+    "basic_data",
+    "basic_data2df",
+    "basic_data2pyarrow",
+    "df",
+    "df2basic_data",
+    "df2res",
+    "df2ress",
+    "dfs",
+    "fill_parser",
+    "fill_reverse_parser",
+    "pyarrow2basic_data",
+    "pyarrow_tables",
+    "vfp_main",
+    "vfp_reverse_main",
+]
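The `# noqa: F401` suppression is replaced by an explicit `__all__`: linters and type checkers treat names listed in `__all__` as intentional re-exports, and `from res2df.vfp import *` now exports exactly that list. A minimal sketch of the pattern in a hypothetical package `__init__.py` (module names are illustrative):

```python
# mypkg/__init__.py -- hypothetical layout illustrating the pattern above.
# Listing a re-imported name in __all__ marks it as public API, so the
# import no longer needs "# noqa: F401" to silence the unused-import rule.
from ._impl import helper

__all__ = ["helper"]
```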
res2df/vfp/_vfp.py CHANGED
@@ -10,19 +10,15 @@ import sys
 from typing import Any
 
 import numpy as np
+
+# Needed for mypy
+import opm.io
 import pandas as pd
 import pyarrow as pa
 
-try:
-    # Needed for mypy
-
-    import opm.io
-
-    # This import is seemingly not used, but necessary for some attributes
-    # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa: F401
-except ImportError:
-    pass
+# This import is seemingly not used, but necessary for some attributes
+# to be included in DeckItem objects.
+from opm.io.deck import DeckKeyword  # noqa: F401
 
 from ..common import comment_formatter, write_dframe_stdout_file, write_inc_stdout_file
 from ..common import fill_reverse_parser as common_fill_reverse_parser
@@ -484,7 +480,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar
     return common_fill_reverse_parser(parser, "VFPPROD, VFPINJ", "vfp.inc")
 
 
-def vfp_main(args) -> None:
+def vfp_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility."""
     logger = getLogger_res2csv(__name__, vars(args))
     if args.keyword and args.keyword not in SUPPORTED_KEYWORDS:
@@ -523,7 +519,7 @@ def vfp_main(args) -> None:
     logger.info("Parsed file %s for vfp.df", args.DATAFILE)
 
 
-def vfp_reverse_main(args) -> None:
+def vfp_reverse_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility for CSV to Eclipse"""
     logger = getLogger_res2csv(__name__, vars(args))
     vfp_df = pd.read_csv(args.csvfile)
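The removed `try`/`except ImportError` guards here and in the files above reflect that `opm` is now an unconditional dependency (the METADATA below pins `opm>=2020.10.2`). A hedged sketch of a downstream guard, should a caller still want a friendlier error:

```python
# opm is a hard requirement of res2df 1.3.10, so the library no longer
# guards its own import; user code can, if desired:
try:
    import opm.io  # noqa: F401
except ImportError as err:
    raise SystemExit("res2df requires the 'opm' package") from err
```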
res2df/vfp/_vfpcommon.py CHANGED
@@ -7,22 +7,16 @@ output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table
 """
 
 import logging
-import numbers
-from typing import Any
 
 import numpy as np
-import pandas as pd
-
-try:
-    # Needed for mypy
 
-    import opm.io
+# Needed for mypy
+import opm.io
+import pandas as pd
 
-    # This import is seemingly not used, but necessary for some attributes
-    # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa: F401
-except ImportError:
-    pass
+# This import is seemingly not used, but necessary for some attributes
+# to be included in DeckItem objects.
+from opm.io.deck import DeckKeyword  # noqa: F401
 
 from ..common import parse_opmio_deckrecord
 
@@ -56,7 +50,7 @@ def _deckrecord2list(
     keyword: str,
     recordindex: int,
     recordname: str,
-) -> Any | list[float]:
+) -> list[float]:
     """
     Parse an opm.opmcommon_python.DeckRecord belonging to a certain keyword
     and return as list of numbers
@@ -68,14 +62,22 @@
         list index to the "record".
         recordname: Name of the record
     """
-    record = parse_opmio_deckrecord(record, keyword, "records", recordindex)
+    rec_dict = parse_opmio_deckrecord(record, keyword, "records", recordindex)
 
-    values: Any | list[float]
+    values: list[float]
     # Extract interpolation ranges into lists
-    if isinstance(record.get(recordname), list):
-        values = record.get(recordname)
-    elif isinstance(record.get(recordname), numbers.Number):
-        values = [record.get(recordname)]
+    tmp_val = rec_dict.get(recordname)
+    if tmp_val is None:
+        raise KeyError(f"Missing record '{recordname}' in keyword '{keyword}'")
+    if isinstance(tmp_val, (list, tuple)):
+        try:
+            values = [float(val) for val in tmp_val]
+        except (TypeError, ValueError) as e:
+            raise ValueError(
+                f"Non-numeric value in record '{recordname}' for keyword '{keyword}'"
+            ) from e
+    elif isinstance(tmp_val, (int, float)):
+        values = [float(tmp_val)]
     else:
         raise ValueError(
             f"Keyword {keyword} and recordname {recordname} "
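The rewritten `_deckrecord2list()` tightens the return type from `Any | list[float]` to `list[float]` by coercing every accepted shape to floats and raising explicitly otherwise. A standalone sketch of the same coercion (function and record names here are illustrative):

```python
# Sketch of the coercion introduced in _deckrecord2list(): normalize a
# parsed record value to list[float], with explicit errors.
def to_float_list(rec_dict: dict, recordname: str, keyword: str) -> list[float]:
    tmp_val = rec_dict.get(recordname)
    if tmp_val is None:
        raise KeyError(f"Missing record '{recordname}' in keyword '{keyword}'")
    if isinstance(tmp_val, (list, tuple)):
        try:
            return [float(val) for val in tmp_val]
        except (TypeError, ValueError) as err:
            raise ValueError(
                f"Non-numeric value in record '{recordname}' for keyword '{keyword}'"
            ) from err
    if isinstance(tmp_val, (int, float)):
        return [float(tmp_val)]
    raise ValueError(f"Unsupported value for record '{recordname}'")

print(to_float_list({"THP_VALUES": (10.0, 20, 30.5)}, "THP_VALUES", "VFPPROD"))
```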
res2df/wcon.py CHANGED
@@ -1,16 +1,13 @@
 """Extract WCON* from a .DATA file"""
 
 import argparse
-import contextlib
 import datetime
 import logging
 
+# Needed for mypy
+import opm.io
 import pandas as pd
 
-with contextlib.suppress(ImportError):
-    # Needed for mypy
-    import opm.io
-
 from .common import (
     parse_opmio_date_rec,
     parse_opmio_deckrecord,
@@ -85,7 +82,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def wcon_main(args) -> None:
+def wcon_main(args: argparse.Namespace) -> None:
     """Read from disk and write CSV back to disk"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
res2df/wellcompletiondata.py CHANGED
@@ -34,7 +34,7 @@ class KHUnit(StrEnum):
 
 def df(
     resdatafiles: ResdataFiles,
-    zonemap: dict[int, str],
+    zonemap: dict[int, str] | None = None,
     use_wellconnstatus: bool = False,
     excl_well_startswith: str | None = None,
 ) -> pd.DataFrame:
@@ -48,6 +48,9 @@
     if other connections are closed. And the KH is summed over open connections
     only.
 
+    If no zonemap is provided, it will be looked for in the default location
+    (the same directory as the DATA file loaded in resdatafiles).
+
     Args:
         resdatafiles; ResdataFiles object
         zonemap: dictionary with layer->zone mapping
@@ -56,7 +59,9 @@
     Returns:
         pd.DataFrame with one row per unique combination of well, zone and date.
     """
+
     compdat_df = create_compdat_df(resdatafiles, zonemap=zonemap)
+
     if "ZONE" not in compdat_df.columns:
         logger.warning(
             "ZONE column not generated in compdat table. "
@@ -286,7 +291,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def wellcompletiondata_main(args):
+def wellcompletiondata_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility"""
     logger = getLogger_res2csv(__name__, vars(args))
 
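With `zonemap` now optional, callers can omit it and rely on the default lookup described in the new docstring lines. A hedged sketch (assumes this hunk belongs to `res2df.wellcompletiondata`, and that a deck with a lyr-file alongside it exists; file names are placeholders):

```python
# Sketch: zonemap defaults to None, triggering the default lookup next to
# the DATA file. Module and file names here are assumptions.
from res2df import wellcompletiondata
from res2df.resdatafiles import ResdataFiles

resdatafiles = ResdataFiles("MYCASE.DATA")  # placeholder deck path
completion_df = wellcompletiondata.df(resdatafiles)  # no explicit zonemap
print(completion_df.head())
```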
res2df/wellconnstatus.py CHANGED
@@ -112,7 +112,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     return parser
 
 
-def wellconnstatus_main(args):
+def wellconnstatus_main(args: argparse.Namespace) -> None:
     """Entry-point for module, for command line utility"""
     logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
res2df-1.3.10.dist-info/METADATA ADDED
@@ -0,0 +1,94 @@
+Metadata-Version: 2.4
+Name: res2df
+Version: 1.3.10
+Summary: Convert reservoir simulator input and output to DataFrames
+Author-email: Håvard Berland <havb@equinor.com>
+License-Expression: GPL-3.0-only
+Project-URL: Homepage, https://github.com/equinor/res2df
+Project-URL: Repository, https://github.com/equinor/res2df
+Project-URL: Documentation, https://equinor.github.io/res2df/
+Keywords: energy,subsurface,reservoir,scientific,engineering
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Natural Language :: English
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: resdata>=5.0.0-b0
+Requires-Dist: resfo
+Requires-Dist: networkx
+Requires-Dist: numpy
+Requires-Dist: opm>=2020.10.2
+Requires-Dist: pandas
+Requires-Dist: pyarrow
+Requires-Dist: pyyaml>=5.1
+Requires-Dist: treelib
+Provides-Extra: tests
+Requires-Dist: pytest; extra == "tests"
+Requires-Dist: pytest-cov; extra == "tests"
+Requires-Dist: pytest-mock; extra == "tests"
+Requires-Dist: pytest-timeout; extra == "tests"
+Provides-Extra: style
+Requires-Dist: pre-commit; extra == "style"
+Provides-Extra: types
+Requires-Dist: mypy; extra == "types"
+Requires-Dist: types-PyYAML; extra == "types"
+Requires-Dist: types-python-dateutil; extra == "types"
+Requires-Dist: types-setuptools; extra == "types"
+Requires-Dist: types-networkx; extra == "types"
+Requires-Dist: pandas-stubs; extra == "types"
+Provides-Extra: docs
+Requires-Dist: sphinx-autoapi; extra == "docs"
+Requires-Dist: ipython; extra == "docs"
+Requires-Dist: rstcheck; extra == "docs"
+Requires-Dist: setuptools; extra == "docs"
+Requires-Dist: sphinx; extra == "docs"
+Requires-Dist: sphinx-argparse; extra == "docs"
+Requires-Dist: sphinx-autodoc-typehints; extra == "docs"
+Requires-Dist: sphinx_rtd_theme; extra == "docs"
+Provides-Extra: ert
+Requires-Dist: ert>=10.2.0b13; extra == "ert"
+Dynamic: license-file
+
+[![Publish to PyPI](https://github.com/equinor/res2df/actions/workflows/publish.yml/badge.svg)](https://github.com/equinor/res2df/actions/workflows/publish.yml)
+[![PyPI version](https://img.shields.io/pypi/v/res2df.svg)](https://pypi.org/project/res2df/)
+[![codecov](https://codecov.io/gh/equinor/res2df/graph/badge.svg?token=3sZBGGu5VG)](https://codecov.io/gh/equinor/res2df)
+[![Python 3.11-3.13](https://img.shields.io/badge/python-3.11%20|%203.12%20|%203.13-blue.svg)](https://www.python.org)
+[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
+[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)
+
+# res2df
+
+res2df is a Pandas DataFrame wrapper around resdata and opm.io, which
+are used to access binary files outputted by reservoir simulators,
+or its input files --- or any other tool outputting to the same data format.
+
+The reverse operation, from a Pandas DataFrame to reservoir simulator include files
+(commonly given the extension ".inc", ".grdecl" etc.) is provided for some of the
+modules.
+
+The package consists of a module pr. datatype, e.g. one module for summary
+files (.UNSMRY), one for completion data etc.
+
+There is a command line frontend for almost all functionality, called
+`res2csv`, which converts the reservoir data to DataFrames, and then dumps
+the dataframes to files in CSV format, and a similar `csv2res` for the
+reverse operation.
+
+For documentation, see <https://equinor.github.io/res2df/>
+
+## License
+
+This library is released under GPLv3.
+
+## Copyright
+
+The code is Copyright Equinor ASA 2019-2021.
+
+Contributions without copyright transfer are welcome.
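A hedged library-level sketch of the `res2csv summary` round trip the README above describes (the deck name is a placeholder, and the exact `summary.df()` call beyond the signature fragments visible in this diff is an assumption):

```python
# Library-level equivalent of "res2csv summary MYCASE.DATA" (sketch).
from res2df import summary
from res2df.resdatafiles import ResdataFiles

sum_df = summary.df(ResdataFiles("MYCASE.DATA"))  # placeholder deck path
sum_df.to_csv("summary.csv")  # what the CLI writes with its --output option
```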