res2df 1.1.1__py2.py3-none-any.whl → 1.2.0__py2.py3-none-any.whl

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
res2df/__init__.py CHANGED
@@ -1,9 +1,9 @@
 import importlib
 from typing import List
 
-from .__version__ import __version__
-from .res2csvlogger import getLogger_res2csv
-from .resdatafiles import ResdataFiles
+from .__version__ import __version__ as __version__
+from .res2csvlogger import getLogger_res2csv as getLogger_res2csv
+from .resdatafiles import ResdataFiles as ResdataFiles
 
 SUBMODULES: List[str] = [
     "compdat",
res2df/common.py CHANGED
@@ -540,17 +540,20 @@ def df2res(
     calling_module = inspect.getmodule(from_module[0])
     if dataframe.empty:
         raise ValueError("Empty dataframe")
-    if consecutive is not None and consecutive in dataframe:
-        if not (
+    if (
+        consecutive is not None
+        and consecutive in dataframe
+        and not (
             min(dataframe[consecutive]) == 1
             and len(dataframe[consecutive].unique()) == max(dataframe[consecutive])
-        ):
-            logger.critical(
-                "%s inconsistent in input dataframe, got the values %s",
-                consecutive,
-                str(dataframe[consecutive].unique()),
-            )
-            raise ValueError
+        )
+    ):
+        logger.critical(
+            "%s inconsistent in input dataframe, got the values %s",
+            consecutive,
+            str(dataframe[consecutive].unique()),
+        )
+        raise ValueError
 
     # "KEYWORD" must always be in the dataframe:
     if "KEYWORD" not in dataframe:
@@ -816,7 +819,7 @@ def stack_on_colnames(
     """
     if not inplace:
         dframe = dframe.copy()
-    tuplecolumns = list(map(lambda x: tuple(x.split(sep)), dframe.columns))
+    tuplecolumns = [tuple(x.split(sep)) for x in dframe.columns]
     if max(map(len, tuplecolumns)) < 2:
         logger.info("No columns to stack")
         return dframe
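
The `stack_on_colnames` change swaps `list(map(lambda ...))` for a list comprehension; behavior is identical, minus the per-element lambda call. A standalone illustration of the equivalence (hypothetical column names, not taken from res2df output):

    columns = ["WOPR:OP_1", "WGPR:OP_2", "DATE"]
    sep = ":"

    via_map = list(map(lambda x: tuple(x.split(sep)), columns))
    via_comprehension = [tuple(x.split(sep)) for x in columns]

    # Both yield [("WOPR", "OP_1"), ("WGPR", "OP_2"), ("DATE",)]
    assert via_map == via_comprehension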
res2df/compdat.py CHANGED
@@ -1,16 +1,17 @@
 """Parser and dataframe generator for the keywords:
-* COMPDAT
-* COMPLUMP
-* COMPSEGS
-* WELOPEN
-* WELSEGS
-* WLIST
-* WSEGAICD
-* WSEGSICD
-* WSEGVALV
+* COMPDAT
+* COMPLUMP
+* COMPSEGS
+* WELOPEN
+* WELSEGS
+* WLIST
+* WSEGAICD
+* WSEGSICD
+* WSEGVALV
 """
 
 import argparse
+import contextlib
 import datetime
 import logging
 from typing import Dict, List, Optional, Union
@@ -18,12 +19,9 @@ from typing import Dict, List, Optional, Union
 import numpy as np
 import pandas as pd
 
-try:
+with contextlib.suppress(ImportError):
     # pylint: disable=unused-import
     import opm.io.deck
-except ImportError:
-    # Allow parts of res2df to work without OPM:
-    pass
 
 from .common import (
     get_wells_matching_template,
@@ -276,16 +274,16 @@ def deck2dfs(
     if "KEYWORD_IDX" in wsegvalv_df.columns:
         wsegvalv_df.drop(["KEYWORD_IDX"], axis=1, inplace=True)
 
-    return dict(
-        COMPDAT=compdat_df,
-        COMPSEGS=compsegs_df,
-        WELSEGS=welsegs_df,
-        WELOPEN=welopen_df,
-        WLIST=wlist_df,
-        WSEGSICD=wsegsicd_df,
-        WSEGAICD=wsegaicd_df,
-        WSEGVALV=wsegvalv_df,
-    )
+    return {
+        "COMPDAT": compdat_df,
+        "COMPSEGS": compsegs_df,
+        "WELSEGS": welsegs_df,
+        "WELOPEN": welopen_df,
+        "WLIST": wlist_df,
+        "WSEGSICD": wsegsicd_df,
+        "WSEGAICD": wsegaicd_df,
+        "WSEGVALV": wsegvalv_df,
+    }
 
 
 def expand_welopen(welopen_df: pd.DataFrame, compdat_df: pd.DataFrame) -> pd.DataFrame:
@@ -612,12 +610,14 @@ def expand_wlist(wlist_df: pd.DataFrame) -> pd.DataFrame:
             currentstate[wlist_record["NAME"]] = " ".join(
                 sorted(wlist_record["WELLS"].split())
             )
-        elif wlist_record["ACTION"] in ["ADD", "DEL"]:
-            if wlist_record["NAME"] not in currentstate:
-                raise ValueError(
-                    "WLIST ADD/DEL only works on existing well lists: "
-                    f"{str(wlist_record)}"
-                )
+        elif (
+            wlist_record["ACTION"] in ["ADD", "DEL"]
+            and wlist_record["NAME"] not in currentstate
+        ):
+            raise ValueError(
+                "WLIST ADD/DEL only works on existing well lists: "
+                f"{str(wlist_record)}"
+            )
         if wlist_record["ACTION"] == "ADD":
             currentstate[wlist_record["NAME"]] = " ".join(
                 sorted(
@@ -630,10 +630,8 @@ def expand_wlist(wlist_df: pd.DataFrame) -> pd.DataFrame:
         if wlist_record["ACTION"] == "DEL":
             currentstate[wlist_record["NAME"]] = " ".join(
                 sorted(
-                    list(
-                        set(currentstate[wlist_record["NAME"]].split())
-                        - set(wlist_record["WELLS"].split())
-                    )
+                    set(currentstate[wlist_record["NAME"]].split())
+                    - set(wlist_record["WELLS"].split())
                 )
             )
         if wlist_record["ACTION"] == "MOV":
@@ -641,25 +639,16 @@ def expand_wlist(wlist_df: pd.DataFrame) -> pd.DataFrame:
                 currentstate[wlist_record["NAME"]] = ""
             currentstate[wlist_record["NAME"]] = " ".join(
                 sorted(
-                    list(
-                        set(currentstate[wlist_record["NAME"]].split()).union(
-                            set(wlist_record["WELLS"].split())
-                        )
+                    set(currentstate[wlist_record["NAME"]].split()).union(
+                        set(wlist_record["WELLS"].split())
                     )
                 )
             )
-            for (
-                wlist
-            ) in currentstate.keys():  # pylint: disable=consider-iterating-dictionary
+            for wlist, value in currentstate.items():
                 if wlist == wlist_record["NAME"]:
                     continue
                 currentstate[wlist] = " ".join(
-                    sorted(
-                        list(
-                            set(currentstate[wlist].split())
-                            - set(wlist_record["WELLS"].split())
-                        )
-                    )
+                    sorted(set(value.split()) - set(wlist_record["WELLS"].split()))
                 )
 
     # Dump final state:
@@ -857,11 +846,10 @@ def applywelopen(
     if isinstance(wlist_df, pd.DataFrame):
         if wlist_df.empty:
             wlist_df = None
-        else:
-            if set(wlist_df["ACTION"]) != {"NEW"}:
-                raise ValueError(
-                    "The WLIST dataframe must be expanded through expand_wlist()"
-                )
+        elif set(wlist_df["ACTION"]) != {"NEW"}:
+            raise ValueError(
+                "The WLIST dataframe must be expanded through expand_wlist()"
+            )
 
     welopen_df = welopen_df.astype(object).where(pd.notnull(welopen_df), None)
     welopen_df = expand_wlist_in_welopen_df(welopen_df, wlist_df)
@@ -869,8 +857,8 @@ def applywelopen(
 
     for _, row in welopen_df.iterrows():
         acts_on_well = False
-        if (row["I"] is None and row["J"] is None and row["K"] is None) or (
-            row["I"] <= 0 and row["J"] <= 0 and row["K"] <= 0
+        if all(x is None for x in (row["I"], row["J"], row["K"])) or all(
+            x <= 0 for x in (row["I"], row["J"], row["K"])
         ):
             # Applies to all connections when the completion range
             # is set zero or negative.
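
Several modules in this release (compdat, equil, faults, gruptree, inferdims) replace the `try: ... except ImportError: pass` guard around the optional `opm` dependency with `contextlib.suppress(ImportError)`, which is equivalent here and shorter. A minimal sketch of the pattern, using a hypothetical optional module name:

    import contextlib

    # Equivalent to:
    #     try:
    #         import optional_module
    #     except ImportError:
    #         pass
    with contextlib.suppress(ImportError):
        import optional_module  # hypothetical optional dependency

    # The name stays unbound when the import is suppressed, so code that
    # uses it must still guard for its absence, just as with try/except.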
res2df/equil.py CHANGED
@@ -3,33 +3,31 @@ Extract EQUIL from a :term:`.DATA file` as Pandas DataFrame
 """
 
 import argparse
+import contextlib
 import logging
 from pathlib import Path
 from typing import Dict, List, Optional, Union
 
 import pandas as pd
 
-from .common import comment_formatter
-from .common import df2res as common_df2res
-from .common import fill_reverse_parser as common_fill_reverse_parser
 from .common import (
+    comment_formatter,
     generic_deck_table,
     handle_wanted_keywords,
     keyworddata_to_df,
     write_dframe_stdout_file,
     write_inc_stdout_file,
 )
+from .common import df2res as common_df2res
+from .common import fill_reverse_parser as common_fill_reverse_parser
 from .inferdims import DIMS_POS, inject_xxxdims_ntxxx
 from .res2csvlogger import getLogger_res2csv
 from .resdatafiles import ResdataFiles
 
-try:
+with contextlib.suppress(ImportError):
     # pylint: disable=unused-import
     import opm.io
 
-except ImportError:
-    pass
-
 
 logger = logging.getLogger(__name__)
 
@@ -336,7 +334,7 @@ def equil_main(args) -> None:
     else:
         # This might be an include file for which we have to infer/guess
         # EQLDIMS. Then we send it to df() as a string
-        equil_df = df(Path(args.DATAFILE).read_text())
+        equil_df = df(Path(args.DATAFILE).read_text(encoding="utf-8"))
 
     if "EQLNUM" in equil_df and "KEYWORD" in equil_df:
         eqlnums = str(len(equil_df["EQLNUM"].unique()))
@@ -417,12 +415,8 @@ def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
         return "-- No data!"
     string = "EQUIL\n"
     string += comment_formatter(comment)
-
-    if "KEYWORD" not in dframe:
-        # Use everything..
-        subset = dframe
-    else:
-        subset = dframe[dframe["KEYWORD"] == "EQUIL"]
+    # Use everything if KEYWORD not in dframe..
+    subset = dframe if "KEYWORD" not in dframe else dframe[dframe["KEYWORD"] == "EQUIL"]
     if "EQLNUM" not in subset:
         if len(subset) != 1:
             logger.critical("If EQLNUM is not supplied, only one row should be given")
@@ -508,11 +502,8 @@ def _df2res_equilfuncs(
     col_headers = RENAMERS[keyword]["DATA"]
 
     string += f"-- {'DEPTH':^21} {col_headers[1]:^21} \n"
-    if "KEYWORD" not in dframe:
-        # Use everything..
-        subset = dframe
-    else:
-        subset = dframe[dframe["KEYWORD"] == keyword]
+    # Use everything if KEYWORD not in dframe..
+    subset = dframe if "KEYWORD" not in dframe else dframe[dframe["KEYWORD"] == keyword]
 
     def _df2res_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str:
         """Create string with :term:`include file` contents
res2df/faults.py CHANGED
@@ -4,7 +4,9 @@ Extract the contents of the FAULTS keyword into
 a DataFrame
 
 """
+
 import argparse
+import contextlib
 import logging
 from typing import Union
 
@@ -14,15 +16,12 @@ from .common import parse_opmio_deckrecord, write_dframe_stdout_file
 from .res2csvlogger import getLogger_res2csv
 from .resdatafiles import ResdataFiles
 
-try:
+with contextlib.suppress(ImportError):
     # Needed for mypy
 
     # pylint: disable=unused-import
     import opm.io
 
-except ImportError:
-    pass
-
 
 logger = logging.getLogger(__name__)
 
res2df/fipreports.py CHANGED
@@ -47,7 +47,7 @@ def report_block_lineparser(line: str) -> tuple:
 
     allowed_line_starts = [":CURRENTLY", ":OUTFLOW", ":MATERIAL", ":ORIGINALLY"]
     if not any(line.strip().upper().startswith(x) for x in allowed_line_starts):
-        return tuple()
+        return ()
 
     colonsections = line.split(":")
     to_index: Optional[int]
@@ -172,7 +172,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra
 
         if in_report_block:
             interesting_strings = ["IN PLACE", "OUTFLOW", "MATERIAL"]
-            if not sum([string in line.upper() for string in interesting_strings]):
+            if not sum(string in line.upper() for string in interesting_strings):
                 # Skip if we are not on an interesting line.
                 continue
 
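
In `fipreports`, `sum([...])` becomes `sum(...)` over a generator expression: no throwaway list is built, and the result is unchanged because booleans count as 0/1. An illustration with a hypothetical report line:

    line = "  :CURRENTLY IN PLACE:"
    interesting_strings = ["IN PLACE", "OUTFLOW", "MATERIAL"]

    # True/False behave as 1/0, so the sum counts matching substrings.
    matches = sum(string in line.upper() for string in interesting_strings)
    assert matches == 1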
res2df/grid.py CHANGED
@@ -9,6 +9,7 @@ geometric information. Static data (properties) can be merged from
 the INIT file, and dynamic data can be merged from the Restart (UNRST)
 file.
 """
+
 import argparse
 import datetime
 import fnmatch
@@ -93,13 +94,12 @@ def dates2rstindices(
         if isodate not in availabledates:
             raise ValueError("date " + str(isodate) + " not found in UNRST file")
         chosendates = [isodate]
-    else:
-        if dates == "first":
-            chosendates = [availabledates[0]]
-        elif dates == "last":
-            chosendates = [availabledates[-1]]
-        elif dates == "all":
-            chosendates = availabledates
+    elif dates == "first":
+        chosendates = [availabledates[0]]
+    elif dates == "last":
+        chosendates = [availabledates[-1]]
+    elif dates == "all":
+        chosendates = availabledates
     elif isinstance(dates, datetime.datetime):
         chosendates = [dates.date()]
     elif isinstance(dates, datetime.date):
@@ -678,10 +678,9 @@ def df2res(
     # Figure out the total number of cells for which we need to export data for:
     global_size = None
     active_cells = None
-    if resdatafiles is not None:
-        if resdatafiles.get_egrid() is not None:
-            global_size = resdatafiles.get_egrid().get_global_size()
-            active_cells = resdatafiles.get_egrid().getNumActive()
+    if resdatafiles is not None and resdatafiles.get_egrid() is not None:
+        global_size = resdatafiles.get_egrid().get_global_size()
+        active_cells = resdatafiles.get_egrid().getNumActive()
 
     if "GLOBAL_INDEX" not in grid_df:
         logger.warning(
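
Collapsing the nested `if` in grid.df2res into one condition is safe because `and` short-circuits: the right-hand test never runs when the left-hand one is false. A minimal, self-contained demonstration (stand-in object, not the real resdata classes):

    grid = None

    # grid.get_global_size() is never evaluated while grid is None, so
    # the merged condition cannot raise an AttributeError:
    if grid is not None and grid.get_global_size() > 0:
        print("grid present")
    else:
        print("no grid")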
res2df/gruptree.py CHANGED
@@ -2,6 +2,7 @@
 
 import argparse
 import collections
+import contextlib
 import datetime
 import logging
 import sys
@@ -11,13 +12,11 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import pandas as pd
 import treelib
 
-try:
+with contextlib.suppress(ImportError):
     # Needed for mypy
 
     # pylint: disable=unused-import
     import opm.io
-except ImportError:
-    pass
 
 from .common import (
     parse_opmio_date_rec,
@@ -64,10 +63,7 @@ def df(
     """
 
     date: Optional[datetime.date]
-    if startdate is not None:
-        date = startdate
-    else:
-        date = None
+    date = startdate if startdate is not None else None
 
     if isinstance(deck, ResdataFiles):
         deck = deck.get_deck()
@@ -307,9 +303,8 @@ def edge_dataframe2dict(dframe: pd.DataFrame) -> List[dict]:
     """
     if dframe.empty:
         return [{}]
-    if "DATE" in dframe:
-        if len(dframe["DATE"].unique()) > 1:
-            raise ValueError("Can only handle one date at a time")
+    if "DATE" in dframe and len(dframe["DATE"].unique()) > 1:
+        raise ValueError("Can only handle one date at a time")
     subtrees: dict = collections.defaultdict(dict)
     edges = []  # List of tuples
     for _, row in dframe.iterrows():
@@ -377,12 +372,13 @@ def dict2treelib(name: str, nested_dict: dict) -> treelib.Tree:
     warnings.warn(
         "dict2treelib() is deprecated and will be removed, use tree_from_dict()",
         FutureWarning,
+        stacklevel=1,
     )
 
     tree = treelib.Tree()
     tree.create_node(name, name)
-    for child in nested_dict.keys():
-        tree.paste(name, dict2treelib(child, nested_dict[child]))
+    for child, value in nested_dict.items():
+        tree.paste(name, dict2treelib(child, value))
     return tree
 
 
@@ -426,7 +422,7 @@ def prettyprint(dframe: pd.DataFrame) -> str:
     BRANPROP trees"""
     output = ""
     for date in dframe["DATE"].dropna().unique():
-        df_date = dframe[dframe.DATE == date]
+        df_date = dframe[date == dframe.DATE]
         output += "Date: " + pd.to_datetime(date).strftime("%Y-%m-%d") + "\n"
 
         for treetype in ["GRUPTREE", "BRANPROP"]:
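
`dict2treelib` gains an explicit `stacklevel=1` in its deprecation warning. 1 is the default and attributes the warning to the `warnings.warn()` call itself; for reference, `stacklevel=2` would attribute it to the deprecated function's caller, which is usually the line a user needs to change. A standalone sketch of the difference:

    import warnings

    def old_api() -> None:
        # With stacklevel=2 the reported location is old_api()'s caller,
        # not this warnings.warn() line.
        warnings.warn("old_api() is deprecated", FutureWarning, stacklevel=2)

    old_api()  # the emitted warning points at this line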
res2df/hook_implementations/jobs.py CHANGED
@@ -3,14 +3,10 @@ import sys
 from pathlib import Path
 
 try:
-    from ert.shared.plugins.plugin_manager import hook_implementation
-    from ert.shared.plugins.plugin_response import plugin_response
+    from ert import plugin as ert_plugin  # type: ignore
 except ModuleNotFoundError:
-    # ert is not installed - use dummy/transparent function decorators.
-    def hook_implementation(func):
-        return func
-
-    def plugin_response(plugin_name):  # pylint: disable=unused-argument
+    # ert is not installed - use dummy/transparent function decorator:
+    def ert_plugin(name: str = ""):
         def decorator(func):
             return func
 
@@ -28,8 +24,7 @@ def _get_jobs_from_directory(directory):
     return {path.name: str(path) for path in all_files}
 
 
-@hook_implementation
-@plugin_response(plugin_name="res2df")
+@ert_plugin(name="res2df")
 def installable_jobs():
     return _get_jobs_from_directory("config_jobs")
 
@@ -43,8 +38,7 @@ def _get_module_variable_if_exists(module_name, variable_name, default=""):
     return getattr(script_module, variable_name, default)
 
 
-@hook_implementation
-@plugin_response(plugin_name="res2df")
+@ert_plugin(name="res2df")
 def job_documentation(job_name):
     res2df_jobs = set(installable_jobs().data.keys())
     if job_name not in res2df_jobs:
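
The hook section above switches from the removed `hook_implementation`/`plugin_response` pair to the consolidated `ert.plugin` decorator, keeping a no-op fallback so the module still imports without ert. The fallback must be a decorator factory (a function returning a decorator) to match the `ert_plugin(name=...)` call shape; a minimal self-contained sketch of that pattern, assuming a hypothetical plugin name:

    try:
        from ert import plugin as ert_plugin  # type: ignore
    except ModuleNotFoundError:
        # ert not installed: transparent decorator factory with the same
        # call shape, so decorated functions remain plain functions.
        def ert_plugin(name: str = ""):
            def decorator(func):
                return func

            return decorator

    @ert_plugin(name="myplugin")  # hypothetical plugin name
    def installable_jobs():
        return {}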
res2df/inferdims.py CHANGED
@@ -3,21 +3,20 @@ Support module for inferring EQLDIMS and TABDIMS from incomplete
 reservoir simulator decks (typically single include-files)
 """
 
+import contextlib
 import logging
 from typing import Dict, Optional, Union
 
-try:
+with contextlib.suppress(ImportError):
     import opm.io
-except ImportError:
     # Let parts of res2df work without OPM:
-    pass
 
 from .resdatafiles import ResdataFiles
 
 logger = logging.getLogger(__name__)
 
 # Constants to use for pointing to positions in the xxxDIMS keyword
-DIMS_POS: Dict[str, int] = dict(NTPVT=1, NTSFUN=0, NTEQUL=0)
+DIMS_POS: Dict[str, int] = {"NTPVT": 1, "NTSFUN": 0, "NTEQUL": 0}
 
 
 def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int:
@@ -41,12 +40,10 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int:
 
     if dimkeyword not in ["TABDIMS", "EQLDIMS"]:
         raise ValueError("Only supports TABDIMS and EQLDIMS")
-    if dimkeyword == "TABDIMS":
-        if dimitem not in [0, 1]:
-            raise ValueError("Only support item 0 and 1 in TABDIMS")
-    if dimkeyword == "EQLDIMS":
-        if dimitem not in [0]:
-            raise ValueError("Only item 0 in EQLDIMS can be estimated")
+    if dimkeyword == "TABDIMS" and dimitem not in [0, 1]:
+        raise ValueError("Only support item 0 and 1 in TABDIMS")
+    if dimkeyword == "EQLDIMS" and dimitem not in [0]:
+        raise ValueError("Only item 0 in EQLDIMS can be estimated")
 
     # A less than res2df-standard permissive opm.io, when using
     # this one opm.io will fail if there are extra records
@@ -117,12 +114,10 @@ def inject_dimcount(
     assert dimvalue > 0, "dimvalue must be larger than zero"
     if dimkeyword not in ["TABDIMS", "EQLDIMS"]:
         raise ValueError("Only supports TABDIMS and EQLDIMS")
-    if dimkeyword == "TABDIMS":
-        if dimitem not in [0, 1]:
-            raise ValueError("Only support item 0 and 1 in TABDIMS")
-    if dimkeyword == "EQLDIMS":
-        if dimitem not in [0]:
-            raise ValueError("Only item 0 in EQLDIMS can be injected")
+    if dimkeyword == "TABDIMS" and dimitem not in [0, 1]:
+        raise ValueError("Only support item 0 and 1 in TABDIMS")
+    if dimkeyword == "EQLDIMS" and dimitem not in [0]:
+        raise ValueError("Only item 0 in EQLDIMS can be injected")
 
     if dimkeyword in deckstr:
         if not nowarn:
res2df/nnc.py CHANGED
@@ -73,7 +73,7 @@ def df(
         data=[egrid_grid.get_ijk(global_index=int(x) - 1) for x in nnc1],
     )
     # Returned indices from get_ijk are zero-based, convert to 1-based indices
-    nnc1_df[idx_cols1] = nnc1_df[idx_cols1] + 1
+    nnc1_df[idx_cols1] += 1
 
     # Grid indices for second cell in cell pairs
     nnc2 = egrid_file["NNC2"][0].numpy_view().reshape(-1, 1)
@@ -88,7 +88,7 @@ def df(
         columns=idx_cols2,
         data=[egrid_grid.get_ijk(global_index=int(x) - 1) for x in nnc2],
     )
-    nnc2_df[idx_cols2] = nnc2_df[idx_cols2] + 1
+    nnc2_df[idx_cols2] += 1
 
     # Obtain transmissibility value, corresponding to the cell pairs above.
     tran = init_file["TRANNNC"][0].numpy_view().reshape(-1, 1)
res2df/opmkeywords/readme CHANGED
@@ -1,6 +1,5 @@
 This directory contains JSON files downloaded from
 https://github.com/OPM/opm-common/tree/master/src/opm/parser/eclipse/share/keywords/000_Eclipse100
 
-When a new keyword is to be supported by res2df, add it in
+When a new keyword is to be supported by res2df, add it in
 runmetoupdate.sh AND add it to the list of supported keywords in common.py
-
res2df/parameters.py CHANGED
@@ -81,7 +81,7 @@ def load_parameterstxt(filename: Union[str, Path]) -> Dict[str, Any]:
             index_col=False,
         )
     except pd.errors.ParserWarning as txt_exc:
-        raise pd.errors.ParserError(txt_exc)
+        raise pd.errors.ParserError(txt_exc) from txt_exc
 
     return dframe.set_index("KEY")["VALUE"].to_dict()
 
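
`raise pd.errors.ParserError(txt_exc) from txt_exc` chains the new exception to the original, so tracebacks report the root cause as `__cause__` instead of "During handling of the above exception, another exception occurred". A standalone illustration of the idiom:

    def parse_positive(text: str) -> int:
        try:
            value = int(text)
        except ValueError as err:
            # `from err` records the original exception as __cause__,
            # keeping the root-cause traceback attached.
            raise RuntimeError(f"could not parse {text!r}") from err
        return value

    assert parse_positive("42") == 42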
res2df/pillars.py CHANGED
@@ -235,10 +235,7 @@ def compute_pillar_contacts(
     assert 0 <= sgascutoff <= 1
 
     # assert datestr is None or in ISO-8601 format
-    if datestr:
-        atdatestr = "@" + datestr
-    else:
-        atdatestr = ""
+    atdatestr = "@" + datestr if datestr else ""
 
     # Non-user servicable parameter, for GOC computation
     # we require cells at the contact to contain a minute saturation
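
The pillars change is the same if/else-to-conditional-expression rewrite used elsewhere in this release; note that `+` binds tighter than the conditional, so the expression parses as `("@" + datestr) if datestr else ""`. A quick check:

    datestr = "2000-01-01"
    atdatestr = "@" + datestr if datestr else ""
    assert atdatestr == "@2000-01-01"

    datestr = ""
    atdatestr = "@" + datestr if datestr else ""
    assert atdatestr == ""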