res2df-1.3.10-py3-none-any.whl → res2df-1.3.12-py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
res2df/common.py CHANGED
@@ -653,7 +653,7 @@ def generic_deck_table(
 
     # Start building the string we are to return:
     string = keyword + "\n"
-    if comment is not None and comment:
+    if comment:
         string += "\n".join(["-- " + line for line in comment.splitlines()]) + "\n"
 
     # Empty tables are ok with Eclipse (at least sometimes)
@@ -664,7 +664,7 @@ def generic_deck_table(
     # Pandas make a pretty txt table:
    dframe = dframe.copy()
 
-    # Column names are pr. ec2ldf standard, redo to opm.common in order to use
+    # Column names are pr. res2df standard, redo to opm.common in order to use
     # sorting from that:
     if renamer is not None:
         inv_renamer = {value: key for key, value in renamer.items()}
@@ -696,11 +696,14 @@ def generic_deck_table(
     dframe = dframe.fillna(value="1*")
 
     if drop_trailing_columns:
+        columns_to_drop = []
         for col_name in reversed(relevant_columns):
-            if set(dframe[col_name].to_numpy()) == {"1*"}:
-                del dframe[col_name]
+            if (dframe[col_name] == "1*").all():
+                columns_to_drop.append(col_name)
             else:
                 break
+        if columns_to_drop:
+            dframe = dframe.drop(columns=columns_to_drop)
 
     # It is critical for opm.common, maybe also E100 to have integers printed
     # as integers, for correct parsing. Ensure these are integer where the json
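
Note: the old and new defaulted-column checks are not identical in corner cases. A minimal sketch (ours, not from the package) of the difference, and of why collecting names into `columns_to_drop` avoids deleting columns from `dframe` mid-iteration:

```python
import pandas as pd

col = pd.Series(["1*", "1*", "1*"])
print(set(col.to_numpy()) == {"1*"})  # True: old set-based check
print((col == "1*").all())            # True: new vectorized check

# The checks only diverge on an empty column: .all() over zero
# elements is vacuously True, while the set comparison is False.
empty = pd.Series([], dtype=object)
print(set(empty.to_numpy()) == {"1*"})  # False
print((empty == "1*").all())            # True
```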
@@ -708,13 +711,15 @@ def generic_deck_table(
     integer_cols = {
         item["name"]
         for item in OPMKEYWORDS[keyword]["items"]
-        if item["value_type"] == "INT"  # and item["name"] in col_headers
+        if item["value_type"] == "INT"
     }
     for int_col in integer_cols.intersection(dframe.columns):
-        defaulted_rows = dframe[int_col] == "1*"
-        integer_values = dframe.loc[~defaulted_rows, int_col].astype(int)
+        mask = dframe[int_col] != "1*"
         dframe[int_col] = dframe[int_col].astype(str)
-        dframe.loc[~defaulted_rows, int_col] = integer_values
+        if mask.any():
+            dframe.loc[mask, int_col] = (
+                dframe.loc[mask, int_col].astype(float).astype(int).astype(str)
+            )
 
     # Quote all string data. This is not always needed, but needed
     # for some colums, for example well-names containing a slash.
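
Note: routing the conversion through float is not purely cosmetic. A sketch (ours) of a plausible motivation: integer values that arrive as decimal strings such as "3.0" crash a direct `.astype(int)` but survive the float round-trip:

```python
import pandas as pd

values = pd.Series(["3.0", "7", "1*"], dtype=object)
mask = values != "1*"
# values.loc[mask].astype(int) raises ValueError on "3.0";
# chaining through float yields clean integer strings:
values.loc[mask] = values.loc[mask].astype(float).astype(int).astype(str)
print(values.tolist())  # ['3', '7', '1*']
```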
@@ -726,9 +731,8 @@ def generic_deck_table(
     for str_col in string_cols.intersection(dframe.columns):
         # Ensure 1* is not quoted.
         non_defaulted_rows = dframe[str_col] != "1*"
-        dframe.loc[non_defaulted_rows, str_col].str.replace("'", "")
         dframe.loc[non_defaulted_rows, str_col] = (
-            "'" + dframe.loc[non_defaulted_rows, str_col] + "'"
+            "'" + dframe.loc[non_defaulted_rows, str_col].str.replace("'", "") + "'"
         )
 
     # Now rename again to have prettier column names:
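
This hunk fixes a real bug: the removed line computed `.str.replace("'", "")` but discarded the result, so embedded quote characters were never stripped before the values were re-quoted. A minimal illustration (ours):

```python
import pandas as pd

names = pd.Series(["WELL'1", "OP-1"])

old = "'" + names + "'"  # the stripped result used to be discarded
print(old.tolist())  # ["'WELL'1'", "'OP-1'"]

new = "'" + names.str.replace("'", "") + "'"  # now chained into the assignment
print(new.tolist())  # ["'WELL1'", "'OP-1'"]
```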
@@ -739,8 +743,7 @@ def generic_deck_table(
     tablestring = dframe.to_string(header=True, index=False)
     # Indent all lines with two spaces:
     tablestring = "\n".join(
-        ["  " + line.strip().replace(" /", " /") for line in tablestring.splitlines()]
-        # The replace() in there is needed for py36/pandas==1.1.5 only.
+        "  " + line.strip().replace(" /", " /") for line in tablestring.splitlines()
     )
     # Eclipse comment for the header line:
     tablestring = "--" + tablestring[1:]
@@ -836,7 +839,7 @@ def stack_on_colnames(
     # Drop rows stemming from the NaNs in the second tuple-element for
     # static columns:
     dframe = dframe.dropna(axis="index", subset=["DATE"])
-    del dframe["level_0"]
+    dframe = dframe.drop(columns="level_0")
     dframe.index.name = ""
     return dframe
 
@@ -912,11 +915,11 @@ def parse_lyrfile(filename: str | Path) -> list[dict[str, Any]] | None:
                    zonedict["to_layer"] = to_layer
                else:
                    logger.error("From_layer higher than to_layer")
-                    raise ValueError()
+                    raise ValueError
            elif len(numbers) == 1:
                zonedict["span"] = int(numbers[0])
            else:
-                raise ValueError()
+                raise ValueError
            lyrlist.append(zonedict)
    except ValueError:
        logger.error("Could not parse lyr file %s", filename)
@@ -967,14 +970,11 @@ def get_wells_matching_template(template: str, wells: list[str]) -> list[str]:
     Returns:
         List of matched wells
     """
-    if template.startswith("*") or template.startswith("?"):
+    if template.startswith(("*", "?")):
         raise ValueError(
             "Well template not allowed to start with a wildcard character: "
             f"Must be preceded with a \\: {template}"
         )
-    if template.startswith("\\"):
-        # Note that the two \\ are actually read as one and
-        # this will return True for f.ex '\*P1'
-        template = template[1:]
+    template = template.removeprefix("\\")
     regex = template.replace("*", ".*").replace("?", ".")
     return [well for well in wells if bool(re.match(regex, well))]
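
Both simplifications lean on stdlib behaviour: `str.startswith` accepts a tuple of prefixes, and `str.removeprefix` (Python 3.9+) returns the string unchanged when the prefix is absent, making the old if-guard redundant. A quick sketch:

```python
print("*OP".startswith(("*", "?")))  # True
print("P*".startswith(("*", "?")))   # False
print(r"\*P1".removeprefix("\\"))    # *P1
print("OP1".removeprefix("\\"))      # OP1, unchanged without a guard
```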
res2df/compdat.py CHANGED
@@ -751,7 +751,7 @@ def expand_complump_in_welopen_df(
             exp_welopens.append(cell_row)
 
     dframe = pd.DataFrame(exp_welopens)
-    return dframe.astype(object).where(pd.notna(dframe), None)  # type: ignore[call-overload]
+    return dframe.astype(object).replace({np.nan: None})
 
 
 def expand_wlist_in_welopen_df(
@@ -785,7 +785,7 @@ def expand_wlist_in_welopen_df(
         # Explicit wellname was used, no expansion to happen:
         exp_welopens.append(row)
     dframe = pd.DataFrame(exp_welopens)
-    return dframe.astype(object).where(pd.notna(dframe), None)  # type: ignore[call-overload]
+    return dframe.astype(object).replace({np.nan: None})
 
 
 def applywelopen(
@@ -844,7 +844,7 @@ def applywelopen(
            "The WLIST dataframe must be expanded through expand_wlist()"
        )
 
-    welopen_df = welopen_df.astype(object).where(pd.notna(welopen_df), None)  # type: ignore[call-overload]
+    welopen_df = welopen_df.astype(object).replace({np.nan: None})
     if wlist_df is not None:
         welopen_df = expand_wlist_in_welopen_df(welopen_df, wlist_df)
     if complump_df is not None:
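
All three hunks swap the same idiom. On an object-dtype frame, `.replace({np.nan: None})` converts missing values to `None` just as the old `.where(pd.notna(...), None)` did, without the mypy suppression. A sketch (ours) of the equivalence:

```python
import numpy as np
import pandas as pd

dframe = pd.DataFrame({"WELL": ["OP1", np.nan], "I": [1, np.nan]})
old = dframe.astype(object).where(pd.notna(dframe), None)
new = dframe.astype(object).replace({np.nan: None})
print(old.equals(new))          # True
print(new.to_numpy().tolist())  # [['OP1', 1.0], [None, None]]
```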
res2df/faults.py CHANGED
@@ -7,6 +7,7 @@ a DataFrame
 
 import argparse
 import logging
+from itertools import product
 
 # Needed for mypy
 import opm.io
@@ -36,7 +37,7 @@ def df(deck: "ResdataFiles | opm.opmcommon_python.Deck") -> pd.DataFrame:
 
     # In[91]: list(deck['FAULTS'][0])
     # Out[91]: [[u'F1'], [36], [36], [41], [42], [1], [14], [u'I']]
-    data = []
+    data: list[list[str | int]] = []
     # It is allowed in Eclipse to use the keyword FAULTS
     # as many times as needed. Thus we need to loop in some way:
     for keyword in deck:
@@ -47,10 +48,13 @@ def df(deck: "ResdataFiles | opm.opmcommon_python.Deck") -> pd.DataFrame:
             frec_dict = parse_opmio_deckrecord(rec, "FAULTS")
             faultname = frec_dict["NAME"]
             faultface = frec_dict["FACE"]
-            for i_idx in range(frec_dict["IX1"], frec_dict["IX2"] + 1):
-                for j_idx in range(frec_dict["IY1"], frec_dict["IY2"] + 1):
-                    for k_idx in range(frec_dict["IZ1"], frec_dict["IZ2"] + 1):
-                        data.append([faultname, i_idx, j_idx, k_idx, faultface])
+
+            indices = product(
+                range(frec_dict["IX1"], frec_dict["IX2"] + 1),
+                range(frec_dict["IY1"], frec_dict["IY2"] + 1),
+                range(frec_dict["IZ1"], frec_dict["IZ2"] + 1),
+            )
+            data.extend([faultname, i, j, k, faultface] for i, j, k in indices)
     dframe = pd.DataFrame(columns=COLUMNS, data=data)
     logger.info("Extracted %i faults", len(dframe["NAME"].unique()))
     return dframe
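
The refactor is behaviour-preserving: `itertools.product` yields the same (i, j, k) triples as the three nested loops, in the same order (the last range varies fastest). A quick check (ours):

```python
from itertools import product

nested = [
    (i, j, k)
    for i in range(1, 3)
    for j in range(4, 6)
    for k in range(7, 9)
]
assert nested == list(product(range(1, 3), range(4, 6), range(7, 9)))
print(nested[:3])  # [(1, 4, 7), (1, 4, 8), (1, 5, 7)]
```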
res2df/fipreports.py CHANGED
@@ -4,6 +4,7 @@ import argparse
 import datetime
 import logging
 import re
+from pathlib import Path
 
 import numpy as np
 import pandas as pd
@@ -133,7 +134,7 @@ def df(prtfile: str | ResdataFiles, fipname: str = "FIPNUM") -> pd.DataFrame:
         ".+" + fipname + r"\s+REPORT\s+REGION\s+(\d+)", re.IGNORECASE
     )
 
-    with open(prtfile, encoding="utf-8") as prt_fh:
+    with Path(prtfile).open(encoding="utf-8") as prt_fh:
         logger.info(
             "Parsing file %s for blocks starting with %s REPORT REGION",
             prtfile,
res2df/grid.py CHANGED
@@ -47,7 +47,7 @@ def get_available_rst_dates(resdatafiles: ResdataFiles) -> list[datetime.date]:
     )
     return [
         resdatafiles.get_rstfile().iget_restart_sim_time(index).date()
-        for index in range(0, len(report_indices))
+        for index in range(len(report_indices))
     ]
 
 
@@ -544,7 +544,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist .INIT and .EGRID files with the same path and basename.",
+        " There must exist .INIT and .EGRID files with the same path and basename.",
     )
     parser.add_argument(
         "--vectors",
@@ -556,8 +556,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
         "--rstdates",
         type=str,
         help="Point in time to grab restart data from, "
-        + "either 'first' or 'last', 'all', or a date in "
-        + "YYYY-MM-DD format",
+        "either 'first' or 'last', 'all', or a date in "
+        "YYYY-MM-DD format",
         default="",
     )
     parser.add_argument(
@@ -610,10 +610,11 @@ def drop_constant_columns(
     if dframe.empty:
         return dframe
 
-    columnstodelete = []
-    for col in set(dframe.columns) - set(alwayskeep):
-        if len(dframe[col].unique()) == 1:
-            columnstodelete.append(col)
+    columnstodelete = [
+        col
+        for col in (set(dframe.columns) - set(alwayskeep))
+        if len(dframe[col].unique()) == 1
+    ]
     if columnstodelete:
         logging.info("Deleting constant columns %s", columnstodelete)
         return dframe.drop(columnstodelete, axis=1)
@@ -698,11 +699,7 @@ def df2res(
 
     res2df_header = (
         "Output file printed by "
-        + "res2df.grid "
-        + __version__
-        + "\n"
-        + " at "
-        + str(datetime.datetime.now())
+        "res2df.grid " + __version__ + "\n" + " at " + str(datetime.datetime.now())
     )
 
     string = ""
res2df/gruptree.py CHANGED
@@ -9,9 +9,7 @@ import warnings
 from typing import Any
 
 import numpy as np
-
-# Needed for mypy
-import opm.io
+import opm
 import pandas as pd
 import treelib
 
@@ -63,11 +61,14 @@ def df(
     date: datetime.date | None
     date = startdate if startdate is not None else None
 
+    if not isinstance(deck, (ResdataFiles, opm.opmcommon_python.Deck)):
+        raise TypeError("Input deck must be either ResdataFiles or an opm Deck.")
+
     if isinstance(deck, ResdataFiles):
         deck = deck.get_deck()
 
     edgerecords = []  # list of dict of rows containing an edge.
-    nodedatarecords = []
+    nodedatarecords: list[dict[str, Any]] = []
 
     # In order for the GRUPTREE/BRANPROP keywords to accumulate, we
     # store the edges as dictionaries indexed by the edge
@@ -144,10 +145,10 @@ def df(
         renamer = (
             {"PRESSURE": "TERMINAL_PRESSURE"} if kword.name == "NODEPROP" else None
         )
-        for rec in kword:
-            nodedatarecords.append(
-                parse_opmio_deckrecord(rec, kword.name, renamer=renamer)
-            )
+        nodedatarecords.extend(
+            parse_opmio_deckrecord(rec, kword.name, renamer=renamer)
+            for rec in kword
+        )
         nodedata[kword.name] = (
             pd.DataFrame(nodedatarecords)
             .drop_duplicates(subset="NAME", keep="last")
@@ -178,7 +179,6 @@ def df(
     # This happens with WELSPECS if both GRUPTREE and BRANPROP is defined
     # at the same timestep. And when a node is redirected to a new parent node
     dframe = dframe.drop_duplicates(subset=["DATE", "CHILD", "KEYWORD"], keep="last")
-    print(dframe)
     return dframe
 
 
@@ -254,7 +254,7 @@ def _merge_edges_and_nodeinfo(
 
     # Write WELSPECS edges
     welspecs_parents = set()
-    for (child, parent), _ in wellspecsedges.items():
+    for child, parent in wellspecsedges:  # noqa: PLE1141
         # For BRANPROP trees, only wells with a parent in the tree are added
         if (treetype == "BRANPROP" and parent in childs) or (treetype == "GRUPTREE"):
             rec_dict = {
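
A sketch (with stand-in classes, not the package's own) of the new up-front type check in df(); `isinstance` with a tuple accepts either class and rejects anything else before deck parsing starts:

```python
class ResdataFiles: ...
class Deck: ...  # stand-in for opm.opmcommon_python.Deck

def check(deck):
    if not isinstance(deck, (ResdataFiles, Deck)):
        raise TypeError("Input deck must be either ResdataFiles or an opm Deck.")

check(ResdataFiles())           # accepted
try:
    check("path/to/file.DATA")  # a bare path string is now rejected early
except TypeError as err:
    print(err)
```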
res2df/inferdims.py CHANGED
@@ -38,7 +38,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int:
         raise ValueError("Only supports TABDIMS and EQLDIMS")
     if dimkeyword == "TABDIMS" and dimitem not in [0, 1]:
         raise ValueError("Only support item 0 and 1 in TABDIMS")
-    if dimkeyword == "EQLDIMS" and dimitem not in [0]:
+    if dimkeyword == "EQLDIMS" and dimitem != 0:
         raise ValueError("Only item 0 in EQLDIMS can be estimated")
 
     # A less than res2df-standard permissive opm.io, when using
@@ -112,7 +112,7 @@ def inject_dimcount(
         raise ValueError("Only supports TABDIMS and EQLDIMS")
     if dimkeyword == "TABDIMS" and dimitem not in [0, 1]:
         raise ValueError("Only support item 0 and 1 in TABDIMS")
-    if dimkeyword == "EQLDIMS" and dimitem not in [0]:
+    if dimkeyword == "EQLDIMS" and dimitem != 0:
         raise ValueError("Only item 0 in EQLDIMS can be injected")
 
     if dimkeyword in deckstr:
res2df/nnc.py CHANGED
@@ -181,7 +181,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist .INIT and .EGRID files with the same path and basename.",
+        " There must exist .INIT and .EGRID files with the same path and basename.",
     )
     parser.add_argument(
         "-c",
@@ -242,11 +242,7 @@ def df2res_editnnc(
     string = ""
     res2df_header = (
         "Output file printed by res2df.nnc"
-        + " "
-        + __version__
-        + "\n"
-        + " at "
-        + str(datetime.datetime.now())
+        " " + __version__ + "\n" + " at " + str(datetime.datetime.now())
     )
     if not nocomments:
         string += comment_formatter(res2df_header)
res2df/parameters.py CHANGED
@@ -5,7 +5,7 @@ import json
 import logging
 import warnings
 from pathlib import Path
-from typing import Any
+from typing import Any, cast
 
 import pandas as pd
 import yaml
@@ -47,7 +47,7 @@ def find_parameter_files(
         filebase + ".txt",
         filebase,
     ]
-    paths_to_check: list[Path] = [Path("."), Path(".."), Path("..") / Path("..")]
+    paths_to_check: list[Path] = [Path(), Path(".."), Path("..") / Path("..")]
     foundfiles = []
     for path in paths_to_check:
         for fname in files_to_lookfor:
@@ -79,11 +79,12 @@ def load_parameterstxt(filename: str | Path) -> dict[str, Any]:
             engine="python",
             names=["KEY", "VALUE"],
             index_col=False,
+            dtype={"KEY": str},
         )
     except pd.errors.ParserWarning as txt_exc:
         raise pd.errors.ParserError(txt_exc) from txt_exc
 
-    return dframe.set_index("KEY")["VALUE"].to_dict()
+    return cast(dict[str, Any], dframe.set_index("KEY")["VALUE"].to_dict())
 
 
 def load_all(
@@ -147,7 +148,7 @@ def load(filename: str | Path) -> dict[str, Any]:
     if not params_dict:
         try:
             logger.debug("Trying to parse %s with json.load()", filename)
-            with open(filename, encoding="utf-8") as f_handle:
+            with Path(filename).open(encoding="utf-8") as f_handle:
                 params_dict = json.load(f_handle)
             assert isinstance(params_dict, dict)
             logger.debug(" - ok, parsed as yaml")
res2df/pillars.py CHANGED
@@ -327,7 +327,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist .INIT and .EGRID files with the same path and basename.",
+        " There must exist .INIT and .EGRID files with the same path and basename.",
     )
     parser.add_argument(
         "--region",
res2df/pvt.py CHANGED
@@ -252,7 +252,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator,"
-        + " or file with PVT keywords.",
+        " or file with PVT keywords.",
     )
     parser.add_argument(
         "-o",
res2df/satfunc.py CHANGED
@@ -103,15 +103,14 @@ def df(
 
     wanted_keywords = handle_wanted_keywords(keywords, deck, SUPPORTED_KEYWORDS)
 
-    frames = []
-    for keyword in wanted_keywords:
-        frames.append(
-            interpolate_defaults(
-                keyworddata_to_df(
-                    deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM"
-                ).assign(KEYWORD=keyword)
-            )
+    frames = [
+        interpolate_defaults(
+            keyworddata_to_df(
+                deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM"
+            ).assign(KEYWORD=keyword)
         )
+        for keyword in wanted_keywords
+    ]
     nonempty_frames = [frame for frame in frames if not frame.empty]
     if nonempty_frames:
         dframe = pd.concat(nonempty_frames, axis=0, sort=False, ignore_index=True)
@@ -167,7 +166,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of .DATA input file for the reservoir simulator,"
-        + " or file with saturation functions.",
+        " or file with saturation functions.",
     )
     parser.add_argument(
         "-o",
res2df/summary.py CHANGED
@@ -39,6 +39,7 @@ PD_FREQ_MNEMONICS: dict[str, str] = {
 See
 https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
 """
+PANDAS_MAJOR_VERSION = int(pd.__version__.split(".")[0])
 
 
 def date_range(
@@ -617,7 +618,9 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
     dframe = dframe.copy()
     if "DATE" in dframe.columns:
         # Infer datatype (Pandas cannot answer it) based on the first element:
-        if isinstance(dframe["DATE"].to_numpy()[0], str):
+        if PANDAS_MAJOR_VERSION >= 3:
+            dframe["DATE"] = pd.to_datetime(dframe["DATE"])
+        elif isinstance(dframe["DATE"].to_numpy()[0], str):
             # Do not use pd.Series.apply() here, Pandas would try to convert it to
             # datetime64[ns] which is limited at year 2262.
             dframe["DATE"] = pd.Series(
@@ -625,7 +628,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
                 dtype="object",
                 index=dframe.index,
             )
-        if isinstance(dframe["DATE"].to_numpy()[0], dt.date):
+        elif isinstance(dframe["DATE"].to_numpy()[0], dt.date):
             dframe["DATE"] = pd.Series(
                 [
                     dt.datetime.combine(dateobj, dt.datetime.min.time())
@@ -650,9 +653,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
     if "Unnamed: 0" in dframe:
         dframe = dframe.drop("Unnamed: 0", axis="columns")
 
-    block_columns = [
-        col for col in dframe.columns if (col.startswith("B") or col.startswith("LB"))
-    ]
+    block_columns = [col for col in dframe.columns if (col.startswith(("B", "LB")))]
     if block_columns:
         dframe = dframe.drop(columns=block_columns)
         logger.warning(
@@ -697,7 +698,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist a UNSMRY file with the same path and basename.",
+        " There must exist a UNSMRY file with the same path and basename.",
     )
     parser.add_argument(
         "--time_index",
@@ -830,7 +831,7 @@ def summary_reverse_main(args: argparse.Namespace) -> None:
     eclbase = Path(args.output).name
 
     # Summary.fwrite() can only write to current directory:
-    cwd = os.getcwd()
+    cwd = Path.cwd()
     summary = df2ressum(summary_df, eclbase)
     try:
         os.chdir(outputdir)
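
The new module-level constant gates the DATE handling once per import; splitting `pd.__version__` on "." gives the major version as an int. A sketch of the pattern:

```python
import pandas as pd

PANDAS_MAJOR_VERSION = int(pd.__version__.split(".")[0])
if PANDAS_MAJOR_VERSION >= 3:
    print("DATE column goes through pd.to_datetime()")
else:
    print("DATE column keeps the per-dtype object handling")
```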
res2df/trans.py CHANGED
@@ -6,7 +6,7 @@ Extract transmissibility information from output files as Dataframes.
 import argparse
 import logging
 
-import networkx
+import networkx as nx
 import pandas as pd
 
 from .common import write_dframe_stdout_file
@@ -221,12 +221,12 @@ def df(
     return trans_df
 
 
-def make_nx_graph(resdatafiles: ResdataFiles, region: str = "FIPNUM") -> networkx.Graph:
+def make_nx_graph(resdatafiles: ResdataFiles, region: str = "FIPNUM") -> nx.Graph:
     """Construct a networkx graph for the transmissibilities."""
     trans_df = df(resdatafiles, vectors=[region], coords=True, group=True)
     reg1 = region + "1"
     reg2 = region + "2"
-    graph: networkx.Graph = networkx.Graph()
+    graph: nx.Graph = nx.Graph()
     graph.add_weighted_edges_from(
         [tuple(row) for row in trans_df[[reg1, reg2, "TRAN"]].to_numpy()]
     )
@@ -242,7 +242,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     parser.add_argument(
         "DATAFILE",
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist INIT and EGRID files with the same path and basename.",
+        " There must exist INIT and EGRID files with the same path and basename.",
     )
     parser.add_argument("--vectors", nargs="+", help="Extra INIT vectors to be added")
     parser.add_argument(
res2df/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '1.3.10'
-__version_tuple__ = version_tuple = (1, 3, 10)
+__version__ = version = '1.3.12'
+__version_tuple__ = version_tuple = (1, 3, 12)
 
 __commit_id__ = commit_id = None
res2df/vfp/_vfp.py CHANGED
@@ -7,6 +7,7 @@ output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table
 import argparse
 import logging
 import sys
+from pathlib import Path
 from typing import Any
 
 import numpy as np
@@ -400,8 +401,7 @@ def df2res(
         str_vfps += "\n"
 
     if filename:
-        with open(filename, "w", encoding="utf-8") as fout:
-            fout.write(str_vfps)
+        Path(filename).write_text(str_vfps, encoding="utf-8")
 
     return str_vfps
 
res2df/vfp/_vfpcommon.py CHANGED
@@ -30,19 +30,18 @@ def _string2intlist(list_def_str: str) -> list[int]:
         list_def_str: String defining list of int
             Format "[1,2,6:9]" to define list [1,2,6,7,8,9]
     """
-    list = []
+    int_list: list[int] = []
     list_def = list_def_str.strip().strip("[").strip("]")
     if list_def.strip():
         list_items = list_def.split(",") if "," in list_def else [list_def]
         for item in list_items:
             if ":" in item:
                 item_split = item.split(":")
-                for value in item_split:
-                    list.append(int(value))
+                int_list.extend(int(value) for value in item_split)
             else:
-                list.append(int(item))
+                int_list.append(int(item))
 
-    return list
+    return int_list
 
 
 def _deckrecord2list(
@@ -124,15 +123,12 @@ def _stack_vfptable2df(
     no_indices = len(index_names_list)
 
     # insert index values as first columns in dataframe
-    for i in range(0, no_indices):
+    for i in range(no_indices):
         df_vfptable.insert(i, index_names_list[i], index_values_list[i])
 
     # create multi-index for columns
-    indextuples = []
-    for index_name in index_names_list:
-        indextuples.append((index_name, "DELETE"))
-    for flowvalue in flow_values_list:
-        indextuples.append(("TAB", str(flowvalue)))
+    indextuples = [(index_name, "DELETE") for index_name in index_names_list]
+    indextuples.extend(("TAB", str(flowvalue)) for flowvalue in flow_values_list)
 
     # Set the columns to a MultiIndex, to facilitate stacking
     df_vfptable.columns = pd.MultiIndex.from_tuples(indextuples)
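
Renaming the local `list` to `int_list` fixes builtin shadowing, not just style: once the name is bound locally, the builtin is unreachable for the rest of the function. A contrived sketch (ours) of the failure mode:

```python
def broken():
    list = [1, 2]      # shadows the builtin from here on
    return list("34")  # TypeError: 'list' object is not callable

try:
    broken()
except TypeError as err:
    print(err)
```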
res2df/vfp/_vfpinj.py CHANGED
@@ -14,19 +14,15 @@ import numbers
 from typing import Any
 
 import numpy as np
+
+# Needed for mypy
+import opm.io
 import pandas as pd
 import pyarrow as pa
 
-try:
-    # Needed for mypy
-
-    import opm.io
-
-    # This import is seemingly not used, but necessary for some attributes
-    # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa: F401
-except ImportError:
-    pass
+# This import is seemingly not used, but necessary for some attributes
+# to be included in DeckItem objects.
+from opm.io.deck import DeckKeyword  # noqa: F401
 
 from ..common import comment_formatter, parse_opmio_deckrecord
 from ._vfpcommon import (
@@ -276,7 +272,7 @@ def basic_data2pyarrow(
 
     # Column metadata is index in THP array
     col_metadata_list = []
-    for i in range(0, len(thp_values)):
+    for i in range(len(thp_values)):
         col_name = str(i)
         col_dtype = pa.float64()
         col_metadata = {
@@ -395,9 +391,10 @@ def pyarrow2basic_data(pa_table: pa.Table) -> dict[str, Any]:
     """
 
     # Extract index data from colum metadata
-    thp_indices = []
-    for i in range(0, pa_table.num_columns):
-        thp_indices.append(int(pa_table.schema.field(i).metadata[b"thp_idx"]))
+    thp_indices = [
+        int(pa_table.schema.field(i).metadata[b"thp_idx"])
+        for i in range(pa_table.num_columns)
+    ]
 
     # Extract table data as numpy.array
     bhp_data = np.array(pa_table.columns)
@@ -601,7 +598,7 @@ def _write_table_records(
     else:
         table = table.reshape(no_records, no_flow_values)
 
-    for row in range(0, no_records):
+    for row in range(no_records):
         thp = thp_indices[row]
         deck_str += f"{thp:2d}"
         for n, value in enumerate(table[row, :]):
res2df/vfp/_vfpprod.py CHANGED
@@ -14,18 +14,15 @@ import numbers
 from typing import Any
 
 import numpy as np
+
+# Needed for mypy
+import opm.io
 import pandas as pd
 import pyarrow as pa
 
-try:
-    # Needed for mypy
-    import opm.io
-
-    # This import is seemingly not used, but necessary for some attributes
-    # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa: F401
-except ImportError:
-    pass
+# This import is seemingly not used, but necessary for some attributes
+# to be included in DeckItem objects.
+from opm.io.deck import DeckKeyword  # noqa: F401
 
 from ..common import comment_formatter, parse_opmio_deckrecord
 from ._vfpcommon import (
@@ -382,7 +379,7 @@ def basic_data2pyarrow(
     # Column metadata is list of indices (THP,WFR,GFR,ALQ)
     col_metadata_list = []
     num_records = len(thp_values) * len(wfr_values) * len(gfr_values) * len(alq_values)
-    for i in range(0, num_records):
+    for i in range(num_records):
         thp_idx = thp_indices[i]
         wfr_idx = wfr_indices[i]
         gfr_idx = gfr_indices[i]
@@ -597,7 +594,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> dict[str, Any]:
     wfr_indices = []
     gfr_indices = []
     alq_indices = []
-    for i in range(0, pa_table.num_columns):
+    for i in range(pa_table.num_columns):
         thp_indices.append(int(pa_table.schema.field(i).metadata[b"thp_idx"]))
         wfr_indices.append(int(pa_table.schema.field(i).metadata[b"wfr_idx"]))
         gfr_indices.append(int(pa_table.schema.field(i).metadata[b"gfr_idx"]))
@@ -880,7 +877,7 @@ def _write_table_records(
     else:
         table = table.reshape(no_records, no_flow_values)
 
-    for row in range(0, no_records):
+    for row in range(no_records):
         thp = thp_indices[row]
         wfr = wfr_indices[row]
         gfr = gfr_indices[row]
res2df/wellcompletiondata.py CHANGED
@@ -257,7 +257,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
         "DATAFILE",
         type=str,
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist a UNSMRY file with the same path and basename",
+        " There must exist a UNSMRY file with the same path and basename",
     )
     parser.add_argument(
         "--zonemap",
res2df/wellconnstatus.py CHANGED
@@ -96,7 +96,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
         "DATAFILE",
         type=str,
         help="Name of the .DATA input file for the reservoir simulator."
-        + " There must exist a UNSMRY file with the same path and basename.",
+        " There must exist a UNSMRY file with the same path and basename.",
     )
     parser.add_argument(
         "-o",
res2df-1.3.10.dist-info/METADATA → res2df-1.3.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: res2df
-Version: 1.3.10
+Version: 1.3.12
 Summary: Convert reservoir simulator input and output to DataFrames
 Author-email: Håvard Berland <havb@equinor.com>
 License-Expression: GPL-3.0-only
@@ -16,6 +16,7 @@ Classifier: Topic :: Utilities
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Natural Language :: English
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
@@ -25,7 +26,7 @@ Requires-Dist: resfo
 Requires-Dist: networkx
 Requires-Dist: numpy
 Requires-Dist: opm>=2020.10.2
-Requires-Dist: pandas
+Requires-Dist: pandas>=2
 Requires-Dist: pyarrow
 Requires-Dist: pyyaml>=5.1
 Requires-Dist: treelib
@@ -48,10 +49,10 @@ Requires-Dist: sphinx-autoapi; extra == "docs"
 Requires-Dist: ipython; extra == "docs"
 Requires-Dist: rstcheck; extra == "docs"
 Requires-Dist: setuptools; extra == "docs"
-Requires-Dist: sphinx; extra == "docs"
+Requires-Dist: sphinx<9; extra == "docs"
 Requires-Dist: sphinx-argparse; extra == "docs"
 Requires-Dist: sphinx-autodoc-typehints; extra == "docs"
-Requires-Dist: sphinx_rtd_theme; extra == "docs"
+Requires-Dist: sphinx_rtd_theme>=3.1.0.rc1; extra == "docs"
 Provides-Extra: ert
 Requires-Dist: ert>=10.2.0b13; extra == "ert"
 Dynamic: license-file
@@ -59,7 +60,7 @@ Dynamic: license-file
 [![Publish to PyPI](https://github.com/equinor/res2df/actions/workflows/publish.yml/badge.svg)](https://github.com/equinor/res2df/actions/workflows/publish.yml)
 [![PyPI version](https://img.shields.io/pypi/v/res2df.svg)](https://pypi.org/project/res2df/)
 [![codecov](https://codecov.io/gh/equinor/res2df/graph/badge.svg?token=3sZBGGu5VG)](https://codecov.io/gh/equinor/res2df)
-[![Python 3.11-3.13](https://img.shields.io/badge/python-3.11%20|%203.12%20|%203.13-blue.svg)](https://www.python.org)
+[![Python 3.11-3.14](https://img.shields.io/badge/python-3.11%20|%203.12%20|%203.13%20|%203.14-blue.svg)](https://www.python.org)
 [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
 [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)
 
res2df-1.3.10.dist-info/RECORD → res2df-1.3.12.dist-info/RECORD CHANGED
@@ -1,32 +1,32 @@
 res2df/__init__.py,sha256=IXPD2hI8TlGP7oZcCtXBBQlcnx3QrvYkglI94G731d4,1429
 res2df/__version__.py,sha256=3bzEDF3c5flgPrfdaj_gccAfCp9firn58xrcjSwgh7E,112
-res2df/common.py,sha256=MXJXit92q_UyWwcLPXnM9HPeywDpB6oJgFpVEeskc6Y,35041
-res2df/compdat.py,sha256=TlG7jwh7ZnqPcAjoYfaUX7fEUYfJHjRPuCcrKy9jBxw,36741
+res2df/common.py,sha256=ibSwz3g2IFAFwAv1ug0PaESGB9V1a6ziyakq4oAFiBg,34852
+res2df/compdat.py,sha256=VZrwlPBW88o4jhELWrFAEU2-N8XhgOVCEtbp1cTfAJ0,36626
 res2df/constants.py,sha256=0W-ZYlm3Bbe9MONrRhA0c4yLXfJlqGX5apFN64i8RU0,258
 res2df/csv2res.py,sha256=EN3FsBXONmKWVuYk3cTmW25bsmvhp9Yg1VvJ_DSoAA8,3077
 res2df/equil.py,sha256=6DrStpmeZDnkSUyZMd2Hxdlun95P2xi6nXUd6QPbSHw,17102
-res2df/faults.py,sha256=PSUWi1kruyL0iv1lq1r6OeoH79YNRsEgx3o1jI3O9Q4,2991
-res2df/fipreports.py,sha256=v893Bt8qFcVd1YUYoerTV8uu6a_PeS1VDJ7uWnqy7iI,7569
-res2df/grid.py,sha256=03k71XDanIr1Jlk6uMm-td8IurOVu5Pw4c-HuPVugCI,27350
-res2df/gruptree.py,sha256=1DiRP1VWoyCnxncg-qN9qHZ2YKGpEsT9b734jEmzOtg,16881
-res2df/inferdims.py,sha256=4AkRX-LjyWLMlg1RgDdRCSz3ykDgblOps6xfDZXBeOQ,7136
-res2df/nnc.py,sha256=DvrRTojNycEQ-IBJqulBuSLGnvRtgoXTHSFoSU6-ckc,9213
-res2df/parameters.py,sha256=zdGvmFg3qyzopXRTgb9CvyI1izCBYS9aZYPNBRzeDOg,6215
-res2df/pillars.py,sha256=KrsqmLe9gqx6HteqQIvE2kVQlZqVRkTRs6VeEH2w_vo,15985
-res2df/pvt.py,sha256=EaDrzDASIb-Ce2j9kDJwwfkgB1HHszNnKlaKZ1TgPhw,22460
+res2df/faults.py,sha256=wd--_6ilhOozsi0HID4YUTFuZi-8BcGhJP1f3MT17pY,3058
+res2df/fipreports.py,sha256=XvkjFyPuJnroRKHDNRkrSfU4rcfkKhYJFRi8FWahgLA,7599
+res2df/grid.py,sha256=myOGanVAGqigbbTgzxTCu12BMc4pL4S8hI1yJ1-2Udg,27288
+res2df/gruptree.py,sha256=LfYKF7xO2Te2eOKGPpCmtxPKbedKYf9P1bqCUvmKTm0,17013
+res2df/inferdims.py,sha256=NtjCaR0l54LiFRAg4Zq9xMJmYKyBSc3aLWa9RASgRNs,7124
+res2df/nnc.py,sha256=_O2Uxc0wkUeXzGosLPRAqFcSXrQ9qKm8qduOJt9k8m8,9177
+res2df/parameters.py,sha256=hr0sJC7GvhkW0OE4uNhhjnWXoSA1WJDKWbN5sElVLgE,6281
+res2df/pillars.py,sha256=PkPNVp_ukLr1HAudjlQwgrsnlb8yAVY0YuyJZdEad60,15983
+res2df/pvt.py,sha256=OAHYUpBoeFQlrNkYqsDK4oYqRMkxZpP6oyKXQ1pd0dw,22458
 res2df/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 res2df/res2csv.py,sha256=RBDz3vt15CnK0WCeGPIpOf7TzzXnl5Svi9GX7dqb7ys,10817
 res2df/res2csvlogger.py,sha256=n9gMmlgzPNbtZAN9dhrBDpahEK1bG2jPWQZ7CHjSN6c,2243
 res2df/resdatafiles.py,sha256=tMyqYYcXUua6fHQspQgt7nGWfW34wQvSjWAWD6LIGxI,9946
 res2df/rft.py,sha256=_Y6mEbhMgaV2W_l7po2prMsXbQeITMQv84ao0IR9ufI,27974
-res2df/satfunc.py,sha256=EAfOi6Y40xMHSlsRNWx46p8YvpftPY87leJM9BCTXZg,13574
-res2df/summary.py,sha256=2izIMKxqFQ-gHVsuHpguBM168YnynIWMGeD8x8pzZw4,31355
+res2df/satfunc.py,sha256=md_pb_T_XgAhL40VGWO_wH2JFOcljUvqpRSXvJc7dvg,13527
+res2df/summary.py,sha256=tm0lYagNgSycDMAvVSmpqTCMLzFvq5PDxUfzhjlVr0I,31481
 res2df/svg_color_keyword_names.txt,sha256=yiI2Ohv3NxxEAJC5OjVERZi7MLi2-bk7okyI5-xFDr8,1456
-res2df/trans.py,sha256=2SdPa01fauGth4ElCmA8z9HtPQsyfvVBdVEfo6VKLtc,10483
-res2df/version.py,sha256=uj00JTTkKT8xNjdKWd_8A4SussXm2MvL84uHQ0XWhD4,706
+res2df/trans.py,sha256=aTZ3tLQJjRGUUdw4gmGCXcAoSs33AJWHa7AYlCmoOak,10469
+res2df/version.py,sha256=XSypKUR4Hmw-4FC_upVEcnmOdXCwhUI2DUVSjR35UiA,706
 res2df/wcon.py,sha256=l5yvjQGleLyQY_ySjw2hq0C8gC-U3EMQRDGtjo4iroA,3152
-res2df/wellcompletiondata.py,sha256=B1uXTVdDRLWhX9ftfPHti9NfzxTgAY-AcMmKrpwQsCY,11106
-res2df/wellconnstatus.py,sha256=rexjlxJhN-ylllo36TUjCNZGqNpKDP71RDv308ZjAJo,4063
+res2df/wellcompletiondata.py,sha256=5wdipNlu0B9BDZaAfJcPT-WwskP_PtsTIZv-q3frKtM,11104
+res2df/wellconnstatus.py,sha256=fMar2MkCXh2_-XWLbOmdUUDftQM9LnxDkqFdrfSAgUw,4061
 res2df/hook_implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 res2df/hook_implementations/forward_model_steps.py,sha256=GLpmlv9exEKkCLsHv2AUKWnK4oIxJ4jhHuI0p41_m6U,4986
 res2df/opmkeywords/BRANPROP,sha256=MZA6L6J9olaprrUhRah_wgJoOiYXP-DknVAYOdsIC14,475
@@ -76,14 +76,14 @@ res2df/opmkeywords/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 res2df/opmkeywords/readme,sha256=xJM2Or6oiSP02xTe-aY4efPeh5UQiTieScoPHGV4rH0,285
 res2df/opmkeywords/runmetoupdate.sh,sha256=GKvjvd3H3Sf0bmeIduQ43pYcDmcBlOL_Epfm_xjDFUA,996
 res2df/vfp/__init__.py,sha256=6ksjEvMSkHK_3C_x5Fz0_eR4dq-DtTqm_YMv4WI13Fc,633
-res2df/vfp/_vfp.py,sha256=ORKmO3c6YeF3bVtG3FAUgS7ByBkCmLlTm6pyDw52Dxg,19377
-res2df/vfp/_vfpcommon.py,sha256=IRlyITyP1KQAZDvSyfLvEYXyzT2Q4PpHC2JmPMj5HvU,7181
+res2df/vfp/_vfp.py,sha256=sHN_08HJEtVzirltNnJ_wfm9JVm_iGrRdVF-BhSzkLQ,19371
+res2df/vfp/_vfpcommon.py,sha256=rGuExqqUVzKp8Fu6HjWcDk0Voy7ZqcxI6-V24wgHc3g,7141
 res2df/vfp/_vfpdefs.py,sha256=TT0bHHbbdGMS3Xm7dzpZPR7zBLPvlCuQIKvH7sr5TBA,6985
-res2df/vfp/_vfpinj.py,sha256=xojHI2hcdUFBUslMKu5K2p2etN-8sinlx-HlkINZOs4,22523
-res2df/vfp/_vfpprod.py,sha256=vohQOu63pQsinGT8wgreeUwxzfwH6S1Om90XB8gfUzs,35669
-res2df-1.3.10.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-res2df-1.3.10.dist-info/METADATA,sha256=L7B3z6Scx_D5kxHNEBNKOzHoXSWxYg6WzxSzBC0noi8,3973
-res2df-1.3.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-res2df-1.3.10.dist-info/entry_points.txt,sha256=ctl-_CwayyUVqFMUrwTT3Z3gZdnW6WCaiaLUJ4f_HnY,180
-res2df-1.3.10.dist-info/top_level.txt,sha256=U8AZBqrFHm9PMXg0toCfHJ817VfFtdKQpc8JuS5qToM,7
-res2df-1.3.10.dist-info/RECORD,,
+res2df/vfp/_vfpinj.py,sha256=SHDhPnrmuJavMfP-Y6Cshf0vBKx3yAZYuk9nZt4Xeao,22447
+res2df/vfp/_vfpprod.py,sha256=kKPGieDZKkfQlnFUyU6IwWUpmGRQkGr1cZtAt75PbM4,35606
+res2df-1.3.12.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+res2df-1.3.12.dist-info/METADATA,sha256=YeNbMxIa55d-WIKhbuKANtgMW7rgsM90pupDRz7uUn0,4051
+res2df-1.3.12.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+res2df-1.3.12.dist-info/entry_points.txt,sha256=ctl-_CwayyUVqFMUrwTT3Z3gZdnW6WCaiaLUJ4f_HnY,180
+res2df-1.3.12.dist-info/top_level.txt,sha256=U8AZBqrFHm9PMXg0toCfHJ817VfFtdKQpc8JuS5qToM,7
+res2df-1.3.12.dist-info/RECORD,,
res2df-1.3.10.dist-info/WHEEL → res2df-1.3.12.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.9.0)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 