res2df 1.3.11__py3-none-any.whl → 1.3.12__py3-none-any.whl

This diff shows the changes between two published versions of the package as they appear in the public registry. It is provided for informational purposes only.
res2df/common.py CHANGED
@@ -653,7 +653,7 @@ def generic_deck_table(
 
     # Start building the string we are to return:
     string = keyword + "\n"
-    if comment is not None and comment:
+    if comment:
         string += "\n".join(["-- " + line for line in comment.splitlines()]) + "\n"
 
     # Empty tables are ok with Eclipse (at least sometimes)
@@ -664,7 +664,7 @@ def generic_deck_table(
     # Pandas make a pretty txt table:
     dframe = dframe.copy()
 
-    # Column names are pr. ec2ldf standard, redo to opm.common in order to use
+    # Column names are pr. res2df standard, redo to opm.common in order to use
     # sorting from that:
     if renamer is not None:
         inv_renamer = {value: key for key, value in renamer.items()}
@@ -696,11 +696,14 @@ def generic_deck_table(
     dframe = dframe.fillna(value="1*")
 
     if drop_trailing_columns:
+        columns_to_drop = []
         for col_name in reversed(relevant_columns):
-            if set(dframe[col_name].to_numpy()) == {"1*"}:
-                del dframe[col_name]
+            if (dframe[col_name] == "1*").all():
+                columns_to_drop.append(col_name)
             else:
                 break
+        if columns_to_drop:
+            dframe = dframe.drop(columns=columns_to_drop)
 
     # It is critical for opm.common, maybe also E100 to have integers printed
     # as integers, for correct parsing. Ensure these are integer where the json
@@ -708,13 +711,15 @@ def generic_deck_table(
     integer_cols = {
         item["name"]
         for item in OPMKEYWORDS[keyword]["items"]
-        if item["value_type"] == "INT"  # and item["name"] in col_headers
+        if item["value_type"] == "INT"
     }
     for int_col in integer_cols.intersection(dframe.columns):
-        defaulted_rows = dframe[int_col] == "1*"
-        integer_values = dframe.loc[~defaulted_rows, int_col].astype(int)
+        mask = dframe[int_col] != "1*"
         dframe[int_col] = dframe[int_col].astype(str)
-        dframe.loc[~defaulted_rows, int_col] = integer_values
+        if mask.any():
+            dframe.loc[mask, int_col] = (
+                dframe.loc[mask, int_col].astype(float).astype(int).astype(str)
+            )
 
     # Quote all string data. This is not always needed, but needed
     # for some colums, for example well-names containing a slash.
@@ -726,9 +731,8 @@ def generic_deck_table(
     for str_col in string_cols.intersection(dframe.columns):
         # Ensure 1* is not quoted.
         non_defaulted_rows = dframe[str_col] != "1*"
-        dframe.loc[non_defaulted_rows, str_col].str.replace("'", "")
         dframe.loc[non_defaulted_rows, str_col] = (
-            "'" + dframe.loc[non_defaulted_rows, str_col] + "'"
+            "'" + dframe.loc[non_defaulted_rows, str_col].str.replace("'", "") + "'"
         )
 
     # Now rename again to have prettier column names:
@@ -739,8 +743,7 @@ def generic_deck_table(
     tablestring = dframe.to_string(header=True, index=False)
     # Indent all lines with two spaces:
     tablestring = "\n".join(
-        [" " + line.strip().replace(" /", " /") for line in tablestring.splitlines()]
-        # The replace() in there is needed for py36/pandas==1.1.5 only.
+        " " + line.strip().replace(" /", " /") for line in tablestring.splitlines()
    )
     # Eclipse comment for the header line:
     tablestring = "--" + tablestring[1:]
@@ -836,7 +839,7 @@ def stack_on_colnames(
     # Drop rows stemming from the NaNs in the second tuple-element for
     # static columns:
     dframe = dframe.dropna(axis="index", subset=["DATE"])
-    del dframe["level_0"]
+    dframe = dframe.drop(columns="level_0")
     dframe.index.name = ""
     return dframe
 
@@ -912,11 +915,11 @@ def parse_lyrfile(filename: str | Path) -> list[dict[str, Any]] | None:
                     zonedict["to_layer"] = to_layer
                 else:
                     logger.error("From_layer higher than to_layer")
-                    raise ValueError()
+                    raise ValueError
             elif len(numbers) == 1:
                 zonedict["span"] = int(numbers[0])
             else:
-                raise ValueError()
+                raise ValueError
             lyrlist.append(zonedict)
         except ValueError:
             logger.error("Could not parse lyr file %s", filename)
@@ -967,7 +970,7 @@ def get_wells_matching_template(template: str, wells: list[str]) -> list[str]:
     Returns:
         List of matched wells
     """
-    if template.startswith("*") or template.startswith("?"):
+    if template.startswith(("*", "?")):
         raise ValueError(
             "Well template not allowed to start with a wildcard character: "
             f"Must be preceded with a \\: {template}"
res2df/compdat.py CHANGED
@@ -751,7 +751,7 @@ def expand_complump_in_welopen_df(
         exp_welopens.append(cell_row)
 
     dframe = pd.DataFrame(exp_welopens)
-    return dframe.astype(object).where(pd.notna(dframe), None)  # type: ignore[call-overload]
+    return dframe.astype(object).replace({np.nan: None})
 
 
 def expand_wlist_in_welopen_df(
@@ -785,7 +785,7 @@ def expand_wlist_in_welopen_df(
             # Explicit wellname was used, no expansion to happen:
             exp_welopens.append(row)
     dframe = pd.DataFrame(exp_welopens)
-    return dframe.astype(object).where(pd.notna(dframe), None)  # type: ignore[call-overload]
+    return dframe.astype(object).replace({np.nan: None})
 
 
 def applywelopen(
@@ -844,7 +844,7 @@ def applywelopen(
             "The WLIST dataframe must be expanded through expand_wlist()"
         )
 
-    welopen_df = welopen_df.astype(object).where(pd.notna(welopen_df), None)  # type: ignore[call-overload]
+    welopen_df = welopen_df.astype(object).replace({np.nan: None})
     if wlist_df is not None:
         welopen_df = expand_wlist_in_welopen_df(welopen_df, wlist_df)
     if complump_df is not None:
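
The three compdat.py changes swap `.astype(object).where(pd.notna(...), None)` for `.astype(object).replace({np.nan: None})`; both map NaN cells to None in an object-dtype frame, and the new form no longer needs the `type: ignore` comment. A small sketch with invented data, assuming only pandas and numpy:

    import numpy as np
    import pandas as pd

    welopen_df = pd.DataFrame({"WELL": ["OP1", "OP2"], "I": [1.0, np.nan]})

    # Old form: keep values where not-NaN, fill None elsewhere.
    old_style = welopen_df.astype(object).where(pd.notna(welopen_df), None)

    # New form: cast to object dtype, then map NaN directly to None.
    new_style = welopen_df.astype(object).replace({np.nan: None})

    print(old_style.equals(new_style))  # expected: True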
res2df/grid.py CHANGED
@@ -47,7 +47,7 @@ def get_available_rst_dates(resdatafiles: ResdataFiles) -> list[datetime.date]:
     )
     return [
         resdatafiles.get_rstfile().iget_restart_sim_time(index).date()
-        for index in range(0, len(report_indices))
+        for index in range(len(report_indices))
     ]
 
 
res2df/gruptree.py CHANGED
@@ -9,9 +9,7 @@ import warnings
 from typing import Any
 
 import numpy as np
-
-# Needed for mypy
-import opm.io
+import opm
 import pandas as pd
 import treelib
 
@@ -63,6 +61,9 @@ def df(
     date: datetime.date | None
     date = startdate if startdate is not None else None
 
+    if not isinstance(deck, (ResdataFiles, opm.opmcommon_python.Deck)):
+        raise TypeError("Input deck must be either ResdataFiles or an opm Deck.")
+
     if isinstance(deck, ResdataFiles):
         deck = deck.get_deck()
 
@@ -178,7 +179,6 @@ def df(
     # This happens with WELSPECS if both GRUPTREE and BRANPROP is defined
     # at the same timestep. And when a node is redirected to a new parent node
     dframe = dframe.drop_duplicates(subset=["DATE", "CHILD", "KEYWORD"], keep="last")
-    print(dframe)
     return dframe
 
 
res2df/parameters.py CHANGED
@@ -5,7 +5,7 @@ import json
 import logging
 import warnings
 from pathlib import Path
-from typing import Any
+from typing import Any, cast
 
 import pandas as pd
 import yaml
@@ -79,11 +79,12 @@ def load_parameterstxt(filename: str | Path) -> dict[str, Any]:
             engine="python",
             names=["KEY", "VALUE"],
             index_col=False,
+            dtype={"KEY": str},
         )
     except pd.errors.ParserWarning as txt_exc:
         raise pd.errors.ParserError(txt_exc) from txt_exc
 
-    return dframe.set_index("KEY")["VALUE"].to_dict()
+    return cast(dict[str, Any], dframe.set_index("KEY")["VALUE"].to_dict())
 
 
 def load_all(
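
In load_parameterstxt, `dtype={"KEY": str}` keeps numeric-looking keys as strings, while `cast(...)` only narrows the return type for the type checker and does nothing at runtime. A sketch of the effect (the whitespace separator below is an assumption; that argument sits outside the hunk):

    from io import StringIO
    from typing import Any, cast

    import pandas as pd

    # Hypothetical parameters.txt where every key happens to look numeric:
    text = "100 0.5\n200 0.7\n"

    dframe = pd.read_csv(
        StringIO(text),
        sep=r"\s+",
        engine="python",
        names=["KEY", "VALUE"],
        index_col=False,
        dtype={"KEY": str},  # without this, the keys would be parsed as integers
    )

    params = cast(dict[str, Any], dframe.set_index("KEY")["VALUE"].to_dict())
    print(params)  # {'100': 0.5, '200': 0.7} -- string keys, as the return type promises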
res2df/summary.py CHANGED
@@ -39,6 +39,7 @@ PD_FREQ_MNEMONICS: dict[str, str] = {
 See
 https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
 """
+PANDAS_MAJOR_VERSION = int(pd.__version__.split(".")[0])
 
 
 def date_range(
@@ -617,7 +618,9 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
     dframe = dframe.copy()
     if "DATE" in dframe.columns:
         # Infer datatype (Pandas cannot answer it) based on the first element:
-        if isinstance(dframe["DATE"].to_numpy()[0], str):
+        if PANDAS_MAJOR_VERSION >= 3:
+            dframe["DATE"] = pd.to_datetime(dframe["DATE"])
+        elif isinstance(dframe["DATE"].to_numpy()[0], str):
             # Do not use pd.Series.apply() here, Pandas would try to convert it to
             # datetime64[ns] which is limited at year 2262.
             dframe["DATE"] = pd.Series(
@@ -625,7 +628,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
                 dtype="object",
                 index=dframe.index,
             )
-        if isinstance(dframe["DATE"].to_numpy()[0], dt.date):
+        elif isinstance(dframe["DATE"].to_numpy()[0], dt.date):
             dframe["DATE"] = pd.Series(
                 [
                     dt.datetime.combine(dateobj, dt.datetime.min.time())
@@ -650,9 +653,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
     if "Unnamed: 0" in dframe:
         dframe = dframe.drop("Unnamed: 0", axis="columns")
 
-    block_columns = [
-        col for col in dframe.columns if (col.startswith("B") or col.startswith("LB"))
-    ]
+    block_columns = [col for col in dframe.columns if (col.startswith(("B", "LB")))]
     if block_columns:
         dframe = dframe.drop(columns=block_columns)
         logger.warning(
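
summary.py now gates the DATE conversion on the installed pandas major version: on pandas 3 it goes straight through pd.to_datetime, while older pandas keeps the object-dtype path that avoids the year-2262 limit of datetime64[ns]. A sketch of the branch on a toy frame; the fromisoformat call stands in for the package's own string conversion, which is truncated in the hunk:

    import datetime as dt

    import pandas as pd

    PANDAS_MAJOR_VERSION = int(pd.__version__.split(".")[0])

    dframe = pd.DataFrame({"DATE": ["2000-01-01", "2000-07-01"], "FOPT": [0.0, 100.0]})

    if PANDAS_MAJOR_VERSION >= 3:
        # pandas 3 path from the diff: plain to_datetime conversion.
        dframe["DATE"] = pd.to_datetime(dframe["DATE"])
    elif isinstance(dframe["DATE"].to_numpy()[0], str):
        # Older pandas: build an object-dtype Series of datetime objects so dates
        # beyond the datetime64[ns] range (year 2262) survive.
        dframe["DATE"] = pd.Series(
            [dt.datetime.fromisoformat(datestr) for datestr in dframe["DATE"]],
            dtype="object",
            index=dframe.index,
        )

    print(dframe.dtypes)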
res2df/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '1.3.11'
-__version_tuple__ = version_tuple = (1, 3, 11)
+__version__ = version = '1.3.12'
+__version_tuple__ = version_tuple = (1, 3, 12)
 
 __commit_id__ = commit_id = None
res2df/vfp/_vfpcommon.py CHANGED
@@ -123,7 +123,7 @@ def _stack_vfptable2df(
     no_indices = len(index_names_list)
 
     # insert index values as first columns in dataframe
-    for i in range(0, no_indices):
+    for i in range(no_indices):
         df_vfptable.insert(i, index_names_list[i], index_values_list[i])
 
     # create multi-index for columns
res2df/vfp/_vfpinj.py CHANGED
@@ -14,19 +14,15 @@ import numbers
 from typing import Any
 
 import numpy as np
+
+# Needed for mypy
+import opm.io
 import pandas as pd
 import pyarrow as pa
 
-try:
-    # Needed for mypy
-
-    import opm.io
-
-    # This import is seemingly not used, but necessary for some attributes
-    # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa: F401
-except ImportError:
-    pass
+# This import is seemingly not used, but necessary for some attributes
+# to be included in DeckItem objects.
+from opm.io.deck import DeckKeyword  # noqa: F401
 
 from ..common import comment_formatter, parse_opmio_deckrecord
 from ._vfpcommon import (
@@ -276,7 +272,7 @@ def basic_data2pyarrow(
 
     # Column metadata is index in THP array
     col_metadata_list = []
-    for i in range(0, len(thp_values)):
+    for i in range(len(thp_values)):
         col_name = str(i)
         col_dtype = pa.float64()
         col_metadata = {
@@ -397,7 +393,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> dict[str, Any]:
     # Extract index data from colum metadata
     thp_indices = [
         int(pa_table.schema.field(i).metadata[b"thp_idx"])
-        for i in range(0, pa_table.num_columns)
+        for i in range(pa_table.num_columns)
     ]
 
     # Extract table data as numpy.array
@@ -602,7 +598,7 @@ def _write_table_records(
     else:
         table = table.reshape(no_records, no_flow_values)
 
-    for row in range(0, no_records):
+    for row in range(no_records):
         thp = thp_indices[row]
         deck_str += f"{thp:2d}"
         for n, value in enumerate(table[row, :]):
res2df/vfp/_vfpprod.py CHANGED
@@ -14,18 +14,15 @@ import numbers
 from typing import Any
 
 import numpy as np
+
+# Needed for mypy
+import opm.io
 import pandas as pd
 import pyarrow as pa
 
-try:
-    # Needed for mypy
-    import opm.io
-
-    # This import is seemingly not used, but necessary for some attributes
-    # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa: F401
-except ImportError:
-    pass
+# This import is seemingly not used, but necessary for some attributes
+# to be included in DeckItem objects.
+from opm.io.deck import DeckKeyword  # noqa: F401
 
 from ..common import comment_formatter, parse_opmio_deckrecord
 from ._vfpcommon import (
@@ -382,7 +379,7 @@ def basic_data2pyarrow(
     # Column metadata is list of indices (THP,WFR,GFR,ALQ)
     col_metadata_list = []
     num_records = len(thp_values) * len(wfr_values) * len(gfr_values) * len(alq_values)
-    for i in range(0, num_records):
+    for i in range(num_records):
         thp_idx = thp_indices[i]
         wfr_idx = wfr_indices[i]
         gfr_idx = gfr_indices[i]
@@ -597,7 +594,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> dict[str, Any]:
     wfr_indices = []
     gfr_indices = []
     alq_indices = []
-    for i in range(0, pa_table.num_columns):
+    for i in range(pa_table.num_columns):
         thp_indices.append(int(pa_table.schema.field(i).metadata[b"thp_idx"]))
         wfr_indices.append(int(pa_table.schema.field(i).metadata[b"wfr_idx"]))
         gfr_indices.append(int(pa_table.schema.field(i).metadata[b"gfr_idx"]))
@@ -880,7 +877,7 @@ def _write_table_records(
     else:
         table = table.reshape(no_records, no_flow_values)
 
-    for row in range(0, no_records):
+    for row in range(no_records):
         thp = thp_indices[row]
         wfr = wfr_indices[row]
         gfr = gfr_indices[row]
res2df-1.3.11.dist-info/METADATA → res2df-1.3.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: res2df
-Version: 1.3.11
+Version: 1.3.12
 Summary: Convert reservoir simulator input and output to DataFrames
 Author-email: Håvard Berland <havb@equinor.com>
 License-Expression: GPL-3.0-only
@@ -26,7 +26,7 @@ Requires-Dist: resfo
 Requires-Dist: networkx
 Requires-Dist: numpy
 Requires-Dist: opm>=2020.10.2
-Requires-Dist: pandas
+Requires-Dist: pandas>=2
 Requires-Dist: pyarrow
 Requires-Dist: pyyaml>=5.1
 Requires-Dist: treelib
res2df-1.3.11.dist-info/RECORD → res2df-1.3.12.dist-info/RECORD CHANGED
@@ -1,17 +1,17 @@
 res2df/__init__.py,sha256=IXPD2hI8TlGP7oZcCtXBBQlcnx3QrvYkglI94G731d4,1429
 res2df/__version__.py,sha256=3bzEDF3c5flgPrfdaj_gccAfCp9firn58xrcjSwgh7E,112
-res2df/common.py,sha256=K6sTXPusioWKb50etYLw_PiCANPMmwedeq4r2l_5RLQ,34910
-res2df/compdat.py,sha256=TlG7jwh7ZnqPcAjoYfaUX7fEUYfJHjRPuCcrKy9jBxw,36741
+res2df/common.py,sha256=ibSwz3g2IFAFwAv1ug0PaESGB9V1a6ziyakq4oAFiBg,34852
+res2df/compdat.py,sha256=VZrwlPBW88o4jhELWrFAEU2-N8XhgOVCEtbp1cTfAJ0,36626
 res2df/constants.py,sha256=0W-ZYlm3Bbe9MONrRhA0c4yLXfJlqGX5apFN64i8RU0,258
 res2df/csv2res.py,sha256=EN3FsBXONmKWVuYk3cTmW25bsmvhp9Yg1VvJ_DSoAA8,3077
 res2df/equil.py,sha256=6DrStpmeZDnkSUyZMd2Hxdlun95P2xi6nXUd6QPbSHw,17102
 res2df/faults.py,sha256=wd--_6ilhOozsi0HID4YUTFuZi-8BcGhJP1f3MT17pY,3058
 res2df/fipreports.py,sha256=XvkjFyPuJnroRKHDNRkrSfU4rcfkKhYJFRi8FWahgLA,7599
-res2df/grid.py,sha256=dW1fcrur4wz6ZO8VtP6GvKHmz3qdeKXh8n8kKocJPUQ,27291
-res2df/gruptree.py,sha256=ffWLzN4V67qcgjV0DUsQgVGWINoM8yPBlUSI1sK15Ys,16898
+res2df/grid.py,sha256=myOGanVAGqigbbTgzxTCu12BMc4pL4S8hI1yJ1-2Udg,27288
+res2df/gruptree.py,sha256=LfYKF7xO2Te2eOKGPpCmtxPKbedKYf9P1bqCUvmKTm0,17013
 res2df/inferdims.py,sha256=NtjCaR0l54LiFRAg4Zq9xMJmYKyBSc3aLWa9RASgRNs,7124
 res2df/nnc.py,sha256=_O2Uxc0wkUeXzGosLPRAqFcSXrQ9qKm8qduOJt9k8m8,9177
-res2df/parameters.py,sha256=X2dtb_RPpC3lR3RV1sjCqQjvn13eSWQtZJOT-KvLJQA,6217
+res2df/parameters.py,sha256=hr0sJC7GvhkW0OE4uNhhjnWXoSA1WJDKWbN5sElVLgE,6281
 res2df/pillars.py,sha256=PkPNVp_ukLr1HAudjlQwgrsnlb8yAVY0YuyJZdEad60,15983
 res2df/pvt.py,sha256=OAHYUpBoeFQlrNkYqsDK4oYqRMkxZpP6oyKXQ1pd0dw,22458
 res2df/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -20,10 +20,10 @@ res2df/res2csvlogger.py,sha256=n9gMmlgzPNbtZAN9dhrBDpahEK1bG2jPWQZ7CHjSN6c,2243
 res2df/resdatafiles.py,sha256=tMyqYYcXUua6fHQspQgt7nGWfW34wQvSjWAWD6LIGxI,9946
 res2df/rft.py,sha256=_Y6mEbhMgaV2W_l7po2prMsXbQeITMQv84ao0IR9ufI,27974
 res2df/satfunc.py,sha256=md_pb_T_XgAhL40VGWO_wH2JFOcljUvqpRSXvJc7dvg,13527
-res2df/summary.py,sha256=Q3natArNIvMoWFhYRvsV4t8Rhj4af-cOmh-oWQXfuK4,31352
+res2df/summary.py,sha256=tm0lYagNgSycDMAvVSmpqTCMLzFvq5PDxUfzhjlVr0I,31481
 res2df/svg_color_keyword_names.txt,sha256=yiI2Ohv3NxxEAJC5OjVERZi7MLi2-bk7okyI5-xFDr8,1456
 res2df/trans.py,sha256=aTZ3tLQJjRGUUdw4gmGCXcAoSs33AJWHa7AYlCmoOak,10469
-res2df/version.py,sha256=RIYRhQ2JFIUmHdqcIYBvks_0HOhsZi2Vy6dWdcTCKVw,706
+res2df/version.py,sha256=XSypKUR4Hmw-4FC_upVEcnmOdXCwhUI2DUVSjR35UiA,706
 res2df/wcon.py,sha256=l5yvjQGleLyQY_ySjw2hq0C8gC-U3EMQRDGtjo4iroA,3152
 res2df/wellcompletiondata.py,sha256=5wdipNlu0B9BDZaAfJcPT-WwskP_PtsTIZv-q3frKtM,11104
 res2df/wellconnstatus.py,sha256=fMar2MkCXh2_-XWLbOmdUUDftQM9LnxDkqFdrfSAgUw,4061
@@ -77,13 +77,13 @@ res2df/opmkeywords/readme,sha256=xJM2Or6oiSP02xTe-aY4efPeh5UQiTieScoPHGV4rH0,285
 res2df/opmkeywords/runmetoupdate.sh,sha256=GKvjvd3H3Sf0bmeIduQ43pYcDmcBlOL_Epfm_xjDFUA,996
 res2df/vfp/__init__.py,sha256=6ksjEvMSkHK_3C_x5Fz0_eR4dq-DtTqm_YMv4WI13Fc,633
 res2df/vfp/_vfp.py,sha256=sHN_08HJEtVzirltNnJ_wfm9JVm_iGrRdVF-BhSzkLQ,19371
-res2df/vfp/_vfpcommon.py,sha256=Np49jE5SIecw-4Gb5ZsGOc45Ln66gseZEDkX8EL2axk,7144
+res2df/vfp/_vfpcommon.py,sha256=rGuExqqUVzKp8Fu6HjWcDk0Voy7ZqcxI6-V24wgHc3g,7141
 res2df/vfp/_vfpdefs.py,sha256=TT0bHHbbdGMS3Xm7dzpZPR7zBLPvlCuQIKvH7sr5TBA,6985
-res2df/vfp/_vfpinj.py,sha256=WYaJRSF2LBnW6dqm4mdhZWGizlKBiUe-xcNBGjgqZ2s,22511
-res2df/vfp/_vfpprod.py,sha256=vohQOu63pQsinGT8wgreeUwxzfwH6S1Om90XB8gfUzs,35669
-res2df-1.3.11.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-res2df-1.3.11.dist-info/METADATA,sha256=k0mpu_hkpNscaCYFH1s5EvG-z0kkohSvpnwsgZjEqWs,4048
-res2df-1.3.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-res2df-1.3.11.dist-info/entry_points.txt,sha256=ctl-_CwayyUVqFMUrwTT3Z3gZdnW6WCaiaLUJ4f_HnY,180
-res2df-1.3.11.dist-info/top_level.txt,sha256=U8AZBqrFHm9PMXg0toCfHJ817VfFtdKQpc8JuS5qToM,7
-res2df-1.3.11.dist-info/RECORD,,
+res2df/vfp/_vfpinj.py,sha256=SHDhPnrmuJavMfP-Y6Cshf0vBKx3yAZYuk9nZt4Xeao,22447
+res2df/vfp/_vfpprod.py,sha256=kKPGieDZKkfQlnFUyU6IwWUpmGRQkGr1cZtAt75PbM4,35606
+res2df-1.3.12.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+res2df-1.3.12.dist-info/METADATA,sha256=YeNbMxIa55d-WIKhbuKANtgMW7rgsM90pupDRz7uUn0,4051
+res2df-1.3.12.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+res2df-1.3.12.dist-info/entry_points.txt,sha256=ctl-_CwayyUVqFMUrwTT3Z3gZdnW6WCaiaLUJ4f_HnY,180
+res2df-1.3.12.dist-info/top_level.txt,sha256=U8AZBqrFHm9PMXg0toCfHJ817VfFtdKQpc8JuS5qToM,7
+res2df-1.3.12.dist-info/RECORD,,
res2df-1.3.11.dist-info/WHEEL → res2df-1.3.12.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.9.0)
+Generator: setuptools (80.10.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 