completor 1.3.0__py3-none-any.whl → 1.5.0__py3-none-any.whl
- completor/constants.py +19 -0
- completor/create_output.py +12 -3
- completor/hook_implementations/__init__.py +5 -0
- completor/hook_implementations/forward_model_steps.py +32 -0
- completor/hook_implementations/run_completor.py +61 -0
- completor/icv_file_handling.py +362 -0
- completor/icv_functions.py +845 -0
- completor/initialization.py +563 -0
- completor/input_validation.py +63 -0
- completor/logger.py +1 -1
- completor/main.py +88 -19
- completor/parse.py +5 -5
- completor/read_casefile.py +429 -48
- completor/utils.py +133 -2
- {completor-1.3.0.dist-info → completor-1.5.0.dist-info}/METADATA +11 -3
- completor-1.5.0.dist-info/RECORD +32 -0
- completor-1.5.0.dist-info/entry_points.txt +6 -0
- completor/hook_implementations/jobs.py +0 -71
- completor-1.3.0.dist-info/RECORD +0 -27
- completor-1.3.0.dist-info/entry_points.txt +0 -6
- {completor-1.3.0.dist-info → completor-1.5.0.dist-info}/LICENSE +0 -0
- {completor-1.3.0.dist-info → completor-1.5.0.dist-info}/WHEEL +0 -0
completor/read_casefile.py
CHANGED
@@ -10,36 +10,12 @@ import numpy.typing as npt
 import pandas as pd
 
 from completor import input_validation, parse
-from completor.constants import Content, Headers, Keywords, Method, WellData
+from completor.constants import Content, Headers, ICVMethod, Keywords, Method, WellData
 from completor.exceptions.clean_exceptions import CompletorError
 from completor.exceptions.exceptions import CaseReaderFormatError
 from completor.logger import logger
-from completor.
-
-
-def _mapper(map_file: str) -> dict[str, str]:
-    """Read two-column file and store data as values and keys in a dictionary.
-
-    Used to map between pre-processing tools and reservoir simulator file names.
-
-    Args:
-        map_file: Two-column text file.
-
-    Returns:
-        Dictionary of key and values taken from the mapfile.
-    """
-    mapper = {}
-    with open(map_file, encoding="utf-8") as lines:
-        for line in lines:
-            if not line.startswith("--"):
-                keyword_pair = line.strip().split()
-                if len(keyword_pair) == 2:
-                    key = keyword_pair[0]
-                    value = keyword_pair[1]
-                    mapper[key] = value
-                else:
-                    logger.warning("Illegal line '%s' in mapfile", keyword_pair)
-    return mapper
+from completor.parse import locate_keyword
+from completor.utils import clean_file_lines, sort_string_with_assign_first
 
 
 class ReadCasefile:
@@ -71,6 +47,7 @@ class ReadCasefile:
         gp_perf_devicelayer (bool): GRAVEL_PACKED_PERFORATED_DEVICELAYER. If TRUE all wells with
             gravel pack and perforation completion are given a device layer.
            If FALSE (default) all wells with this type of completions are untouched by Completor.
+        python_dependent (bool): PYTHON_DEPENDENT. If TRUE prints pyaction to output.
     """
 
     def __init__(self, case_file: str, schedule_file: str | None = None, output_file: str | None = None):
@@ -133,10 +110,6 @@ class ReadCasefile:
         Raises:
             ValueError: If COMPLETION keyword is not defined in the case.
         """
-        start_index, end_index = parse.locate_keyword(self.content, Keywords.COMPLETION)
-        if start_index == end_index:
-            raise ValueError("No completion is defined in the case file.")
-
         # Table headers
         header = [
             Headers.WELL,
@@ -151,6 +124,18 @@ class ReadCasefile:
             Headers.DEVICE_TYPE,
             Headers.DEVICE_NUMBER,
         ]
+        # Initialize empty table for ICV Control
+        self.completion_table = pd.DataFrame(columns=header)
+        start_index, end_index = parse.locate_keyword(self.content, Keywords.COMPLETION)
+        if start_index == end_index:
+            # Check if there is ICV Control keyword
+            # It is allowed to have no COMPLETION if there is ICVCONTROL
+            start_index_icv_control, end_index_icv_control = locate_keyword(self.content, "ICVCONTROL")
+            if start_index_icv_control == end_index_icv_control:
+                raise ValueError("No COMPLETION keyword or ICVCONTROL is defined in the case file.")
+            else:
+                return
+
         df_temp = self._create_dataframe_with_columns(header, start_index, end_index)
         # Set default value for packer segment
         df_temp = input_validation.set_default_packer_section(df_temp)
@@ -354,7 +339,7 @@ class ReadCasefile:
         if end_index == start_index + 2:
             # the content is in between the keyword and the /
             self.mapfile = parse.remove_string_characters(self.content[start_index + 1])
-            self.mapper = _mapper(self.mapfile)
+            self.mapper = self._mapper(self.mapfile)
 
     def read_wsegvalv(self) -> None:
         """Read the WELL_SEGMENTS_VALVE keyword in the case file.
@@ -385,7 +370,7 @@ class ReadCasefile:
         device_checks = self.completion_table[self.completion_table[Headers.DEVICE_TYPE] == Content.VALVE][
             Headers.DEVICE_NUMBER
         ].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wsegvalv_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError(
                 f"Not all device in {Keywords.COMPLETION} is specified in {Keywords.WELL_SEGMENTS_VALVE}"
             )
@@ -420,7 +405,7 @@ class ReadCasefile:
         device_checks = self.completion_table[
             self.completion_table[Headers.DEVICE_TYPE] == Content.INFLOW_CONTROL_DEVICE
         ][Headers.DEVICE_NUMBER].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wsegsicd_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError(f"Not all device in COMPLETION is specified in {Keywords.INFLOW_CONTROL_DEVICE}")
 
     def read_wsegaicd(self) -> None:
@@ -465,7 +450,7 @@ class ReadCasefile:
         device_checks = self.completion_table[
             self.completion_table[Headers.DEVICE_TYPE] == Content.AUTONOMOUS_INFLOW_CONTROL_DEVICE
         ][Headers.DEVICE_NUMBER].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wsegaicd_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError(
                 f"Not all device in COMPLETION is specified in {Keywords.AUTONOMOUS_INFLOW_CONTROL_DEVICE}"
             )
@@ -520,7 +505,7 @@ class ReadCasefile:
         device_checks = self.completion_table[self.completion_table[Headers.DEVICE_TYPE] == content][
             Headers.DEVICE_NUMBER
         ].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wsegdensity_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError(f"Not all device in COMPLETION is specified in {key}")
 
     def read_wseginjv(self) -> None:
@@ -554,7 +539,7 @@ class ReadCasefile:
         device_checks = self.completion_table[
             self.completion_table[Headers.DEVICE_TYPE] == Content.INJECTION_VALVE
         ][Headers.DEVICE_NUMBER].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wseginjv_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError(f"Not all device in COMPLETION is specified in {Keywords.INJECTION_VALVE}")
 
     def read_python_dependent(self) -> None:
@@ -636,7 +621,7 @@ class ReadCasefile:
         device_checks = self.completion_table[self.completion_table[Headers.DEVICE_TYPE] == content][
             Headers.DEVICE_NUMBER
         ].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wsegdualrcp_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError(f"Not all devices in COMPLETION are specified in {key}")
 
     def read_wsegicv(self) -> None:
@@ -666,7 +651,7 @@ class ReadCasefile:
         device_checks = self.completion_table[
             self.completion_table[Headers.DEVICE_TYPE] == Content.INFLOW_CONTROL_VALVE
         ][Headers.DEVICE_NUMBER].to_numpy()
-        if not
+        if not self._check_contents(device_checks, self.wsegicv_table[Headers.DEVICE_NUMBER].to_numpy()):
             raise CompletorError("Not all device in COMPLETION is specified in INFLOW_CONTROL_VALVE")
 
     def get_completion(self, well_name: str | None, branch: int) -> pd.DataFrame:
@@ -801,15 +786,411 @@ class ReadCasefile:
         df_temp = pd.read_csv(StringIO(table), sep=" ", dtype="object", index_col=False)
         return parse.remove_string_characters(df_temp)
 
+    @staticmethod
+    def _mapper(map_file: str) -> dict[str, str]:
+        """Read two-column file and store data as values and keys in a dictionary.
 
-
-    """Check if all members of a list is in another list.
+        Used to map between pre-processing tools and reservoir simulator file names.
 
-
-
-        reference: Reference array.
+        Args:
+            map_file: Two-column text file.
 
-
-
-
-
+        Returns:
+            Dictionary of key and values taken from the mapfile.
+        """
+        mapper = {}
+        with open(map_file, encoding="utf-8") as lines:
+            for line in lines:
+                if not line.startswith("--"):
+                    keyword_pair = line.strip().split()
+                    if len(keyword_pair) == 2:
+                        key = keyword_pair[0]
+                        value = keyword_pair[1]
+                        mapper[key] = value
+                    else:
+                        logger.warning("Illegal line '%s' in mapfile", keyword_pair)
+        return mapper
+
+    @staticmethod
+    def _check_contents(values: npt.NDArray[Any], reference: npt.NDArray[Any]) -> bool:
+        """Check if all members of a list is in another list.
+
+        Args:
+            values: Array to be evaluated.
+            reference: Reference array.
+
+        Returns:
+            True if members of values are present in reference, false otherwise.
+        """
+        return all(comp in reference for comp in values)
+
+
+class ICVReadCasefile(ReadCasefile):
+    """Inherited ReadCaseFile from completor with additions for ICV-Control."""
+
+    def __init__(self, case_file: str, user_schedule_file: str | None = None, new_segments: str | None = None):
+        """Initialize ICVReadCasefile.
+
+        Args:
+            case_file: Case/input file name.
+            schedule_file: Schedule file as output from Completor schedule output
+            new_segments: Well segment lists from Completor output.
+
+        """
+        super().__init__(case_file, user_schedule_file)
+        self.icv_table: dict[str, pd.DataFrame] = {}
+        self.icv_date = None
+        self.icv_control_table = pd.DataFrame()
+        self.custom_conditions: dict[ICVMethod, dict[str, Any]] = {}
+        self.icv_segments = new_segments
+
+        self.read_icv_control()
+        if self.icv_segments:
+            self.update_icv_case()
+        self.read_icv_table()
+        self.read_custom_conditions()
+        self.step_table = self.create_step_table()
+
+        self.init_table = self.create_table_from_casefile("INIT")
+        self.min_table = self.create_table_from_casefile("MIN")
+        self.max_table = self.create_table_from_casefile("MAX")
+        self.init_opening_table = self.create_table_from_casefile("OPENING")
+
+    def read_icv_control(self) -> None:
+        """This procedure reads the ICVCONTROL keyword in the case file.
+
+        The ICVCONTROL keyword information is stored in a class property
+        DataFrame ``self.icv_control_table`` with the following format:
+        """
+        start_index, end_index = locate_keyword(self.content, "ICVCONTROL")
+        if start_index == end_index:
+            logger.warning("No ICVCONTROL table is found in the case file.")
+
+        headers = ["WELL", "ICV", "SEGMENT", "AC-TABLE", "STEPS", "ICVDATE", "FREQ", "MIN", "MAX", "OPENING"]
+        extended_headers = ["FUD", "FUH", "FUL", "OPERSTEP", "WAITSTEP", "INIT"]
+        try:
+            df_temp = self._create_dataframe_with_columns(headers, start_index, end_index)
+            # Default values for extra headers if not supplied:
+            if df_temp.shape[1] == 10:
+                df_temp["FUD"] = 1
+                df_temp["FUH"] = 10
+                df_temp["FUL"] = 0.1
+                df_temp["OPERSTEP"] = 2
+                df_temp["WAITSTEP"] = 1
+                df_temp["INIT"] = 0.01
+        except CaseReaderFormatError:
+            headers += extended_headers
+            df_temp = self._create_dataframe_with_columns(headers, start_index, end_index)
+        df_temp = input_validation.set_format_icvcontrol(df_temp)
+        self.icv_control_table = df_temp.copy(deep=True)
+
+    def update_icv_case(self) -> None:
+        """Read and update the segmentation number in case file based on
+        input schedule file.
+
+        Default: False
+
+        """
+        df_new_segment = pd.DataFrame(self.icv_segments, columns=["WELL", "NEW_SEGMENT"])
+        value_working = df_new_segment.copy(deep=True)
+        df_working = self.icv_control_table.copy(deep=True)
+        if len(df_working) != len(value_working):
+            raise CompletorError(
+                f"ICVs defined in ICVCONTROL table are {len(df_working)} while the ICVs found in schedule file "
+                f"are {len(value_working)}"
+            )
+        value_working = df_new_segment.copy(deep=True)
+        for well in value_working["WELL"].unique():
+            if len(value_working[value_working["WELL"] == well]) == df_working[df_working["WELL"] == well].shape[0]:
+                for idx, row in df_working.iterrows():
+                    df_working.loc[idx, "SEGMENT"] = value_working.loc[idx, "NEW_SEGMENT"]
+            else:
+                num_icvs_schedule = len(value_working[value_working["WELL"] == well])
+                raise CompletorError(
+                    f"Number of ICVs defined in ICVCONTROL for well {well} are not the same as ICVs found "
+                    f"in schedule file which are {num_icvs_schedule}."
+                )
+        self.icv_control_table = df_working
+
+    def read_icv_table(self):
+        """This procedure reads the ICVTABLE keyword in the case file.
+
+        The ICVTABLE keyword information is stored in a class property
+        DataFrame ``self.icv_control_table`` with the following format:
+
+        The class property DataFrame icv_control_table has the following format:
+
+        .. icv_control_table:
+        .. list-table:: icv_control_table
+           :widths: 10 10
+           :header-rows: 1
+
+           * - Positions
+             - int
+           * - Cv
+             - float
+           * - Area
+             - float
+           * - Opening
+             - str
+
+        """
+        start_arr = []
+        end_arr = []
+        i = 0
+        while i < len(self.content):
+            start, end = locate_keyword(self.content[i:], "ICVTABLE")
+            if start == np.array([-1]):
+                break
+            start_arr.append(start + i)
+            end_arr.append(end + i)
+            i += end
+
+        if not start_arr:
+            logger.info("No ICVTABLE is found in the case file. Using default steps to adjust openings.")
+            return None
+        # Table headers
+        header = ["POSITION", "CV", "AREA"]
+        for start_index, end_index in zip(start_arr, end_arr):
+            table_name = self.content[start_index + 1 : start_index + 2]
+            table_name = table_name[0].strip("/").split()
+            df_temp = self._create_dataframe_with_columns(header, start_index + 1, end_index)
+            if df_temp.shape[1] != 3:
+                raise CompletorError("Keyword ICVCONTROL is missing data.")
+            elif df_temp.isnull().values.any():
+                raise CompletorError("Keyword ICVCONTROL is missing data.")
+
+            df_temp = input_validation.set_format_icv_table(df_temp)
+
+            self.icv_table.update({name: df_temp for name in table_name})
+
+    def read_custom_conditions(self) -> dict[ICVMethod, dict[str, Any]]:
+        """This procedure reads the CONTROL_CRITERIA keyword in the case file.
+
+        The CONTROL_CRITERIA keyword information is stored in a dictionary:
+            "functions": [Method.TYPE],
+            "criteria": 1,
+            "icvs": ["ICV","ICV"],
+            "conditions": 'extra string of conditions'
+
+        Returns:
+            A dictionary containing the custom conditions.
+        """
+        start_arr = []
+        end_arr = []
+        i = 0
+        while i < len(self.content):
+            start, end = locate_keyword(self.content[i:], "CONTROL_CRITERIA")
+            if start == np.array([-1]):
+                break
+            start_arr.append(start + i)
+            end_arr.append(end + i)
+            i += end
+
+        if not start_arr:
+            self.custom_conditions = {}
+            return self.custom_conditions
+
+        for start, end in zip(start_arr, end_arr):
+            self.parse_custom_conditions(self.content[start + 1 : end])
+
+        return self.custom_conditions
+
+    def parse_custom_conditions(self, raw_content: list[str]) -> dict[ICVMethod, dict[str, str | list[int]]]:
+        """Parse the raw text input into a dictionary structure.
+
+        Args:
+            raw_content: Raw input string, as list of lines.
+
+        Raises:
+            ValueError: ValueError if the input data has erroneous format.
+
+        Returns:
+            The processed nested dictionary, e.g.
+            {
+                Method.TYPE: {
+                    ICV_NAME: {
+                        criteria: [1]
+                        content: The text to be inserted to icv_functions.
+                    }
+                }
+            }
+
+        """
+        criteria = []
+        methods: list[ICVMethod] = []
+        icv_map: list[list[str]] = []
+        for i, line in enumerate(raw_content):
+            keyword, *value = line.rsplit(":", 1)
+            if not value:
+                value = [""]
+            keyword = keyword.strip().lower()
+            value = "".join(value).strip()
+            # Looking at start of word to accommodate some typos / variations
+            if keyword.startswith("func"):
+                # Find longes patterns matching UPPERCASE, seperated by underscores
+                # (possibly no underscores)
+                pattern = r"[A-Z]+\_?[A-Z]+\_?[A-Z]*\b|[A-Z]+\_?[A-Z]*\b"
+                tmp = re.findall(pattern, value)
+                methods = []
+                for possible_method in tmp:
+                    try:
+                        possible_method = ICVMethod(possible_method)
+                        if possible_method not in methods:
+                            methods.append(possible_method)
+                    except ValueError:
+                        logger.warning(f"Unknown function '{possible_method}' in FUNCTION field.")
+
+            elif keyword.startswith("crit"):
+                # Make pseudo-floats into integers, e.g. 2.00 -> 2
+                malformed_input = re.findall(r"\d+\.\d+", value)
+                if malformed_input:
+                    logger.warning(f"Found possible floats '{malformed_input}'. Attepting to cast to int!")
+                    value = re.sub(r"\.0+", "", value)
+                try:
+                    if "." in value:
+                        raise ValueError
+                    tmp = [int(c) for c in re.findall(r"\d+", value)]
+                    if not tmp:
+                        raise ValueError
+                    criteria = tmp
+                except ValueError:
+                    raise ValueError(
+                        "Expected the value in criteria to be an integer, " f"or a list of integers, got '{value}'."
+                    )
+
+            elif keyword.startswith("icv"):
+                icv_list = (" ".join([icv.strip() for icv in re.sub(r"\[|\]\]", "", value).split(",")])).split("]")
+                # Removal of whitespace and maps to sublists
+                icv_map = [sublist.strip().split() for sublist in icv_list if sublist]
+
+            else:
+                content = ("\n".join(raw_content[i:])).upper()
+                break
+
+        if not methods:
+            raise ValueError(
+                "The contents of CONTROL_CRITERIA was malformed. Missing information in the 'FUNCTION' field!"
+            )
+        if not any(icv_map):
+            for method in methods:
+                if method == ICVMethod.UDQ:
+                    if method not in self.custom_conditions:
+                        self.custom_conditions[method] = {}
+                    if "UDQ" not in self.custom_conditions[method]:
+                        self.custom_conditions[method]["UDQ"] = {}
+                    try:
+                        self.custom_conditions[method]["UDQ"]["1"] += "\n" + content
+                    except KeyError:
+                        self.custom_conditions[method]["UDQ"].update({"1": content})
+                    self.custom_conditions[method]["UDQ"]["1"] = "\n".join(
+                        self.custom_conditions[method]["UDQ"]["1"].splitlines()
+                    )
+                    # Ensure that assign get written before defines
+                    self.custom_conditions[method]["UDQ"]["1"] = sort_string_with_assign_first(
+                        self.custom_conditions[method]["UDQ"]["1"]
+                    )
+                    self.custom_conditions[method]["UDQ"]["map"] = {"1": {"UDQ": "UDQ"}}
+                    return self.custom_conditions
+            raise ValueError("The contents of CONTROL_CRITERIA was malformed. Missing information in the 'ICVS' field!")
+        if not criteria and ICVMethod.UDQ not in methods:
+            logger.warning("Did not find any 'CRITERIA', defaulting to [1]!")
+            criteria = [1]
+
+        # Form the dictionary from the gathered data
+        for method in methods:
+            if self.custom_conditions.get(method) is None:
+                self.custom_conditions[method] = {}
+            for icv_combo in icv_map:
+                if icv_combo != "":
+                    icv = icv_combo[0]
+
+                if self.custom_conditions.get(method, {}).get(icv) is None:
+                    self.custom_conditions[method][icv] = {}
+                icv_combo_mapped = {}
+                if "map" not in self.custom_conditions[method][icv]:
+                    self.custom_conditions[method][icv]["map"] = {}
+                icv_combo_mapped = {f"X{i}": name for i, name in enumerate(icv_combo)}
+                if method == ICVMethod.UDQ:
+                    if len(methods) > 1:
+                        raise ValueError("UDQ field needs a seperate CONTROL_CRITERIA keyword.")
+                    try:
+                        self.custom_conditions[method][icv]["1"] += "\n" + content
+                    except KeyError:
+                        self.custom_conditions[method][icv].update({"1": content})
+                    # Make assign be written before define.
+                    self.custom_conditions[method][icv]["1"] = "\n".join(
+                        self.custom_conditions[method][icv]["1"].splitlines()
+                    )
+                    self.custom_conditions[method][icv]["1"] = sort_string_with_assign_first(
+                        self.custom_conditions[method][icv]["1"]
+                    )
+                    self.custom_conditions[method][icv]["map"].update({"1": icv_combo_mapped})
+                else:
+                    try:
+                        self.custom_conditions[method][icv][str(criteria[0])]
+                        # if len(self.custom_conditions[method][icv].keys()) > 1:
+                        logger.warning(
+                            f"The CONTROL_CRITERIA for ICV '{icv}' "
+                            f"method '{method}' criteria '{criteria}' "
+                            "seems to be defined more than once."
+                        )
+                    except KeyError:
+                        pass
+                    formatted_content = ""
+                    for i, line in enumerate(content.splitlines()):
+                        # Remove any trailing non-alphanumeric characters
+                        # (excluding ')') and extra whitespace
+                        line = " ".join(re.sub(r"[^\w\)]*$", "", line).split())
+                        # The last line should not end with "AND", remove if present
+                        if i == len(content.splitlines()) - 1:
+                            last_line = re.sub(r"[^\w\)]*$", "", line).split()
+                            if last_line[-1] == "AND":
+                                line = " ".join(last_line[:-1])
+                            formatted_content += line + " /"
+                        elif re.findall(r"\w+", line)[-1] == "AND":
+                            # If the line ends with "AND", add a "/" after it
+                            formatted_content += line + " /\n"
+                        elif line != "":
+                            # Otherwise, add "AND /" after the line
+                            formatted_content += line + " AND /\n"
+
+                    self.custom_conditions[method][icv].update({str(crit): formatted_content for crit in criteria})
+                    self.custom_conditions[method][icv]["map"].update(
+                        {str(crit): icv_combo_mapped for crit in criteria}
+                    )
+                    for key in icv_combo_mapped.keys():
+                        if str(key) not in content:
+                            logger.warning(
+                                f"Custom condition warning:\n Function '{method}', "
+                                f"criteria '{criteria}' has "
+                                f"{len(icv_combo_mapped.keys())} ICVs where "
+                                f"{icv_combo_mapped} is not part of the custom "
+                                "content."
+                            )
+        return self.custom_conditions
+
+    def create_step_table(self) -> pd.DataFrame:
+        """Extract the icv name and step values from ICVCONTROL."""
+        step_table = pd.DataFrame([self.icv_control_table["STEPS"].to_numpy()], columns=self.icv_control_table["ICV"])
+        for i, steps in enumerate(step_table.iloc[0]):
+            if steps % 2 != 0:
+                logger.warning(
+                    f"In the casefile table the number of step was '{steps}' for"
+                    f" ICV'{step_table.columns[i]}'. The number of step has to be even,"
+                    f" and set to {steps + 1}."
+                )
+                step_table.iloc[0, i] = steps + 1
+        return step_table
+
+    def create_table_from_casefile(self, header_name: str) -> pd.DataFrame:
+        """Extract the information from the casefile table.
+
+        Args:
+            header_name: Column name / header.
+
+        Returns:
+            A DataFrame mapping ICVs to its specific table value.
+        """
+
+        return pd.DataFrame([self.icv_control_table[header_name].to_numpy()], columns=self.icv_control_table["ICV"])