completor 0.1.2__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 from __future__ import annotations

-from completor import parse
+from completor.parse import locate_keyword


 class _BaseCaseException(Exception):
@@ -106,7 +106,7 @@ class CaseReaderFormatError(_BaseCaseException):
         super().__init__(message, lines, error_index, window_size)

     @staticmethod
-    def find_error_line(keyword: str, lines: list[str], header: list[str]):
+    def find_error_line(keyword: str, lines: list[str], header: list[str]) -> tuple[int, bool]:
         """Find line where error occurs.

         Args:
@@ -114,14 +114,14 @@ class CaseReaderFormatError(_BaseCaseException):
             lines: The (preferably) original case-file lines.
             header: The expected headers for this keyword.

-        Raises:
-            ValueError: If the line could not be found.
-
         Returns:
             Line number and whether there are too many/few data entries vs header.
+
+        Raises:
+            ValueError: If the line could not be found.
         """
         stripped_content = [x.strip() for x in lines]
-        start, end = parse.locate_keyword(stripped_content, keyword)
+        start, end = locate_keyword(stripped_content, keyword)

         line_content = {
             i + start + 1: line for i, line in enumerate(lines[start + 1 : end]) if line and not line.startswith("--")
@@ -0,0 +1,8 @@
+"""Only used to get the version."""
+
+from importlib import metadata
+
+
+def get_version():
+    """Returns Completors version."""
+    return metadata.version("completor")
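The new module above replaces a hardcoded `__version__` attribute with a lookup in the installed distribution's metadata. A minimal sketch of the same call; the `PackageNotFoundError` fallback is an assumption for illustration, the module above does not catch it:

```python
from importlib import metadata

# The installed distribution's metadata is the single source of truth,
# e.g. "1.0.0" for this release.
try:
    print(metadata.version("completor"))
except metadata.PackageNotFoundError:
    # Only raised when completor is imported without being installed,
    # e.g. from a source checkout without `pip install -e .`.
    print("completor is not installed")
```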
@@ -1,7 +1,6 @@
+from importlib.resources import files
 from pathlib import Path

-from pkg_resources import resource_filename
-
 from completor.logger import logger

 SKIP_TESTS = False
@@ -17,7 +16,7 @@ except ModuleNotFoundError:
 @hook_implementation
 @plugin_response(plugin_name="completor") # type: ignore
 def installable_jobs():
-    config_file = Path(resource_filename("completor", "config_jobs/run_completor"))
+    config_file = Path(files("completor") / "config_jobs/run_completor")
     return {config_file.name: config_file}


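The hunk above drops the deprecated `pkg_resources.resource_filename` in favor of `importlib.resources.files`. Note that wrapping the result in `Path(...)` assumes the package is installed on disk; a sketch of the zip-safe stdlib idiom, should that assumption ever break:

```python
from importlib.resources import as_file, files

# files() returns a Traversable; as_file() materializes it as a real
# filesystem path even when the package is imported from a zip archive.
with as_file(files("completor") / "config_jobs/run_completor") as config_file:
    print(config_file.name)  # "run_completor"
```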
@@ -5,7 +5,7 @@ from __future__ import annotations
 import numpy as np
 import pandas as pd

-from completor.constants import Headers
+from completor.constants import Content, Headers
 from completor.exceptions import CompletorError


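From here on, the diff swaps string literals such as `"PA"` and `"PERF"` for constants on the newly imported `Content`. The real definitions live in `completor.constants`; the layout below is a hypothetical reconstruction, with every value taken from the 0.1.2 literal that the corresponding name replaces:

```python
# Hypothetical sketch of the Content constants, inferred from the diff.
class Content:
    PACKER = "PA"
    OPEN_ANNULUS = "OA"
    GRAVEL_PACKED = "GP"  # name assumed; only the "GP" literal appears in the diff
    PERFORATED = "PERF"
    VALVE = "VALVE"
    INFLOW_CONTROL_DEVICE = "ICD"
    AUTONOMOUS_INFLOW_CONTROL_DEVICE = "AICD"
    DENSITY_ACTIVATED_RECOVERY = "DAR"
    AUTONOMOUS_INFLOW_CONTROL_VALVE = "AICV"
    INFLOW_CONTROL_VALVE = "ICV"

    # Collections checked by the validation hunks further down.
    DEVICE_TYPES = (PERFORATED, AUTONOMOUS_INFLOW_CONTROL_DEVICE, INFLOW_CONTROL_DEVICE,
                    VALVE, DENSITY_ACTIVATED_RECOVERY, AUTONOMOUS_INFLOW_CONTROL_VALVE,
                    INFLOW_CONTROL_VALVE)
    ANNULUS_TYPES = (GRAVEL_PACKED, OPEN_ANNULUS, PACKER)
```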
@@ -21,14 +21,22 @@ def set_default_packer_section(df_comp: pd.DataFrame) -> pd.DataFrame:
         Updated completion data for packers.
     """
     # Set default values for packer sections
-    df_comp[Headers.INNER_DIAMETER] = np.where(df_comp[Headers.ANNULUS] == "PA", 0.0, df_comp[Headers.INNER_DIAMETER])
-    df_comp[Headers.OUTER_DIAMETER] = np.where(df_comp[Headers.ANNULUS] == "PA", 0.0, df_comp[Headers.OUTER_DIAMETER])
-    df_comp[Headers.ROUGHNESS] = np.where(df_comp[Headers.ANNULUS] == "PA", 0.0, df_comp[Headers.ROUGHNESS])
+    df_comp[Headers.INNER_DIAMETER] = np.where(
+        df_comp[Headers.ANNULUS] == Content.PACKER, 0.0, df_comp[Headers.INNER_DIAMETER]
+    )
+    df_comp[Headers.OUTER_DIAMETER] = np.where(
+        df_comp[Headers.ANNULUS] == Content.PACKER, 0.0, df_comp[Headers.OUTER_DIAMETER]
+    )
+    df_comp[Headers.ROUGHNESS] = np.where(df_comp[Headers.ANNULUS] == Content.PACKER, 0.0, df_comp[Headers.ROUGHNESS])
     df_comp[Headers.VALVES_PER_JOINT] = np.where(
-        df_comp[Headers.ANNULUS] == "PA", 0.0, df_comp[Headers.VALVES_PER_JOINT]
+        df_comp[Headers.ANNULUS] == Content.PACKER, 0.0, df_comp[Headers.VALVES_PER_JOINT]
+    )
+    df_comp[Headers.DEVICE_TYPE] = np.where(
+        df_comp[Headers.ANNULUS] == Content.PACKER, Content.PERFORATED, df_comp[Headers.DEVICE_TYPE]
+    )
+    df_comp[Headers.DEVICE_NUMBER] = np.where(
+        df_comp[Headers.ANNULUS] == Content.PACKER, 0, df_comp[Headers.DEVICE_NUMBER]
     )
-    df_comp[Headers.DEVICE_TYPE] = np.where(df_comp[Headers.ANNULUS] == "PA", "PERF", df_comp[Headers.DEVICE_TYPE])
-    df_comp[Headers.DEVICE_NUMBER] = np.where(df_comp[Headers.ANNULUS] == "PA", 0, df_comp[Headers.DEVICE_NUMBER])
     return df_comp


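All of the reformatted assignments above follow one vectorized defaulting pattern. A toy illustration with plain literals standing in for the `Headers`/`Content` constants:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"ANNULUS": ["GP", "PA", "OA"], "INNER_DIAMETER": [0.15, 0.15, 0.15]})
# Zero the diameter only on packer ("PA") rows; all other rows pass through.
df["INNER_DIAMETER"] = np.where(df["ANNULUS"] == "PA", 0.0, df["INNER_DIAMETER"])
print(df["INNER_DIAMETER"].tolist())  # [0.15, 0.0, 0.15]
```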
@@ -42,9 +50,11 @@ def set_default_perf_section(df_comp: pd.DataFrame) -> pd.DataFrame:
         Updated completion data for perforated sections.
     """
     df_comp[Headers.VALVES_PER_JOINT] = np.where(
-        df_comp[Headers.DEVICE_TYPE] == "PERF", 0.0, df_comp[Headers.VALVES_PER_JOINT]
+        df_comp[Headers.DEVICE_TYPE] == Content.PERFORATED, 0.0, df_comp[Headers.VALVES_PER_JOINT]
+    )
+    df_comp[Headers.DEVICE_NUMBER] = np.where(
+        df_comp[Headers.DEVICE_TYPE] == Content.PERFORATED, 0, df_comp[Headers.DEVICE_NUMBER]
     )
-    df_comp[Headers.DEVICE_NUMBER] = np.where(df_comp[Headers.DEVICE_TYPE] == "PERF", 0, df_comp[Headers.DEVICE_NUMBER])
     return df_comp


@@ -67,10 +77,8 @@ def check_default_non_packer(df_comp: pd.DataFrame) -> pd.DataFrame:
     """
     df_comp = df_comp.copy(True)
     # set default value of roughness
-    df_comp[Headers.ROUGHNESS] = (
-        df_comp[Headers.ROUGHNESS].replace("1*", "1e-5").astype(np.float64)
-    )  # Ensures float after replacing!
-    df_nonpa = df_comp[df_comp[Headers.ANNULUS] != "PA"]
+    df_comp[Headers.ROUGHNESS] = df_comp[Headers.ROUGHNESS].replace("1*", "1e-5").astype(np.float64)
+    df_nonpa = df_comp[df_comp[Headers.ANNULUS] != Content.PACKER]
     df_columns = df_nonpa.columns.to_numpy()
     for column in df_columns:
         if "1*" in df_nonpa[column]:
@@ -136,17 +144,17 @@ def _check_for_errors(df_comp: pd.DataFrame, well_name: str, idx: int) -> None:
             If the completion description is incomplete for some range of depth.
             If the completion description is overlapping for some range of depth.
     """
-    if df_comp[Headers.ANNULUS].iloc[idx] == "PA" and (
+    if df_comp[Headers.ANNULUS].iloc[idx] == Content.PACKER and (
         df_comp[Headers.START_MEASURED_DEPTH].iloc[idx] != df_comp[Headers.END_MEASURED_DEPTH].iloc[idx]
     ):
-        raise CompletorError("Packer segments must not have length")
+        raise CompletorError("Packer segments must not have length.")

     if (
-        df_comp[Headers.ANNULUS].iloc[idx] != "PA"
-        and df_comp[Headers.DEVICE_TYPE].iloc[idx] != "ICV"
+        df_comp[Headers.ANNULUS].iloc[idx] != Content.PACKER
+        and df_comp[Headers.DEVICE_TYPE].iloc[idx] != Content.INFLOW_CONTROL_VALVE
         and df_comp[Headers.START_MEASURED_DEPTH].iloc[idx] == df_comp[Headers.END_MEASURED_DEPTH].iloc[idx]
     ):
-        raise CompletorError("Non packer segments must have length")
+        raise CompletorError("Non packer segments must have length.")

     if idx > 0:
         if df_comp[Headers.START_MEASURED_DEPTH].iloc[idx] > df_comp[Headers.END_MEASURED_DEPTH].iloc[idx - 1]:
@@ -159,22 +167,22 @@ def _check_for_errors(df_comp: pd.DataFrame, well_name: str, idx: int) -> None:
         if df_comp[Headers.START_MEASURED_DEPTH].iloc[idx] < df_comp[Headers.END_MEASURED_DEPTH].iloc[idx - 1]:
             raise CompletorError(
                 f"Overlapping completion description in well '{well_name}' from depth "
-                f"t{df_comp[Headers.END_MEASURED_DEPTH].iloc[idx - 1]} "
+                f"{df_comp[Headers.END_MEASURED_DEPTH].iloc[idx - 1]} "
                 f"to depth {(df_comp[Headers.START_MEASURED_DEPTH].iloc[idx])}"
             )
-    if df_comp[Headers.DEVICE_TYPE].iloc[idx] not in ["PERF", "AICD", "ICD", "VALVE", "DAR", "AICV", "ICV"]:
+    if df_comp[Headers.DEVICE_TYPE].iloc[idx] not in Content.DEVICE_TYPES:
         raise CompletorError(
-            f"{df_comp[Headers.DEVICE_TYPE].iloc[idx]} not a valid device type. "
+            f"{df_comp[Headers.DEVICE_TYPE].iloc[idx]} is not a valid device type. "
             "Valid types are PERF, AICD, ICD, VALVE, DAR, AICV, and ICV."
         )
-    if df_comp[Headers.ANNULUS].iloc[idx] not in ["GP", "OA", "PA"]:
+    if df_comp[Headers.ANNULUS].iloc[idx] not in Content.ANNULUS_TYPES:
         raise CompletorError(
-            f"{df_comp[Headers.ANNULUS].iloc[idx]} not a valid annulus type. Valid types are GP, OA, and PA"
+            f"{df_comp[Headers.ANNULUS].iloc[idx]} is not a valid annulus type. Valid types are GP, OA, and PA"
         )


 def set_format_wsegvalv(df_temp: pd.DataFrame) -> pd.DataFrame:
-    """Format the well segments valve (WSEGVALV) table.
+    """Format the Well Segments Valve (WELSEGS) table.

     Args:
         df_temp: Well segments valve data.
@@ -183,17 +191,19 @@ def set_format_wsegvalv(df_temp: pd.DataFrame) -> pd.DataFrame:
         Updated data with enforced data types and device type filled with default values.
     """
     df_temp[Headers.DEVICE_NUMBER] = df_temp[Headers.DEVICE_NUMBER].astype(np.int64)
-    df_temp[[Headers.CV, Headers.AC, Headers.AC_MAX]] = df_temp[[Headers.CV, Headers.AC, Headers.AC_MAX]].astype(
-        np.float64
+    df_temp[[Headers.FLOW_COEFFICIENT, Headers.FLOW_CROSS_SECTIONAL_AREA, Headers.MAX_FLOW_CROSS_SECTIONAL_AREA]] = (
+        df_temp[
+            [Headers.FLOW_COEFFICIENT, Headers.FLOW_CROSS_SECTIONAL_AREA, Headers.MAX_FLOW_CROSS_SECTIONAL_AREA]
+        ].astype(np.float64)
     )
-    # allows column L to have default value 1* thus it is not set to float
+    # allows column ADDITIONAL_PIPE_LENGTH_FRICTION_PRESSURE_DROP to have default value 1* thus it is not set to float
     # Create ID device column
-    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], fill_value="VALVE"))
+    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], fill_value=Content.VALVE))
     return df_temp


 def set_format_wsegsicd(df_temp: pd.DataFrame) -> pd.DataFrame:
-    """Format the well segments inflow control device (WSEGSICD) table.
+    """Format the well segments Inflow Control Device (ICD) table.

     Args:
         df_temp: Well segments inflow control device data.
@@ -209,12 +219,12 @@ def set_format_wsegsicd(df_temp: pd.DataFrame) -> pd.DataFrame:
     columns = df_temp.columns.to_numpy()[1:]
     df_temp[columns] = df_temp[columns].astype(np.float64)
     # Create ID device column
-    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], "ICD"))
+    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], Content.INFLOW_CONTROL_DEVICE))
     return df_temp


 def set_format_wsegaicd(df_temp: pd.DataFrame) -> pd.DataFrame:
-    """Format the well segments automatic inflow control device (WSEGAICD) table.
+    """Format the well segments Automatic Inflow Control Device (AICD) table.

     Args:
         df_temp: Well segments automatic inflow control device data.
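Each `set_format_wseg*` function tags its table by prepending a constant device-type column, as in the hunk above. A minimal sketch of the pattern, assuming `Content.INFLOW_CONTROL_DEVICE == "ICD"`:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"DEVICE_NUMBER": [1, 2]})
# Prepend a constant column at position 0 so the device type leads the table.
df.insert(0, "DEVICE_TYPE", np.full(df.shape[0], "ICD"))
print(df.columns.tolist())  # ['DEVICE_TYPE', 'DEVICE_NUMBER']
```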
@@ -228,12 +238,12 @@ def set_format_wsegaicd(df_temp: pd.DataFrame) -> pd.DataFrame:
     columns = df_temp.columns.to_numpy()[1:]
     df_temp[columns] = df_temp[columns].astype(np.float64)
     # Create ID device column
-    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], "AICD"))
+    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], Content.AUTONOMOUS_INFLOW_CONTROL_DEVICE))
     return df_temp


 def set_format_wsegdar(df_temp: pd.DataFrame) -> pd.DataFrame:
-    """Format the well segments DAR (WSEGDAR) data.
+    """Format the well segments Density Activated Recovery (DAR) data.

     Args:
         df_temp: Well segments DAR device data.
@@ -246,12 +256,12 @@ def set_format_wsegdar(df_temp: pd.DataFrame) -> pd.DataFrame:
     columns = df_temp.columns.to_numpy()[1:]
     df_temp[columns] = df_temp[columns].astype(np.float64)
     # Create ID device column
-    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], "DAR"))
+    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], Content.DENSITY_ACTIVATED_RECOVERY))
     return df_temp


 def set_format_wsegaicv(df_temp: pd.DataFrame) -> pd.DataFrame:
-    """Format the well segments automatic inflow control valve (WSEGAICV) table.
+    """Format the well segments Automatic Inflow Control Valve (AICV) table.

     Args:
         df_temp: Well segments automatic inflow control valve table.
@@ -264,12 +274,12 @@ def set_format_wsegaicv(df_temp: pd.DataFrame) -> pd.DataFrame:
     columns = df_temp.columns.to_numpy()[1:]
     df_temp[columns] = df_temp[columns].astype(np.float64)
     # Create ID device column
-    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], "AICV"))
+    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], Content.AUTONOMOUS_INFLOW_CONTROL_VALVE))
     return df_temp


 def set_format_wsegicv(df_temp: pd.DataFrame) -> pd.DataFrame:
-    """Format the well segments inflow control valve (WSEGICV) table.
+    """Format the well segments Inflow Control Valve (ICV) table.

     Args:
         df_temp: Well segments inflow control valve table.
@@ -278,12 +288,14 @@ def set_format_wsegicv(df_temp: pd.DataFrame) -> pd.DataFrame:
         Updated data.
     """
     df_temp[Headers.DEVICE_NUMBER] = df_temp[Headers.DEVICE_NUMBER].astype(np.int64)
-    df_temp[[Headers.CV, Headers.AC, Headers.AC_MAX]] = df_temp[[Headers.CV, Headers.AC, Headers.AC_MAX]].astype(
-        np.float64
+    df_temp[[Headers.FLOW_COEFFICIENT, Headers.FLOW_CROSS_SECTIONAL_AREA, Headers.MAX_FLOW_CROSS_SECTIONAL_AREA]] = (
+        df_temp[
+            [Headers.FLOW_COEFFICIENT, Headers.FLOW_CROSS_SECTIONAL_AREA, Headers.MAX_FLOW_CROSS_SECTIONAL_AREA]
+        ].astype(np.float64)
     )
     # allows column DEFAULTS to have default value 5*, thus it is not set to float
     # Create ID device column
-    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], fill_value="ICV"))
+    df_temp.insert(0, Headers.DEVICE_TYPE, np.full(df_temp.shape[0], fill_value=Content.INFLOW_CONTROL_VALVE))
     return df_temp


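The valve formatters above cast three renamed columns in one statement. The multi-column `astype` pattern, with stand-in names for the `Headers` constants:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"FLOW_COEFFICIENT": ["0.85"], "FLOW_CROSS_SECTIONAL_AREA": ["0.01"]})
cols = ["FLOW_COEFFICIENT", "FLOW_CROSS_SECTIONAL_AREA"]
df[cols] = df[cols].astype(np.float64)  # one cast covers every listed column
print(df.dtypes.tolist())  # [dtype('float64'), dtype('float64')]
```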
@@ -310,7 +322,7 @@ def validate_lateral_to_device(df_lat2dev: pd.DataFrame, df_comp: pd.DataFrame)
     nrow = df_lat2dev.shape[0]
     for idx in range(0, nrow):
         l2d_well = df_lat2dev[Headers.WELL].iloc[idx]
-        if (df_comp[df_comp[Headers.WELL] == l2d_well][Headers.ANNULUS] == "OA").any():
+        if (df_comp[df_comp[Headers.WELL] == l2d_well][Headers.ANNULUS] == Content.OPEN_ANNULUS).any():
             raise CompletorError(
                 f"Please do not connect a lateral to the mother bore in well {l2d_well} that has open annuli. "
                 "This may trigger an error in reservoir simulator."
@@ -4,7 +4,7 @@ from __future__ import annotations

 import argparse

-import completor
+from completor.get_version import get_version

 COMPLETOR_DESCRIPTION = """Completor models advanced well completions for reservoir simulators.
 It generates all necessary keywords for reservoir simulation
@@ -20,22 +20,17 @@ def get_parser() -> argparse.ArgumentParser:
         argparse.ArgumentParser.
     """
     parser = argparse.ArgumentParser(description=COMPLETOR_DESCRIPTION)
-    parser.add_argument("-i", "--inputfile", required=True, type=str, help="(Compulsory) Completor case file")
-    parser.add_argument("-s", "--schedulefile", type=str, help="(Optional) if it is specified in the case file")
+    parser.add_argument("-i", "--inputfile", required=True, type=str, help="(Compulsory) Completor case file.")
+    parser.add_argument("-s", "--schedulefile", type=str, help="(Optional) if it is specified in the case file.")
     parser.add_argument(
-        "-o", "--outputfile", type=str, help="(Optional) name of output file. Defaults to <schedule>_advanced.wells"
+        "-o", "--outputfile", type=str, help="(Optional) name of output file. Defaults to <schedule>_advanced.wells."
     )
     parser.add_argument(
-        "-f", "--figure", action="store_true", help="(Optional) to generate well completion diagrams in pdf format"
+        "-f", "--figure", action="store_true", help="(Optional) to generate well completion diagrams in pdf format."
     )
     parser.add_argument(
-        "-l", "--loglevel", action="store", type=int, help="(Optional) log-level. Lower values gives more info (0-50)"
-    )
-    parser.add_argument(
-        "-v",
-        "--version",
-        action="version",
-        version="%(prog)s (completor version " + completor.__version__ + ")",
+        "-l", "--loglevel", action="store", type=int, help="(Optional) log-level. Lower values gives more info (0-50)."
     )
+    parser.add_argument("-v", "--version", action="version", version=f"Completor version {get_version()}!")

     return parser
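The rewritten `--version` flag above leans on argparse's built-in `version` action, which prints the string and exits without any handler code. A self-contained sketch, with the version string hardcoded purely for illustration:

```python
import argparse

parser = argparse.ArgumentParser(prog="completor")
# action="version" prints the string to stdout and exits immediately.
parser.add_argument("-v", "--version", action="version", version="Completor version 1.0.0!")
parser.parse_args(["--version"])  # prints "Completor version 1.0.0!" and exits
```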
completor/logger.py CHANGED
@@ -7,7 +7,7 @@ import time
 from functools import wraps
 from pathlib import Path

-import completor
+from completor.get_version import get_version


 def get_logger(module_name="completor"):
@@ -52,7 +52,7 @@ def handle_error_messages(func):
     * The content of any files passed.
     For the main method of Completor, these are (if provided).
     * input_file.txt - The case file.
-   * schedule_file.txt - The schedule file.
+    * schedule_file.txt - The schedule file.
     * new_file.txt - The output file.
     """

@@ -126,7 +126,7 @@ def dump_debug_information(**kwargs) -> None:

     dump("traceback.txt", traceback.format_exc())
     dump("machine.txt", socket.getfqdn())
-    dump("version.txt", completor.__version__)
+    dump("version.txt", get_version())
     dump("arguments.json", json.dumps(_convert_paths_to_strings(kwargs), indent=4))
     for key, value in kwargs.items():
         if isinstance(value, (Path, str)):