completor 0.1.2__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
completor/utils.py CHANGED
```diff
@@ -4,20 +4,17 @@ from __future__ import annotations
 
 import re
 import sys
-from typing import Any, overload
+from collections.abc import Mapping
+from typing import Any, Literal, NoReturn, overload
 
 import numpy as np
 import numpy.typing as npt
 import pandas as pd
 
-from completor.constants import Headers
+from completor.constants import Content, Headers, Keywords
+from completor.exceptions import CompletorError
 from completor.logger import logger
 
-try:
-    from typing import Literal, NoReturn
-except ImportError:
-    pass
-
 
 def abort(message: str, status: int = 1) -> SystemExit:
     """Exit the program with a message and exit code (1 by default).
```
```diff
@@ -28,8 +25,8 @@ def abort(message: str, status: int = 1) -> SystemExit:
     I.e. there were no errors, while 1 or above indicates that an error occurred. The default code is 1.
 
     Returns:
-        SystemExit: Makes type checkers happy, when using the ``raise`` keyword with this function. I.e.
-        `>>> raise abort("Something when terribly wrong")`
+        SystemExit: Makes type checkers happy when using the ``raise`` keyword with this function. I.e.
+        `>>> raise abort("Something when terribly wrong.")`
     """
     if status == 0:
         logger.info(message)
```
```diff
@@ -60,20 +57,6 @@ def sort_by_midpoint(
     return df.drop([_temp_column], axis=1)
 
 
-def as_data_frame(args: dict[str, Any] | None = None, **kwargs) -> pd.DataFrame:
-    """Helper function to create a data frame from a dictionary, or keywords."""
-    if (args is None and kwargs is None) or (not args and not kwargs):
-        raise ValueError("`as_data_frame` requires either a single dictionary, or keywords")
-
-    if args:
-        kwargs = args
-    data = pd.DataFrame()
-    for key, value in kwargs.items():
-        data[key] = value
-
-    return data
-
-
 @overload
 def log_and_raise_exception(message: str, kind: type = ..., throw: Literal[True] = ...) -> NoReturn: ...
 
```
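The `as_data_frame` helper is removed in 1.0.0 without a direct replacement in this file. A minimal migration sketch, assuming call sites passed a dict or keyword arguments: the pandas constructor covers both cases.

```python
import pandas as pd

# Before (0.1.2): df = as_data_frame(x=[1, 2], y=[3, 4])
# After (1.0.0): construct the frame directly from the same mapping.
df = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
```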
```diff
@@ -116,7 +99,9 @@ def find_quote(string: str) -> re.Match | None:
     return re.search(rf"([{quotes}])(?:(?=(\\?))\2.)*?\1", string)
 
 
-def clean_file_line(line: str, comment_prefix: str = "--", remove_quotation_marks: bool = False) -> str:
+def clean_file_line(
+    line: str, comment_prefix: str = "--", remove_quotation_marks: bool = False, replace_tabs: bool = True
+) -> str:
     """Remove comments, tabs, newlines and consecutive spaces from a string.
 
     Also remove trailing '/' comments, but ignore lines containing a file path.
```
```diff
@@ -126,6 +111,7 @@ def clean_file_line(line: str, comment_prefix: str = "--", remove_quotation_mark
         comment_prefix: The prefix used to denote a comment in the file.
         remove_quotation_marks: Whether quotation marks should be removed from the line.
             Used for cleaning schedule files.
+        replace_tabs: Whether tabs should be replaced with a space.
 
     Returns:
         A cleaned line. Returns an empty string in the case of a comment or empty line.
```
```diff
@@ -144,7 +130,8 @@ def clean_file_line(line: str, comment_prefix: str = "--", remove_quotation_mark
     if not line:
         return ""
     # Replace tabs with spaces, remove newlines and remove trailing spaces.
-    line = line.replace("\t", " ").replace("\n", "")
+    if replace_tabs:
+        line = line.replace("\t", " ").replace("\n", "")
     # Remove quotation marks if specified
     if remove_quotation_marks:
         line = line.replace("'", " ").replace('"', " ")
```
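The new `replace_tabs` flag only gates the tab and newline normalization; comment handling is untouched. A rough usage sketch (effects inferred from the hunks above, since the full function body is not part of this diff):

```python
from completor.utils import clean_file_line

# Comment lines still collapse to the empty string, flag or no flag.
clean_file_line("-- a comment line")  # -> ""
# Default keeps the 0.1.2 behavior: tabs become spaces, newlines are dropped.
clean_file_line("WELSPECS\t'A-1' 'FIELD' /\n")
# New in 1.0.0: opt out (note the newline removal is gated by the same flag).
clean_file_line("WELSPECS\t'A-1' 'FIELD' /", replace_tabs=False)
```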
```diff
@@ -183,3 +170,247 @@ def clean_file_lines(lines: list[str], comment_prefix: str = "--") -> list[str]:
         if cleaned_line:
             clean_lines.append(cleaned_line)
     return clean_lines
+
+
+def shift_array(array: npt.NDArray[Any], shift_by: int, fill_value: Any = np.nan) -> npt.NDArray[Any]:
+    """Shift an array to the left or right, similar to Pandas' shift.
+
+    Note: By chrisaycock https://stackoverflow.com/a/42642326.
+
+    Args:
+        array: Array to shift.
+        shift_by: The amount and direction (positive/negative) to shift by.
+        fill_value: The value to fill out of range values with. Defaults to np.nan.
+
+    Returns:
+        Shifted Numpy array.
+
+    """
+    result = np.empty_like(array)
+    if shift_by > 0:
+        result[:shift_by] = fill_value
+        result[shift_by:] = array[:-shift_by]
+    elif shift_by < 0:
+        result[shift_by:] = fill_value
+        result[:shift_by] = array[-shift_by:]
+    else:
+        result[:] = array
+    return result
+
+
```
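`shift_array` mirrors `pandas.Series.shift` for plain NumPy arrays. A quick sketch of the behavior the implementation above gives:

```python
import numpy as np

from completor.utils import shift_array

arr = np.array([1.0, 2.0, 3.0, 4.0])
shift_array(arr, 1)                  # -> array([nan, 1., 2., 3.])
shift_array(arr, -1)                 # -> array([2., 3., 4., nan])
shift_array(arr, 2, fill_value=0.0)  # -> array([0., 0., 1., 2.])
```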
```diff
+def get_active_wells(completion_table: pd.DataFrame, gp_perf_devicelayer: bool) -> npt.NDArray[np.str_]:
+    """Get a list of active wells specified by users.
+
+    Notes:
+        No device layer will be added for perforated wells with gravel-packed annulus.
+        Completor does nothing to gravel-packed perforated wells by default.
+        This behavior can be changed by setting the GRAVEL_PACKED_PERFORATED_DEVICELAYER keyword in the case file to true.
+
+    Args:
+        completion_table: Completion information.
+        gp_perf_devicelayer: Keyword denoting if the user wants a device layer for this type of completion.
+
+    Returns:
+        The active wells found.
+    """
+    # Need to check completion of all wells in the completion table to remove GP-PERF type wells
+    # If the user wants a device layer for this type of completion.
+    if not gp_perf_devicelayer:
+        gp_check = completion_table[Headers.ANNULUS] == Content.OPEN_ANNULUS
+        perf_check = completion_table[Headers.DEVICE_TYPE].isin(
+            [
+                Content.AUTONOMOUS_INFLOW_CONTROL_DEVICE,
+                Content.AUTONOMOUS_INFLOW_CONTROL_VALVE,
+                Content.DENSITY_ACTIVATED_RECOVERY,
+                Content.INFLOW_CONTROL_DEVICE,
+                Content.VALVE,
+                Content.INFLOW_CONTROL_VALVE,
+            ]
+        )
+        # Where annuli is "OA" or perforation is in the list above.
+        mask = gp_check | perf_check
+        if not mask.any():
+            logger.warning(
+                "There are no active wells for Completor to work on. E.g. all wells are defined with Gravel Pack "
+                "(GP) and valve type PERF. "
+                f"If you want these wells to be active set {Keywords.GRAVEL_PACKED_PERFORATED_DEVICELAYER} to TRUE."
+            )
+        return np.array(completion_table[Headers.WELL][mask].unique())
+    return np.array(completion_table[Headers.WELL].unique())
+
+
```
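A well stays active when its annulus is open ("OA") or its device type is one of the six contents listed; gravel-packed perforated wells are dropped unless `gp_perf_devicelayer` is set. A hypothetical sketch (the `"GP"` literal below stands in for the gravel-packed annulus marker, which this diff does not show):

```python
import pandas as pd

from completor.constants import Content, Headers
from completor.utils import get_active_wells

completion_table = pd.DataFrame(
    {
        Headers.WELL: ["A-1", "A-2"],
        Headers.ANNULUS: ["GP", Content.OPEN_ANNULUS],
        Headers.DEVICE_TYPE: [Content.PERFORATED, Content.INFLOW_CONTROL_DEVICE],
    }
)
# A-1 is gravel-packed and perforated, so it is filtered out by default...
get_active_wells(completion_table, gp_perf_devicelayer=False)  # -> array(['A-2'], ...)
# ...while enabling the device layer keeps every well.
get_active_wells(completion_table, gp_perf_devicelayer=True)   # -> array(['A-1', 'A-2'], ...)
```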
```diff
+def check_width_lines(result: str, limit: int) -> list[tuple[int, str]]:
+    """Check the width of each line versus limit.
+
+    Disregarding all content after '/' and '--' characters.
+
+    Args:
+        result: Raw text.
+        limit: The character width limit.
+
+    Raises:
+        ValueError: If there exists any data that is too long.
+    """
+    lines = result.splitlines()
+    lengths = np.char.str_len(lines)
+    lines_to_check = np.nonzero(lengths >= limit)[0]
+    too_long_lines = []
+    for line_index in lines_to_check:
+        # Well names can have slashes, therefore maxsplit must be 1.
+        cleaned_line = lines[line_index].rsplit("/", maxsplit=1)[0] + "/"
+        # Comment 'char' can be multiple and should not have maxsplit, nor the '--' added.
+        cleaned_line = cleaned_line.rsplit("--")[0]
+
+        if len(cleaned_line) > limit:
+            too_long_lines.append((line_index, lines[line_index]))
+    return too_long_lines
+
+
```
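Despite the `Raises` note in its docstring, `check_width_lines` itself returns the offending `(index, line)` pairs rather than raising; content after a record's closing slash or a `--` marker is not counted toward the limit. A small sketch:

```python
from completor.utils import check_width_lines

long_line = "'A-1' " + "1 " * 70 + "/"               # the data itself exceeds the limit
ok_line = "'A-2' 1 2 3 /" + " padding " + "x" * 200  # overflow sits after the closing slash
check_width_lines("\n".join([long_line, ok_line]), limit=128)
# -> [(0, long_line)]; the second line's overflow is disregarded.
```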
```diff
+def format_default_values(text: str) -> list[list[str]]:
+    """Format the data-records and resolve the repeat-mechanism.
+
+    E.g. 3* == 1* 1* 1*, 3*250 == 250 250 250.
+
+    Args:
+        text: A chunk data-record.
+
+    Returns:
+        Expanded values.
+    """
+    chunk = re.split(r"\s+/", text)[:-1]
+    expanded_data = []
+    for line in chunk:
+        new_record = ""
+        for record in line.split():
+            if not record[0].isdigit():
+                new_record += record + " "
+                continue
+            if "*" not in record:
+                new_record += record + " "
+                continue
+
+            # Handle repeats like 3* or 3*250.
+            multiplier, number = record.split("*")
+            new_record += f"{number if number else '1*'} " * int(multiplier)
+        if new_record:
+            expanded_data.append(new_record.split())
+    return expanded_data
+
+
```
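The repeat mechanism follows the reservoir-simulator convention: a bare `n*` expands to n defaulted (`1*`) values, while `n*value` expands to n copies of the value. For example:

```python
from completor.utils import format_default_values

format_default_values("3* 2*250 8.5 /")
# -> [['1*', '1*', '1*', '250', '250', '8.5']]
```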
```diff
+def find_keyword_data(keyword: str, text: str) -> list[str]:
+    """Finds the common pattern for the four keywords thats needed.
+
+    Args:
+        keyword: Current keyword.
+        text: The whole text to find matches in.
+
+    Returns:
+        The matches if any.
+
+    """
+    # Finds keyword followed by two slashes.
+    # Matches any characters followed by a newline, non-greedily, to allow for comments within the data.
+    # Matches new line followed by a single (can have leading whitespace) slash.
+    pattern = rf"^{keyword}(?:.*\n)*?\s*\/"
+    return re.findall(pattern, text, re.MULTILINE)
+
+
```
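The pattern anchors on the keyword at the start of a line and lazily consumes whole lines, comments included, until it reaches a line holding only the terminating slash. A sketch with a hypothetical schedule snippet:

```python
from completor.utils import find_keyword_data

schedule = """WELSPECS
-- a leading comment stays inside the match
 'A-1'  FIELD  1  1  0.0  OIL /
/
"""
find_keyword_data("WELSPECS", schedule)
# -> one match spanning from 'WELSPECS' through the terminating '/'
```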
```diff
+def clean_raw_data(raw_record: str, keyword: str) -> list[list[str]]:
+    """Parse the record and clean its content.
+
+    Args:
+        raw_record: Raw data taken straight from schedule.
+        keyword: The current keyword.
+
+    Returns:
+        The contents of the keyword, cleaned.
+    """
+    record = re.split(rf"{keyword}\n", raw_record)
+    if len(record) != 2:
+        raise CompletorError(f"Something went wrong when reading keyword '{keyword}' from schedule:\n{raw_record}")
+    # Strip keyword and last line.
+    raw_content = record[1].splitlines()
+    if raw_content[-1].strip().startswith("/"):
+        raw_content = raw_content[:-1]
+
+    clean_content = []
+    for line in raw_content:
+        clean_line = clean_file_line(line, remove_quotation_marks=True)
+        if clean_line:
+            clean_content.append(format_default_values(clean_line)[0])
+    return clean_content
+
+
```
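`clean_raw_data` splits off the keyword line, drops the terminating slash line, then pipes each data line through `clean_file_line` and `format_default_values`. A sketch of the expected round trip, assuming `clean_file_line` (whose full body is not in this diff) keeps the record's closing slash:

```python
from completor.utils import clean_raw_data

raw = "WELSPECS\n-- comment\n 'A-1'  FIELD  1  1  0.0  OIL  3* /\n/"
clean_raw_data(raw, "WELSPECS")
# The comment drops out, quotes are stripped and 3* expands, giving roughly:
# [['A-1', 'FIELD', '1', '1', '0.0', 'OIL', '1*', '1*', '1*']]
```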
```diff
+def find_well_keyword_data(well: str, keyword: str, text: str) -> str:
+    """Find the data associated with keyword and well name, include leading comments.
+
+    Args:
+        well: Well name.
+        keyword: Keyword to search for.
+        text: Raw text to look for matches in.
+
+    Returns:
+        The correct match given keyword and well name.
+    """
+    matches = find_keyword_data(keyword, text)
+
+    lines = []
+    for match in matches:
+        if re.search(well, match) is None:
+            continue
+
+        matchlines = match.splitlines()
+        once = False
+        for i, line in enumerate(matchlines):
+            if not line:
+                # Allow empty lines in the middle of a record.
+                if once:
+                    lines.append(line)
+                continue
+            if well in line.split()[0]:
+                if keyword in [Keywords.WELL_SEGMENTS, Keywords.COMPLETION_SEGMENTS]:
+                    # These keywords should just be the entire match as they never contain more than one well.
+                    return match
+                if not once:
+                    once = True
+                    # Remove contiguous comments above the first line by looking backwards,
+                    # adding it to the replaceable text match.
+                    comments = []
+                    for prev_line in matchlines[i - 1 :: -1]:
+                        if not prev_line.strip().startswith("--") or not prev_line:
+                            break
+                        comments.append(prev_line)
+                    lines += sorted(comments, reverse=True)
+                lines.append(line)
+            elif not once:
+                continue
+            # All following comments inside data.
+            elif line.strip().startswith("--"):
+                lines.append(line)
+            else:
+                break
+
+    return str("\n".join(lines))
+
+
```
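For multi-well keywords the helper returns only the requested well's lines plus the contiguous comment block directly above them; for `Keywords.WELL_SEGMENTS`/`Keywords.COMPLETION_SEGMENTS` records the entire match is returned, as those never hold more than one well. A sketch with a hypothetical two-well record:

```python
from completor.utils import find_well_keyword_data

schedule = """COMPDAT
-- A-1 perforations
 'A-1'  1  1  1  1  OPEN  2*  0.216 /
 'A-2'  2  2  1  1  OPEN  2*  0.216 /
/
"""
find_well_keyword_data("A-1", "COMPDAT", schedule)
# -> "-- A-1 perforations\n 'A-1'  1  1  1  1  OPEN  2*  0.216 /"
```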
```diff
+def replace_preprocessing_names(text: str, mapper: Mapping[str, str] | None) -> str:
+    """Expand start and end marker pairs for well pattern recognition as needed.
+
+    Args:
+        text: Text with pre-processor reservoir modeling well names.
+        mapper: Map of old to new names.
+
+    Returns:
+        Text with reservoir simulator well names.
+    """
+    if mapper is None:
+        return text
+    start_marks = ["'", " ", "\n", "\t"]
+    end_marks = ["'", " ", " ", " "]
+    for key, value in mapper.items():
+        for start, end in zip(start_marks, end_marks):
+            my_key = start + str(key) + start
+            if my_key in text:
+                my_value = start + str(value) + end
+                text = text.replace(my_key, my_value)
+    return text
```
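Names are only replaced when wrapped in matching quote or whitespace delimiters, so a mapped name that happens to be a substring of a longer well name is left alone. For example:

```python
from completor.utils import replace_preprocessing_names

mapper = {"A_1_PRE": "A-1"}
text = "WELSPECS\n 'A_1_PRE'  FIELD  1  1 /\n/"
replace_preprocessing_names(text, mapper)
# -> "WELSPECS\n 'A-1'  FIELD  1  1 /\n/"
```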
```diff
@@ -4,7 +4,6 @@ import matplotlib.pyplot as plt  # type: ignore
 import matplotlib.ticker as ticker  # type: ignore
 from matplotlib import rcParams
 from matplotlib.axes import Axes  # type: ignore
-from matplotlib.backends.backend_pdf import PdfPages  # type: ignore
 from matplotlib.figure import Figure  # type: ignore
 
 
```
```diff
@@ -16,7 +15,7 @@ def update_fonts(family: str = "DejaVu Serif", size: float = 12) -> None:
         size: Font sizes.
     """
     rcParams["font.family"] = family
-    rcParams.update({"font.size": size})
+    rcParams["font.size"] = size
 
 
 def format_axis(subplot: Axes, title: str, xlabel: str, ylabel: str, categorical: bool = False) -> Axes:
```
```diff
@@ -133,18 +132,6 @@ def subplot_position(num_plots: int) -> tuple[int, int]:
     return list_rows[num_plots - 1], list_cols[num_plots - 1]
 
 
-def create_pdfpages(file_name: str) -> PdfPages:
-    """Create a pdf file.
-
-    Args:
-        file_name: Full name of the file without extension .pdf.
-
-    Returns:
-        PdfPages instance.
-    """
-    return PdfPages(file_name)
-
-
 def create_figure(figsize: list[int] | tuple[int, int] | None = (18, 12)) -> Figure:
     """Create a matplotlib figure.
 
```
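With the thin `create_pdfpages` wrapper removed (along with the `PdfPages` import earlier in this file), callers presumably open matplotlib's `PdfPages` themselves. A minimal sketch:

```python
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

fig, ax = plt.subplots()
ax.plot([0.0, 1.0], [0.0, 1.0])
# Before (0.1.2): pdf = create_pdfpages(file_name)
# After (1.0.0): the context manager closes the file when done.
with PdfPages("wells.pdf") as pdf:
    pdf.savefig(fig)
```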
```diff
@@ -5,7 +5,7 @@ from matplotlib.axes import Axes  # type: ignore
 from matplotlib.figure import Figure  # type: ignore
 
 from completor import visualization
-from completor.constants import Headers
+from completor.constants import Content, Headers
 
 
 def visualize_tubing(axs: Axes, df_well: pd.DataFrame) -> Axes:
```
```diff
@@ -18,9 +18,9 @@ def visualize_tubing(axs: Axes, df_well: pd.DataFrame) -> Axes:
     Returns:
         Pyplot axis.
     """
-    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == "PERF")]
+    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == Content.PERFORATED)]
     if df_device.shape[0] > 0:
-        axs.plot(df_well[Headers.TUB_MD].to_numpy(), [1] * df_well.shape[0], "go-")
+        axs.plot(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy(), [1] * df_well.shape[0], "go-")
     return axs
 
 
```
```diff
@@ -34,21 +34,21 @@ def visualize_device(axs: Axes, df_well: pd.DataFrame) -> Axes:
     Returns:
         Pyplot axis
     """
-    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == "PERF")]
+    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == Content.PERFORATED)]
     for idx in range(df_device.shape[0]):
-        xpar = [df_device[Headers.TUB_MD].iloc[idx]] * 2
+        xpar = [df_device[Headers.TUBING_MEASURED_DEPTH].iloc[idx]] * 2
         ypar = [1.0, 2.0]
-        if df_device[Headers.DEVICE_TYPE].iloc[idx] == "PERF":
+        if df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.PERFORATED:
             axs.plot(xpar, ypar, "ro-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "AICD":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.AUTONOMOUS_INFLOW_CONTROL_DEVICE:
             axs.plot(xpar, ypar, "rD-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "ICD":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.INFLOW_CONTROL_DEVICE:
             axs.plot(xpar, ypar, "rs-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "VALVE":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.VALVE:
             axs.plot(xpar, ypar, "rv-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "DAR":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.DENSITY_ACTIVATED_RECOVERY:
             axs.plot(xpar, ypar, "rP-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "AICV":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.AUTONOMOUS_INFLOW_CONTROL_VALVE:
             axs.plot(xpar, ypar, "r*-", markevery=[1])
     return axs
 
```
```diff
@@ -67,15 +67,15 @@ def visualize_annulus(axs: Axes, df_well: pd.DataFrame) -> Axes:
     branches = df_well[Headers.ANNULUS_ZONE].unique()
     for branch in branches:
         df_branch = df_annulus[df_annulus[Headers.ANNULUS_ZONE] == branch]
-        xpar = df_branch[Headers.TUB_MD].to_numpy()
+        xpar = df_branch[Headers.TUBING_MEASURED_DEPTH].to_numpy()
         ypar = [3.0] * len(xpar)
         axs.plot(xpar, ypar, "bo-")
         # find the first connection in branches
         df_annulus_with_connection_to_tubing = df_branch[
-            (df_branch[Headers.NUMBER_OF_DEVICES] > 0) | (df_branch[Headers.DEVICE_TYPE] == "PERF")
+            (df_branch[Headers.NUMBER_OF_DEVICES] > 0) | (df_branch[Headers.DEVICE_TYPE] == Content.PERFORATED)
         ]
         for idx in range(df_annulus_with_connection_to_tubing.shape[0]):
-            xpar = [df_annulus_with_connection_to_tubing[Headers.TUB_MD].iloc[idx]] * 2
+            xpar = [df_annulus_with_connection_to_tubing[Headers.TUBING_MEASURED_DEPTH].iloc[idx]] * 2
             ypar = [2.0, 3.0]
             if idx == 0:
                 axs.plot(xpar, ypar, "bo-", markevery=[1])
```
```diff
@@ -105,28 +105,30 @@ def visualize_reservoir(axs: Axes, ax_twinx: Axes, df_reservoir: pd.DataFrame) -
         if df_reservoir[Headers.ANNULUS_ZONE].iloc[idx] > 0:
             axs.annotate(
                 "",
-                xy=(df_reservoir[Headers.TUB_MD].iloc[idx], 3.0),
-                xytext=(df_reservoir[Headers.MD].iloc[idx], 4.0),
+                xy=(df_reservoir[Headers.TUBING_MEASURED_DEPTH].iloc[idx], 3.0),
+                xytext=(df_reservoir[Headers.MEASURED_DEPTH].iloc[idx], 4.0),
                 arrowprops=dict(facecolor="black", shrink=0.05, width=0.5, headwidth=4.0),
             )
         else:
             if (
                 df_reservoir[Headers.NUMBER_OF_DEVICES].iloc[idx] > 0
-                or df_reservoir[Headers.DEVICE_TYPE].iloc[idx] == "PERF"
+                or df_reservoir[Headers.DEVICE_TYPE].iloc[idx] == Content.PERFORATED
             ):
                 axs.annotate(
                     "",
-                    xy=(df_reservoir[Headers.TUB_MD].iloc[idx], 2.0),
-                    xytext=(df_reservoir[Headers.MD].iloc[idx], 4.0),
+                    xy=(df_reservoir[Headers.TUBING_MEASURED_DEPTH].iloc[idx], 2.0),
+                    xytext=(df_reservoir[Headers.MEASURED_DEPTH].iloc[idx], 4.0),
                     arrowprops=dict(facecolor="black", shrink=0.05, width=0.5, headwidth=4.0),
                 )
     # get connection factor
     if "1*" not in df_reservoir[Headers.CONNECTION_FACTOR].to_numpy().tolist():
         max_cf = max(df_reservoir[Headers.CONNECTION_FACTOR].to_numpy())
-        ax_twinx.plot(df_reservoir[Headers.MD], df_reservoir[Headers.CONNECTION_FACTOR], "k-")
+        ax_twinx.plot(df_reservoir[Headers.MEASURED_DEPTH], df_reservoir[Headers.CONNECTION_FACTOR], "k-")
         ax_twinx.invert_yaxis()
         ax_twinx.set_ylim([max_cf * 5.0 + 1e-5, 0])
-        ax_twinx.fill_between(df_reservoir[Headers.MD], 0, df_reservoir[Headers.CONNECTION_FACTOR], alpha=0.5)
+        ax_twinx.fill_between(
+            df_reservoir[Headers.MEASURED_DEPTH], 0, df_reservoir[Headers.CONNECTION_FACTOR], alpha=0.5
+        )
 
     return axs, ax_twinx
 
```
```diff
@@ -183,14 +185,14 @@ def visualize_well(
     laterals = df_well[Headers.LATERAL].unique()
     if isinstance(segment_length, float):
         if segment_length >= 0.0:
-            max_md = max(df_well[Headers.TUB_MD].to_numpy())
-            min_md = min(df_well[Headers.TUB_MD].to_numpy())
+            max_md = max(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
+            min_md = min(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
         else:
-            max_md = max(df_reservoir[Headers.MD].to_numpy())
-            min_md = min(df_reservoir[Headers.MD].to_numpy())
+            max_md = max(df_reservoir[Headers.MEASURED_DEPTH].to_numpy())
+            min_md = min(df_reservoir[Headers.MEASURED_DEPTH].to_numpy())
     elif isinstance(segment_length, str):
-        max_md = max(df_well[Headers.TUB_MD].to_numpy())
-        min_md = min(df_well[Headers.TUB_MD].to_numpy())
+        max_md = max(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
+        min_md = min(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
     else:
         raise TypeError(f"segment_length has invalid type ({type(segment_length)})")
     for lateral_idx, lateral in enumerate(laterals):
```