completor 0.1.3__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
completor/utils.py CHANGED
@@ -4,20 +4,17 @@ from __future__ import annotations
 
 import re
 import sys
-from typing import Any, overload
+from collections.abc import Mapping
+from typing import Any
 
 import numpy as np
 import numpy.typing as npt
 import pandas as pd
 
-from completor.constants import Headers
+from completor.constants import Content, Headers, Keywords
+from completor.exceptions.clean_exceptions import CompletorError
 from completor.logger import logger
 
-try:
-    from typing import Literal, NoReturn
-except ImportError:
-    pass
-
 
 def abort(message: str, status: int = 1) -> SystemExit:
     """Exit the program with a message and exit code (1 by default).
@@ -28,8 +25,8 @@ def abort(message: str, status: int = 1) -> SystemExit:
         I.e. there were no errors, while 1 or above indicates that an error occurred. The default code is 1.
 
     Returns:
-        SystemExit: Makes type checkers happy, when using the ``raise`` keyword with this function. I.e.
-            `>>> raise abort("Something when terribly wrong")`
+        SystemExit: Makes type checkers happy when using the ``raise`` keyword with this function. I.e.
+            `>>> raise abort("Something when terribly wrong.")`
     """
     if status == 0:
         logger.info(message)
@@ -60,49 +57,6 @@ def sort_by_midpoint(
     return df.drop([_temp_column], axis=1)
 
 
-def as_data_frame(args: dict[str, Any] | None = None, **kwargs) -> pd.DataFrame:
-    """Helper function to create a data frame from a dictionary, or keywords."""
-    if (args is None and kwargs is None) or (not args and not kwargs):
-        raise ValueError("`as_data_frame` requires either a single dictionary, or keywords")
-
-    if args:
-        kwargs = args
-    data = pd.DataFrame()
-    for key, value in kwargs.items():
-        data[key] = value
-
-    return data
-
-
-@overload
-def log_and_raise_exception(message: str, kind: type = ..., throw: Literal[True] = ...) -> NoReturn: ...
-
-
-@overload
-def log_and_raise_exception(message: str, kind: type = ..., throw: Literal[False] = ...) -> BaseException: ...
-
-
-def log_and_raise_exception(message: str, kind: type = ValueError, throw: bool = False) -> BaseException | None:
-    """Log and throw an exception.
-
-    Arguments:
-        message: The message to be logged, and given to the exception.
-        kind: The type of exception to be thrown.
-        throw: Flag to toggle whether this function actually raises the exception or not.
-
-    Raises:
-        Exception: In general it can be any exception.
-        ValueError: This is the default exception.
-    """
-    logger.error(message)
-    if not isinstance(kind, (Exception, BaseException)):
-        raise ValueError(f"The provided exception type ({kind}) does not inherit from Exception")
-    if throw:
-        raise kind(message)
-    else:
-        return kind(message)
-
-
 def find_quote(string: str) -> re.Match | None:
     """Find single or double quotes in a string.
 
@@ -116,7 +70,9 @@ def find_quote(string: str) -> re.Match | None:
     return re.search(rf"([{quotes}])(?:(?=(\\?))\2.)*?\1", string)
 
 
-def clean_file_line(line: str, comment_prefix: str = "--", remove_quotation_marks: bool = False) -> str:
+def clean_file_line(
+    line: str, comment_prefix: str = "--", remove_quotation_marks: bool = False, replace_tabs: bool = True
+) -> str:
     """Remove comments, tabs, newlines and consecutive spaces from a string.
 
     Also remove trailing '/' comments, but ignore lines containing a file path.
126
82
  comment_prefix: The prefix used to denote a comment in the file.
127
83
  remove_quotation_marks: Whether quotation marks should be removed from the line.
128
84
  Used for cleaning schedule files.
85
+ replace_tabs: Whether tabs should be replaced with a space.
129
86
 
130
87
  Returns:
131
88
  A cleaned line. Returns an empty string in the case of a comment or empty line.
@@ -144,7 +101,8 @@ def clean_file_line(line: str, comment_prefix: str = "--", remove_quotation_mark
     if not line:
         return ""
     # Replace tabs with spaces, remove newlines and remove trailing spaces.
-    line = line.replace("\t", " ").replace("\n", "")
+    if replace_tabs:
+        line = line.replace("\t", " ").replace("\n", "")
    # Remove quotation marks if specified
    if remove_quotation_marks:
        line = line.replace("'", " ").replace('"', " ")
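A minimal sketch of how the new replace_tabs flag is expected to behave, based only on the lines shown in this hunk; the completor.utils import path is assumed from the file name above.

    # Hypothetical usage of the new replace_tabs flag (assumed importable from completor.utils).
    from completor.utils import clean_file_line

    raw = "WELL\tA-1  2 /"
    assert "\t" not in clean_file_line(raw)  # default replace_tabs=True substitutes tabs with spaces
    kept = clean_file_line(raw, replace_tabs=False)  # skips only the tab-to-space substitution shown in this hunk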
@@ -183,3 +141,247 @@ def clean_file_lines(lines: list[str], comment_prefix: str = "--") -> list[str]:
         if cleaned_line:
             clean_lines.append(cleaned_line)
     return clean_lines
+
+
+def shift_array(array: npt.NDArray[Any], shift_by: int, fill_value: Any = np.nan) -> npt.NDArray[Any]:
+    """Shift an array to the left or right, similar to Pandas' shift.
+
+    Note: By chrisaycock https://stackoverflow.com/a/42642326.
+
+    Args:
+        array: Array to shift.
+        shift_by: The amount and direction (positive/negative) to shift by.
+        fill_value: The value to fill out of range values with. Defaults to np.nan.
+
+    Returns:
+        Shifted Numpy array.
+
+    """
+    result = np.empty_like(array)
+    if shift_by > 0:
+        result[:shift_by] = fill_value
+        result[shift_by:] = array[:-shift_by]
+    elif shift_by < 0:
+        result[shift_by:] = fill_value
+        result[:shift_by] = array[-shift_by:]
+    else:
+        result[:] = array
+    return result
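The shift semantics described in the docstring above can be illustrated with a small, hypothetical usage sketch (the completor.utils import path is assumed from this diff):

    import numpy as np
    from completor.utils import shift_array  # assumed import path

    values = np.array([1, 2, 3, 4, 5])
    # A positive shift moves values right and fills the head; a negative shift moves left and fills the tail.
    assert (shift_array(values, 2, fill_value=0) == np.array([0, 0, 1, 2, 3])).all()
    assert (shift_array(values, -2, fill_value=0) == np.array([3, 4, 5, 0, 0])).all()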
+
+
+def get_active_wells(completion_table: pd.DataFrame, gp_perf_devicelayer: bool) -> npt.NDArray[np.str_]:
+    """Get a list of active wells specified by users.
+
+    Notes:
+        No device layer will be added for perforated wells with gravel-packed annulus.
+        Completor does nothing to gravel-packed perforated wells by default.
+        This behavior can be changed by setting the GRAVEL_PACKED_PERFORATED_DEVICELAYER keyword in the case file to true.
+
+    Args:
+        completion_table: Completion information.
+        gp_perf_devicelayer: Keyword denoting if the user wants a device layer for this type of completion.
+
+    Returns:
+        The active wells found.
+    """
+    # Need to check completion of all wells in the completion table to remove GP-PERF type wells
+    # If the user wants a device layer for this type of completion.
+    if not gp_perf_devicelayer:
+        gp_check = completion_table[Headers.ANNULUS] == Content.OPEN_ANNULUS
+        perf_check = completion_table[Headers.DEVICE_TYPE].isin(
+            [
+                Content.AUTONOMOUS_INFLOW_CONTROL_DEVICE,
+                Content.AUTONOMOUS_INFLOW_CONTROL_VALVE,
+                Content.DENSITY_ACTIVATED_RECOVERY,
+                Content.INFLOW_CONTROL_DEVICE,
+                Content.VALVE,
+                Content.INFLOW_CONTROL_VALVE,
+            ]
+        )
+        # Where annuli is "OA" or perforation is in the list above.
+        mask = gp_check | perf_check
+        if not mask.any():
+            logger.warning(
+                "There are no active wells for Completor to work on. E.g. all wells are defined with Gravel Pack "
+                "(GP) and valve type PERF. "
+                f"If you want these wells to be active set {Keywords.GRAVEL_PACKED_PERFORATED_DEVICELAYER} to TRUE."
+            )
+        return np.array(completion_table[Headers.WELL][mask].unique())
+    return np.array(completion_table[Headers.WELL].unique())
+
+
+def check_width_lines(result: str, limit: int) -> list[tuple[int, str]]:
+    """Check the width of each line versus limit.
+
+    Disregarding all content after '/' and '--' characters.
+
+    Args:
+        result: Raw text.
+        limit: The character width limit.
+
+    Raises:
+        ValueError: If there exists any data that is too long.
+    """
+    lines = result.splitlines()
+    lengths = np.char.str_len(lines)
+    lines_to_check = np.nonzero(lengths >= limit)[0]
+    too_long_lines = []
+    for line_index in lines_to_check:
+        # Well names can have slashes, therefore maxsplit must be 1.
+        cleaned_line = lines[line_index].rsplit("/", maxsplit=1)[0] + "/"
+        # Comment 'char' can be multiple and should not have maxsplit, nor the '--' added.
+        cleaned_line = cleaned_line.rsplit("--")[0]
+
+        if len(cleaned_line) > limit:
+            too_long_lines.append((line_index, lines[line_index]))
+    return too_long_lines
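A hedged sketch of the width check above: content after the record terminator '/' and after '--' comments is disregarded when measuring line width (completor.utils import path assumed from this diff).

    from completor.utils import check_width_lines  # assumed import path

    ok = "A" * 100 + "  -- a long trailing comment does not count towards the width limit"
    assert check_width_lines(ok, limit=128) == []
    too_long = "X" * 130  # exceeds the limit even after comments are stripped
    assert len(check_width_lines(too_long, limit=128)) == 1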
+
+
+def format_default_values(text: str) -> list[list[str]]:
+    """Format the data-records and resolve the repeat-mechanism.
+
+    E.g. 3* == 1* 1* 1*, 3*250 == 250 250 250.
+
+    Args:
+        text: A chunk data-record.
+
+    Returns:
+        Expanded values.
+    """
+    chunk = re.split(r"\s+/", text)[:-1]
+    expanded_data = []
+    for line in chunk:
+        new_record = ""
+        for record in line.split():
+            if not record[0].isdigit():
+                new_record += record + " "
+                continue
+            if "*" not in record:
+                new_record += record + " "
+                continue
+
+            # Handle repeats like 3* or 3*250.
+            multiplier, number = record.split("*")
+            new_record += f"{number if number else '1*'} " * int(multiplier)
+        if new_record:
+            expanded_data.append(new_record.split())
+    return expanded_data
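The repeat-mechanism described in the docstring ('3*' and '3*250') can be illustrated with a small hypothetical record (completor.utils import path assumed from this diff):

    from completor.utils import format_default_values  # assumed import path

    # '3*' expands to three defaulted values and '3*250' to three copies of 250.
    assert format_default_values("A-1  3*  3*250 /") == [["A-1", "1*", "1*", "1*", "250", "250", "250"]]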
+
+
+def find_keyword_data(keyword: str, text: str) -> list[str]:
+    """Finds the common pattern for the four keywords thats needed.
+
+    Args:
+        keyword: Current keyword.
+        text: The whole text to find matches in.
+
+    Returns:
+        The matches if any.
+
+    """
+    # Finds keyword followed by two slashes.
+    # Matches any characters followed by a newline, non-greedily, to allow for comments within the data.
+    # Matches new line followed by a single (can have leading whitespace) slash.
+    pattern = rf"^{keyword}(?:.*\n)*?\s*\/"
+    return re.findall(pattern, text, re.MULTILINE)
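A hedged example of the pattern above, using WELSPECS as an illustrative keyword: the match runs from the keyword line to the first line that is only a (possibly indented) slash, so embedded comments and per-record trailing slashes stay inside the match.

    from completor.utils import find_keyword_data  # assumed import path

    schedule = (
        "WELSPECS\n"
        "-- well  group  i  j\n"
        "  'A-1'  FIELD  1  1 /\n"
        "/\n"
    )
    matches = find_keyword_data("WELSPECS", schedule)
    assert len(matches) == 1 and matches[0].endswith("/")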
+
+
+def clean_raw_data(raw_record: str, keyword: str) -> list[list[str]]:
+    """Parse the record and clean its content.
+
+    Args:
+        raw_record: Raw data taken straight from schedule.
+        keyword: The current keyword.
+
+    Returns:
+        The contents of the keyword, cleaned.
+    """
+    record = re.split(rf"{keyword}\n", raw_record)
+    if len(record) != 2:
+        raise CompletorError(f"Something went wrong when reading keyword '{keyword}' from schedule:\n{raw_record}")
+    # Strip keyword and last line.
+    raw_content = record[1].splitlines()
+    if raw_content[-1].strip().startswith("/"):
+        raw_content = raw_content[:-1]
+
+    clean_content = []
+    for line in raw_content:
+        clean_line = clean_file_line(line, remove_quotation_marks=True)
+        if clean_line:
+            clean_content.append(format_default_values(clean_line)[0])
+    return clean_content
+
+
+def find_well_keyword_data(well: str, keyword: str, text: str) -> str:
+    """Find the data associated with keyword and well name, include leading comments.
+
+    Args:
+        well: Well name.
+        keyword: Keyword to search for.
+        text: Raw text to look for matches in.
+
+    Returns:
+        The correct match given keyword and well name.
+    """
+    matches = find_keyword_data(keyword, text)
+
+    lines: list[str] = []
+    for match in matches:
+        if re.search(well, match) is None:
+            continue
+
+        matchlines = match.splitlines()
+        once = False
+        for i, line in enumerate(matchlines):
+            if not line:
+                # Allow empty lines in the middle of a record.
+                if once:
+                    lines.append(line)
+                continue
+            if well in line.split()[0]:
+                if keyword in [Keywords.WELL_SEGMENTS, Keywords.COMPLETION_SEGMENTS]:
+                    # These keywords should just be the entire match as they never contain more than one well.
+                    return match
+                if not once:
+                    once = True
+                    # Remove contiguous comments above the first line by looking backwards,
+                    # adding it to the replaceable text match.
+                    comments: list[str] = []
+                    for prev_line in matchlines[i - 1 :: -1]:
+                        if not prev_line.strip().startswith("--") or not prev_line:
+                            break
+                        comments.append(prev_line)
+                    lines += sorted(comments, reverse=True)
+                lines.append(line)
+            elif not once:
+                continue
+            # All following comments inside data.
+            elif line.strip().startswith("--"):
+                lines.append(line)
+            else:
+                break
+
+    return str("\n".join(lines))
+
+
+def replace_preprocessing_names(text: str, mapper: Mapping[str, str] | None) -> str:
+    """Expand start and end marker pairs for well pattern recognition as needed.
+
+    Args:
+        text: Text with pre-processor reservoir modeling well names.
+        mapper: Map of old to new names.
+
+    Returns:
+        Text with reservoir simulator well names.
+    """
+    if mapper is None:
+        return text
+    start_marks = ["'", " ", "\n", "\t"]
+    end_marks = ["'", " ", " ", " "]
+    for key, value in mapper.items():
+        for start, end in zip(start_marks, end_marks):
+            my_key = start + str(key) + start
+            if my_key in text:
+                my_value = start + str(value) + end
+                text = text.replace(my_key, my_value)
+    return text
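A minimal sketch of the marker-pair replacement above, with a hypothetical mapper from a pre-processor well name to the simulator name (completor.utils import path assumed from this diff):

    from completor.utils import replace_preprocessing_names  # assumed import path

    mapper = {"WELL_PRE_1": "A-1"}  # hypothetical name mapping
    text = "WELSPECS\n 'WELL_PRE_1' FIELD 1 1 /\n/"
    out = replace_preprocessing_names(text, mapper)
    assert "'A-1'" in out and "WELL_PRE_1" not in out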
@@ -4,7 +4,6 @@ import matplotlib.pyplot as plt  # type: ignore
 import matplotlib.ticker as ticker  # type: ignore
 from matplotlib import rcParams
 from matplotlib.axes import Axes  # type: ignore
-from matplotlib.backends.backend_pdf import PdfPages  # type: ignore
 from matplotlib.figure import Figure  # type: ignore
 
 
@@ -16,7 +15,7 @@ def update_fonts(family: str = "DejaVu Serif", size: float = 12) -> None:
         size: Font sizes.
     """
     rcParams["font.family"] = family
-    rcParams.update({"font.size": size})
+    rcParams["font.size"] = size
 
 
 def format_axis(subplot: Axes, title: str, xlabel: str, ylabel: str, categorical: bool = False) -> Axes:
@@ -133,18 +132,6 @@ def subplot_position(num_plots: int) -> tuple[int, int]:
     return list_rows[num_plots - 1], list_cols[num_plots - 1]
 
 
-def create_pdfpages(file_name: str) -> PdfPages:
-    """Create a pdf file.
-
-    Args:
-        file_name: Full name of the file without extension .pdf.
-
-    Returns:
-        PdfPages instance.
-    """
-    return PdfPages(file_name)
-
-
 def create_figure(figsize: list[int] | tuple[int, int] | None = (18, 12)) -> Figure:
     """Create a matplotlib figure.
 
@@ -5,7 +5,7 @@ from matplotlib.axes import Axes  # type: ignore
 from matplotlib.figure import Figure  # type: ignore
 
 from completor import visualization
-from completor.constants import Headers
+from completor.constants import Content, Headers
 
 
 def visualize_tubing(axs: Axes, df_well: pd.DataFrame) -> Axes:
@@ -18,9 +18,9 @@ def visualize_tubing(axs: Axes, df_well: pd.DataFrame) -> Axes:
     Returns:
         Pyplot axis.
     """
-    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == "PERF")]
+    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == Content.PERFORATED)]
     if df_device.shape[0] > 0:
-        axs.plot(df_well[Headers.TUB_MD].to_numpy(), [1] * df_well.shape[0], "go-")
+        axs.plot(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy(), [1] * df_well.shape[0], "go-")
     return axs
 
 
@@ -34,21 +34,21 @@ def visualize_device(axs: Axes, df_well: pd.DataFrame) -> Axes:
     Returns:
         Pyplot axis
     """
-    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == "PERF")]
+    df_device = df_well[(df_well[Headers.NUMBER_OF_DEVICES] > 0) | (df_well[Headers.DEVICE_TYPE] == Content.PERFORATED)]
     for idx in range(df_device.shape[0]):
-        xpar = [df_device[Headers.TUB_MD].iloc[idx]] * 2
+        xpar = [df_device[Headers.TUBING_MEASURED_DEPTH].iloc[idx]] * 2
         ypar = [1.0, 2.0]
-        if df_device[Headers.DEVICE_TYPE].iloc[idx] == "PERF":
+        if df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.PERFORATED:
             axs.plot(xpar, ypar, "ro-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "AICD":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.AUTONOMOUS_INFLOW_CONTROL_DEVICE:
             axs.plot(xpar, ypar, "rD-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "ICD":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.INFLOW_CONTROL_DEVICE:
             axs.plot(xpar, ypar, "rs-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "VALVE":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.VALVE:
             axs.plot(xpar, ypar, "rv-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "DAR":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.DENSITY_ACTIVATED_RECOVERY:
             axs.plot(xpar, ypar, "rP-", markevery=[1])
-        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == "AICV":
+        elif df_device[Headers.DEVICE_TYPE].iloc[idx] == Content.AUTONOMOUS_INFLOW_CONTROL_VALVE:
             axs.plot(xpar, ypar, "r*-", markevery=[1])
     return axs
 
@@ -67,15 +67,15 @@ def visualize_annulus(axs: Axes, df_well: pd.DataFrame) -> Axes:
     branches = df_well[Headers.ANNULUS_ZONE].unique()
     for branch in branches:
         df_branch = df_annulus[df_annulus[Headers.ANNULUS_ZONE] == branch]
-        xpar = df_branch[Headers.TUB_MD].to_numpy()
+        xpar = df_branch[Headers.TUBING_MEASURED_DEPTH].to_numpy()
         ypar = [3.0] * len(xpar)
         axs.plot(xpar, ypar, "bo-")
         # find the first connection in branches
         df_annulus_with_connection_to_tubing = df_branch[
-            (df_branch[Headers.NUMBER_OF_DEVICES] > 0) | (df_branch[Headers.DEVICE_TYPE] == "PERF")
+            (df_branch[Headers.NUMBER_OF_DEVICES] > 0) | (df_branch[Headers.DEVICE_TYPE] == Content.PERFORATED)
         ]
         for idx in range(df_annulus_with_connection_to_tubing.shape[0]):
-            xpar = [df_annulus_with_connection_to_tubing[Headers.TUB_MD].iloc[idx]] * 2
+            xpar = [df_annulus_with_connection_to_tubing[Headers.TUBING_MEASURED_DEPTH].iloc[idx]] * 2
             ypar = [2.0, 3.0]
             if idx == 0:
                 axs.plot(xpar, ypar, "bo-", markevery=[1])
@@ -105,28 +105,30 @@ def visualize_reservoir(axs: Axes, ax_twinx: Axes, df_reservoir: pd.DataFrame) -
         if df_reservoir[Headers.ANNULUS_ZONE].iloc[idx] > 0:
             axs.annotate(
                 "",
-                xy=(df_reservoir[Headers.TUB_MD].iloc[idx], 3.0),
-                xytext=(df_reservoir[Headers.MD].iloc[idx], 4.0),
+                xy=(df_reservoir[Headers.TUBING_MEASURED_DEPTH].iloc[idx], 3.0),
+                xytext=(df_reservoir[Headers.MEASURED_DEPTH].iloc[idx], 4.0),
                 arrowprops=dict(facecolor="black", shrink=0.05, width=0.5, headwidth=4.0),
             )
         else:
             if (
                 df_reservoir[Headers.NUMBER_OF_DEVICES].iloc[idx] > 0
-                or df_reservoir[Headers.DEVICE_TYPE].iloc[idx] == "PERF"
+                or df_reservoir[Headers.DEVICE_TYPE].iloc[idx] == Content.PERFORATED
             ):
                 axs.annotate(
                     "",
-                    xy=(df_reservoir[Headers.TUB_MD].iloc[idx], 2.0),
-                    xytext=(df_reservoir[Headers.MD].iloc[idx], 4.0),
+                    xy=(df_reservoir[Headers.TUBING_MEASURED_DEPTH].iloc[idx], 2.0),
+                    xytext=(df_reservoir[Headers.MEASURED_DEPTH].iloc[idx], 4.0),
                     arrowprops=dict(facecolor="black", shrink=0.05, width=0.5, headwidth=4.0),
                 )
     # get connection factor
     if "1*" not in df_reservoir[Headers.CONNECTION_FACTOR].to_numpy().tolist():
         max_cf = max(df_reservoir[Headers.CONNECTION_FACTOR].to_numpy())
-        ax_twinx.plot(df_reservoir[Headers.MD], df_reservoir[Headers.CONNECTION_FACTOR], "k-")
+        ax_twinx.plot(df_reservoir[Headers.MEASURED_DEPTH], df_reservoir[Headers.CONNECTION_FACTOR], "k-")
         ax_twinx.invert_yaxis()
         ax_twinx.set_ylim([max_cf * 5.0 + 1e-5, 0])
-        ax_twinx.fill_between(df_reservoir[Headers.MD], 0, df_reservoir[Headers.CONNECTION_FACTOR], alpha=0.5)
+        ax_twinx.fill_between(
+            df_reservoir[Headers.MEASURED_DEPTH], 0, df_reservoir[Headers.CONNECTION_FACTOR], alpha=0.5
+        )
 
     return axs, ax_twinx
 
@@ -183,14 +185,14 @@ def visualize_well(
     laterals = df_well[Headers.LATERAL].unique()
     if isinstance(segment_length, float):
         if segment_length >= 0.0:
-            max_md = max(df_well[Headers.TUB_MD].to_numpy())
-            min_md = min(df_well[Headers.TUB_MD].to_numpy())
+            max_md = max(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
+            min_md = min(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
         else:
-            max_md = max(df_reservoir[Headers.MD].to_numpy())
-            min_md = min(df_reservoir[Headers.MD].to_numpy())
+            max_md = max(df_reservoir[Headers.MEASURED_DEPTH].to_numpy())
+            min_md = min(df_reservoir[Headers.MEASURED_DEPTH].to_numpy())
     elif isinstance(segment_length, str):
-        max_md = max(df_well[Headers.TUB_MD].to_numpy())
-        min_md = min(df_well[Headers.TUB_MD].to_numpy())
+        max_md = max(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
+        min_md = min(df_well[Headers.TUBING_MEASURED_DEPTH].to_numpy())
     else:
         raise TypeError(f"segment_length has invalid type ({type(segment_length)})")
     for lateral_idx, lateral in enumerate(laterals):