completor 0.1.2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- completor/completion.py +152 -542
- completor/constants.py +223 -150
- completor/create_output.py +559 -431
- completor/exceptions/exceptions.py +6 -6
- completor/get_version.py +8 -0
- completor/hook_implementations/jobs.py +2 -3
- completor/input_validation.py +53 -41
- completor/launch_args_parser.py +7 -12
- completor/logger.py +3 -3
- completor/main.py +102 -360
- completor/parse.py +104 -93
- completor/prepare_outputs.py +593 -457
- completor/read_casefile.py +248 -197
- completor/read_schedule.py +317 -14
- completor/utils.py +256 -25
- completor/visualization.py +1 -14
- completor/visualize_well.py +29 -27
- completor/wells.py +273 -0
- {completor-0.1.2.dist-info → completor-1.0.0.dist-info}/METADATA +10 -11
- completor-1.0.0.dist-info/RECORD +27 -0
- completor/create_wells.py +0 -314
- completor/pvt_model.py +0 -14
- completor-0.1.2.dist-info/RECORD +0 -27
- {completor-0.1.2.dist-info → completor-1.0.0.dist-info}/LICENSE +0 -0
- {completor-0.1.2.dist-info → completor-1.0.0.dist-info}/WHEEL +0 -0
- {completor-0.1.2.dist-info → completor-1.0.0.dist-info}/entry_points.txt +0 -0
completor/read_schedule.py
CHANGED
|
@@ -3,17 +3,17 @@ from __future__ import annotations
|
|
|
3
3
|
import numpy as np
|
|
4
4
|
import pandas as pd
|
|
5
5
|
|
|
6
|
-
from completor.constants import Headers
|
|
6
|
+
from completor.constants import Content, Headers, Keywords, ScheduleData, WellData
|
|
7
7
|
from completor.logger import logger
|
|
8
8
|
from completor.utils import sort_by_midpoint
|
|
9
9
|
|
|
10
10
|
|
|
11
11
|
def fix_welsegs(df_header: pd.DataFrame, df_content: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]:
|
|
12
|
-
"""Convert a
|
|
12
|
+
"""Convert a WELL_SEGMENTS DataFrame specified in incremental (INC) to absolute (ABS) values.
|
|
13
13
|
|
|
14
14
|
Args:
|
|
15
|
-
df_header: First record table of
|
|
16
|
-
df_content: Second record table of
|
|
15
|
+
df_header: First record table of WELL_SEGMENTS.
|
|
16
|
+
df_content: Second record table of WELL_SEGMENTS.
|
|
17
17
|
|
|
18
18
|
Returns:
|
|
19
19
|
Updated header DataFrame, Updated content DataFrame.
|
|
@@ -24,12 +24,12 @@ def fix_welsegs(df_header: pd.DataFrame, df_content: pd.DataFrame) -> tuple[pd.D
|
|
|
24
24
|
if df_header[Headers.INFO_TYPE].iloc[0] == "ABS":
|
|
25
25
|
return df_header, df_content
|
|
26
26
|
|
|
27
|
-
ref_tvd = df_header[Headers.
|
|
28
|
-
ref_md = df_header[Headers.
|
|
27
|
+
ref_tvd = df_header[Headers.TRUE_VERTICAL_DEPTH].iloc[0]
|
|
28
|
+
ref_md = df_header[Headers.MEASURED_DEPTH].iloc[0]
|
|
29
29
|
inlet_segment = df_content[Headers.TUBING_SEGMENT].to_numpy()
|
|
30
30
|
outlet_segment = df_content[Headers.TUBING_OUTLET].to_numpy()
|
|
31
|
-
md_inc = df_content[Headers.
|
|
32
|
-
tvd_inc = df_content[Headers.
|
|
31
|
+
md_inc = df_content[Headers.TUBING_MEASURED_DEPTH].to_numpy()
|
|
32
|
+
tvd_inc = df_content[Headers.TRUE_VERTICAL_DEPTH].to_numpy()
|
|
33
33
|
md_new = np.zeros(inlet_segment.shape[0])
|
|
34
34
|
tvd_new = np.zeros(inlet_segment.shape[0])
|
|
35
35
|
|
|
@@ -44,8 +44,8 @@ def fix_welsegs(df_header: pd.DataFrame, df_content: pd.DataFrame) -> tuple[pd.D
|
|
|
44
44
|
|
|
45
45
|
# update data frame
|
|
46
46
|
df_header[Headers.INFO_TYPE] = ["ABS"]
|
|
47
|
-
df_content[Headers.
|
|
48
|
-
df_content[Headers.
|
|
47
|
+
df_content[Headers.TUBING_MEASURED_DEPTH] = md_new
|
|
48
|
+
df_content[Headers.TRUE_VERTICAL_DEPTH] = tvd_new
|
|
49
49
|
return df_header, df_content
|
|
50
50
|
|
|
51
51
|
|
|
@@ -54,7 +54,7 @@ def fix_compsegs(df_compsegs: pd.DataFrame, well_name: str) -> pd.DataFrame:
|
|
|
54
54
|
|
|
55
55
|
The issue occurs when one cell is penetrated more than once by a well, and happens
|
|
56
56
|
when there are big cells and the well path is complex.
|
|
57
|
-
The issue can be observed from a
|
|
57
|
+
The issue can be observed from a COMPLETION_SEGMENTS definition that has overlapping start and end measured depth.
|
|
58
58
|
|
|
59
59
|
Args:
|
|
60
60
|
df_compsegs: DataFrame.
|
|
@@ -89,7 +89,11 @@ def fix_compsegs(df_compsegs: pd.DataFrame, well_name: str) -> pd.DataFrame:
|
|
|
89
89
|
start_md_new[idx] = start_md[idx]
|
|
90
90
|
end_md_new[idx] = end_md[idx]
|
|
91
91
|
else:
|
|
92
|
-
logger.info(
|
|
92
|
+
logger.info(
|
|
93
|
+
"Overlapping in COMPLETION_SEGMENTS%s for %s. Sorts the depths accordingly",
|
|
94
|
+
Keywords.COMPLETION_SEGMENTS,
|
|
95
|
+
well_name,
|
|
96
|
+
)
|
|
93
97
|
comb_depth = np.append(start_md, end_md)
|
|
94
98
|
comb_depth = np.sort(comb_depth)
|
|
95
99
|
start_md_new = np.copy(comb_depth[::2])
|
|
@@ -107,7 +111,10 @@ def fix_compsegs(df_compsegs: pd.DataFrame, well_name: str) -> pd.DataFrame:
|
|
|
107
111
|
if start_md_new[idx] >= end_md_new[idx - 1]:
|
|
108
112
|
start_md_new[idx] = end_md_new[idx - 1]
|
|
109
113
|
else:
|
|
110
|
-
logger.error(
|
|
114
|
+
logger.error(
|
|
115
|
+
"Cannot construct COMPLETION_SEGMENTS%s segments based on current input",
|
|
116
|
+
Keywords.COMPLETION_SEGMENTS,
|
|
117
|
+
)
|
|
111
118
|
return sort_by_midpoint(df_compsegs, start_md_new, end_md_new)
|
|
112
119
|
|
|
113
120
|
|
|
@@ -126,7 +133,8 @@ def fix_compsegs_by_priority(
|
|
|
126
133
|
"""
|
|
127
134
|
# slicing two dataframe for user and cells segment length
|
|
128
135
|
start_md_comp = df_completion[
|
|
129
|
-
(df_completion[Headers.DEVICE_TYPE] ==
|
|
136
|
+
(df_completion[Headers.DEVICE_TYPE] == Content.INFLOW_CONTROL_VALVE)
|
|
137
|
+
& (df_completion[Headers.VALVES_PER_JOINT] > 0)
|
|
130
138
|
][Headers.START_MEASURED_DEPTH].reset_index(drop=True)
|
|
131
139
|
df_custom_compsegs = df_custom_compsegs[df_custom_compsegs[Headers.START_MEASURED_DEPTH].isin(start_md_comp)]
|
|
132
140
|
df_compsegs["priority"] = 1
|
|
@@ -158,3 +166,298 @@ def fix_compsegs_by_priority(
|
|
|
158
166
|
df = df.dropna()
|
|
159
167
|
|
|
160
168
|
return df.drop("priority", axis=1)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def set_welspecs(schedule_data: ScheduleData, records: list[list[str]]) -> ScheduleData:
    """Convert the well specifications (WELSPECS) record to a Pandas DataFrame.

    * Sets DataFrame column titles.
    * Formats column values.
    * Pads missing columns at the end of each record with default values (1*).

    Args:
        schedule_data: Data containing multisegmented well schedules.
        records: Raw well specification.

    Returns:
        Multisegmented wells with updated welspecs records.
    """
    columns = [
        Headers.WELL,
        Headers.GROUP,
        Headers.I,
        Headers.J,
        Headers.BHP_DEPTH,
        Headers.PHASE,
        Headers.DR,
        Headers.FLAG,
        Headers.SHUT,
        Headers.FLOW_CROSS_SECTIONAL_AREA,
        Headers.PRESSURE_TABLE,
        Headers.DENSITY_CALCULATION_TYPE,
        Headers.REGION,
        Headers.RESERVED_HEADER_1,
        Headers.RESERVED_HEADER_2,
        Headers.WELL_MODEL_TYPE,
        Headers.POLYMER_MIXING_TABLE_NUMBER,
    ]
    # Pad each record individually with default values (1*). Padding by the
    # first record's length alone would break when records have unequal lengths.
    _records = [rec + ["1*"] * (len(columns) - len(rec)) for rec in records]
    df = pd.DataFrame(_records, columns=columns)
    df[columns[2:4]] = df[columns[2:4]].astype(np.int64)
    df[columns[4]] = df[columns[4]].astype(np.float64, errors="ignore")
    # welspecs could be for multiple wells - split it
    for well_name in df[Headers.WELL].unique():
        if well_name not in schedule_data:
            schedule_data[well_name] = {}
        schedule_data[well_name][Keywords.WELL_SPECIFICATION] = df[df[Headers.WELL] == well_name]
        logger.debug("set_welspecs for %s", well_name)
    return schedule_data
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def set_welsegs(schedule_data: ScheduleData, recs: list[list[str]]) -> ScheduleData:
    """Update the well segments (WELSEGS) for a given well.

    * Pads missing record columns in header and contents with default values (1*).
    * Converts header and content records to DataFrames.
    * Sets proper DataFrame column types and titles.
    * Converts segment depth specified in incremental (INC) to absolute (ABS) values using fix_welsegs.

    Args:
        schedule_data: Data containing multisegmented well schedules.
        recs: Record set of header and contents data.

    Returns:
        The schedule data updated with this well's WELL_SEGMENTS header and content DataFrames.
    """
    well_name = recs[0][0]  # each WELL_SEGMENTS-chunk is for one well only
    columns_header = [
        Headers.WELL,
        Headers.TRUE_VERTICAL_DEPTH,
        Headers.MEASURED_DEPTH,
        Headers.WELLBORE_VOLUME,
        Headers.INFO_TYPE,
        Headers.PRESSURE_DROP_COMPLETION,
        Headers.MULTIPHASE_FLOW_MODEL,
        Headers.X_COORDINATE_TOP_SEGMENT,
        Headers.Y_COORDINATE_TOP_SEGMENT,
        Headers.THERMAL_CONDUCTIVITY_CROSS_SECTIONAL_AREA,
        Headers.VOLUMETRIC_HEAT_CAPACITY_PIPE_WALL,
        Headers.THERMAL_CONDUCTIVITY_PIPE_WALL,
    ]
    # pad header with default values (1*)
    header = recs[0] + ["1*"] * (len(columns_header) - len(recs[0]))
    df_header = pd.DataFrame(np.array(header).reshape((1, len(columns_header))), columns=columns_header)
    df_header[columns_header[1:3]] = df_header[columns_header[1:3]].astype(np.float64)  # data types

    # make df for data records
    columns_data = [
        Headers.TUBING_SEGMENT,
        Headers.TUBING_SEGMENT_2,
        Headers.TUBING_BRANCH,
        Headers.TUBING_OUTLET,
        Headers.TUBING_MEASURED_DEPTH,
        Headers.TRUE_VERTICAL_DEPTH,
        Headers.TUBING_INNER_DIAMETER,
        Headers.TUBING_ROUGHNESS,
        Headers.FLOW_CROSS_SECTIONAL_AREA,
        Headers.SEGMENT_VOLUME,
        Headers.X_COORDINATE_LAST_SEGMENT,
        Headers.Y_COORDINATE_LAST_SEGMENT,
        Headers.THERMAL_CONDUCTIVITY_CROSS_SECTIONAL_AREA,
        Headers.VOLUMETRIC_HEAT_CAPACITY_PIPE_WALL,
        Headers.THERMAL_CONDUCTIVITY_PIPE_WALL,
    ]
    # pad with default values (1*)
    recs = [rec + ["1*"] * (len(columns_data) - len(rec)) for rec in recs[1:]]
    df_records = pd.DataFrame(recs, columns=columns_data)
    # data types
    df_records[columns_data[:4]] = df_records[columns_data[:4]].astype(np.int64)
    df_records[columns_data[4:8]] = df_records[columns_data[4:8]].astype(np.float64)
    # fix abs/inc issue with welsegs
    df_header, df_records = fix_welsegs(df_header, df_records)

    # Warn user if the tubing segments' measured depth for a branch
    # is not sorted in ascending order (monotonic)
    for branch_num in df_records[Headers.TUBING_BRANCH].unique():
        if (
            not df_records[Headers.TUBING_MEASURED_DEPTH]
            .loc[df_records[Headers.TUBING_BRANCH] == branch_num]
            .is_monotonic_increasing
        ):
            # NOTE: argument order fixed - the original passed the keyword first,
            # which scrambled the placeholders in the logged message.
            logger.warning(
                "The branch %s in well %s contains negative length segments. Check the input schedulefile %s "
                "keyword for inconsistencies in measured depth (MEASURED_DEPTH) of Tubing layer.",
                branch_num,
                well_name,
                Keywords.WELL_SEGMENTS,
            )

    if well_name not in schedule_data:
        schedule_data[well_name] = {}
    schedule_data[well_name][Keywords.WELL_SEGMENTS] = df_header, df_records
    return schedule_data
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
def set_compsegs(schedule_data: ScheduleData, recs: list[list[str]]) -> ScheduleData:
    """Convert a well's completion segments (COMPSEGS) records to a DataFrame.

    * Pads missing record columns with default values (1*).
    * Converts content records to a DataFrame.
    * Sets proper DataFrame column types and titles.

    Args:
        schedule_data: Data containing multisegmented well schedules.
        recs: Record set of header and contents data.

    Returns:
        The schedule data updated with this well's COMPLETION_SEGMENTS DataFrame.
    """
    well_name = recs[0][0]  # each COMPLETION_SEGMENTS-chunk is for one well only
    columns = [
        Headers.I,
        Headers.J,
        Headers.K,
        Headers.BRANCH,
        Headers.START_MEASURED_DEPTH,
        Headers.END_MEASURED_DEPTH,
        Headers.COMPSEGS_DIRECTION,
        Headers.ENDGRID,
        Headers.PERFORATION_DEPTH,
        Headers.THERMAL_CONTACT_LENGTH,
        Headers.SEGMENT,
    ]
    recs = np.array(recs[1:])
    # Pad every record row out to the full column count with default values (1*).
    recs = np.pad(recs, ((0, 0), (0, len(columns) - recs.shape[1])), "constant", constant_values="1*")
    df = pd.DataFrame(recs, columns=columns)
    df[columns[:4]] = df[columns[:4]].astype(np.int64)
    df[columns[4:6]] = df[columns[4:6]].astype(np.float64)
    if well_name not in schedule_data:
        schedule_data[well_name] = {}
    schedule_data[well_name][Keywords.COMPLETION_SEGMENTS] = df
    logger.debug("set_compsegs for %s", well_name)
    return schedule_data
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
def set_compdat(schedule_data: ScheduleData, records: list[list[str]]) -> ScheduleData:
    """Convert completion data (COMPDAT) records to a DataFrame.

    * Sets DataFrame column titles.
    * Pads missing values with default values (1*).
    * Sets column data types.

    Args:
        schedule_data: Data containing multisegmented well schedules.
        records: Record set of COMPLETION_DATA data.

    Returns:
        The schedule data updated with each well's COMPLETION_DATA DataFrame.
    """
    columns = [
        Headers.WELL,
        Headers.I,
        Headers.J,
        Headers.K,
        Headers.K2,
        Headers.STATUS,
        Headers.SATURATION_FUNCTION_REGION_NUMBERS,
        Headers.CONNECTION_FACTOR,
        Headers.WELL_BORE_DIAMETER,
        Headers.FORMATION_PERMEABILITY_THICKNESS,
        Headers.SKIN,
        Headers.D_FACTOR,
        Headers.COMPDAT_DIRECTION,
        Headers.RO,
    ]
    df = pd.DataFrame(records, columns=columns[0 : len(records[0])])
    if Headers.RO in df.columns:
        df[Headers.RO] = df[Headers.RO].fillna("1*")
    # Pad any columns missing from the records with default values (1*).
    for i in range(len(records[0]), len(columns)):
        df[columns[i]] = ["1*"] * len(records)
    df[columns[1:5]] = df[columns[1:5]].astype(np.int64)
    # Change default value '1*' to equivalent float.
    # NOTE: use the Headers constant instead of the raw "SKIN" literal so the
    # replace targets the same column that is cast to float just below.
    df[Headers.SKIN] = df[Headers.SKIN].replace(["1*"], 0.0)
    df[[Headers.WELL_BORE_DIAMETER, Headers.SKIN]] = df[[Headers.WELL_BORE_DIAMETER, Headers.SKIN]].astype(np.float64)
    # CONNECTION_FACTOR, FORMATION_PERMEABILITY_THICKNESS, and RO may be defaulted (1*)
    # by the users, in which case the float cast is skipped (errors="ignore").
    df = df.astype(
        {
            Headers.CONNECTION_FACTOR: np.float64,
            Headers.FORMATION_PERMEABILITY_THICKNESS: np.float64,
            Headers.RO: np.float64,
        },
        errors="ignore",
    )
    # Compdat could be for multiple wells, split it.
    unique_wells = df[Headers.WELL].unique()
    for well_name in unique_wells:
        if well_name not in schedule_data:
            schedule_data[well_name] = {}
        schedule_data[well_name][Keywords.COMPLETION_DATA] = df[df[Headers.WELL] == well_name]
        logger.debug("handle_compdat for %s", well_name)
    return schedule_data
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
def get_completion_data(well_data: WellData) -> pd.DataFrame:
    """Get-function for COMPLETION_DATA.

    Args:
        well_data: Segment information.

    Returns:
        Completion data.

    Raises:
        KeyError: If the completion data keyword is missing in the input schedule file.
    """
    data = well_data.get(Keywords.COMPLETION_DATA)
    if data is None:
        raise KeyError(f"Input schedule file missing {Keywords.COMPLETION_DATA} keyword.")
    return data  # type: ignore  # TODO(#173): Use TypedDict for WellData.
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def get_completion_segments(well_data: WellData, well_name: str, branch: int | None = None) -> pd.DataFrame:
    """Get-function for COMPLETION_SEGMENTS.

    Args:
        well_data: Data containing multisegmented well segments.
        well_name: Well name.
        branch: Branch number.

    Returns:
        Completion segment data.
    """
    segments = well_data[Keywords.COMPLETION_SEGMENTS].copy()  # type: ignore  # TODO(#173): Use TypedDict for WellData.
    if branch is not None:
        branch_mask = segments[Headers.BRANCH] == branch
        segments = segments[branch_mask]
    # Reset the index so positional indexing is consistent after filtering.
    segments = segments.reset_index(drop=True)
    return fix_compsegs(segments, well_name)
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
def get_well_segments(well_data: WellData, branch: int | None = None) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Get-function for well segments.

    Args:
        well_data: The multisegmented wells.
        branch: Branch number.

    Returns:
        Well segments headers and content.

    Raises:
        ValueError: If WELL_SEGMENTS keyword missing in input schedule file.
    """
    segments = well_data.get(Keywords.WELL_SEGMENTS)
    if segments is None:
        raise ValueError(f"Input schedule file missing {Keywords.WELL_SEGMENTS} keyword.")
    headers, content = segments

    if branch is not None:
        content = content.loc[content[Headers.TUBING_BRANCH] == branch]
    # Re-number rows from zero so downstream positional access is consistent.
    return headers, content.reset_index(drop=True)
|