subsurface-terra 2025.1.0rc15__py3-none-any.whl → 2025.1.0rc17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- subsurface/__init__.py +31 -31
- subsurface/_version.py +34 -21
- subsurface/api/__init__.py +13 -13
- subsurface/api/interfaces/__init__.py +3 -3
- subsurface/api/interfaces/stream.py +136 -136
- subsurface/api/reader/read_wells.py +78 -78
- subsurface/core/geological_formats/boreholes/_combine_trajectories.py +117 -117
- subsurface/core/geological_formats/boreholes/_map_attrs_to_survey.py +236 -234
- subsurface/core/geological_formats/boreholes/_survey_to_unstruct.py +163 -163
- subsurface/core/geological_formats/boreholes/boreholes.py +140 -140
- subsurface/core/geological_formats/boreholes/collars.py +26 -26
- subsurface/core/geological_formats/boreholes/survey.py +86 -86
- subsurface/core/geological_formats/fault.py +47 -47
- subsurface/core/reader_helpers/reader_unstruct.py +11 -11
- subsurface/core/reader_helpers/readers_data.py +130 -130
- subsurface/core/reader_helpers/readers_wells.py +13 -13
- subsurface/core/structs/__init__.py +3 -3
- subsurface/core/structs/base_structures/__init__.py +2 -2
- subsurface/core/structs/base_structures/_aux.py +69 -0
- subsurface/core/structs/base_structures/_liquid_earth_mesh.py +121 -121
- subsurface/core/structs/base_structures/_unstructured_data_constructor.py +70 -70
- subsurface/core/structs/base_structures/base_structures_enum.py +6 -6
- subsurface/core/structs/base_structures/structured_data.py +282 -282
- subsurface/core/structs/base_structures/unstructured_data.py +338 -319
- subsurface/core/structs/structured_elements/octree_mesh.py +10 -10
- subsurface/core/structs/structured_elements/structured_grid.py +59 -59
- subsurface/core/structs/structured_elements/structured_mesh.py +9 -9
- subsurface/core/structs/unstructured_elements/__init__.py +3 -3
- subsurface/core/structs/unstructured_elements/line_set.py +72 -72
- subsurface/core/structs/unstructured_elements/point_set.py +43 -43
- subsurface/core/structs/unstructured_elements/tetrahedron_mesh.py +35 -35
- subsurface/core/structs/unstructured_elements/triangular_surface.py +62 -62
- subsurface/core/utils/utils_core.py +38 -38
- subsurface/modules/reader/__init__.py +13 -13
- subsurface/modules/reader/faults/faults.py +80 -80
- subsurface/modules/reader/from_binary.py +46 -46
- subsurface/modules/reader/mesh/_GOCAD_mesh.py +82 -82
- subsurface/modules/reader/mesh/_trimesh_reader.py +447 -447
- subsurface/modules/reader/mesh/csv_mesh_reader.py +53 -53
- subsurface/modules/reader/mesh/dxf_reader.py +177 -177
- subsurface/modules/reader/mesh/glb_reader.py +30 -30
- subsurface/modules/reader/mesh/mx_reader.py +232 -232
- subsurface/modules/reader/mesh/obj_reader.py +53 -53
- subsurface/modules/reader/mesh/omf_mesh_reader.py +43 -43
- subsurface/modules/reader/mesh/surface_reader.py +56 -56
- subsurface/modules/reader/mesh/surfaces_api.py +41 -41
- subsurface/modules/reader/profiles/__init__.py +3 -3
- subsurface/modules/reader/profiles/profiles_core.py +197 -197
- subsurface/modules/reader/read_netcdf.py +38 -38
- subsurface/modules/reader/topography/__init__.py +7 -7
- subsurface/modules/reader/topography/topo_core.py +100 -100
- subsurface/modules/reader/volume/read_grav3d.py +447 -428
- subsurface/modules/reader/volume/read_volume.py +327 -230
- subsurface/modules/reader/volume/segy_reader.py +105 -105
- subsurface/modules/reader/volume/seismic.py +173 -173
- subsurface/modules/reader/volume/volume_utils.py +43 -43
- subsurface/modules/reader/wells/DEP/__init__.py +43 -43
- subsurface/modules/reader/wells/DEP/_well_files_reader.py +167 -167
- subsurface/modules/reader/wells/DEP/_wells_api.py +61 -61
- subsurface/modules/reader/wells/DEP/_welly_reader.py +180 -180
- subsurface/modules/reader/wells/DEP/pandas_to_welly.py +212 -212
- subsurface/modules/reader/wells/_read_to_df.py +57 -57
- subsurface/modules/reader/wells/read_borehole_interface.py +148 -148
- subsurface/modules/reader/wells/wells_utils.py +68 -68
- subsurface/modules/tools/mocking_aux.py +104 -104
- subsurface/modules/visualization/__init__.py +2 -2
- subsurface/modules/visualization/to_pyvista.py +320 -320
- subsurface/modules/writer/to_binary.py +12 -12
- subsurface/modules/writer/to_rex/common.py +78 -78
- subsurface/modules/writer/to_rex/data_struct.py +74 -74
- subsurface/modules/writer/to_rex/gempy_to_rexfile.py +791 -791
- subsurface/modules/writer/to_rex/material_encoder.py +44 -44
- subsurface/modules/writer/to_rex/mesh_encoder.py +152 -152
- subsurface/modules/writer/to_rex/to_rex.py +115 -115
- subsurface/modules/writer/to_rex/utils.py +15 -15
- subsurface/optional_requirements.py +116 -116
- {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc17.dist-info}/METADATA +194 -194
- subsurface_terra-2025.1.0rc17.dist-info/RECORD +99 -0
- {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc17.dist-info}/WHEEL +1 -1
- {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc17.dist-info}/licenses/LICENSE +203 -203
- subsurface_terra-2025.1.0rc15.dist-info/RECORD +0 -98
- {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc17.dist-info}/top_level.txt +0 -0
|
@@ -1,212 +1,212 @@
|
|
|
1
|
-
from typing import Iterable, Union, List
|
|
2
|
-
|
|
3
|
-
import numpy as np
|
|
4
|
-
import pandas as pd
|
|
5
|
-
|
|
6
|
-
try:
|
|
7
|
-
import welly
|
|
8
|
-
from welly import Well, Location, Project, Curve
|
|
9
|
-
from striplog import Striplog, Component
|
|
10
|
-
|
|
11
|
-
welly_imported = True
|
|
12
|
-
except ImportError:
|
|
13
|
-
welly_imported = False
|
|
14
|
-
|
|
15
|
-
__all__ = ['WellyToSubsurfaceHelper', ]
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
class WellyToSubsurfaceHelper:
|
|
19
|
-
def __init__(
|
|
20
|
-
self,
|
|
21
|
-
collar_df: pd.DataFrame = None,
|
|
22
|
-
survey_df: pd.DataFrame = None,
|
|
23
|
-
lith_df: pd.DataFrame = None,
|
|
24
|
-
attrib_dfs: List[pd.DataFrame] = None
|
|
25
|
-
):
|
|
26
|
-
""" Class that wraps `welly` to read borehole data - las files (upcoming)
|
|
27
|
-
and deviations, csv, excel - and converts it into a
|
|
28
|
-
`subsurface.UnstructuredData`
|
|
29
|
-
|
|
30
|
-
This class is only meant to be extended with all the necessary functionality
|
|
31
|
-
to load borehole data. For extensive manipulations of the data
|
|
32
|
-
it should be done in `welly` itself.
|
|
33
|
-
|
|
34
|
-
A borehole has:
|
|
35
|
-
|
|
36
|
-
- Datum (XYZ location)
|
|
37
|
-
|
|
38
|
-
- Deviation
|
|
39
|
-
|
|
40
|
-
- Lithology: For this we are going to need striplog
|
|
41
|
-
|
|
42
|
-
- Logs
|
|
43
|
-
|
|
44
|
-
Everything would be a LineSet with a bunch of properties
|
|
45
|
-
|
|
46
|
-
"""
|
|
47
|
-
|
|
48
|
-
if welly_imported is False:
|
|
49
|
-
raise ImportError('You need to install welly to read well data.')
|
|
50
|
-
|
|
51
|
-
self.welly_project = Project([])
|
|
52
|
-
self._well_names = set()
|
|
53
|
-
self._unique_formations = None
|
|
54
|
-
|
|
55
|
-
if collar_df is not None: self.add_datum(collar_df)
|
|
56
|
-
if survey_df is not None: self.add_deviation(survey_df)
|
|
57
|
-
if lith_df is not None: self.add_striplog(lith_df)
|
|
58
|
-
|
|
59
|
-
# First check if is just a path or list
|
|
60
|
-
if attrib_dfs is not None:
|
|
61
|
-
for e, attrib in enumerate(attrib_dfs):
|
|
62
|
-
self.add_assays(attrib, basis='basis')
|
|
63
|
-
|
|
64
|
-
def __repr__(self):
|
|
65
|
-
return self.p.__repr__()
|
|
66
|
-
|
|
67
|
-
@property
|
|
68
|
-
def p(self):
|
|
69
|
-
"""Project Alias"""
|
|
70
|
-
return self.welly_project
|
|
71
|
-
|
|
72
|
-
@p.setter
|
|
73
|
-
def p(self, p):
|
|
74
|
-
self.welly_project = p
|
|
75
|
-
|
|
76
|
-
@property
|
|
77
|
-
def lith_component_table(self):
|
|
78
|
-
return [Component({'lith': l}) for l in self._unique_formations]
|
|
79
|
-
|
|
80
|
-
@lith_component_table.setter
|
|
81
|
-
def lith_component_table(self, unique_formations):
|
|
82
|
-
self._unique_formations = unique_formations
|
|
83
|
-
|
|
84
|
-
def add_wells(self, well_names: Iterable):
|
|
85
|
-
new_boreholes = set(well_names).difference(self._well_names)
|
|
86
|
-
|
|
87
|
-
self._well_names = self._well_names.union(well_names)
|
|
88
|
-
for b in new_boreholes:
|
|
89
|
-
# TODO: Name and uwi should be different
|
|
90
|
-
if welly.__version__ < '0.5':
|
|
91
|
-
w = Well(params={'header': {'name': b, 'uwi': b}})
|
|
92
|
-
else:
|
|
93
|
-
w = Well()
|
|
94
|
-
w.uwi = b
|
|
95
|
-
w.name = b
|
|
96
|
-
# w.location = Location(params={'kb': 100})
|
|
97
|
-
self.p += w
|
|
98
|
-
return self.p
|
|
99
|
-
|
|
100
|
-
def add_datum(self, data: pd.DataFrame):
|
|
101
|
-
self.add_wells(
|
|
102
|
-
well_names=np.unique(data.index)
|
|
103
|
-
)
|
|
104
|
-
|
|
105
|
-
for b in np.unique(data.index):
|
|
106
|
-
w = self.welly_project.get_well(b)
|
|
107
|
-
datum = data.loc[[b]]
|
|
108
|
-
assert datum.shape[1] == 3, 'datum must be XYZ coord'
|
|
109
|
-
|
|
110
|
-
w.location.position = datum.values[0]
|
|
111
|
-
|
|
112
|
-
return self.welly_project
|
|
113
|
-
|
|
114
|
-
def add_collar(self, data: pd.DataFrame):
|
|
115
|
-
"""Alias for add_datum"""
|
|
116
|
-
return self.add_datum(data=data)
|
|
117
|
-
|
|
118
|
-
def add_striplog(self, data: pd.DataFrame):
|
|
119
|
-
unique_borehole = np.unique(data.index)
|
|
120
|
-
self.add_wells(unique_borehole)
|
|
121
|
-
self.lith_component_table = data['component lith'].unique()
|
|
122
|
-
missed_borehole = []
|
|
123
|
-
for b in unique_borehole:
|
|
124
|
-
w = self.p.get_well(b)
|
|
125
|
-
data_dict = data.loc[[b]].to_dict('list')
|
|
126
|
-
data_csv = data.loc[[b]].to_csv()
|
|
127
|
-
# s = Striplog.from_dict_advanced(data_dict, points=True)
|
|
128
|
-
# s = Striplog.from_dict(data_dict)
|
|
129
|
-
s = Striplog.from_csv(text=data_csv)
|
|
130
|
-
|
|
131
|
-
try:
|
|
132
|
-
n_basis = w.location.md.shape[0]
|
|
133
|
-
except TypeError:
|
|
134
|
-
n_basis = 2
|
|
135
|
-
try:
|
|
136
|
-
start, stop, step_size = self._calculate_basis_parameters(
|
|
137
|
-
w,
|
|
138
|
-
n_basis)
|
|
139
|
-
s_log, basis, table = s.to_log(step_size, start, stop,
|
|
140
|
-
return_meta=True)
|
|
141
|
-
|
|
142
|
-
w.data['lith'] = s
|
|
143
|
-
w.data['lith_log'] = Curve(s_log, basis)
|
|
144
|
-
except TypeError:
|
|
145
|
-
missed_borehole.append(b)
|
|
146
|
-
continue
|
|
147
|
-
|
|
148
|
-
print('The following striplog failed being processed: ', missed_borehole)
|
|
149
|
-
|
|
150
|
-
return self.p
|
|
151
|
-
|
|
152
|
-
def add_assays(self, data: pd.DataFrame, basis: Union[str, Iterable]):
|
|
153
|
-
unique_borehole = np.unique(data.index)
|
|
154
|
-
self.add_wells(unique_borehole)
|
|
155
|
-
assay_attributes = data.columns
|
|
156
|
-
|
|
157
|
-
if type(basis) == str:
|
|
158
|
-
assay_attributes = assay_attributes.drop(basis)
|
|
159
|
-
basis = data[basis]
|
|
160
|
-
elif type(basis) == Iterable:
|
|
161
|
-
pass
|
|
162
|
-
else:
|
|
163
|
-
raise AttributeError('basis must be either a string with the column name'
|
|
164
|
-
'or a array like object')
|
|
165
|
-
|
|
166
|
-
for b in unique_borehole:
|
|
167
|
-
for a in assay_attributes:
|
|
168
|
-
w = self.p.get_well(b)
|
|
169
|
-
w.data[a] = Curve(
|
|
170
|
-
data=data[a],
|
|
171
|
-
basis=basis
|
|
172
|
-
)
|
|
173
|
-
|
|
174
|
-
return self.p
|
|
175
|
-
|
|
176
|
-
@staticmethod
|
|
177
|
-
def _calculate_basis_parameters(well, n_points):
|
|
178
|
-
|
|
179
|
-
max_ = well.location.md.max()
|
|
180
|
-
min_ = well.location.md.min()
|
|
181
|
-
step_size = (max_ - min_) / n_points
|
|
182
|
-
return min_ + step_size / 2, max_ - step_size / 2, step_size + 1e-12
|
|
183
|
-
|
|
184
|
-
def add_deviation(
|
|
185
|
-
self,
|
|
186
|
-
deviations: pd.DataFrame,
|
|
187
|
-
td=None,
|
|
188
|
-
method='mc',
|
|
189
|
-
update_deviation=True,
|
|
190
|
-
azimuth_datum=0
|
|
191
|
-
):
|
|
192
|
-
""" Add a deviation survey to this instance, and try to compute a position
|
|
193
|
-
log from it.
|
|
194
|
-
|
|
195
|
-
"""
|
|
196
|
-
unique_borehole = np.unique(deviations.index)
|
|
197
|
-
self.add_wells(unique_borehole)
|
|
198
|
-
|
|
199
|
-
for b in unique_borehole:
|
|
200
|
-
w = self.p.get_well(b)
|
|
201
|
-
deviations_df: pd.DataFrame = deviations.loc[[b], ['md', 'inc', 'azi']]
|
|
202
|
-
deviations_df.fillna(0, inplace=True)
|
|
203
|
-
w.location.add_deviation(
|
|
204
|
-
deviation=deviations_df,
|
|
205
|
-
td=td,
|
|
206
|
-
method=method,
|
|
207
|
-
update_deviation=update_deviation,
|
|
208
|
-
azimuth_datum=azimuth_datum)
|
|
209
|
-
if w.location.position is None:
|
|
210
|
-
raise ValueError('Deviations could not be calculated.')
|
|
211
|
-
|
|
212
|
-
return self.p
|
|
1
|
+
from typing import Iterable, Union, List
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import pandas as pd
|
|
5
|
+
|
|
6
|
+
try:
|
|
7
|
+
import welly
|
|
8
|
+
from welly import Well, Location, Project, Curve
|
|
9
|
+
from striplog import Striplog, Component
|
|
10
|
+
|
|
11
|
+
welly_imported = True
|
|
12
|
+
except ImportError:
|
|
13
|
+
welly_imported = False
|
|
14
|
+
|
|
15
|
+
__all__ = ['WellyToSubsurfaceHelper', ]
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class WellyToSubsurfaceHelper:
    """Class that wraps `welly` to read borehole data - las files (upcoming)
    and deviations, csv, excel - and converts it into a
    `subsurface.UnstructuredData`

    This class is only meant to be extended with all the necessary functionality
    to load borehole data. For extensive manipulations of the data
    it should be done in `welly` itself.

    A borehole has:

        - Datum (XYZ location)
        - Deviation
        - Lithology: For this we are going to need striplog
        - Logs

    Everything would be a LineSet with a bunch of properties
    """

    def __init__(
            self,
            collar_df: pd.DataFrame = None,
            survey_df: pd.DataFrame = None,
            lith_df: pd.DataFrame = None,
            attrib_dfs: List[pd.DataFrame] = None
    ):
        """Optionally ingest collar, survey, lithology and assay tables.

        Args:
            collar_df: XYZ datum per well, indexed by well name.
            survey_df: deviation survey with 'md', 'inc', 'azi' columns,
                indexed by well name.
            lith_df: striplog table with a 'component lith' column,
                indexed by well name.
            attrib_dfs: list of assay tables; each must contain a 'basis'
                column holding the measured depths.

        Raises:
            ImportError: if `welly` is not installed.
        """
        if not welly_imported:
            raise ImportError('You need to install welly to read well data.')

        self.welly_project = Project([])
        self._well_names = set()
        self._unique_formations = None

        if collar_df is not None:
            self.add_datum(collar_df)
        if survey_df is not None:
            self.add_deviation(survey_df)
        if lith_df is not None:
            self.add_striplog(lith_df)

        # Attribute tables are optional; each one is attached as a set of
        # assay curves using its 'basis' column as the depth reference.
        if attrib_dfs is not None:
            for attrib in attrib_dfs:
                self.add_assays(attrib, basis='basis')

    def __repr__(self):
        return self.p.__repr__()

    @property
    def p(self):
        """Project Alias"""
        return self.welly_project

    @p.setter
    def p(self, p):
        self.welly_project = p

    @property
    def lith_component_table(self):
        """striplog `Component` objects built from the unique formation names."""
        return [Component({'lith': l}) for l in self._unique_formations]

    @lith_component_table.setter
    def lith_component_table(self, unique_formations):
        self._unique_formations = unique_formations

    @staticmethod
    def _welly_older_than_0_5() -> bool:
        """Return True when the installed welly predates 0.5.

        BUG FIX: the original lexicographic comparison
        (`welly.__version__ < '0.5'`) misorders versions such as '0.10'.
        """
        try:
            major, minor = (int(part) for part in welly.__version__.split('.')[:2])
        except ValueError:
            # Non-numeric or short version tag: assume a modern release.
            return False
        return (major, minor) < (0, 5)

    def add_wells(self, well_names: Iterable):
        """Register any well names not seen before as empty welly Wells.

        Returns:
            The welly Project containing all wells.
        """
        new_boreholes = set(well_names).difference(self._well_names)

        self._well_names = self._well_names.union(well_names)
        for name in new_boreholes:
            # TODO: Name and uwi should be different
            if self._welly_older_than_0_5():
                well = Well(params={'header': {'name': name, 'uwi': name}})
            else:
                well = Well()
                well.uwi = name
                well.name = name
            self.p += well
        return self.p

    def add_datum(self, data: pd.DataFrame):
        """Attach an XYZ datum (collar position) to each well in `data`.

        Args:
            data: DataFrame with exactly three columns (XYZ), indexed by
                well name.
        """
        well_names = np.unique(data.index)
        self.add_wells(well_names=well_names)

        for name in well_names:
            well = self.welly_project.get_well(name)
            datum = data.loc[[name]]
            assert datum.shape[1] == 3, 'datum must be XYZ coord'
            well.location.position = datum.values[0]

        return self.welly_project

    def add_collar(self, data: pd.DataFrame):
        """Alias for add_datum"""
        return self.add_datum(data=data)

    def add_striplog(self, data: pd.DataFrame):
        """Attach a lithology striplog (and a sampled lith log) to each well.

        Wells whose striplog cannot be resampled are reported and skipped.
        """
        unique_borehole = np.unique(data.index)
        self.add_wells(unique_borehole)
        self.lith_component_table = data['component lith'].unique()
        missed_borehole = []
        for name in unique_borehole:
            well = self.p.get_well(name)
            strip = Striplog.from_csv(text=data.loc[[name]].to_csv())

            try:
                n_basis = well.location.md.shape[0]
            except TypeError:
                # No deviation survey loaded yet: fall back to a 2-point basis.
                n_basis = 2
            try:
                start, stop, step_size = self._calculate_basis_parameters(
                    well,
                    n_basis)
                s_log, basis, _table = strip.to_log(step_size, start, stop,
                                                    return_meta=True)

                well.data['lith'] = strip
                well.data['lith_log'] = Curve(s_log, basis)
            except TypeError:
                missed_borehole.append(name)
                continue

        print('The following striplog failed being processed: ', missed_borehole)

        return self.p

    def add_assays(self, data: pd.DataFrame, basis: Union[str, Iterable]):
        """Attach every column of `data` as a Curve on the matching wells.

        Args:
            data: assay table indexed by well name.
            basis: either the name of the column holding the measured depths,
                or an array-like with the depths themselves.

        Raises:
            AttributeError: if `basis` is neither a string nor iterable.
        """
        unique_borehole = np.unique(data.index)
        self.add_wells(unique_borehole)
        assay_attributes = data.columns

        if isinstance(basis, str):
            assay_attributes = assay_attributes.drop(basis)
            basis = data[basis]
        elif isinstance(basis, Iterable):
            # BUG FIX: the original `type(basis) == Iterable` could never be
            # true, so any array-like basis used to raise AttributeError.
            pass
        else:
            raise AttributeError('basis must be either a string with the column name '
                                 'or an array like object')

        for name in unique_borehole:
            well = self.p.get_well(name)  # hoisted out of the inner loop
            for attr in assay_attributes:
                well.data[attr] = Curve(
                    data=data[attr],
                    basis=basis
                )

        return self.p

    @staticmethod
    def _calculate_basis_parameters(well, n_points):
        """Return (start, stop, step) for `n_points` cell-centered samples
        spanning the well's measured-depth range."""
        max_ = well.location.md.max()
        min_ = well.location.md.min()
        step_size = (max_ - min_) / n_points
        # Tiny epsilon keeps the last sample from being dropped by binning.
        return min_ + step_size / 2, max_ - step_size / 2, step_size + 1e-12

    def add_deviation(
            self,
            deviations: pd.DataFrame,
            td=None,
            method='mc',
            update_deviation=True,
            azimuth_datum=0
    ):
        """ Add a deviation survey to this instance, and try to compute a position
        log from it.

        Raises:
            ValueError: if welly could not derive positions from the survey.
        """
        unique_borehole = np.unique(deviations.index)
        self.add_wells(unique_borehole)

        for name in unique_borehole:
            well = self.p.get_well(name)
            # `.fillna` without `inplace` avoids mutating a `.loc` slice
            # (pandas SettingWithCopy hazard).
            deviations_df: pd.DataFrame = deviations.loc[[name], ['md', 'inc', 'azi']].fillna(0)
            well.location.add_deviation(
                deviation=deviations_df,
                td=td,
                method=method,
                update_deviation=update_deviation,
                azimuth_datum=azimuth_datum)
            if well.location.position is None:
                raise ValueError('Deviations could not be calculated.')

        return self.p
|
|
@@ -1,57 +1,57 @@
|
|
|
1
|
-
import io
|
|
2
|
-
import pathlib
|
|
3
|
-
from typing import Callable
|
|
4
|
-
|
|
5
|
-
from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper, SupportedFormats
|
|
6
|
-
import pandas as pd
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
def check_format_and_read_to_df(reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
|
|
10
|
-
# ? This swithch is veeery confusing
|
|
11
|
-
match (reader_helper.file_or_buffer, reader_helper.format):
|
|
12
|
-
case _, SupportedFormats.JSON | ".json":
|
|
13
|
-
d = pd.read_json(reader_helper.file_or_buffer, orient='split')
|
|
14
|
-
case str() | pathlib.Path(), _:
|
|
15
|
-
reader: Callable = _get_reader(reader_helper.format)
|
|
16
|
-
d = reader(
|
|
17
|
-
filepath_or_buffer=reader_helper.file_or_buffer,
|
|
18
|
-
sep=reader_helper.separator,
|
|
19
|
-
**reader_helper.pandas_reader_kwargs
|
|
20
|
-
)
|
|
21
|
-
case (bytes() | io.BytesIO() | io.StringIO() | io.TextIOWrapper()), _:
|
|
22
|
-
reader = _get_reader(reader_helper.format)
|
|
23
|
-
d = reader(reader_helper.file_or_buffer, **reader_helper.pandas_reader_kwargs)
|
|
24
|
-
case dict(), _:
|
|
25
|
-
reader = _get_reader('dict')
|
|
26
|
-
d = reader(reader_helper.file_or_buffer)
|
|
27
|
-
case _:
|
|
28
|
-
raise AttributeError('file_or_buffer must be either a path or a dict')
|
|
29
|
-
|
|
30
|
-
if type(d.columns) is str: d.columns = d.columns.str.strip() # Remove spaces at the beginning and end
|
|
31
|
-
if type(d.index) is str: d.index = d.index.str.strip() # Remove spaces at the beginning and end
|
|
32
|
-
return d
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
def _get_reader(file_format) -> Callable:
|
|
38
|
-
def _dict_reader(dict_):
|
|
39
|
-
return pd.DataFrame(
|
|
40
|
-
data=dict_['data'],
|
|
41
|
-
columns=dict_['columns'],
|
|
42
|
-
index=dict_['index']
|
|
43
|
-
)
|
|
44
|
-
|
|
45
|
-
match file_format:
|
|
46
|
-
case SupportedFormats.XLXS:
|
|
47
|
-
raise NotImplemented("Pandas changed the backend for reading excel files and needs to be re-implemented")
|
|
48
|
-
reader = pd.read_excel
|
|
49
|
-
case 'dict':
|
|
50
|
-
reader = _dict_reader
|
|
51
|
-
case SupportedFormats.CSV:
|
|
52
|
-
reader = pd.read_csv
|
|
53
|
-
case SupportedFormats.JSON:
|
|
54
|
-
reader = _dict_reader
|
|
55
|
-
case _:
|
|
56
|
-
raise ValueError(f"Subsurface is not able to read the following extension: {file_format}")
|
|
57
|
-
return reader
|
|
1
|
+
import io
|
|
2
|
+
import pathlib
|
|
3
|
+
from typing import Callable
|
|
4
|
+
|
|
5
|
+
from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper, SupportedFormats
|
|
6
|
+
import pandas as pd
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def check_format_and_read_to_df(reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
|
|
10
|
+
# ? This swithch is veeery confusing
|
|
11
|
+
match (reader_helper.file_or_buffer, reader_helper.format):
|
|
12
|
+
case _, SupportedFormats.JSON | ".json":
|
|
13
|
+
d = pd.read_json(reader_helper.file_or_buffer, orient='split')
|
|
14
|
+
case str() | pathlib.Path(), _:
|
|
15
|
+
reader: Callable = _get_reader(reader_helper.format)
|
|
16
|
+
d = reader(
|
|
17
|
+
filepath_or_buffer=reader_helper.file_or_buffer,
|
|
18
|
+
sep=reader_helper.separator,
|
|
19
|
+
**reader_helper.pandas_reader_kwargs
|
|
20
|
+
)
|
|
21
|
+
case (bytes() | io.BytesIO() | io.StringIO() | io.TextIOWrapper()), _:
|
|
22
|
+
reader = _get_reader(reader_helper.format)
|
|
23
|
+
d = reader(reader_helper.file_or_buffer, **reader_helper.pandas_reader_kwargs)
|
|
24
|
+
case dict(), _:
|
|
25
|
+
reader = _get_reader('dict')
|
|
26
|
+
d = reader(reader_helper.file_or_buffer)
|
|
27
|
+
case _:
|
|
28
|
+
raise AttributeError('file_or_buffer must be either a path or a dict')
|
|
29
|
+
|
|
30
|
+
if type(d.columns) is str: d.columns = d.columns.str.strip() # Remove spaces at the beginning and end
|
|
31
|
+
if type(d.index) is str: d.index = d.index.str.strip() # Remove spaces at the beginning and end
|
|
32
|
+
return d
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _get_reader(file_format) -> Callable:
|
|
38
|
+
def _dict_reader(dict_):
|
|
39
|
+
return pd.DataFrame(
|
|
40
|
+
data=dict_['data'],
|
|
41
|
+
columns=dict_['columns'],
|
|
42
|
+
index=dict_['index']
|
|
43
|
+
)
|
|
44
|
+
|
|
45
|
+
match file_format:
|
|
46
|
+
case SupportedFormats.XLXS:
|
|
47
|
+
raise NotImplemented("Pandas changed the backend for reading excel files and needs to be re-implemented")
|
|
48
|
+
reader = pd.read_excel
|
|
49
|
+
case 'dict':
|
|
50
|
+
reader = _dict_reader
|
|
51
|
+
case SupportedFormats.CSV:
|
|
52
|
+
reader = pd.read_csv
|
|
53
|
+
case SupportedFormats.JSON:
|
|
54
|
+
reader = _dict_reader
|
|
55
|
+
case _:
|
|
56
|
+
raise ValueError(f"Subsurface is not able to read the following extension: {file_format}")
|
|
57
|
+
return reader
|