subsurface-terra 2025.1.0rc14-py3-none-any.whl → 2025.1.0rc16-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. subsurface/__init__.py +31 -31
  2. subsurface/_version.py +34 -21
  3. subsurface/api/__init__.py +13 -13
  4. subsurface/api/interfaces/__init__.py +3 -3
  5. subsurface/api/interfaces/stream.py +136 -136
  6. subsurface/api/reader/read_wells.py +78 -78
  7. subsurface/core/geological_formats/boreholes/_combine_trajectories.py +117 -117
  8. subsurface/core/geological_formats/boreholes/_map_attrs_to_survey.py +236 -0
  9. subsurface/core/geological_formats/boreholes/_survey_to_unstruct.py +163 -0
  10. subsurface/core/geological_formats/boreholes/boreholes.py +140 -116
  11. subsurface/core/geological_formats/boreholes/collars.py +26 -26
  12. subsurface/core/geological_formats/boreholes/survey.py +86 -380
  13. subsurface/core/geological_formats/fault.py +47 -47
  14. subsurface/core/reader_helpers/reader_unstruct.py +11 -11
  15. subsurface/core/reader_helpers/readers_data.py +130 -130
  16. subsurface/core/reader_helpers/readers_wells.py +13 -13
  17. subsurface/core/structs/__init__.py +3 -3
  18. subsurface/core/structs/base_structures/__init__.py +2 -2
  19. subsurface/core/structs/base_structures/_liquid_earth_mesh.py +121 -121
  20. subsurface/core/structs/base_structures/_unstructured_data_constructor.py +70 -70
  21. subsurface/core/structs/base_structures/base_structures_enum.py +6 -6
  22. subsurface/core/structs/base_structures/structured_data.py +282 -282
  23. subsurface/core/structs/base_structures/unstructured_data.py +319 -319
  24. subsurface/core/structs/structured_elements/octree_mesh.py +10 -10
  25. subsurface/core/structs/structured_elements/structured_grid.py +59 -59
  26. subsurface/core/structs/structured_elements/structured_mesh.py +9 -9
  27. subsurface/core/structs/unstructured_elements/__init__.py +3 -3
  28. subsurface/core/structs/unstructured_elements/line_set.py +72 -72
  29. subsurface/core/structs/unstructured_elements/point_set.py +43 -43
  30. subsurface/core/structs/unstructured_elements/tetrahedron_mesh.py +35 -35
  31. subsurface/core/structs/unstructured_elements/triangular_surface.py +62 -62
  32. subsurface/core/utils/utils_core.py +38 -38
  33. subsurface/modules/reader/__init__.py +13 -13
  34. subsurface/modules/reader/faults/faults.py +80 -80
  35. subsurface/modules/reader/from_binary.py +46 -46
  36. subsurface/modules/reader/mesh/_GOCAD_mesh.py +82 -82
  37. subsurface/modules/reader/mesh/_trimesh_reader.py +447 -447
  38. subsurface/modules/reader/mesh/csv_mesh_reader.py +53 -53
  39. subsurface/modules/reader/mesh/dxf_reader.py +177 -177
  40. subsurface/modules/reader/mesh/glb_reader.py +30 -30
  41. subsurface/modules/reader/mesh/mx_reader.py +232 -232
  42. subsurface/modules/reader/mesh/obj_reader.py +53 -53
  43. subsurface/modules/reader/mesh/omf_mesh_reader.py +43 -43
  44. subsurface/modules/reader/mesh/surface_reader.py +56 -56
  45. subsurface/modules/reader/mesh/surfaces_api.py +41 -41
  46. subsurface/modules/reader/profiles/__init__.py +3 -3
  47. subsurface/modules/reader/profiles/profiles_core.py +197 -197
  48. subsurface/modules/reader/read_netcdf.py +38 -38
  49. subsurface/modules/reader/topography/__init__.py +7 -7
  50. subsurface/modules/reader/topography/topo_core.py +100 -100
  51. subsurface/modules/reader/volume/read_grav3d.py +478 -428
  52. subsurface/modules/reader/volume/read_volume.py +327 -230
  53. subsurface/modules/reader/volume/segy_reader.py +105 -105
  54. subsurface/modules/reader/volume/seismic.py +173 -173
  55. subsurface/modules/reader/volume/volume_utils.py +43 -43
  56. subsurface/modules/reader/wells/DEP/__init__.py +43 -43
  57. subsurface/modules/reader/wells/DEP/_well_files_reader.py +167 -167
  58. subsurface/modules/reader/wells/DEP/_wells_api.py +61 -61
  59. subsurface/modules/reader/wells/DEP/_welly_reader.py +180 -180
  60. subsurface/modules/reader/wells/DEP/pandas_to_welly.py +212 -212
  61. subsurface/modules/reader/wells/_read_to_df.py +57 -57
  62. subsurface/modules/reader/wells/read_borehole_interface.py +148 -148
  63. subsurface/modules/reader/wells/wells_utils.py +68 -68
  64. subsurface/modules/tools/mocking_aux.py +104 -104
  65. subsurface/modules/visualization/__init__.py +2 -2
  66. subsurface/modules/visualization/to_pyvista.py +320 -320
  67. subsurface/modules/writer/to_binary.py +12 -12
  68. subsurface/modules/writer/to_rex/common.py +78 -78
  69. subsurface/modules/writer/to_rex/data_struct.py +74 -74
  70. subsurface/modules/writer/to_rex/gempy_to_rexfile.py +791 -791
  71. subsurface/modules/writer/to_rex/material_encoder.py +44 -44
  72. subsurface/modules/writer/to_rex/mesh_encoder.py +152 -152
  73. subsurface/modules/writer/to_rex/to_rex.py +115 -115
  74. subsurface/modules/writer/to_rex/utils.py +15 -15
  75. subsurface/optional_requirements.py +116 -116
  76. {subsurface_terra-2025.1.0rc14.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/METADATA +194 -194
  77. subsurface_terra-2025.1.0rc16.dist-info/RECORD +98 -0
  78. {subsurface_terra-2025.1.0rc14.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/WHEEL +1 -1
  79. {subsurface_terra-2025.1.0rc14.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/licenses/LICENSE +203 -203
  80. subsurface_terra-2025.1.0rc14.dist-info/RECORD +0 -96
  81. {subsurface_terra-2025.1.0rc14.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/top_level.txt +0 -0
subsurface/core/geological_formats/boreholes/survey.py
@@ -1,380 +1,86 @@
-import warnings
-
-from typing import Union, Hashable, Optional
-
-import pandas as pd
-from dataclasses import dataclass
-import numpy as np
-import xarray as xr
-
-from subsurface import optional_requirements
-from ...structs.unstructured_elements import LineSet
-from ...structs.base_structures import UnstructuredData
-
-NUMBER_NODES = 30
-RADIUS = 10
-
-
-@dataclass
-class Survey:
-    ids: list[str]
-    survey_trajectory: LineSet
-    well_id_mapper: dict[str, int] = None  #: This is following the order of the survey csv that can be different that the collars
-
-    @property
-    def id_to_well_id(self):
-        # Reverse the well_id_mapper dictionary to map IDs to well names
-        id_to_well_name_mapper = {v: k for k, v in self.well_id_mapper.items()}
-        return id_to_well_name_mapper
-
-    @classmethod
-    def from_df(cls, survey_df: 'pd.DataFrame', attr_df: Optional['pd.DataFrame'] = None, number_nodes: Optional[int] = NUMBER_NODES,
-                duplicate_attr_depths: bool = False) -> 'Survey':
-        """
-        Create a Survey object from two DataFrames containing survey and attribute data.
-
-        :param survey_df: DataFrame containing survey data.
-        :param attr_df: DataFrame containing attribute data. This is used to make sure the raw data is perfectly aligned.
-        :param number_nodes: Optional parameter specifying the number of nodes.
-        :return: A Survey object representing the input data.
-
-        """
-        trajectories: UnstructuredData = _data_frame_to_unstructured_data(
-            survey_df=_correct_angles(survey_df),
-            attr_df=attr_df,
-            number_nodes=number_nodes,
-            duplicate_attr_depths=duplicate_attr_depths
-        )
-        # Grab the unique ids
-        unique_ids = trajectories.points_attributes["well_id"].unique()
-
-        return cls(
-            ids=unique_ids,
-            survey_trajectory=LineSet(data=trajectories, radius=RADIUS),
-            well_id_mapper=trajectories.data.attrs["well_id_mapper"]
-        )
-
-    def get_well_string_id(self, well_id: int) -> str:
-        return self.ids[well_id]
-
-    def get_well_num_id(self, well_string_id: Union[str, Hashable]) -> int:
-        return self.well_id_mapper.get(well_string_id, None)
-
-    def update_survey_with_lith(self, lith: pd.DataFrame):
-        unstruct: UnstructuredData = _combine_survey_and_attrs(lith, self)
-        self.survey_trajectory.data = unstruct
-
-    def update_survey_with_attr(self, attrs: pd.DataFrame):
-        self.survey_trajectory.data = _combine_survey_and_attrs(attrs, self)
-
-
-def _combine_survey_and_attr(lith: pd.DataFrame, survey: Survey) -> UnstructuredData:
-    pass
-
-
-def _combine_survey_and_attrs(attrs: pd.DataFrame, survey: Survey) -> UnstructuredData:
-    # Import moved to top for clarity and possibly avoiding repeated imports if called multiple times
-    from ...structs.base_structures._unstructured_data_constructor import raw_attributes_to_dict_data_arrays
-
-    # Accessing trajectory data more succinctly
-    trajectory: xr.DataArray = survey.survey_trajectory.data.data["vertex_attrs"]
-    # Ensure all columns in lith exist in new_attrs, if not, add them as NaN
-
-    new_attrs = _map_attrs_to_measured_depths(attrs, survey)
-
-    # Construct the final xarray dict without intermediate variable
-    points_attributes_xarray_dict = raw_attributes_to_dict_data_arrays(
-        default_attributes_name="vertex_attrs",
-        n_items=trajectory.shape[0],  # TODO: Can I look this on new_attrs to remove line 11?
-        dims=["points", "vertex_attr"],
-        raw_attributes=new_attrs
-    )
-
-    # Inline construction of UnstructuredData
-    return UnstructuredData.from_data_arrays_dict(
-        xarray_dict={
-            "vertex"      : survey.survey_trajectory.data.data["vertex"],
-            "cells"       : survey.survey_trajectory.data.data["cells"],
-            "vertex_attrs": points_attributes_xarray_dict["vertex_attrs"],
-            "cell_attrs"  : survey.survey_trajectory.data.data["cell_attrs"]
-        },
-        xarray_attributes=survey.survey_trajectory.data.data.attrs,
-        default_cells_attributes_name=survey.survey_trajectory.data.cells_attr_name,
-        default_points_attributes_name=survey.survey_trajectory.data.vertex_attr_name
-    )
-
-
-def _map_attrs_to_measured_depths(attrs: pd.DataFrame, survey: Survey) -> pd.DataFrame:
-    trajectory: xr.DataArray = survey.survey_trajectory.data.data["vertex_attrs"]
-    trajectory_well_id: xr.DataArray = trajectory.sel({'vertex_attr': 'well_id'})
-    measured_depths: np.ndarray = trajectory.sel({'vertex_attr': 'measured_depths'}).values.astype(np.float64)
-
-    # Start with a copy of the existing attributes DataFrame
-    new_attrs = survey.survey_trajectory.data.points_attributes.copy()
-    if 'component lith' in attrs.columns and 'lith_ids' not in attrs.columns:
-        # Factorize lith components directly in-place
-        attrs['lith_ids'], _ = pd.factorize(attrs['component lith'], use_na_sentinel=True)
-    else:
-        pass
-
-    # Add missing columns from attrs, preserving their dtypes
-    for col in attrs.columns.difference(new_attrs.columns):
-        new_attrs[col] = np.nan if pd.api.types.is_numeric_dtype(attrs[col]) else None
-
-    # Align well IDs between attrs and trajectory, perform interpolation, and map the attributes
-    # Loop dict
-    for survey_well_name in survey.well_id_mapper:
-        # Select rows corresponding to the current well ID
-
-        # use the well_id to get all the elements of attrs that have the well_id as index
-        if survey_well_name in attrs.index:
-            attrs_well = attrs.loc[[survey_well_name]]
-            # Proceed with processing attrs_well
-        else:
-            print(f"Well '{survey_well_name}' does not exist in the attributes DataFrame.")
-            continue
-
-        survey_well_id = survey.get_well_num_id(survey_well_name)
-        trajectory_well_mask = (trajectory_well_id == survey_well_id).values
-
-        # Apply mask to measured depths for the current well
-        well_measured_depths = measured_depths[trajectory_well_mask]
-
-        if "base" not in attrs_well.columns:
-            raise ValueError(f"Base column must be present in the file for well '{survey_well_name}'.")
-        elif "top" not in attrs_well.columns:
-            location_values_to_interpolate = attrs_well['base']
-        else:
-            location_values_to_interpolate = (attrs_well['top'] + attrs_well['base']) / 2
-
-        # Interpolation for each attribute column
-        for col in attrs_well.columns:
-            # Interpolate the attribute values based on the measured depths
-            if col in ['top', 'base', 'well_id']:
-                continue
-            attr_to_interpolate = attrs_well[col]
-            # make sure the attr_to_interpolate is not a string
-            if attr_to_interpolate.dtype == 'O' or isinstance(attr_to_interpolate.dtype, pd.CategoricalDtype):
-                continue
-            if col in ['lith_ids', 'component lith']:
-                interp_kind = 'nearest'
-            else:
-                interp_kind = 'linear'
-
-            from scipy.interpolate import interp1d
-            interp_func = interp1d(
-                x=location_values_to_interpolate,
-                y=attr_to_interpolate,
-                bounds_error=False,
-                fill_value=np.nan,
-                kind=interp_kind
-            )
-
-            # Assign the interpolated values to the new_attrs DataFrame
-            vals = interp_func(well_measured_depths)
-            new_attrs.loc[trajectory_well_mask, col] = vals
-
-    return new_attrs
-
-
-def _map_attrs_to_measured_depths_(attrs: pd.DataFrame, new_attrs: pd.DataFrame, survey: Survey):
-    warnings.warn("This function is obsolete. Use _map_attrs_to_measured_depths instead.", DeprecationWarning)
-
-    trajectory: xr.DataArray = survey.survey_trajectory.data.data["vertex_attrs"]
-    well_ids: xr.DataArray = trajectory.sel({'vertex_attr': 'well_id'})
-    measured_depths: xr.DataArray = trajectory.sel({'vertex_attr': 'measured_depths'})
-
-    new_columns = attrs.columns.difference(new_attrs.columns)
-    new_attrs = pd.concat([new_attrs, pd.DataFrame(columns=new_columns)], axis=1)
-    for index, row in attrs.iterrows():
-        well_id = survey.get_well_num_id(index)
-        if well_id is None:
-            print(f'Well ID {index} not found in survey trajectory. Skipping lithology assignment.')
-
-        well_id_mask = well_ids == well_id
-
-        # TODO: Here we are going to need to interpolate
-
-        spatial_mask = ((measured_depths <= row['top']) & (measured_depths >= row['base']))
-        mask = well_id_mask & spatial_mask
-
-        new_attrs.loc[mask.values, attrs.columns] = row.values
-    return new_attrs
-
-
-def _correct_angles(df: pd.DataFrame) -> pd.DataFrame:
-    def correct_inclination(inc: float) -> float:
-        if inc < 0:
-            inc = inc % 360  # Normalize to 0-360 range first if negative
-        if 0 <= inc <= 180:
-            # add or subtract a very small number to make sure that 0 or 180 are never possible
-            return inc + 1e-10 if inc == 0 else inc - 1e-10
-        elif 180 < inc < 360:
-            return 360 - inc  # Reflect angles greater than 180 back into the 0-180 range
-        else:
-            raise ValueError(f'Inclination value {inc} is out of the expected range of 0 to 360 degrees')
-
-    def correct_azimuth(azi: float) -> float:
-        return azi % 360  # Normalize azimuth to 0-360 range
-
-    df['inc'] = df['inc'].apply(correct_inclination)
-    df['azi'] = df['azi'].apply(correct_azimuth)
-
-    return df
-
-
-def _data_frame_to_unstructured_data(survey_df: 'pd.DataFrame', number_nodes: int, attr_df: Optional['pd.DataFrame'] = None,
-                                     duplicate_attr_depths: bool = False) -> UnstructuredData:
-
-    wp = optional_requirements.require_wellpathpy()
-
-    cum_vertex: np.ndarray = np.empty((0, 3), dtype=np.float32)
-    cells: np.ndarray = np.empty((0, 2), dtype=np.int_)
-    cell_attr: pd.DataFrame = pd.DataFrame(columns=['well_id'], dtype=np.float32)
-    vertex_attr: pd.DataFrame = pd.DataFrame()
-
-    for e, (borehole_id, data) in enumerate(survey_df.groupby(level=0)):
-        dev = wp.deviation(
-            md=data['md'].values,
-            inc=data['inc'].values,
-            azi=data['azi'].values
-        )
-
-        md_min = dev.md.min()
-        md_max = dev.md.max()
-
-        attr_depths = _grab_depths_from_attr(
-            attr_df=attr_df,
-            borehole_id=borehole_id,
-            duplicate_attr_depths=duplicate_attr_depths,
-            md_max=md_max,
-            md_min=md_min
-        )
-
-        # Now combine attr_depths with depths
-        md_min = dev.md.min()
-        md_max = dev.md.max()
-        depths = np.linspace(md_min, md_max, number_nodes)
-        depths = np.union1d(depths, attr_depths)
-        depths.sort()
-
-        # Resample positions at depths
-        pos = dev.minimum_curvature().resample(depths=depths)
-        vertex_count = cum_vertex.shape[0]
-
-        this_well_vertex = np.vstack([pos.easting, pos.northing, pos.depth]).T
-        cum_vertex = np.vstack([cum_vertex, this_well_vertex])
-        measured_depths = _calculate_distances(array_of_vertices=this_well_vertex)
-
-        n_vertex_shift_0 = np.arange(0, len(pos.depth) - 1, dtype=np.int_)
-        n_vertex_shift_1 = np.arange(1, len(pos.depth), dtype=np.int_)
-        cell_per_well = np.vstack([n_vertex_shift_0, n_vertex_shift_1]).T + vertex_count
-        cells = np.vstack([cells, cell_per_well])
-
-        attribute_values = np.isin(depths, attr_depths)
-
-        vertex_attr_per_well = pd.DataFrame({
-            'well_id'        : [e] * len(pos.depth),
-            'measured_depths': measured_depths,
-            'is_attr_point'  : attribute_values,
-        })
-
-        vertex_attr = pd.concat([vertex_attr, vertex_attr_per_well], ignore_index=True)
-
-        # Add the id (e), to cell_attr
-        cell_attr = pd.concat([cell_attr, pd.DataFrame({'well_id': [e] * len(cell_per_well)})], ignore_index=True)
-
-    unstruct = UnstructuredData.from_array(
-        vertex=cum_vertex,
-        cells=cells.astype(int),
-        vertex_attr=vertex_attr.reset_index(drop=True),
-        cells_attr=cell_attr.reset_index(drop=True)
-    )
-
-    unstruct.data.attrs["well_id_mapper"] = {well_id: e for e, well_id in enumerate(survey_df.index.unique(level=0))}
-
-    return unstruct
-
-
-def _grab_depths_from_attr(
-        attr_df: pd.DataFrame,
-        borehole_id: Hashable,
-        duplicate_attr_depths: bool,
-        md_max: float,
-        md_min: float
-) -> np.ndarray:
-    # Initialize attr_depths and attr_labels as empty arrays
-    attr_depths = np.array([], dtype=float)
-    attr_labels = np.array([], dtype='<U4')  # Initialize labels for 'top' and 'base'
-
-    if attr_df is None or ("top" not in attr_df.columns and "base" not in attr_df.columns):
-        return attr_depths
-
-    try:
-        vals = attr_df.loc[borehole_id]
-
-        tops = np.array([], dtype=float)
-        bases = np.array([], dtype=float)
-
-        if 'top' in vals:
-            if isinstance(vals, pd.DataFrame):
-                tops = vals['top'].values.flatten()
-            else:
-                tops = np.array([vals['top']])
-            # Convert to float and remove NaNs
-            tops = tops.astype(float)
-            tops = tops[~np.isnan(tops)]
-            # Clip to within md range
-            tops = tops[(tops >= md_min) & (tops <= md_max)]
-
-        if 'base' in vals:
-            if isinstance(vals, pd.DataFrame):
-                bases = vals['base'].values.flatten()
-            else:
-                bases = np.array([vals['base']])
-            # Convert to float and remove NaNs
-            bases = bases.astype(float)
-            bases = bases[~np.isnan(bases)]
-            # Clip to within md range
-            bases = bases[(bases >= md_min) & (bases <= md_max)]
-
-        # Combine tops and bases into attr_depths with labels
-        attr_depths = np.concatenate((tops, bases))
-        attr_labels = np.array(['top'] * len(tops) + ['base'] * len(bases))
-
-        # Drop duplicates while preserving order
-        _, unique_indices = np.unique(attr_depths, return_index=True)
-        attr_depths = attr_depths[unique_indices]
-        attr_labels = attr_labels[unique_indices]
-
-    except KeyError:
-        # No attributes for this borehole_id or missing columns
-        attr_depths = np.array([], dtype=float)
-        attr_labels = np.array([], dtype='<U4')
-
-    # If duplicate_attr_depths is True, duplicate attr_depths with a tiny offset
-    if duplicate_attr_depths and len(attr_depths) > 0:
-        tiny_offset = (md_max - md_min) * 1e-6  # A tiny fraction of the depth range
-        # Create offsets: +tiny_offset for 'top', -tiny_offset for 'base'
-        offsets = np.where(attr_labels == 'top', tiny_offset, -tiny_offset)
-        duplicated_attr_depths = attr_depths + offsets
-        # Ensure the duplicated depths are within the md range
-        valid_indices = (duplicated_attr_depths >= md_min) & (duplicated_attr_depths <= md_max)
-        duplicated_attr_depths = duplicated_attr_depths[valid_indices]
-        # Original attribute depths
-        original_attr_depths = attr_depths
-        # Combine originals and duplicates
-        attr_depths = np.hstack([original_attr_depths, duplicated_attr_depths])
-
-    return attr_depths
-
-
-def _calculate_distances(array_of_vertices: np.ndarray) -> np.ndarray:
-    # Calculate the differences between consecutive points
-    differences = np.diff(array_of_vertices, axis=0)
-
-    # Calculate the Euclidean distance for each pair of consecutive points
-    distances = np.linalg.norm(differences, axis=1)
-    # Insert a 0 at the beginning to represent the starting point at the surface
-    measured_depths = np.insert(np.cumsum(distances), 0, 0)
-    return measured_depths
+from dataclasses import dataclass
+from typing import Union, Hashable, Optional
+
+import pandas as pd
+
+from ._map_attrs_to_survey import combine_survey_and_attrs
+from ._survey_to_unstruct import data_frame_to_unstructured_data
+from ...structs.base_structures import UnstructuredData
+from ...structs.unstructured_elements import LineSet
+
+NUMBER_NODES = 30
+RADIUS = 10
+
+
+@dataclass
+class Survey:
+    ids: list[str]
+    survey_trajectory: LineSet
+    well_id_mapper: dict[str, int] = None  #: This is following the order of the survey csv that can be different that the collars
+
+    @property
+    def id_to_well_id(self):
+        # Reverse the well_id_mapper dictionary to map IDs to well names
+        id_to_well_name_mapper = {v: k for k, v in self.well_id_mapper.items()}
+        return id_to_well_name_mapper
+
+    @classmethod
+    def from_df(cls, survey_df: 'pd.DataFrame', attr_df: Optional['pd.DataFrame'] = None, number_nodes: Optional[int] = NUMBER_NODES,
+                duplicate_attr_depths: bool = False) -> 'Survey':
+        """
+        Create a Survey object from two DataFrames containing survey and attribute data.
+
+        :param survey_df: DataFrame containing survey data.
+        :param attr_df: DataFrame containing attribute data. This is used to make sure the raw data is perfectly aligned.
+        :param number_nodes: Optional parameter specifying the number of nodes.
+        :return: A Survey object representing the input data.
+
+        """
+        trajectories: UnstructuredData = data_frame_to_unstructured_data(
+            survey_df=_correct_angles(survey_df),
+            attr_df=attr_df,
+            number_nodes=number_nodes,
+            duplicate_attr_depths=duplicate_attr_depths
+        )
+        # Grab the unique ids
+        unique_ids = trajectories.points_attributes["well_id"].unique()
+
+        return cls(
+            ids=unique_ids,
+            survey_trajectory=LineSet(data=trajectories, radius=RADIUS),
+            well_id_mapper=trajectories.data.attrs["well_id_mapper"]
+        )
+
+    def get_well_string_id(self, well_id: int) -> str:
+        return self.ids[well_id]
+
+    def get_well_num_id(self, well_string_id: Union[str, Hashable]) -> int:
+        return self.well_id_mapper.get(well_string_id, None)
+
+    def update_survey_with_lith(self, lith: pd.DataFrame):
+        unstruct: UnstructuredData = combine_survey_and_attrs(lith, self.survey_trajectory, self.well_id_mapper)
+        self.survey_trajectory.data = unstruct
+
+    def update_survey_with_attr(self, attrs: pd.DataFrame):
+        self.survey_trajectory.data = combine_survey_and_attrs(attrs, self.survey_trajectory, self.well_id_mapper)
+
+
+def _correct_angles(df: pd.DataFrame) -> pd.DataFrame:
+    def correct_inclination(inc: float) -> float:
+        if inc < 0:
+            inc = inc % 360  # Normalize to 0-360 range first if negative
+        if 0 <= inc <= 180:
+            # add or subtract a very small number to make sure that 0 or 180 are never possible
+            return inc + 1e-10 if inc == 0 else inc - 1e-10
+        elif 180 < inc < 360:
+            return 360 - inc  # Reflect angles greater than 180 back into the 0-180 range
+        else:
+            raise ValueError(f'Inclination value {inc} is out of the expected range of 0 to 360 degrees')
+
+    def correct_azimuth(azi: float) -> float:
+        return azi % 360  # Normalize azimuth to 0-360 range
+
+    df['inc'] = df['inc'].apply(correct_inclination)
+    df['azi'] = df['azi'].apply(correct_azimuth)
+
+    return df
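
The survey.py rewrite above is a straight module split: the attribute-mapping helpers (_combine_survey_and_attrs, _map_attrs_to_measured_depths and the obsolete _map_attrs_to_measured_depths_) move into the new _map_attrs_to_survey.py as combine_survey_and_attrs, and the trajectory construction (_data_frame_to_unstructured_data, _grab_depths_from_attr, _calculate_distances) moves into the new _survey_to_unstruct.py as data_frame_to_unstructured_data (files 8 and 9 in the list above). The public Survey API is unchanged; the extracted helper now receives the trajectory LineSet and the well_id_mapper explicitly instead of the whole Survey. A minimal usage sketch, assuming a survey table indexed by well name with md/inc/azi columns (the column names and call signatures come from the diffed code; the well name and data values are invented):

    import pandas as pd

    from subsurface.core.geological_formats.boreholes.survey import Survey

    # Deviation survey for one well: measured depth, inclination, azimuth.
    survey_df = pd.DataFrame(
        {
            "md":  [0.0, 150.0, 300.0],
            "inc": [0.0, 45.0, 88.0],    # _correct_angles nudges 0/180 off the poles
            "azi": [10.0, 370.0, -5.0],  # _correct_angles normalizes these into 0-360
        },
        index=pd.Index(["well_a"] * 3, name="well_name"),
    )

    # Interval attributes keyed by the same well index; a 'base' column is mandatory.
    attr_df = pd.DataFrame(
        {"top": [0.0, 100.0], "base": [100.0, 300.0], "component lith": ["sand", "shale"]},
        index=pd.Index(["well_a"] * 2, name="well_name"),
    )

    survey = Survey.from_df(survey_df, attr_df=attr_df, duplicate_attr_depths=True)
    survey.update_survey_with_attr(attr_df)  # now delegates to combine_survey_and_attrs

Passing duplicate_attr_depths=True makes the trajectory builder insert a slightly offset twin node at every top/base depth (see _grab_depths_from_attr above), so nearest-neighbour interpolation of lith_ids keeps contacts sharp instead of smearing them across a segment.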
subsurface/core/geological_formats/fault.py
@@ -1,48 +1,48 @@
-import pandas as pd
-
-from subsurface import optional_requirements
-
-
-class FaultSticks:
-    def __init__(self, df: pd.DataFrame):
-        self.df = df
-
-        self.pointcloud = None
-        self.sticks = None
-
-    def __getattr__(self, attr):
-        if attr in self.__dict__:
-            return getattr(self, attr)
-        return getattr(self.df, attr)
-
-    def __getitem__(self, item):
-        return self.df[item]
-
-    def plot(self, notebook=False, color="black"):
-        if not self.pointcloud:
-            self._make_pointcloud()
-            self._make_sticks()
-
-        pv = optional_requirements.require_pyvista()
-        p = pv.Plotter(notebook=notebook)
-        p.add_mesh(self.pointcloud, color=color)
-        for stick in self.sticks:
-            p.add_mesh(stick, color=color)
-        p.show()
-
-    def _make_pointcloud(self):
-        pv = optional_requirements.require_pyvista()
-        self.pointcloud = pv.PolyData(self.df[["X", "Y", "Z"]].values)
-
-    def _make_sticks(self):
-        pv = optional_requirements.require_pyvista()
-        lines = []
-        for stick, indices in self.df.groupby("stick id").groups.items():
-            stickdf = self.df.loc[indices]
-            for (r1, row1), (r2, row2) in zip(stickdf[:-1].iterrows(), stickdf[1:].iterrows()):
-                line = pv.Line(
-                    pointa=(row1.X, row1.Y, row1.Z),
-                    pointb=(row2.X, row2.Y, row2.Z),
-                )
-                lines.append(line)
+import pandas as pd
+
+from subsurface import optional_requirements
+
+
+class FaultSticks:
+    def __init__(self, df: pd.DataFrame):
+        self.df = df
+
+        self.pointcloud = None
+        self.sticks = None
+
+    def __getattr__(self, attr):
+        if attr in self.__dict__:
+            return getattr(self, attr)
+        return getattr(self.df, attr)
+
+    def __getitem__(self, item):
+        return self.df[item]
+
+    def plot(self, notebook=False, color="black"):
+        if not self.pointcloud:
+            self._make_pointcloud()
+            self._make_sticks()
+
+        pv = optional_requirements.require_pyvista()
+        p = pv.Plotter(notebook=notebook)
+        p.add_mesh(self.pointcloud, color=color)
+        for stick in self.sticks:
+            p.add_mesh(stick, color=color)
+        p.show()
+
+    def _make_pointcloud(self):
+        pv = optional_requirements.require_pyvista()
+        self.pointcloud = pv.PolyData(self.df[["X", "Y", "Z"]].values)
+
+    def _make_sticks(self):
+        pv = optional_requirements.require_pyvista()
+        lines = []
+        for stick, indices in self.df.groupby("stick id").groups.items():
+            stickdf = self.df.loc[indices]
+            for (r1, row1), (r2, row2) in zip(stickdf[:-1].iterrows(), stickdf[1:].iterrows()):
+                line = pv.Line(
+                    pointa=(row1.X, row1.Y, row1.Z),
+                    pointb=(row2.X, row2.Y, row2.Z),
+                )
+                lines.append(line)
         self.sticks = lines
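
The removed and added blocks of this fault.py hunk are textually identical, so the recorded change is presumably whitespace or line-ending normalization rather than logic. For orientation: FaultSticks wraps a DataFrame (unknown attribute access falls through to the frame via __getattr__) and renders each stick as PyVista line segments. A hedged input sketch, using the X/Y/Z and "stick id" columns the class actually reads (the coordinates are invented; plotting needs the optional pyvista dependency):

    import pandas as pd

    from subsurface.core.geological_formats.fault import FaultSticks

    # Two fault sticks with two points each; column names match _make_pointcloud/_make_sticks.
    df = pd.DataFrame(
        {
            "X": [0.0, 0.0, 10.0, 10.0],
            "Y": [0.0, 5.0, 0.0, 5.0],
            "Z": [-10.0, -20.0, -12.0, -22.0],
            "stick id": [0, 0, 1, 1],
        }
    )

    fs = FaultSticks(df)
    print(fs.shape)  # (4, 4): attribute access is delegated to the wrapped DataFrame
    fs.plot()        # builds the point cloud and stick lines, then opens a PyVista window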
subsurface/core/reader_helpers/reader_unstruct.py
@@ -1,11 +1,11 @@
-from dataclasses import dataclass
-
-from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper
-
-
-@dataclass
-class ReaderUnstructuredHelper:
-    reader_vertex_args: GenericReaderFilesHelper
-    reader_cells_args: GenericReaderFilesHelper = None
-    reader_vertex_attr_args: GenericReaderFilesHelper = None
-    reader_cells_attr_args: GenericReaderFilesHelper = None
+from dataclasses import dataclass
+
+from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper
+
+
+@dataclass
+class ReaderUnstructuredHelper:
+    reader_vertex_args: GenericReaderFilesHelper
+    reader_cells_args: GenericReaderFilesHelper = None
+    reader_vertex_attr_args: GenericReaderFilesHelper = None
+    reader_cells_attr_args: GenericReaderFilesHelper = None
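
reader_unstruct.py is likewise content-identical on both sides of the hunk. ReaderUnstructuredHelper simply bundles up to four GenericReaderFilesHelper configurations, one per tabular source (vertices, cells, vertex attributes, cell attributes); only the vertex reader is required. A sketch under the assumption that GenericReaderFilesHelper (defined in readers_data.py, file 15 above) accepts the file path or buffer as its first argument; the paths are invented:

    from subsurface.core.reader_helpers.reader_unstruct import ReaderUnstructuredHelper
    from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper

    reader_helper = ReaderUnstructuredHelper(
        reader_vertex_args=GenericReaderFilesHelper("vertices.csv"),
        reader_cells_args=GenericReaderFilesHelper("cells.csv"),
        # the two attribute readers keep their None defaults when the mesh carries no attributes
    )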