subsurface-terra 2025.1.0rc15__py3-none-any.whl → 2025.1.0rc16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. subsurface/__init__.py +31 -31
  2. subsurface/_version.py +34 -21
  3. subsurface/api/__init__.py +13 -13
  4. subsurface/api/interfaces/__init__.py +3 -3
  5. subsurface/api/interfaces/stream.py +136 -136
  6. subsurface/api/reader/read_wells.py +78 -78
  7. subsurface/core/geological_formats/boreholes/_combine_trajectories.py +117 -117
  8. subsurface/core/geological_formats/boreholes/_map_attrs_to_survey.py +236 -234
  9. subsurface/core/geological_formats/boreholes/_survey_to_unstruct.py +163 -163
  10. subsurface/core/geological_formats/boreholes/boreholes.py +140 -140
  11. subsurface/core/geological_formats/boreholes/collars.py +26 -26
  12. subsurface/core/geological_formats/boreholes/survey.py +86 -86
  13. subsurface/core/geological_formats/fault.py +47 -47
  14. subsurface/core/reader_helpers/reader_unstruct.py +11 -11
  15. subsurface/core/reader_helpers/readers_data.py +130 -130
  16. subsurface/core/reader_helpers/readers_wells.py +13 -13
  17. subsurface/core/structs/__init__.py +3 -3
  18. subsurface/core/structs/base_structures/__init__.py +2 -2
  19. subsurface/core/structs/base_structures/_liquid_earth_mesh.py +121 -121
  20. subsurface/core/structs/base_structures/_unstructured_data_constructor.py +70 -70
  21. subsurface/core/structs/base_structures/base_structures_enum.py +6 -6
  22. subsurface/core/structs/base_structures/structured_data.py +282 -282
  23. subsurface/core/structs/base_structures/unstructured_data.py +319 -319
  24. subsurface/core/structs/structured_elements/octree_mesh.py +10 -10
  25. subsurface/core/structs/structured_elements/structured_grid.py +59 -59
  26. subsurface/core/structs/structured_elements/structured_mesh.py +9 -9
  27. subsurface/core/structs/unstructured_elements/__init__.py +3 -3
  28. subsurface/core/structs/unstructured_elements/line_set.py +72 -72
  29. subsurface/core/structs/unstructured_elements/point_set.py +43 -43
  30. subsurface/core/structs/unstructured_elements/tetrahedron_mesh.py +35 -35
  31. subsurface/core/structs/unstructured_elements/triangular_surface.py +62 -62
  32. subsurface/core/utils/utils_core.py +38 -38
  33. subsurface/modules/reader/__init__.py +13 -13
  34. subsurface/modules/reader/faults/faults.py +80 -80
  35. subsurface/modules/reader/from_binary.py +46 -46
  36. subsurface/modules/reader/mesh/_GOCAD_mesh.py +82 -82
  37. subsurface/modules/reader/mesh/_trimesh_reader.py +447 -447
  38. subsurface/modules/reader/mesh/csv_mesh_reader.py +53 -53
  39. subsurface/modules/reader/mesh/dxf_reader.py +177 -177
  40. subsurface/modules/reader/mesh/glb_reader.py +30 -30
  41. subsurface/modules/reader/mesh/mx_reader.py +232 -232
  42. subsurface/modules/reader/mesh/obj_reader.py +53 -53
  43. subsurface/modules/reader/mesh/omf_mesh_reader.py +43 -43
  44. subsurface/modules/reader/mesh/surface_reader.py +56 -56
  45. subsurface/modules/reader/mesh/surfaces_api.py +41 -41
  46. subsurface/modules/reader/profiles/__init__.py +3 -3
  47. subsurface/modules/reader/profiles/profiles_core.py +197 -197
  48. subsurface/modules/reader/read_netcdf.py +38 -38
  49. subsurface/modules/reader/topography/__init__.py +7 -7
  50. subsurface/modules/reader/topography/topo_core.py +100 -100
  51. subsurface/modules/reader/volume/read_grav3d.py +478 -428
  52. subsurface/modules/reader/volume/read_volume.py +327 -230
  53. subsurface/modules/reader/volume/segy_reader.py +105 -105
  54. subsurface/modules/reader/volume/seismic.py +173 -173
  55. subsurface/modules/reader/volume/volume_utils.py +43 -43
  56. subsurface/modules/reader/wells/DEP/__init__.py +43 -43
  57. subsurface/modules/reader/wells/DEP/_well_files_reader.py +167 -167
  58. subsurface/modules/reader/wells/DEP/_wells_api.py +61 -61
  59. subsurface/modules/reader/wells/DEP/_welly_reader.py +180 -180
  60. subsurface/modules/reader/wells/DEP/pandas_to_welly.py +212 -212
  61. subsurface/modules/reader/wells/_read_to_df.py +57 -57
  62. subsurface/modules/reader/wells/read_borehole_interface.py +148 -148
  63. subsurface/modules/reader/wells/wells_utils.py +68 -68
  64. subsurface/modules/tools/mocking_aux.py +104 -104
  65. subsurface/modules/visualization/__init__.py +2 -2
  66. subsurface/modules/visualization/to_pyvista.py +320 -320
  67. subsurface/modules/writer/to_binary.py +12 -12
  68. subsurface/modules/writer/to_rex/common.py +78 -78
  69. subsurface/modules/writer/to_rex/data_struct.py +74 -74
  70. subsurface/modules/writer/to_rex/gempy_to_rexfile.py +791 -791
  71. subsurface/modules/writer/to_rex/material_encoder.py +44 -44
  72. subsurface/modules/writer/to_rex/mesh_encoder.py +152 -152
  73. subsurface/modules/writer/to_rex/to_rex.py +115 -115
  74. subsurface/modules/writer/to_rex/utils.py +15 -15
  75. subsurface/optional_requirements.py +116 -116
  76. {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/METADATA +194 -194
  77. subsurface_terra-2025.1.0rc16.dist-info/RECORD +98 -0
  78. {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/WHEEL +1 -1
  79. {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/licenses/LICENSE +203 -203
  80. subsurface_terra-2025.1.0rc15.dist-info/RECORD +0 -98
  81. {subsurface_terra-2025.1.0rc15.dist-info → subsurface_terra-2025.1.0rc16.dist-info}/top_level.txt +0 -0
@@ -1,148 +1,148 @@
1
- import numpy as np
2
- import warnings
3
-
4
- from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper
5
- import pandas as pd
6
-
7
- from subsurface.modules.reader.wells._read_to_df import check_format_and_read_to_df
8
- from subsurface.modules.reader.wells.wells_utils import add_tops_from_base_and_altitude_in_place
9
-
10
-
11
- def read_collar(reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
12
- if reader_helper.index_col is False: reader_helper.index_col = 0
13
-
14
- # Check file_or_buffer type
15
- data_df: pd.DataFrame = check_format_and_read_to_df(reader_helper)
16
- _map_rows_and_cols_inplace(data_df, reader_helper)
17
-
18
- # Remove duplicates
19
- data_df = data_df[~data_df.index.duplicated(keep='first')]
20
-
21
- return data_df
22
-
23
-
24
- def read_survey(reader_helper: GenericReaderFilesHelper, validate_survey: bool = True) -> pd.DataFrame:
25
- if reader_helper.index_col is False: reader_helper.index_col = 0
26
-
27
- d = check_format_and_read_to_df(reader_helper)
28
- _map_rows_and_cols_inplace(d, reader_helper)
29
-
30
- if validate_survey:
31
- d_no_singles = _validate_survey_data(d)
32
- else:
33
- d_no_singles = d
34
-
35
- return d_no_singles
36
-
37
-
38
- def read_lith(reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
39
- return read_attributes(reader_helper, is_lith=True)
40
-
41
-
42
- def read_attributes(reader_helper: GenericReaderFilesHelper, is_lith: bool = False, validate_attr: bool = True) -> pd.DataFrame:
43
- if reader_helper.index_col is False:
44
- reader_helper.index_col = 0
45
-
46
- d = check_format_and_read_to_df(reader_helper)
47
-
48
- _map_rows_and_cols_inplace(d, reader_helper)
49
- if validate_attr is False:
50
- return d
51
-
52
- if is_lith:
53
- d = _validate_lith_data(d, reader_helper)
54
- else:
55
- _validate_attr_data(d)
56
- return d
57
-
58
-
59
- def _map_rows_and_cols_inplace(d: pd.DataFrame, reader_helper: GenericReaderFilesHelper):
60
- if reader_helper.index_map is not None:
61
- d.rename(reader_helper.index_map, axis="index", inplace=True) # d.index = d.index.map(reader_helper.index_map)
62
- if reader_helper.columns_map is not None:
63
- d.rename(reader_helper.columns_map, axis="columns", inplace=True)
64
-
65
-
66
- def _validate_survey_data(d):
67
- # Check for essential column 'md'
68
- if 'md' not in d.columns:
69
- raise AttributeError(
70
- 'md, inc, and azi columns must be present in the file. Use columns_map to assign column names to these fields.')
71
-
72
- # Check if 'dip' column exists and convert it to 'inc'
73
- if 'dip' in d.columns:
74
- # Convert dip to inclination (90 - dip)
75
- d['inc'] = 90 - d['dip']
76
- # Optionally, drop the 'dip' column if it's no longer needed
77
- d.drop(columns=['dip'], inplace=True)
78
-
79
- # Handle if inclination ('inc') or azimuth ('azi') columns are missing
80
- if not np.isin(['inc', 'azi'], d.columns).all():
81
- warnings.warn(
82
- 'inc and/or azi columns are not present in the file. The boreholes will be straight.')
83
- d['inc'] = 180
84
- d['azi'] = 0
85
-
86
- # Drop wells that contain only one value, ensuring that we keep rows only when there are duplicates
87
- d_no_singles = d[d.index.duplicated(keep=False)]
88
-
89
- return d_no_singles
90
-
91
-
92
- def _validate_attr_data(d):
93
- assert d.columns.isin(['base']).any(), ('base column must be present in the file. '
94
- 'Use columns_map to assign column names to these fields.')
95
-
96
-
97
- def _validate_lith_data(d: pd.DataFrame, reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
98
- # Check component lith in column
99
- if 'component lith' not in d.columns:
100
- raise AttributeError('If wells attributes represent lithology, `component lith` column must be present in the file. '
101
- 'Use columns_map to assign column names to these fields. Maybe you are marking as lithology'
102
- 'the wrong file?')
103
- else:
104
- # TODO: Add categories to reader helper
105
- categories = sorted(d['component lith'].dropna().unique())
106
- d['component lith'] = pd.Categorical(
107
- d['component lith'],
108
- categories=categories,
109
- ordered=True
110
- )
111
-
112
- d['lith_ids'] = d['component lith'].cat.codes + 1
113
-
114
- given_top = np.isin(['top', 'base'], d.columns).all()
115
- given_altitude_and_base = np.isin(['altitude', 'base'], d.columns).all()
116
- given_only_base = np.isin(['base'], d.columns).all()
117
- if given_altitude_and_base and not given_top:
118
- warnings.warn('top column is not present in the file. The tops will be calculated from the base and altitude')
119
- d = add_tops_from_base_and_altitude_in_place(
120
- data=d,
121
- col_well_name=reader_helper.index_col,
122
- col_base='base',
123
- col_altitude='altitude'
124
- )
125
- elif given_only_base and not given_top:
126
- warnings.warn('top column is not present in the file. The tops will be calculated from the base assuming altitude=0')
127
- # add a top column with 0 and call add_tops_from_base_and_altitude_in_place
128
- d['altitude'] = 0
129
- d = add_tops_from_base_and_altitude_in_place(
130
- data=d,
131
- col_well_name=reader_helper.index_col,
132
- col_base='base',
133
- col_altitude='altitude'
134
- )
135
-
136
-
137
- elif not given_top and not given_altitude_and_base:
138
- raise ValueError('top column or base and altitude columns must be present in the file. '
139
- 'Use columns_map to assign column names to these fields. Maybe you are marking as lithology'
140
- 'the wrong file?')
141
-
142
- lith_df = d[['top', 'base', 'component lith']]
143
-
144
- # * Make sure values are positive
145
- lith_df['top'] = np.abs(lith_df['top'])
146
- lith_df['base'] = np.abs(lith_df['base'])
147
-
148
- return lith_df
1
+ import numpy as np
2
+ import warnings
3
+
4
+ from subsurface.core.reader_helpers.readers_data import GenericReaderFilesHelper
5
+ import pandas as pd
6
+
7
+ from subsurface.modules.reader.wells._read_to_df import check_format_and_read_to_df
8
+ from subsurface.modules.reader.wells.wells_utils import add_tops_from_base_and_altitude_in_place
9
+
10
+
11
def read_collar(reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
    """Read a collar table into a DataFrame, keeping one row per well.

    Defaults the index column to 0 when none was configured, applies the
    helper's row/column renaming, and drops duplicated well indices
    (first occurrence wins).
    """
    if reader_helper.index_col is False:
        reader_helper.index_col = 0

    # Check file_or_buffer type
    collar_df: pd.DataFrame = check_format_and_read_to_df(reader_helper)
    _map_rows_and_cols_inplace(collar_df, reader_helper)

    # Keep only the first record of each well index.
    duplicated_mask = collar_df.index.duplicated(keep='first')
    return collar_df[~duplicated_mask]
22
+
23
+
24
def read_survey(reader_helper: GenericReaderFilesHelper, validate_survey: bool = True) -> pd.DataFrame:
    """Read a survey table into a DataFrame.

    When ``validate_survey`` is True the table is checked for the required
    columns and wells with a single record are dropped; otherwise the
    renamed table is returned unchanged.
    """
    if reader_helper.index_col is False:
        reader_helper.index_col = 0

    survey_df = check_format_and_read_to_df(reader_helper)
    _map_rows_and_cols_inplace(survey_df, reader_helper)

    if not validate_survey:
        return survey_df
    return _validate_survey_data(survey_df)
36
+
37
+
38
def read_lith(reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
    """Read a lithology table; thin wrapper over :func:`read_attributes`."""
    return read_attributes(reader_helper, is_lith=True)
40
+
41
+
42
def read_attributes(reader_helper: GenericReaderFilesHelper, is_lith: bool = False, validate_attr: bool = True) -> pd.DataFrame:
    """Read a well-attribute table into a DataFrame.

    Defaults the index column to 0 when none was configured and applies the
    helper's row/column renaming. With ``validate_attr`` the data is checked
    (lithology-specific validation when ``is_lith`` is True).
    """
    if reader_helper.index_col is False:
        reader_helper.index_col = 0

    attr_df = check_format_and_read_to_df(reader_helper)
    _map_rows_and_cols_inplace(attr_df, reader_helper)

    if validate_attr is False:
        return attr_df

    if is_lith:
        return _validate_lith_data(attr_df, reader_helper)

    _validate_attr_data(attr_df)
    return attr_df
57
+
58
+
59
def _map_rows_and_cols_inplace(d: pd.DataFrame, reader_helper: GenericReaderFilesHelper):
    """Rename index labels and column names in place using the helper's maps."""
    index_map = reader_helper.index_map
    if index_map is not None:
        # Equivalent to d.index = d.index.map(index_map), but in place.
        d.rename(index_map, axis="index", inplace=True)

    columns_map = reader_helper.columns_map
    if columns_map is not None:
        d.rename(columns_map, axis="columns", inplace=True)
64
+
65
+
66
+ def _validate_survey_data(d):
67
+ # Check for essential column 'md'
68
+ if 'md' not in d.columns:
69
+ raise AttributeError(
70
+ 'md, inc, and azi columns must be present in the file. Use columns_map to assign column names to these fields.')
71
+
72
+ # Check if 'dip' column exists and convert it to 'inc'
73
+ if 'dip' in d.columns:
74
+ # Convert dip to inclination (90 - dip)
75
+ d['inc'] = 90 - d['dip']
76
+ # Optionally, drop the 'dip' column if it's no longer needed
77
+ d.drop(columns=['dip'], inplace=True)
78
+
79
+ # Handle if inclination ('inc') or azimuth ('azi') columns are missing
80
+ if not np.isin(['inc', 'azi'], d.columns).all():
81
+ warnings.warn(
82
+ 'inc and/or azi columns are not present in the file. The boreholes will be straight.')
83
+ d['inc'] = 180
84
+ d['azi'] = 0
85
+
86
+ # Drop wells that contain only one value, ensuring that we keep rows only when there are duplicates
87
+ d_no_singles = d[d.index.duplicated(keep=False)]
88
+
89
+ return d_no_singles
90
+
91
+
92
+ def _validate_attr_data(d):
93
+ assert d.columns.isin(['base']).any(), ('base column must be present in the file. '
94
+ 'Use columns_map to assign column names to these fields.')
95
+
96
+
97
+ def _validate_lith_data(d: pd.DataFrame, reader_helper: GenericReaderFilesHelper) -> pd.DataFrame:
98
+ # Check component lith in column
99
+ if 'component lith' not in d.columns:
100
+ raise AttributeError('If wells attributes represent lithology, `component lith` column must be present in the file. '
101
+ 'Use columns_map to assign column names to these fields. Maybe you are marking as lithology'
102
+ 'the wrong file?')
103
+ else:
104
+ # TODO: Add categories to reader helper
105
+ categories = sorted(d['component lith'].dropna().unique())
106
+ d['component lith'] = pd.Categorical(
107
+ d['component lith'],
108
+ categories=categories,
109
+ ordered=True
110
+ )
111
+
112
+ d['lith_ids'] = d['component lith'].cat.codes + 1
113
+
114
+ given_top = np.isin(['top', 'base'], d.columns).all()
115
+ given_altitude_and_base = np.isin(['altitude', 'base'], d.columns).all()
116
+ given_only_base = np.isin(['base'], d.columns).all()
117
+ if given_altitude_and_base and not given_top:
118
+ warnings.warn('top column is not present in the file. The tops will be calculated from the base and altitude')
119
+ d = add_tops_from_base_and_altitude_in_place(
120
+ data=d,
121
+ col_well_name=reader_helper.index_col,
122
+ col_base='base',
123
+ col_altitude='altitude'
124
+ )
125
+ elif given_only_base and not given_top:
126
+ warnings.warn('top column is not present in the file. The tops will be calculated from the base assuming altitude=0')
127
+ # add a top column with 0 and call add_tops_from_base_and_altitude_in_place
128
+ d['altitude'] = 0
129
+ d = add_tops_from_base_and_altitude_in_place(
130
+ data=d,
131
+ col_well_name=reader_helper.index_col,
132
+ col_base='base',
133
+ col_altitude='altitude'
134
+ )
135
+
136
+
137
+ elif not given_top and not given_altitude_and_base:
138
+ raise ValueError('top column or base and altitude columns must be present in the file. '
139
+ 'Use columns_map to assign column names to these fields. Maybe you are marking as lithology'
140
+ 'the wrong file?')
141
+
142
+ lith_df = d[['top', 'base', 'component lith']]
143
+
144
+ # * Make sure values are positive
145
+ lith_df['top'] = np.abs(lith_df['top'])
146
+ lith_df['base'] = np.abs(lith_df['base'])
147
+
148
+ return lith_df
@@ -1,68 +1,68 @@
1
- from typing import List
2
-
3
- import pandas as pd
4
-
5
-
6
- __all__ = ['add_tops_from_base_and_altitude_in_place',
7
- 'fix_wells_higher_base_than_top_inplace', 'map_attr_to_segments',
8
- 'pivot_wells_df_into_segment_per_row']
9
-
10
-
11
- def add_tops_from_base_and_altitude_in_place(data: pd.DataFrame, col_well_name: str, col_base: str,
12
- col_altitude: str) -> pd.DataFrame:
13
- d = data
14
- d = _remove_repeated_rows(d)
15
- _create_base_col(col_altitude, col_base, d)
16
- _create_top_col(col_altitude, col_well_name, d)
17
- _add_md_col_if_missing(d)
18
- return d
19
-
20
-
21
- def fix_wells_higher_base_than_top_inplace(df_fixed) -> pd.DataFrame:
22
- top_base_error = df_fixed["top"] > df_fixed["base"]
23
- df_fixed["base"][top_base_error] = df_fixed["top"] + 0.01
24
- return df_fixed
25
-
26
-
27
- def map_attr_to_segments(df, attr_per_segment: List, n_wells: int) -> pd.DataFrame:
28
- tiled_formations = pd.np.tile(attr_per_segment, (n_wells))
29
- df['formation'] = tiled_formations
30
- return df
31
-
32
-
33
- def pivot_wells_df_into_segment_per_row(df: pd.DataFrame, start_segment_cols: int, n_segments_per_well: int) -> pd.DataFrame:
34
- # Repeat fixed rows (collar name and so)
35
- df_fixed = df.iloc[:, :start_segment_cols]
36
- df_fixed = df_fixed.loc[df_fixed.index.repeat(n_segments_per_well)]
37
-
38
- df_bottoms = df.iloc[:, start_segment_cols:start_segment_cols + n_segments_per_well]
39
- df_fixed['base'] = df_bottoms.values.reshape(-1, 1, order='C')
40
-
41
- return df_fixed
42
-
43
-
44
- def _add_md_col_if_missing(d):
45
- if "md" not in d.columns:
46
- d.loc[:, 'md'] = d['top']
47
-
48
-
49
- def _create_top_col(col_altitude, col_well_name, d):
50
- Z_shift = d.groupby(col_well_name)['base'].shift(1)
51
- Z_0 = Z_shift.fillna(0)
52
- v = Z_0 + d[col_altitude]
53
- d.loc[:, 'top'] = Z_0
54
- d.loc[:, '_top_abs'] = v
55
-
56
-
57
- def _create_base_col(col_altitude, col_base, d):
58
- d.loc[:, 'base'] = d[col_altitude] - d[col_base]
59
-
60
-
61
- def _remove_repeated_rows(d):
62
- repeated_rows = _mark_repeated_rows(d['base'])
63
- d = d[~repeated_rows] # removed repeated rows
64
- return d
65
-
66
-
67
- def _mark_repeated_rows(series: pd.Series):
68
- return series.shift(1) == series
1
+ from typing import List
2
+
3
+ import pandas as pd
4
+
5
+
6
+ __all__ = ['add_tops_from_base_and_altitude_in_place',
7
+ 'fix_wells_higher_base_than_top_inplace', 'map_attr_to_segments',
8
+ 'pivot_wells_df_into_segment_per_row']
9
+
10
+
11
def add_tops_from_base_and_altitude_in_place(data: pd.DataFrame, col_well_name: str, col_base: str,
                                             col_altitude: str) -> pd.DataFrame:
    """Derive 'top' columns from base depths and the collar altitude.

    Consecutive duplicate base values are dropped, 'base' is rewritten as
    altitude minus the raw base column, 'top' becomes the previous segment's
    base within each well, and 'md' defaults to 'top' when absent.
    """
    cleaned = _remove_repeated_rows(data)
    _create_base_col(col_altitude, col_base, cleaned)
    _create_top_col(col_altitude, col_well_name, cleaned)
    _add_md_col_if_missing(cleaned)
    return cleaned
19
+
20
+
21
def fix_wells_higher_base_than_top_inplace(df_fixed) -> pd.DataFrame:
    """Clamp segments whose base lies above their top.

    Wherever ``top > base`` the base is reset to ``top + 0.01`` so every
    segment keeps a positive thickness. The frame is modified in place and
    also returned.
    """
    top_base_error = df_fixed["top"] > df_fixed["base"]
    # .loc instead of chained indexing: df["base"][mask] = ... raises
    # SettingWithCopyWarning and is a silent no-op under pandas copy-on-write.
    df_fixed.loc[top_base_error, "base"] = df_fixed.loc[top_base_error, "top"] + 0.01
    return df_fixed
25
+
26
+
27
def map_attr_to_segments(df, attr_per_segment: List, n_wells: int) -> pd.DataFrame:
    """Assign a per-segment attribute sequence to every well.

    ``attr_per_segment`` is repeated ``n_wells`` times and stored in a
    'formation' column, so ``df`` must have
    ``len(attr_per_segment) * n_wells`` rows.
    """
    # ``pd.np`` was removed from pandas (deprecated in 0.25); plain list
    # repetition produces the same tiling with no extra dependency.
    df['formation'] = list(attr_per_segment) * n_wells
    return df
31
+
32
+
33
def pivot_wells_df_into_segment_per_row(df: pd.DataFrame, start_segment_cols: int, n_segments_per_well: int) -> pd.DataFrame:
    """Pivot a one-row-per-well table into one row per segment.

    The first ``start_segment_cols`` columns (collar name and so on) are
    repeated ``n_segments_per_well`` times per well, and the following
    segment-bottom columns are flattened row-major into a 'base' column.
    """
    # Repeat fixed rows (collar name and so)
    df_fixed = df.iloc[:, :start_segment_cols]
    df_fixed = df_fixed.loc[df_fixed.index.repeat(n_segments_per_well)]

    df_bottoms = df.iloc[:, start_segment_cols:start_segment_cols + n_segments_per_well]
    # ravel to 1-D rather than reshape(-1, 1): modern pandas rejects
    # assigning a 2-D array to a single column.
    df_fixed['base'] = df_bottoms.values.ravel(order='C')

    return df_fixed
42
+
43
+
44
+ def _add_md_col_if_missing(d):
45
+ if "md" not in d.columns:
46
+ d.loc[:, 'md'] = d['top']
47
+
48
+
49
+ def _create_top_col(col_altitude, col_well_name, d):
50
+ Z_shift = d.groupby(col_well_name)['base'].shift(1)
51
+ Z_0 = Z_shift.fillna(0)
52
+ v = Z_0 + d[col_altitude]
53
+ d.loc[:, 'top'] = Z_0
54
+ d.loc[:, '_top_abs'] = v
55
+
56
+
57
+ def _create_base_col(col_altitude, col_base, d):
58
+ d.loc[:, 'base'] = d[col_altitude] - d[col_base]
59
+
60
+
61
def _remove_repeated_rows(d):
    """Return ``d`` without rows whose 'base' repeats the previous row's."""
    repeated_mask = _mark_repeated_rows(d['base'])
    return d[~repeated_mask]
65
+
66
+
67
+ def _mark_repeated_rows(series: pd.Series):
68
+ return series.shift(1) == series
@@ -1,104 +1,104 @@
1
- import numpy as np
2
- import pyvista as pv
3
-
4
-
5
- def transform_gaussian_blur(grid, sigma=20.0):
6
- """
7
- Applies a Gaussian blur to the 'model_name' field of the structured grid.
8
-
9
- Parameters:
10
- grid - PyVista grid with 'model_name' field
11
- sigma - Standard deviation for the Gaussian kernel
12
- """
13
- from scipy.ndimage import gaussian_filter
14
-
15
- # Get the original dimensions of the grid
16
- dims = grid.dimensions
17
-
18
- # Reshape the data to 3D array matching grid dimensions
19
- values = np.array(grid['model_name'])
20
- values_3d = values.reshape(dims[2] - 1, dims[1] - 1, dims[0] - 1).transpose(2, 1, 0)
21
-
22
- # Apply Gaussian filter
23
- blurred_values = gaussian_filter(values_3d, sigma=sigma, axes=(2,))
24
-
25
- # Reshape back to 1D array
26
- grid['model_name'] = blurred_values.transpose(2, 1, 0).flatten()
27
- return grid
28
-
29
-
30
- def transform_sinusoidal(values, amplitude=1.0, frequency=0.01, phase=0):
31
- """
32
- Apply a sinusoidal transformation to the values.
33
- """
34
- return values + amplitude * np.sin(frequency * values + phase)
35
-
36
-
37
- def obfuscate_model_name(grid, transform_functions, attr):
38
- """
39
- Applies transformation functions to the 'model_name' field.
40
- Functions can operate on either the grid or the values array.
41
- """
42
- for func in transform_functions:
43
- if 'grid' in func.__code__.co_varnames:
44
- # Function expects the full grid
45
- grid = func(grid)
46
- else:
47
- # Function expects just the values array
48
- values = np.array(grid[attr])
49
- grid[attr] = func(values)
50
-
51
- return grid
52
-
53
-
54
- # pyvista_struct = transform_xy_to_z_propagation(pyvista_struct, z_factor=0.3, noise_level=0.1)
55
- def transform_subtract_mean(values):
56
- """
57
- Subtract the mean of the array from each element.
58
- """
59
- return values - np.mean(values)
60
-
61
-
62
- def transform_scale(values, scale_factor=0.003):
63
- """
64
- Multiply each value by scale_factor.
65
- """
66
- return values * scale_factor
67
-
68
-
69
-
70
-
71
- def update_extent(pyvista_grid, new_extent):
72
- # new_extent: array-like with 6 elements [xmin, xmax, ymin, ymax, zmin, zmax]
73
- old_bounds = np.array(pyvista_grid.bounds) # [xmin, xmax, ymin, ymax, zmin, zmax]
74
-
75
- # Check for valid extents
76
- if any(new_extent[i] >= new_extent[i + 1] for i in range(0, 6, 2)):
77
- raise ValueError("Each min value must be less than the corresponding max value in the new extent.")
78
-
79
- # Compute old ranges and new ranges for each axis
80
- old_ranges = old_bounds[1::2] - old_bounds[0::2] # [x_range, y_range, z_range]
81
- new_ranges = np.array([new_extent[1] - new_extent[0],
82
- new_extent[3] - new_extent[2],
83
- new_extent[5] - new_extent[4]])
84
-
85
- # Avoid division by zero if any old range is zero
86
- if np.any(old_ranges == 0):
87
- raise ValueError("One of the dimensions in the current grid has zero length.")
88
-
89
- # Get the old points and reshape for easier manipulation
90
- old_points = pyvista_grid.points # shape (N, 3)
91
-
92
- # Compute normalized coordinates within the old extent
93
- norm_points = (old_points - old_bounds[0::2]) / old_ranges
94
-
95
- # Compute new points based on new extent
96
- new_mins = np.array([new_extent[0], new_extent[2], new_extent[4]])
97
- new_points = new_mins + norm_points * new_ranges
98
-
99
- # Update the grid's points
100
- pyvista_grid.points = new_points
101
-
102
- # Updating bounds is implicit once the points are modified.
103
- pyvista_grid.Modified()
104
- return pyvista_grid
1
+ import numpy as np
2
+ import pyvista as pv
3
+
4
+
5
+ def transform_gaussian_blur(grid, sigma=20.0):
6
+ """
7
+ Applies a Gaussian blur to the 'model_name' field of the structured grid.
8
+
9
+ Parameters:
10
+ grid - PyVista grid with 'model_name' field
11
+ sigma - Standard deviation for the Gaussian kernel
12
+ """
13
+ from scipy.ndimage import gaussian_filter
14
+
15
+ # Get the original dimensions of the grid
16
+ dims = grid.dimensions
17
+
18
+ # Reshape the data to 3D array matching grid dimensions
19
+ values = np.array(grid['model_name'])
20
+ values_3d = values.reshape(dims[2] - 1, dims[1] - 1, dims[0] - 1).transpose(2, 1, 0)
21
+
22
+ # Apply Gaussian filter
23
+ blurred_values = gaussian_filter(values_3d, sigma=sigma, axes=(2,))
24
+
25
+ # Reshape back to 1D array
26
+ grid['model_name'] = blurred_values.transpose(2, 1, 0).flatten()
27
+ return grid
28
+
29
+
30
def transform_sinusoidal(values, amplitude=1.0, frequency=0.01, phase=0):
    """
    Add a sine wave (driven by the values themselves) to the values.
    """
    wave = np.sin(frequency * values + phase)
    return values + amplitude * wave
35
+
36
+
37
def obfuscate_model_name(grid, transform_functions, attr):
    """
    Run a pipeline of transformations over ``grid[attr]``.

    A transform whose code object names a variable 'grid' receives (and must
    return) the whole grid; any other transform receives the attribute's
    values as a numpy array and returns the replacement values.
    """
    for transform in transform_functions:
        expects_grid = 'grid' in transform.__code__.co_varnames
        if expects_grid:
            grid = transform(grid)
        else:
            current_values = np.array(grid[attr])
            grid[attr] = transform(current_values)

    return grid
52
+
53
+
54
+ # pyvista_struct = transform_xy_to_z_propagation(pyvista_struct, z_factor=0.3, noise_level=0.1)
55
def transform_subtract_mean(values):
    """
    Center the array by removing its mean from every element.
    """
    mean_value = np.mean(values)
    return values - mean_value
60
+
61
+
62
def transform_scale(values, scale_factor=0.003):
    """
    Scale every value by ``scale_factor``.
    """
    return scale_factor * values
67
+
68
+
69
+
70
+
71
def update_extent(pyvista_grid, new_extent):
    """Linearly remap the grid's points into a new bounding box.

    ``new_extent`` is array-like [xmin, xmax, ymin, ymax, zmin, zmax].
    Raises ValueError when a min is not strictly below its max, or when the
    current grid is degenerate (zero length) along some axis.
    """
    current_bounds = np.array(pyvista_grid.bounds)  # [xmin, xmax, ymin, ymax, zmin, zmax]

    # Every (min, max) pair of the target extent must be strictly increasing.
    if any(new_extent[axis] >= new_extent[axis + 1] for axis in (0, 2, 4)):
        raise ValueError("Each min value must be less than the corresponding max value in the new extent.")

    current_mins = current_bounds[0::2]
    current_ranges = current_bounds[1::2] - current_mins  # [x_range, y_range, z_range]
    target_ranges = np.array([new_extent[1] - new_extent[0],
                              new_extent[3] - new_extent[2],
                              new_extent[5] - new_extent[4]])

    # A zero-length axis would divide by zero during normalization.
    if np.any(current_ranges == 0):
        raise ValueError("One of the dimensions in the current grid has zero length.")

    # Normalize the points into [0, 1] within the old box, then stretch
    # them into the new one.
    original_points = pyvista_grid.points  # shape (N, 3)
    normalized = (original_points - current_mins) / current_ranges
    target_mins = np.array([new_extent[0], new_extent[2], new_extent[4]])
    pyvista_grid.points = target_mins + normalized * target_ranges

    # Bounds follow from the points; just flag the VTK object as modified.
    pyvista_grid.Modified()
    return pyvista_grid
@@ -1,2 +1,2 @@
1
- from .to_pyvista import *
2
- from .to_pyvista import init_plotter
1
+ from .to_pyvista import *
2
+ from .to_pyvista import init_plotter