aspect-stable 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aspect/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ import logging
2
+ from .io import cfg, load_cfg, load_model
3
+ from .workflow import SpectrumDetector, model, CHOICE_DM, TIME_DM
4
+ from .plots import decision_matrix_plot
5
+ from .trainer import components_trainer
6
+
7
+ # Create the aspect logger
8
+ _logger = logging.getLogger("aspect")
9
+ _logger.setLevel(logging.INFO)
10
+
11
+ # Invert the shape_number dictionary to map category numbers back to names
12
+ cfg['number_shape'] = {v: k for k, v in cfg['shape_number'].items()}
aspect/aspect.toml ADDED
@@ -0,0 +1,113 @@
1
+ [shape_number]
2
+ undefined = 0
3
+ white-noise = 1
4
+ continuum = 2
5
+ emission = 3
6
+ cosmic-ray = 4
7
+ pixel-line = 5
8
+ broad = 6
9
+ doublet = 7
10
+ peak = 8
11
+ absorption = 9
12
+ dead-pixel = 10
13
+
14
+ [colors]
15
+ undefined = 'black'
16
+ white-noise = '#C41E3A' # Red
17
+ continuum = '#F48CBA' # Pink
18
+ emission = '#00FF98' # Spring Green
19
+ cosmic-ray= '#FFF468' # Yellow
20
+ pixel-line = '#0070DD' # Blue
21
+ broad = '#A330C9' # Dark magenta
22
+ doublet = '#3FC7EB' # Light blue
23
+ peak = '#C69B6D' # Tan
24
+ absorption = '#FF7C0A' # Orange
25
+ dead-pixel = '#8788EE' # Purple
26
+
27
+ [decision_matrices]
28
+
29
+ # Orange Blue
30
+ decision_colors = ['#be6530', '#72a7c2']
31
+
32
+ choice_labels = ['1st option', '2nd option']
33
+ choice = [[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #undefined
34
+ [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0], #white-noise
35
+ [0, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1], #continuum
36
+ [0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0], #emission
37
+ [0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 0], #cosmic-ray
38
+ [0, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0], #pixel-line
39
+ [0, 1, 0, 1, 0, 0, 2, 0, 0, 0, 0], #broad
40
+ [0, 1, 0, 1, 0, 0, 0, 2, 0, 0, 0], #doublet
41
+ [0, 1, 0, 1, 0, 0, 0, 0, 2, 0, 0], #peak
42
+ [0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0], #absorption
43
+ [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2]] #dead-pixel
44
+ # u w c e c p b d p a d
45
+ # n h o m o i r o e b e
46
+ # d i n i s x o u a s a
47
+ # e t t s m e a b k o d
48
+ # f e i s i l d l r -
49
+ # i - n i c - e p p
50
+ # n n u o - l t t i
51
+ # e o u n r i i x
52
+ # d i m a n o e
53
+ # s y e n l
54
+ # e
55
+
56
+ time_labels = ['Current detection', 'Past detection']
57
+ time = [[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], #undefined
58
+ [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], #white-noise
59
+ [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], #continuum
60
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #emission
61
+ [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], #cosmic-ray
62
+ [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], #pixel-line
63
+ [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], #broad
64
+ [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], #doublet
65
+ [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], #peak
66
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #absorption
67
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #dead-pixel
68
+ # u w c e c p b d p a d
69
+ # n h o m o i r o e b e
70
+ # d i n i s x o u a s a
71
+ # e t t s m e a b k o d
72
+ # f e i s i l d l r -
73
+ # i - n i c - e p p
74
+ # n n u o - l t t i
75
+ # e o u n r i i x
76
+ # d i m a n o e
77
+ # s y e n l
78
+ # e
79
+
80
+
81
+
82
+ #choice = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #undefined
83
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #white-noise
84
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #continuum
85
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #emission
86
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #cosmic-ray
87
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #pixel-line
88
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #broad
89
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #doublet
90
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #peak
91
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #absorption
92
+ # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] #dead-pixel
93
+ ## u w c e c p b d p a d
94
+ ## n h o m o i r o e b e
95
+ ## d i n i s x o u a s a
96
+ ## e t t s m e a b k o d
97
+ ## f e i s i l d l r -
98
+ ## i - n i c - e p p
99
+ ## n n u o - l t t i
100
+ ## e o u n r i i x
101
+ ## d i m a n o e
102
+ ## s y e n l
103
+ ## e
104
+
105
+ [plots_format]
106
+ dm."figure.dpi" = 300
107
+ dm."figure.figsize" = [5, 5]
108
+ dm."axes.titlesize" = 9
109
+ dm."axes.labelsize" = 14
110
+ dm."legend.fontsize" = 7
111
+ dm."xtick.labelsize" = 12
112
+ dm."ytick.labelsize" = 12
113
+ dm."font.size" = 5
aspect/changelog.txt ADDED
File without changes
aspect/io.py ADDED
@@ -0,0 +1,97 @@
1
+ from joblib import load as jload
2
+ import numpy as np
3
+
4
+ from pathlib import Path
5
+
6
+ try:
7
+ import tomllib
8
+ except ModuleNotFoundError:
9
+ import tomli as tomllib
10
+
11
+ # Specify the files location
12
+ _ASPECT_FOLDER = Path(__file__).parent
13
+ _MODEL_FOLDER = _ASPECT_FOLDER/'models'
14
+
15
+ # Configuration file
16
+ _CONF_FILE = _ASPECT_FOLDER/'aspect.toml'
17
+
18
+ class Aspect_Error(Exception):
19
+ """Aspect exception function"""
20
+
21
+ # Read the aspect configuration file
22
+ with open(_CONF_FILE, mode="rb") as fp:
23
+ cfg = tomllib.load(fp)
24
+
25
+ # Default feature detection model
26
+ # DEFAULT_MODEL_ADDRESS = _MODEL_FOLDER/'training_multi_sample_v4_min-max_8categories_v4_175000points_angleSample_numpy_array_model.joblib'
27
+ # DEFAULT_MODEL_ADDRESS = _MODEL_FOLDER/'aspect_min-max_mediumbox_v1_model.joblib'
28
+ DEFAULT_MODEL_ADDRESS = _MODEL_FOLDER/'aspect_min-max_mediumbox_v3_model.joblib'
29
+
30
+
31
+ def read_trained_model(file_address):
32
+
33
+ # Read trained model
34
+ model = jload(file_address)
35
+
36
+ # Read the model's companion configuration file
37
+ cfg_address = Path(file_address).parent/f'{file_address.stem}.toml'
38
+ with open(cfg_address, mode="rb") as cm:
39
+ cfg_model = tomllib.load(cm)
40
+
41
+ return model, cfg_model
42
+
43
+
44
+ def check_lisa(model1D, model2D, setup_cfg):
45
+
46
+ if model1D is None:
47
+ coeffs1D = np.array(setup_cfg['linear']['model1D_coeffs']), np.array(setup_cfg['linear']['model1D_intercept'])
48
+ else:
49
+ model1D_job = jload(model1D)
50
+ coeffs1D = np.squeeze(model1D_job.coef_), np.squeeze(model1D_job.intercept_)
51
+
52
+ if model2D is None:
53
+ coeffs2D = np.array(setup_cfg['linear']['model2D_coeffs']), np.array(setup_cfg['linear']['model2D_intercept'])
54
+ else:
55
+ model2D_job = jload(model2D)
56
+ coeffs2D = np.squeeze(model2D_job.coef_), np.squeeze(model2D_job.intercept_)
57
+
58
+ return coeffs1D, coeffs2D
59
+
60
+
61
+ # Function to load configuration file
62
+ def load_cfg(file_address, fit_cfg_suffix='_line_fitting'):
63
+
64
+ """
65
+
66
+ This function reads a configuration file with the `toml format <https://toml.io/en/>`_.
67
+
68
+ :param file_address: Input configuration file address.
69
+ :type file_address: str, pathlib.Path
70
+
71
+ :param fit_cfg_suffix: Suffix for LiMe configuration sections. The default value is "_line_fitting".
72
+ :type fit_cfg_suffix: str
73
+
74
+ :return: Parsed configuration data
75
+ :type: dict
76
+
77
+ """
78
+
79
+ file_path = Path(file_address)
80
+
81
+ # Open the file
82
+ if file_path.is_file():
83
+
84
+ # Toml file
85
+ with open(file_path, mode="rb") as fp:
86
+ cfg = tomllib.load(fp)
87
+
88
+ else:
89
+ raise Aspect_Error(f'The configuration file was not found at: {file_address}')
90
+
91
+ return cfg
92
+
93
+ def load_model(file_address):
94
+
95
+ ml_function = jload(file_address)
96
+
97
+ return ml_function
@@ -0,0 +1,27 @@
1
+ [resuts]
2
+ f1 = "0.9093169891178652"
3
+ precision = "0.9128093921636236"
4
+ Recall = "0.9096525248222435"
5
+ confusion_matrix = [ [ "0.13127522178049186", "0.008862660852373147", "0.0001668970416429516", "0.0009157940233741447", "0.0005691617061157067", "0.00023964703415398178", "0.0008302057968905798",], [ "0.030512202741390895", "0.10520076858227383", "0.0031239702666501197", "0.003385014357424993", "0.0005477646494948155", "8.558822648356493e-06", "7.702940383520843e-05",], [ "0.0001027058717802779", "0.005986896442525366", "0.12498448713394986", "0.011036601805055696", "0.0007446175704070149", "0.0", "0.0",], [ "0.00024392644547816003", "0.005537558253486651", "0.0003637499625551509", "0.13671435357352243", "0.0", "0.0", "0.0",], [ "0.0007874116836487973", "8.986763780774317e-05", "0.00017973527561548634", "0.0", "0.1417982942266462", "0.0", "0.0",], [ "0.0013865292690337517", "0.0004964117136046765", "0.0", "0.0", "0.0", "0.1296747219452492", "0.01129764589583057",], [ "0.0010698528310445616", "0.0013009410425501868", "0.0", "0.0", "0.0", "0.0004835734796321418", "0.1400052208818155",],]
6
+ fit_time = "0.0"
7
+
8
+ [properties]
9
+ box_size = 12
10
+ sample_size = 300000
11
+ test_sample_size_fraction = 0.1
12
+ categories = [ "white-noise", "continuum", "cosmic-ray", "emission", "doublet", "dead-pixel", "absorption",]
13
+ scale = "min-max"
14
+
15
+ [properties.estimator]
16
+ module = "sklearn.ensemble"
17
+ class = "RandomForestClassifier"
18
+
19
+ [properties.estimator_params]
20
+ random_state = 42
21
+ n_estimators = 60
22
+ max_depth = 8
23
+ max_features = "sqrt"
24
+ verbose = 0
25
+ n_jobs = 15
26
+ min_samples_split = 2000
27
+ min_samples_leaf = 2000
aspect/plots.py ADDED
@@ -0,0 +1,274 @@
1
+ import numpy as np
2
+ from matplotlib import pyplot as plt, gridspec, colors, rc_context
3
+ from .io import Aspect_Error, cfg
4
+ from lime.plots import theme
5
+ from .tools import detection_function, stratify_sample
6
+
7
+
8
+
9
+ def decision_matrix_plot(matrix_arr, output_address=None, categories=None, exclude_diagonal=True, show_categories=False):
10
+
11
+ # Try to find from database if None provided
12
+ matrix_name = None
13
+ if isinstance(matrix_arr, str):
14
+ if matrix_arr in cfg['decision_matrices']:
15
+ matrix_name = str(matrix_arr)
16
+ matrix_arr = np.array(cfg['decision_matrices'][matrix_arr])
17
+ else:
18
+ raise Aspect_Error(f'Decision matrix "{matrix_arr}" not found in configuration file please')
19
+ categories = list(cfg['shape_number'].keys())
20
+
21
+ # Number of categories
22
+ n_categories = matrix_arr.shape[0]
23
+
24
+ # Set the diagonal to a distinct value (e.g., -1) to differentiate it
25
+ if exclude_diagonal:
26
+ np.fill_diagonal(matrix_arr, -1)
27
+
28
+ # Figure format
29
+ decision_colors = cfg['decision_matrices']['decision_colors']
30
+ axes_labels = None if matrix_name is None else cfg['decision_matrices'][f'{matrix_name}_labels']
31
+
32
+ # Start the figure
33
+ with rc_context(cfg['plots_format']['dm']):
34
+
35
+ # Define colors for values
36
+ cmap = colors.ListedColormap(['white', decision_colors[0], decision_colors[1]])
37
+ bounds = [-1.5, -0.5, 0.5, 1.5]
38
+ norm = colors.BoundaryNorm(bounds, cmap.N)
39
+
40
+ # Adjusting the plot by adding gridlines to the subplots
41
+ fig = plt.figure(figsize=(10, 8))
42
+ gs = gridspec.GridSpec(n_categories, 2, width_ratios=[n_categories, 1], wspace=0.05)
43
+
44
+ # Plot the decision matrix on the left (column 0 of the GridSpec)
45
+ ax_matrix = fig.add_subplot(gs[:, 0])
46
+ ax_matrix.matshow(matrix_arr, cmap=cmap, norm=norm)
47
+
48
+ # Customize matrix ticks and labels
49
+ ax_matrix.set_xticks(range(n_categories))
50
+ ax_matrix.set_yticks(range(n_categories))
51
+
52
+ ax_matrix.set_xticklabels(categories, rotation=90)
53
+ ax_matrix.set_yticklabels(categories)
54
+
55
+ # # Move x-axis labels to the bottom
56
+ # ax_matrix.xaxis.set_ticks_position('bottom')
57
+ # ax_matrix.xaxis.set_label_position('bottom')
58
+
59
+ # Add black gridlines to separate each square
60
+ ax_matrix.set_xticks(np.arange(-.5, n_categories, 1), minor=True)
61
+ ax_matrix.set_yticks(np.arange(-.5, n_categories, 1), minor=True)
62
+ ax_matrix.grid(which="minor", color="black", linestyle='-', linewidth=2)
63
+ ax_matrix.tick_params(which="minor", size=0)
64
+
65
+ if axes_labels is not None:
66
+ ax_matrix.set_ylabel(axes_labels[0], color=decision_colors[0])
67
+ ax_matrix.set_xlabel(axes_labels[1], color=decision_colors[1])
68
+
69
+ # Add individual plots on the right side (column 1 of the GridSpec) for each category
70
+ if show_categories:
71
+
72
+ for i, category in enumerate(categories):
73
+
74
+ ax_plot = fig.add_subplot(gs[i, 1])
75
+
76
+ # Example placeholder data for each category plot
77
+ x = np.linspace(0, 10, 100)
78
+ y = np.sin(x + i) # Example sine wave that varies by row
79
+ ax_plot.step(x, y, color=cfg['colors'][category])
80
+
81
+ # Add gridlines to the subplots
82
+ ax_plot.grid(True, which='both', linestyle='--', linewidth=0.5)
83
+
84
+ # Hide y-axis ticks for these small plots for a cleaner look
85
+ ax_plot.yaxis.set_ticks([])
86
+ ax_plot.xaxis.set_ticks([])
87
+
88
+ # Output the result
89
+ if output_address is None:
90
+ plt.show()
91
+ else:
92
+ fig.savefig(output_address, bbox_inches='tight')
93
+
94
+ return
95
+
96
+
97
+ def scatter_plot(fig, ax, x_arr, y_arr, labels_arr, feature_list, color_dict, alpha=0.5, idx_target=None,
98
+ detection_range=None, ratio_color=None):
99
+
100
+ for feature in feature_list:
101
+ idcs_class = labels_arr == feature
102
+ x_feature = x_arr[idcs_class]
103
+ y_feature = y_arr[idcs_class]
104
+ label = f'{feature} ({y_feature.size})'
105
+ color_points = color_dict[feature] if ratio_color is None else ratio_color
106
+ scatter = ax.scatter(x_feature, y_feature, label=label, c=color_points, alpha=alpha, edgecolor='none')
107
+
108
+ # if ratio_color is not None:
109
+ # fig.colorbar(scatter, ax=ax)
110
+
111
+ if idx_target is not None:
112
+ ax.scatter(x_arr[idx_target], y_arr[idx_target], marker='x', label='selection', color='black')
113
+
114
+ if detection_range is not None:
115
+ ax.plot(detection_range, detection_function(detection_range))
116
+
117
+ return
118
+
119
+ def parse_fig_cfg(fig_cfg=None, ax_diag=None, ax_line=None, dtype=None):
120
+
121
+ # Input configuration updates default
122
+ fig_cfg = fig_cfg if fig_cfg is not None else {'axes.labelsize': 10, 'axes.titlesize': 10,
123
+ 'figure.figsize': (12, 6), 'hatch.linewidth': 0.3, 'legend.fontsize': 8}
124
+ fig_cfg = theme.fig_defaults(fig_cfg)
125
+
126
+ if dtype == 'classifier':
127
+ ax_diag = {} if ax_diag is None else ax_diag
128
+ ax_diag = {'xlabel': r'$\frac{\sigma_{gas}}{\Delta\lambda_{inst}} = \sigma_{pixels}$ (Gaussian sigma in pixels)',
129
+ 'ylabel': r'$\frac{A_{gas}}{\sigma_{noise}}$ (Signal-to-noise)',
130
+ **ax_diag}
131
+
132
+ ax_line = {} if ax_line is None else ax_line
133
+ ax_line = {'xlabel': 'Feature Number', 'ylabel': 'value', **ax_line}
134
+
135
+ if dtype == 'doublet':
136
+ ax_diag = {} if ax_diag is None else ax_diag
137
+ ax_diag = {'xlabel': r'$\frac{\sigma_{gas}}{\Delta\lambda_{inst}} = \sigma_{pixels}$ (Gaussian sigma in pixels)',
138
+ 'ylabel': r'$S_{pixels}$ (pixels)',
139
+ **ax_diag}
140
+
141
+ ax_line = {} if ax_line is None else ax_line
142
+ ax_line = {'xlabel': 'Feature Number', 'ylabel': 'value', **ax_line}
143
+
144
+ return {'fig': fig_cfg, 'ax1': ax_diag, 'ax2': ax_line}
145
+
146
+
147
+ def ax_wording(ax, ax_cfg=None, legend_cfg=None, yscale=None):
148
+
149
+ ax.update(ax_cfg)
150
+
151
+ if legend_cfg is not None:
152
+ ax.legend(**legend_cfg)
153
+
154
+ if yscale is not None:
155
+ ax.set_yscale(yscale)
156
+
157
+ return
158
+
159
+
160
+ class CheckSample:
161
+
162
+ def __init__(self, in_data_arr, in_pred_arr, fig_cfg=None, ax_diag=None, ax_line=None, base=10000, sample_size=None,
163
+ categories=None, column_labels='shape_class', dtype='classifier'):
164
+
165
+ # Stratify the selection aiming for same number of points
166
+ data_arr, pred_arr = stratify_sample(in_data_arr, in_pred_arr, sample_size, categories, randomize=True)
167
+
168
+ self.y_base = base
169
+ self.x_coords = data_arr[:, 1]
170
+ self.y_coords = data_arr[:, 0]
171
+ self.id_arr = pred_arr
172
+ self.classes = np.sort(np.unique(self.id_arr))
173
+ self.data_df = data_arr[:, 2:]
174
+ self.wave_range = np.arange(self.data_df.shape[1])
175
+ self.dtype = dtype
176
+ self.y_scale = 'log' if self.dtype == 'classifier' else 'linear'
177
+ self.ratio_color = None
178
+
179
+ if self.dtype == 'classifier':
180
+ self.y_coords_log = np.log10(self.y_coords) / np.log10(self.y_base)
181
+ else:
182
+ self.y_coords_log = self.y_coords
183
+
184
+ if self.dtype == 'doublet':
185
+ self.ratio_color = self.y_coords/self.x_coords
186
+
187
+ self.idx_current = None
188
+ self.color_dict = cfg['colors']
189
+
190
+ self.fig_format = parse_fig_cfg(fig_cfg, ax_diag, ax_line, self.dtype)
191
+ self._fig, self._ax1, self._ax2 = None, None, None
192
+
193
+ self.detection_range = np.linspace(data_arr[:,1].min(), data_arr[:,1].max(), 50) if self.dtype == 'classifier' else None
194
+
195
+ return
196
+
197
+ def show(self):
198
+
199
+ # Generate the figure
200
+ with rc_context(self.fig_format['fig']):
201
+
202
+ # Create the figure
203
+ self._fig, (self._ax1, self._ax2) = plt.subplots(1, 2)
204
+
205
+ # Diagnostic plot
206
+ scatter_plot(self._fig, self._ax1, self.x_coords, self.y_coords, self.id_arr, self.classes, self.color_dict,
207
+ idx_target=self.idx_current, detection_range=self.detection_range, ratio_color=self.ratio_color)
208
+ ax_wording(self._ax1, self.fig_format['ax1'], legend_cfg={'loc': 'lower center', 'ncol':2, 'framealpha':0.95},
209
+ yscale=self.y_scale)
210
+
211
+ # Line plot
212
+ self.index_target()
213
+ self.line_plot()
214
+ ax_wording(self._ax2, self.fig_format['ax2'])
215
+
216
+ # Interactive widget
217
+ self._fig.canvas.mpl_connect('button_press_event', self._on_click)
218
+
219
+ # Display the plot
220
+ plt.tight_layout()
221
+ plt.show()
222
+
223
+ return
224
+
225
+ def _on_click(self, event):
226
+
227
+ if event.inaxes == self._ax1 and event.button == 1:
228
+
229
+ if self.y_scale == 'log':
230
+ user_point = (event.xdata, np.log10(event.ydata) / np.log10(self.y_base))
231
+ else:
232
+ user_point = (event.xdata, event.ydata)
233
+
234
+
235
+ # Get index point
236
+ self.index_target(user_point)
237
+
238
+ # Replot the figures
239
+ self._ax1.clear()
240
+ scatter_plot(self._fig, self._ax1, self.x_coords, self.y_coords, self.id_arr, self.classes, self.color_dict,
241
+ idx_target=self.idx_current, detection_range=self.detection_range, ratio_color=self.ratio_color)
242
+ ax_wording(self._ax1, self.fig_format['ax1'], legend_cfg={'loc': 'lower center', 'ncol':2, 'framealpha':0.95},
243
+ yscale=self.y_scale)
244
+
245
+ self._ax2.clear()
246
+ # self._fig.clear()
247
+
248
+ self.line_plot()
249
+ plt.tight_layout()
250
+ self._fig.canvas.draw()
251
+
252
+ return
253
+
254
+ def index_target(self, mouse_coords=None):
255
+
256
+ # If no selection use first point
257
+ if mouse_coords is None:
258
+ self.idx_current = 0
259
+ print(f'Reseting location')
260
+
261
+ else:
262
+ distances = np.sqrt((self.x_coords - mouse_coords[0]) ** 2 + (self.y_coords_log - mouse_coords[1]) ** 2)
263
+ self.idx_current = np.argmin(distances)
264
+ print('Click on:', mouse_coords)#, self.ratio_color[self.idx_current])
265
+
266
+ return
267
+
268
+ def line_plot(self):
269
+
270
+ feature = self.id_arr[self.idx_current]
271
+ self._ax2.step(self.wave_range, self.data_df[self.idx_current, :], label=feature,
272
+ color=self.color_dict[feature], where='mid')
273
+
274
+ return
aspect/tools.py ADDED
@@ -0,0 +1,153 @@
1
+ import logging
2
+ import numpy as np
3
+ from .io import Aspect_Error
4
+
5
+ # Log variable
6
+ _logger = logging.getLogger('aspect')
7
+ np.random.seed(42)
8
+
9
+
10
+ def monte_carlo_expansion(flux_array, err_array, n_mc, for_loop=True):
11
+
12
+
13
+ # # Get the noise scale for the selections
14
+ # if noise_scale is None:
15
+ # noise_scale = self._spec.err_flux if self._spec.err_flux is not None else self._spec.cont_std
16
+ #
17
+ # if noise_scale is None:
18
+ # _logger.warning(f"No flux uncertainty provided for the line detection. There won't be a confidence value"
19
+ # f" for the predictions.")
20
+ # self.n_mc = 1
21
+
22
+ # Scale array depending on the scale
23
+ # noise_scale = err_array if np.isscalar(err_array) else err_array[..., None]
24
+ # noise_scale = err_array
25
+
26
+ # Add random noise matrix
27
+ # noise_array = np.random.normal(0, err_array, size=(n_mc, flux_array.size))
28
+ # mc_flux = flux_array[:, :, np.newaxis] + noise_array
29
+
30
+ if for_loop:
31
+ mc_flux = flux_array + np.random.normal(0, err_array, size=(n_mc, flux_array.size))
32
+
33
+ else:
34
+ noise_scale = err_array if np.isscalar(err_array) else err_array[..., None]
35
+ noise_matrix_shape = (flux_array.shape[0], flux_array.shape[1], n_mc)
36
+
37
+ noise_array = np.random.normal(0, noise_scale, size=noise_matrix_shape)
38
+ mc_flux = flux_array[:, :, np.newaxis] + noise_array
39
+
40
+ return mc_flux
41
+
42
+
43
+ def scale_min_max(data, axis=None):
44
+
45
+ data_min_array = data.min(axis=axis, keepdims=True)
46
+ data_max_array = data.max(axis=axis, keepdims=True)
47
+ data_norm = (data - data_min_array) / (data_max_array - data_min_array)
48
+
49
+ return data_norm
50
+
51
+
52
+ def scale_log(data, log_base, axis=None):
53
+
54
+ data_min_array = data.min(axis=axis, keepdims=True)
55
+
56
+ y_cont = data - data_min_array + 1
57
+ data_norm = np.emath.logn(log_base, y_cont)
58
+
59
+ return data_norm
60
+
61
+
62
+ def scale_log_min_max(data, log_base, axis=None):
63
+
64
+ data_min_array = data.min(axis=axis, keepdims=True)
65
+ data_cont = data - data_min_array + 1
66
+ log_data = np.emath.logn(log_base, data_cont)
67
+ log_min_array, log_max_array = log_data.min(axis=axis, keepdims=True), log_data.max(axis=axis, keepdims=True)
68
+ data_norm = (log_data - log_min_array) / (log_max_array - log_min_array)
69
+
70
+ return data_norm
71
+
72
+
73
+ def feature_scaling(data, transformation, log_base=None, axis=1):
74
+
75
+ match transformation:
76
+ case 'min-max':
77
+ return scale_min_max(data, axis=axis)
78
+ case 'log':
79
+ return scale_log(data, log_base=log_base, axis=axis)
80
+ case 'log-min-max':
81
+ return scale_log_min_max(data, log_base=log_base, axis=axis)
82
+ case _:
83
+ raise Aspect_Error(f'Input scaling: "{transformation}" is not recognized')
84
+
85
+
86
+ def white_noise_scale(flux_arr):
87
+
88
+ min, max = flux_arr.min(), flux_arr.max()
89
+
90
+ diff = max - min if max != 0 else np.abs(max-min)
91
+
92
+ # 1 White noise, 2 continuum
93
+ output_type = 1 if diff > 10 else 2
94
+
95
+ return output_type
96
+
97
+
98
+ def detection_function(x_ratio):
99
+
100
+ # Original
101
+ # 2.5 + 1/np.square(x_ratio - 0.1) + 0.5 * np.square(x_ratio)
102
+
103
+ return 0.5 * np.power(x_ratio, 2) - 0.5 * x_ratio + 5
104
+
105
+
106
+ def cosmic_ray_function(x_ratio, res_ratio_check=True):
107
+
108
+ # Resolution ratio
109
+ if res_ratio_check:
110
+ output = np.exp(0.5 * np.power(x_ratio, -2))
111
+
112
+ # Intensity ratio
113
+ else:
114
+ output = 1/np.sqrt(2 * np.log(x_ratio))
115
+
116
+ return output
117
+
118
+
119
+ def stratify_sample(x_arr, y_arr, n_samples=None, categories=None, randomize=True):
120
+
121
+ # Inspect input sample
122
+ unique_categories, counts = np.unique(y_arr, return_counts=True)
123
+ min_count = min(counts)
124
+
125
+ # Use all categories and the minimum number of counts if not provided
126
+ n_samples = n_samples if n_samples is not None else min_count
127
+ categories = categories if categories is not None else unique_categories
128
+
129
+ # Check input sample size is below category
130
+ if n_samples > min_count:
131
+ _logger.warning(f'The input sample minimun size category ({unique_categories[counts==min_count]} = {min_count})'
132
+ f' is less than the requested input size ({n_samples}). The minimum count will be used instead.')
133
+ n_samples = min_count
134
+
135
+ # Empty mask for the target categories
136
+ selection_mask = np.zeros(y_arr.size, dtype=bool)
137
+
138
+ # Mark indices for each category
139
+ print(f'\nInput sample has {y_arr.shape[0]} entries:')
140
+ for j, category in enumerate(categories):
141
+ print(f'- {category}: {counts[j]}')
142
+ category_indices = np.where(y_arr == category)[0]
143
+ sampled_indices = np.random.choice(category_indices, n_samples, replace=False)
144
+ selection_mask[sampled_indices] = True
145
+ print(f'Cropping to {n_samples} entries per category')
146
+
147
+ selection_mask = np.nonzero(selection_mask)[0]
148
+
149
+ if randomize:
150
+ np.random.shuffle(selection_mask)
151
+
152
+ return x_arr[selection_mask, :], y_arr[selection_mask]
153
+
aspect/trainer.py ADDED
@@ -0,0 +1,102 @@
1
+ import importlib
2
+ import numpy as np
3
+ import joblib
4
+ import toml
5
+ from sklearn.model_selection import cross_val_score, cross_val_predict
6
+ from sklearn.metrics import confusion_matrix
7
+ from sklearn.ensemble import RandomForestClassifier
8
+ from sklearn.model_selection import StratifiedShuffleSplit
9
+ from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
10
+ from time import time
11
+ from pathlib import Path
12
+ from .io import cfg as aspect_cfg
13
+
14
+
15
+ def get_training_test_sets(x_arr, y_arr, test_fraction, random_state=None):
16
+
17
+ # Split into training and testing:
18
+ print(f'\nSplitting sample with categories:')
19
+ print(np.unique(y_arr))
20
+ sss = StratifiedShuffleSplit(n_splits=1, train_size=int(y_arr.size * (1 - test_fraction)),
21
+ test_size=int(y_arr.size * test_fraction), random_state=random_state)
22
+
23
+ for train_index, test_index in sss.split(x_arr, y_arr):
24
+ X_train, X_test = x_arr[train_index, 2:], x_arr[test_index, 2:]
25
+ y_train, y_test = y_arr[train_index], y_arr[test_index]
26
+
27
+ # Convert strings to integers
28
+ y_train = np.vectorize(aspect_cfg['shape_number'].get)(y_train)
29
+ y_test = np.vectorize(aspect_cfg['shape_number'].get)(y_test)
30
+
31
+ return X_train, y_train, X_test, y_test
32
+
33
+
34
+ def components_trainer(model_label, x_arr, y_arr, fit_cfg, list_labels, output_folder=None, test_fraction=0.1,
35
+ random_state=None):
36
+
37
+ # Preparing the estimator:
38
+ print(f'\nLoading estimator: {fit_cfg["estimator"]["class"]}')
39
+ estimator = getattr(importlib.import_module(fit_cfg['estimator']["module"]), fit_cfg['estimator']["class"])
40
+ estimator_params = fit_cfg.get('estimator_params', {})
41
+
42
+ # Split into training and testing:
43
+ print(f'\nSplitting sample with categories:')
44
+ X_train, y_train, X_test, y_test = get_training_test_sets(x_arr, y_arr, test_fraction, random_state)
45
+
46
+ # Run the training
47
+ print(f'\nTraining: {y_train.size/len(fit_cfg["categories"]):.0f} * {len(fit_cfg["categories"])} = {y_train.size} points ({model_label})')
48
+ print(f'- Settings: {fit_cfg["estimator_params"]}\n')
49
+ start_time = time()
50
+ ml_function = estimator(**estimator_params)
51
+ ml_function.fit(X_train, y_train)
52
+ end_time = np.round((time()-start_time)/60, 2)
53
+ print(f'- completed ({end_time} minutes)')
54
+
55
+ # Save the trained model and configuration
56
+ output_folder = Path(output_folder)/'results'
57
+ output_folder.mkdir(parents=True, exist_ok=True)
58
+
59
+ model_address = output_folder/f'{model_label}.joblib'
60
+ joblib.dump(ml_function, model_address)
61
+
62
+ # Run initial diagnostics
63
+ print(f'\nReloading model from: {model_address}')
64
+ start_time = time()
65
+ ml_function = joblib.load(model_address)
66
+ fit_time = np.round((time()-start_time)/60, 3)
67
+ print(f'- completed ({fit_time} minutes)')
68
+
69
+ print(f'\nRuning prediction on test set ({y_test.size} points)')
70
+ start_time = time()
71
+ y_pred = ml_function.predict(X_test)
72
+ print(f'- completed ({(time()-start_time)/60:0.2f} minutes)')
73
+
74
+ # Confusion matrix on the test set
75
+ print(f'\nConfusion matrix in test set ({y_test.size} points)')
76
+ start_time = time()
77
+ conf_matrix_test = confusion_matrix(y_test, y_pred, normalize="all")
78
+ print(f'- completed ({(time()-start_time)/60:0.2f} minutes)')
79
+
80
+ # Precision, recall and f1:
81
+ print(f'\nF1, Precision and recall diagnostics ({y_test.size} points)')
82
+ start_time = time()
83
+ pres = precision_score(y_test, y_pred, average='macro')
84
+ recall = recall_score(y_test, y_pred, average='macro')
85
+ f1 = f1_score(y_test, y_pred, average='macro')
86
+ print(f'- completed ({(time()-start_time)/60:0.2f} minutes)')
87
+
88
+ print(f'\nModel outputs')
89
+ print(f'- F1: \n {f1}')
90
+ print(f'- Precision: \n {pres}')
91
+ print(f'- Recall: \n {recall}')
92
+ print(f'- Testing confusion matrix: \n {conf_matrix_test}')
93
+ print(f'- Fitting time: \n {fit_time}')
94
+
95
+ # Save results into a TOML file
96
+ toml_path = output_folder/f'{model_label}.toml'
97
+ output_dict = {'resuts': {'f1':f1, 'precision':pres, 'Recall':recall, 'confusion_matrix':conf_matrix_test,
98
+ 'fit_time': fit_time}, 'properties': fit_cfg,}
99
+ with open(toml_path, 'w') as f:
100
+ toml.dump(output_dict, f)
101
+
102
+ return
aspect/workflow.py ADDED
@@ -0,0 +1,299 @@
1
+ import numpy as np
2
+ from time import time
3
+ from .io import read_trained_model, DEFAULT_MODEL_ADDRESS, cfg, Aspect_Error
4
+ from .tools import monte_carlo_expansion, feature_scaling, white_noise_scale
5
+ from matplotlib import pyplot as plt
6
+
7
# Decision matrices loaded from the package configuration (aspect.toml):
# - CHOICE_DM[i, j] picks which of two simultaneously detected categories
#   (i, j) wins a box (0 -> first candidate, 1 -> second candidate).
# - TIME_DM[old, new] flags, per pixel, whether a new prediction may replace
#   the one already assigned (used by SpectrumDetector.detection_revision).
CHOICE_DM = np.array(cfg['decision_matrices']['choice'])
TIME_DM = np.array(cfg['decision_matrices']['time'])

# TODO Larger box overwrites small
# TODO COMPLEX overwrites simple
12
+
13
def unpack_spec_flux(spectrum, rest_wl_lim):
    """Return the valid flux, flux uncertainty and their pixel indexes.

    Combines the spectrum's mask (if it is a masked array) with an optional
    rest-wavelength window, and returns only the pixels that survive both.

    Parameters
    ----------
    spectrum : object with ``flux``, ``err_flux`` and ``wave_rest`` attributes.
    rest_wl_lim : (min, max) rest-wavelength limits or None for no limit.

    Returns
    -------
    flux_arr, err_arr : plain arrays with the valid pixels only.
    idcs_data_mask : indices of those pixels in the original spectrum.
    """
    # Start from the spectrum's own mask (True = excluded pixel)
    is_masked = np.ma.isMaskedArray(spectrum.flux)
    if is_masked:
        excluded = spectrum.flux.mask
    else:
        excluded = np.zeros(spectrum.flux.size).astype(bool)

    # Also exclude pixels outside the requested rest-wavelength window
    if rest_wl_lim is not None:
        wave_rest = spectrum.wave_rest.data if is_masked else spectrum.wave_rest
        outside_window = ~((wave_rest > rest_wl_lim[0]) & (wave_rest < rest_wl_lim[1]))
        excluded = excluded | outside_window

    # Invert to select the valid data and pull the underlying arrays
    keep = ~excluded
    if is_masked:
        flux_arr = spectrum.flux.data[keep]
        err_arr = spectrum.err_flux.data[keep]
    else:
        flux_arr = spectrum.flux[keep]
        err_arr = spectrum.err_flux[keep]

    return flux_arr, err_arr, np.flatnonzero(keep)
31
+
32
+
33
def enbox_spectrum(input_flux, box_size, range_box):
    """Stack a 1D flux array into overlapping boxes of ``box_size`` pixels.

    Each row ``k`` of the output contains ``input_flux[k + range_box]``, i.e.
    the detection window starting at pixel ``k``.

    Parameters
    ----------
    input_flux : 1D flux array.
    box_size : number of pixels per detection box.
    range_box : offsets within a box (normally ``np.arange(box_size)``).

    Returns
    -------
    2D array with shape ``(input_flux.size - box_size + 1, box_size)``.
    """
    n_boxes = input_flux.size - box_size + 1
    window_idcs = np.arange(n_boxes)[:, None] + range_box

    return input_flux[window_idcs]
47
+
48
+
49
def detection_spectrum_prechecks(y_arr, box_size, idcs_data):
    """Check the spectrum can host at least one detection box.

    Returns False when the spectrum (or its unmasked portion) is smaller
    than the detection box, True otherwise.
    """
    too_short = (y_arr.size < box_size) or (idcs_data.size < box_size)

    return not too_short
58
+
59
+
60
class ModelManager:
    """Load and hold the trained classification model and its configuration.

    Parameters
    ----------
    model_address : str or Path, optional
        Location of the ``.joblib`` trained model file. Defaults to
        ``DEFAULT_MODEL_ADDRESS``.
    n_jobs : int, optional
        Number of cores used by the predictor. Defaults to 4.
    verbose : int, optional
        Verbosity level for the predictor (0 = no output messages).
    """

    def __init__(self, model_address=None, n_jobs=None, verbose=0):

        self.cfg = None
        self.detection_model = None
        self.b_pixels_arr = None
        self.scale = None
        self.log_base = None

        self.categories_str = None
        self.feature_number_dict = None
        self.number_feature_dict = None
        self.n_categories = None

        # Default values
        model_address = DEFAULT_MODEL_ADDRESS if model_address is None else model_address

        # Load the model
        self.predictor, self.cfg = read_trained_model(model_address)

        # Specify cores (default 4).
        # BUGFIX: the requested n_jobs was previously discarded and hard-coded
        # to 4; now the caller's value is honoured when provided.
        n_jobs = 4 if n_jobs is None else n_jobs
        self.predictor.n_jobs = n_jobs
        self.predictor.verbose = verbose  # 0 -> no output messages

        # Array with the box sizes and the pixel-offset range of the first box
        self.b_pixels_arr = np.atleast_1d(self.cfg['properties']['box_size'])
        self.b_pixels_range = np.atleast_2d(np.arange(self.b_pixels_arr[0]))

        # Scaling properties and category bookkeeping
        self.scale = self.cfg['properties']['scale']
        self.log_base = self.cfg['properties'].get('log_base')
        self.categories_str = np.array(self.cfg['properties']['categories'])
        self.feature_number_dict = cfg['shape_number']
        self.number_feature_dict = {v: k for k, v in self.feature_number_dict.items()}

        self.n_categories = len(self.feature_number_dict)

        return

    def reload_model(self, model_address=None, n_jobs=None):
        """Re-run the constructor to load a different trained model."""

        # Call the constructor again
        self.__init__(model_address, n_jobs)

        return
107
+
108
+
109
# Module-level singleton loaded with the default trained model; shared by
# SpectrumDetector instances created without an explicit model address.
model = ModelManager()
111
+
112
+
113
class SpectrumDetector:
    """Run the component-detection workflow on a single spectrum.

    A fixed-size box is slid along the spectrum; each box is expanded into a
    Monte-Carlo sample of the flux uncertainty, min-max scaled and classified
    by the trained model. Per-pixel categories and confidence counts are
    accumulated in ``pred_arr`` and ``conf_arr``.
    """

    def __init__(self, spectrum, model_address=None):

        # Observed spectrum; must expose .flux, .err_flux and .wave_rest
        self._spec = spectrum
        self.narrow_detect = None
        self.box_width = None
        self.range_box = None
        # Monte-Carlo sample size per box and the decision thresholds
        self.n_mc = 100
        self.detection_min = 40
        self.white_noise_maximum = 50

        self.line_1d_pred = None
        self.line_2d_pred = None
        self.line_pred = None

        self.features = None

        # Read the detection model
        # NOTE(review): a user-supplied model_address is currently ignored —
        # only the module-level default model is ever assigned; confirm intent.
        if model_address is None:
            self.model = model

        # Arrays to store the data
        self.seg_flux = None
        self.seg_err = None

        # Working buffers for the current box segment
        self.seg_pred = None
        self.seg_conf = None

        # Full-spectrum outputs: per-pixel category code and confidence count
        self.pred_arr = None
        self.conf_arr = None

        return

    def detection(self, feature_list=None, bands=None, exclude_continuum=True, show_steps=False, rest_wl_lim=None):
        """Classify every pixel of the spectrum in place.

        Parameters
        ----------
        feature_list, bands : currently unused by this implementation.
        exclude_continuum : skip boxes dominated by the white-noise category.
        show_steps : plot a diagnostic figure for every reviewed box.
        rest_wl_lim : (min, max) rest-wavelength window restricting the analysis.

        Results are stored in ``self.pred_arr`` and ``self.conf_arr``.
        """

        # Support variables
        box_size = self.model.b_pixels_arr[0]
        box_range = self.model.b_pixels_range[0]

        # Remove masks from flux and uncertainty
        y_arr, err_arr, idcs_data = unpack_spec_flux(self._spec, rest_wl_lim)

        # Check the validity of the spectrum
        if detection_spectrum_prechecks(y_arr, box_size, idcs_data):

            # Empty containers (category 0 = undefined)
            self.pred_arr = np.zeros(self._spec.flux.size, dtype=np.int64)
            self.conf_arr = np.zeros(self._spec.flux.size, dtype=np.int64)

            self.seg_pred = np.zeros(box_size, dtype=np.int64)
            self.seg_conf = np.zeros(box_size, dtype=np.int64)

            # Reshape spectrum to box size (one row per sliding window)
            y_enbox = enbox_spectrum(y_arr, box_size, box_range)
            err_enbox = enbox_spectrum(err_arr, box_size, box_range)

            # MC expansion: n_mc noisy realisations of every box
            y_enbox = monte_carlo_expansion(y_enbox, err_enbox, self.n_mc, for_loop=False)

            # Scaling
            y_norm = feature_scaling(y_enbox, 'min-max', 1)

            # Run the prediction on every Monte-Carlo realisation
            y_reshaped = y_norm.transpose(0, 2, 1).reshape(-1, box_size)
            y_pred = self.model.predictor.predict(y_reshaped)
            # NOTE(review): hard-coded 100 — presumably should be self.n_mc;
            # this breaks if n_mc is changed. TODO confirm.
            y_pred = y_pred.reshape(-1, 100)

            # Get the count of types detected on Monte-Carlo
            counts_categories = np.apply_along_axis(np.bincount, axis=1, arr=y_pred, minlength=self.model.n_categories)

            # Exclude white-noise regions from review
            # (column 1 is the white-noise category per the package config)
            if exclude_continuum:
                idcs_detection = np.flatnonzero(counts_categories[:, 1] < self.white_noise_maximum)
            else:
                # NOTE(review): y_arr.size - box_size is one short of the
                # number of boxes (size - box_size + 1); last box is skipped
                # in this branch — confirm whether intentional.
                idcs_detection = np.arange(y_arr.size - box_size)

            for idx in idcs_detection:

                # Get segment arrays (current per-pixel state for this box)
                self.seg_pred[:] = self.pred_arr[idcs_data][idx:idx + box_size]
                self.seg_conf[:] = self.conf_arr[idcs_data][idx:idx + box_size]

                # Count categories exceeding the detection threshold
                counts = counts_categories[idx, :]
                idcs_categories = counts > self.detection_min

                # Choose detection
                out_type, out_confidence = self.detection_evaluation(counts, idcs_categories)

                # Check with previous detection
                idcs_pred, new_pred, new_conf = self.detection_revision(idx, box_size, out_type, out_confidence)

                # Only pass if more than half of the trailing pixels accept
                # NOTE(review): 6 and 5 look hard-coded for the default box
                # size — TODO confirm they should derive from box_size.
                half_check = idcs_pred[6:].sum() > 5
                if half_check:
                    idcs_pred = np.flatnonzero(idcs_pred)
                    self.seg_pred[idcs_pred] = new_pred[idcs_pred]
                    self.seg_conf[idcs_pred] = new_conf[idcs_pred]
                else:
                    # Keep the previous classification untouched
                    self.seg_pred[:] = self.pred_arr[idcs_data][idx:idx + box_size]
                    self.seg_conf[:] = self.conf_arr[idcs_data][idx:idx + box_size]

                if show_steps:
                    self.plot_steps(y_norm[idx, :], idx, counts, idcs_categories, out_type, out_confidence,
                                    self.pred_arr[idcs_data][idx:idx + box_size], self.conf_arr[idcs_data][idx:idx + box_size],
                                    idcs_pred, new_pred, new_conf)

                # Assign new categories and confidence back onto the spectrum
                # (indexing idcs_data first keeps this a real in-place write,
                # unlike the chained read-only indexing used above)
                self.pred_arr[idcs_data[idx:idx + box_size]] = self.seg_pred[:]
                self.conf_arr[idcs_data[idx:idx + box_size]] = self.seg_conf[:]

    def detection_evaluation(self, counts_categories, idcs_categories):
        """Resolve the winning category for one box.

        Returns a ``(category, confidence_count)`` pair: (0, 0) when nothing
        passes the threshold, the single winner when one does, and the
        CHOICE_DM arbitration when two do. Raises Aspect_Error for three or
        more simultaneous detections.
        """

        n_detections = idcs_categories.sum()

        match n_detections:

            # Undefined
            case 0:
                return 0, 0

            # One detection: argmax of the boolean mask is its index
            case 1:
                return np.argmax(idcs_categories), counts_categories[idcs_categories][0]

            # Two detections: CHOICE_DM decides which candidate wins
            case 2:
                category_candidates = np.flatnonzero(idcs_categories)
                idx_output = CHOICE_DM[category_candidates[0], category_candidates[1]]
                output_type, output_count = category_candidates[idx_output], counts_categories[idcs_categories][idx_output]
                return output_type, output_count

            # Three or more detections are not supported
            case _:
                raise Aspect_Error(f'Number of detections: "{n_detections}" is not recognized')

    def detection_revision(self, idx, box_size, new_type, new_confidence):
        """Compare the new box-wide prediction against the current per-pixel
        state: TIME_DM[old, new] flags which pixels may accept the new value.

        Returns the acceptance flags plus the candidate prediction/confidence
        arrays (both constant across the box).
        """

        new_pred, new_conf = np.full(box_size, new_type), np.full(box_size, new_confidence)
        idcs_pred = TIME_DM[self.seg_pred, new_pred]
        # idcs_pred = np.nonzero(idcs_pred)

        return idcs_pred, new_pred, new_conf

    def transform_category(self, input_category, segment_flux):
        """Post-process a category code; white-noise (1) is rescaled from the
        segment flux, any other category passes through unchanged."""

        match input_category:

            # White noise scale
            case 1:
                return white_noise_scale(segment_flux)

            case _:
                return input_category

    def plot_steps(self, y_norm, idx, counts, idcs_categories, out_type, out_confidence, old_pred, old_conf,
                   idcs_pred, new_pred, new_conf):
        """Diagnostic plot of one detection step: the normalised box flux plus
        the old and new per-pixel classifications, colour-coded by category."""

        x_arr = self._spec.wave_rest if not np.ma.isMaskedArray(self._spec.wave_rest) else self._spec.wave_rest.data[~self._spec.wave_rest.mask]
        x_sect = x_arr[idx:idx+y_norm.shape[0]]
        print(f'Idx "{idx}"; counts: {counts}; Output: {model.number_feature_dict[out_type]} ({out_type})')

        # Category colours from the package configuration
        colors_old = [cfg['colors'][model.number_feature_dict[val]] for val in old_pred]
        colors_new = [cfg['colors'][model.number_feature_dict[val]] for val in self.seg_pred]

        fig, ax = plt.subplots()
        color_detection = cfg['colors'][model.number_feature_dict[out_type]]
        ax.step(x_sect, y_norm[:,0], where='mid', color=color_detection, label='Out detection')
        ax.scatter(x_sect, np.zeros(x_sect.size), color=colors_old, label='Old prediction')
        ax.scatter(x_sect, np.ones(x_sect.size), color=colors_new, label='New prediction')
        ax.set_xlabel(r'Wavelength $(\AA)$')

        ax_secondary = ax.twinx()  # Creates a twin y-axis on the right
        ax_secondary.set_ylim(ax.get_ylim())  # Match the primary y-axis limits
        ax_secondary.set_yticks([0, 0.5, 1])  # Custom tick positions
        ax_secondary.set_yticklabels(['Previous\nClassification', 'Present\nClassification', 'Output\nClassification'])

        plt.tight_layout()
        plt.show()

        return
@@ -0,0 +1,41 @@
1
+ Metadata-Version: 2.1
2
+ Name: aspect-stable
3
+ Version: 0.1.0
4
+ Summary: Automatic SPEctra Components Tagging
5
+ Author-email: Vital Fernández <vgf@umich.edu>
6
+ Classifier: License :: OSI Approved :: MIT License
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: Programming Language :: Python :: 3.10
9
+ Requires-Python: >=3.10
10
+ Description-Content-Type: text/x-rst
11
+ Requires-Dist: numpy~=1.2
12
+ Requires-Dist: joblib~=1.3
13
+ Requires-Dist: matplotlib~=3.7
14
+ Requires-Dist: scikit-learn~=1.5
15
+ Requires-Dist: tomli>=2.0.0; python_version < "3.11"
16
+ Provides-Extra: tests
17
+ Requires-Dist: pytest~=7.4; extra == "tests"
18
+ Requires-Dist: pytest-cov~=4.1; extra == "tests"
19
+ Requires-Dist: pytest-mpl~=0.16; extra == "tests"
20
+ Provides-Extra: docs
21
+ Requires-Dist: nbsphinx~=0.9; extra == "docs"
22
+ Requires-Dist: ipympl~=0.9; extra == "docs"
23
+ Requires-Dist: sphinx-rtd-theme~=1.0; extra == "docs"
24
+
25
+
26
+ This library provides a set of tools to identify features in astronomical spectra
27
+
28
+ Installation
29
+ ============
30
+
31
+ The library can be installed directly from its PyPi_ project page running the command:
32
+
33
+ ``pip install aspect-stable``
34
+
35
+ Development
36
+ ===========
37
+
38
+ ASPECT is currently on an alpha release. Any comment/issue/request can be added as an issue on the github page.
39
+ Please commit to dev branch.
40
+
41
+ .. _PyPi: https://pypi.org/project/aspect-stable/
@@ -0,0 +1,14 @@
1
+ aspect/__init__.py,sha256=347qWVlQ7-cQ7qEIpeNcjakEHrMxkxHd3khM4dncAZk,415
2
+ aspect/aspect.toml,sha256=PRTDA8_2oolApOvm0sxFGnm2UKu5aG17TFNDG9NbrGw,4940
3
+ aspect/changelog.txt,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ aspect/io.py,sha256=O8y3g8NimOX1-6FJty1_Ky3x1rpTynX7ITKIwvNioss,2686
5
+ aspect/plots.py,sha256=HRhGXhQYSxeSPlYstDlO7XESoBSZqSFanx5JRzCfX18,10359
6
+ aspect/tools.py,sha256=NWG5lpbpxEyNI9J23yI_v3QorkB3JaQZ9BfiQli85GQ,4912
7
+ aspect/trainer.py,sha256=a8ftyVSbqouB1vrcXHbFJWXKz3buj9pOPEJmBSp7NIg,4254
8
+ aspect/workflow.py,sha256=2E0ZpAWH1i-arC0bDZWL-X7k5A9VkESmBmUGZ34zprE,10996
9
+ aspect/models/aspect_min-max_mediumbox_v3_model.joblib,sha256=CPVKObt44quRwAMSurYAR1DQ6egtdaZgPHI4Ev2Poks,2137409
10
+ aspect/models/aspect_min-max_mediumbox_v3_model.toml,sha256=Mvp2LDbo1iGA7IsntMYE3iz9UCXk9cCJEyIzwhnWXfw,1585
11
+ aspect_stable-0.1.0.dist-info/METADATA,sha256=cT5Q1wtx3_arl2hz3tMRySua1uVY-NF9QE4AU5LBs5A,1304
12
+ aspect_stable-0.1.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
13
+ aspect_stable-0.1.0.dist-info/top_level.txt,sha256=RknseycB7xet8TKNMnAzNEdvIcRwUWffKDxYxGimWO8,7
14
+ aspect_stable-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (75.6.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ aspect