machinegnostics 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- __init__.py +0 -0
- machinegnostics/__init__.py +24 -0
- machinegnostics/magcal/__init__.py +37 -0
- machinegnostics/magcal/characteristics.py +460 -0
- machinegnostics/magcal/criteria_eval.py +268 -0
- machinegnostics/magcal/criterion.py +140 -0
- machinegnostics/magcal/data_conversion.py +381 -0
- machinegnostics/magcal/gcor.py +64 -0
- machinegnostics/magcal/gdf/__init__.py +2 -0
- machinegnostics/magcal/gdf/base_df.py +39 -0
- machinegnostics/magcal/gdf/base_distfunc.py +1202 -0
- machinegnostics/magcal/gdf/base_egdf.py +823 -0
- machinegnostics/magcal/gdf/base_eldf.py +830 -0
- machinegnostics/magcal/gdf/base_qgdf.py +1234 -0
- machinegnostics/magcal/gdf/base_qldf.py +1019 -0
- machinegnostics/magcal/gdf/cluster_analysis.py +456 -0
- machinegnostics/magcal/gdf/data_cluster.py +975 -0
- machinegnostics/magcal/gdf/data_intervals.py +853 -0
- machinegnostics/magcal/gdf/data_membership.py +536 -0
- machinegnostics/magcal/gdf/der_egdf.py +243 -0
- machinegnostics/magcal/gdf/distfunc_engine.py +841 -0
- machinegnostics/magcal/gdf/egdf.py +324 -0
- machinegnostics/magcal/gdf/eldf.py +297 -0
- machinegnostics/magcal/gdf/eldf_intv.py +609 -0
- machinegnostics/magcal/gdf/eldf_ma.py +627 -0
- machinegnostics/magcal/gdf/homogeneity.py +1218 -0
- machinegnostics/magcal/gdf/intv_engine.py +1523 -0
- machinegnostics/magcal/gdf/marginal_intv_analysis.py +558 -0
- machinegnostics/magcal/gdf/qgdf.py +289 -0
- machinegnostics/magcal/gdf/qldf.py +296 -0
- machinegnostics/magcal/gdf/scedasticity.py +197 -0
- machinegnostics/magcal/gdf/wedf.py +181 -0
- machinegnostics/magcal/gdf/z0_estimator.py +1047 -0
- machinegnostics/magcal/layer_base.py +42 -0
- machinegnostics/magcal/layer_history_base.py +74 -0
- machinegnostics/magcal/layer_io_process_base.py +238 -0
- machinegnostics/magcal/layer_param_base.py +448 -0
- machinegnostics/magcal/mg_weights.py +36 -0
- machinegnostics/magcal/sample_characteristics.py +532 -0
- machinegnostics/magcal/scale_optimization.py +185 -0
- machinegnostics/magcal/scale_param.py +313 -0
- machinegnostics/magcal/util/__init__.py +0 -0
- machinegnostics/magcal/util/dis_docstring.py +18 -0
- machinegnostics/magcal/util/logging.py +24 -0
- machinegnostics/magcal/util/min_max_float.py +34 -0
- machinegnostics/magnet/__init__.py +0 -0
- machinegnostics/metrics/__init__.py +28 -0
- machinegnostics/metrics/accu.py +61 -0
- machinegnostics/metrics/accuracy.py +67 -0
- machinegnostics/metrics/auto_correlation.py +183 -0
- machinegnostics/metrics/auto_covariance.py +204 -0
- machinegnostics/metrics/cls_report.py +130 -0
- machinegnostics/metrics/conf_matrix.py +93 -0
- machinegnostics/metrics/correlation.py +178 -0
- machinegnostics/metrics/cross_variance.py +167 -0
- machinegnostics/metrics/divi.py +82 -0
- machinegnostics/metrics/evalmet.py +109 -0
- machinegnostics/metrics/f1_score.py +128 -0
- machinegnostics/metrics/gmmfe.py +108 -0
- machinegnostics/metrics/hc.py +141 -0
- machinegnostics/metrics/mae.py +72 -0
- machinegnostics/metrics/mean.py +117 -0
- machinegnostics/metrics/median.py +122 -0
- machinegnostics/metrics/mg_r2.py +167 -0
- machinegnostics/metrics/mse.py +78 -0
- machinegnostics/metrics/precision.py +119 -0
- machinegnostics/metrics/r2.py +122 -0
- machinegnostics/metrics/recall.py +108 -0
- machinegnostics/metrics/rmse.py +77 -0
- machinegnostics/metrics/robr2.py +119 -0
- machinegnostics/metrics/std.py +144 -0
- machinegnostics/metrics/variance.py +101 -0
- machinegnostics/models/__init__.py +2 -0
- machinegnostics/models/classification/__init__.py +1 -0
- machinegnostics/models/classification/layer_history_log_reg.py +121 -0
- machinegnostics/models/classification/layer_io_process_log_reg.py +98 -0
- machinegnostics/models/classification/layer_mlflow_log_reg.py +107 -0
- machinegnostics/models/classification/layer_param_log_reg.py +275 -0
- machinegnostics/models/classification/mg_log_reg.py +273 -0
- machinegnostics/models/cross_validation.py +118 -0
- machinegnostics/models/data_split.py +106 -0
- machinegnostics/models/regression/__init__.py +2 -0
- machinegnostics/models/regression/layer_histroy_rob_reg.py +139 -0
- machinegnostics/models/regression/layer_io_process_rob_rig.py +88 -0
- machinegnostics/models/regression/layer_mlflow_rob_reg.py +134 -0
- machinegnostics/models/regression/layer_param_rob_reg.py +212 -0
- machinegnostics/models/regression/mg_lin_reg.py +253 -0
- machinegnostics/models/regression/mg_poly_reg.py +258 -0
- machinegnostics-0.0.1.dist-info/METADATA +246 -0
- machinegnostics-0.0.1.dist-info/RECORD +93 -0
- machinegnostics-0.0.1.dist-info/WHEEL +5 -0
- machinegnostics-0.0.1.dist-info/licenses/LICENSE +674 -0
- machinegnostics-0.0.1.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,448 @@
|
|
|
1
|
+
'''
|
|
2
|
+
Machine Gnostics - Machine Gnostics Library
|
|
3
|
+
Copyright (C) 2025 Machine Gnostics Team
|
|
4
|
+
|
|
5
|
+
This work is licensed under the terms of the GNU General Public License version 3.0.
|
|
6
|
+
|
|
7
|
+
Author: Nirmal Parmar
|
|
8
|
+
Date: 2025-05-31
|
|
9
|
+
|
|
10
|
+
Description:
|
|
11
|
+
Regressor param base class that can be used for robust regression models.
|
|
12
|
+
- linear regression
|
|
13
|
+
- polynomial regression
|
|
14
|
+
- logistic regression
|
|
15
|
+
|
|
16
|
+
'''
|
|
17
|
+
import logging
|
|
18
|
+
from machinegnostics.magcal.util.logging import get_logger
|
|
19
|
+
import numpy as np
|
|
20
|
+
from itertools import combinations_with_replacement
|
|
21
|
+
from machinegnostics.magcal import (ModelBase, GnosticCharacteristicsSample,
|
|
22
|
+
GnosticCriterion,
|
|
23
|
+
GnosticsCharacteristics,
|
|
24
|
+
ScaleParam,
|
|
25
|
+
GnosticsWeights,
|
|
26
|
+
DataConversion)
|
|
27
|
+
from machinegnostics.magcal.util.min_max_float import np_max_float, np_min_float, np_eps_float
|
|
28
|
+
from typing import Union
|
|
29
|
+
|
|
30
|
+
class ParamBase(ModelBase):
    """
    Base class for MAGCAL - Machine Gnostic Calculations.

    Shared machinery for the robust regression/classification models:
    polynomial feature generation, weight initialization, (weighted)
    least-squares solvers, gnostic criterion evaluation and the related
    characteristics (hi, hj, fi, fj, pi, pj, ei, ej, infoi, infoj),
    data conversion, and gnostic probability helpers.
    """
|
|
38
|
+
|
|
39
|
+
def __init__(self,
             degree: int = 1,
             max_iter: int = 100,
             tol: float = 1e-8,
             mg_loss: str = 'hi',
             early_stopping: bool = True,
             verbose: bool = False,
             scale: Union[str, int, float] = 'auto',
             history: bool = True,
             data_form: str = 'a',
             gnostic_characteristics: bool = True):
    """
    Initialize the ParamBase class.

    Parameters
    ----------
    degree : int, default 1
        Polynomial degree used when expanding input features.
    max_iter : int, default 100
        Maximum number of fitting iterations.
    tol : float, default 1e-8
        Convergence tolerance for iterative fitting.
    mg_loss : str, default 'hi'
        Gnostic loss to optimize ('hi' or 'hj').
    early_stopping : bool, default True
        Whether iterative fitting may stop before ``max_iter``.
    verbose : bool, default False
        Verbosity flag forwarded to gnostic helper objects.
    scale : str | int | float, default 'auto'
        Scale parameter; 'auto' lets the model estimate it.
    history : bool, default True
        When True, keep a per-iteration record of losses, coefficients
        and gnostic characteristics in ``self._history``.
    data_form : str, default 'a'
        'a' for additive data, 'm' for multiplicative data.
    gnostic_characteristics : bool, default True
        When True, compute the full set of gnostic characteristics.
    """
    super().__init__(
        degree=degree,
        max_iter=max_iter,
        tol=tol,
        mg_loss=mg_loss,
        early_stopping=early_stopping,
        verbose=verbose,
        scale=scale,
        data_form=data_form,
        gnostic_characteristics=gnostic_characteristics,
        history=history
    )
    # Mirror constructor arguments on the instance for direct access.
    self.degree = degree
    self.max_iter = max_iter
    self.tol = tol
    self.coefficients = None   # set by fitting routines
    self.weights = None        # set by fitting routines
    self.early_stopping = early_stopping
    self.mg_loss = mg_loss
    self.verbose = verbose
    self.data_form = data_form
    self.gnostic_characteristics = gnostic_characteristics
    self.scale = scale

    # History option: seed with an empty iteration-0 record so consumers
    # can rely on the record schema being present from the start.
    if history:
        self._history = [{
            'iteration': 0,
            'h_loss': None,
            'coefficients': None,
            'rentropy': None,
            'fi': None,
            'fj': None,
            'hi': None,
            'hj': None,
            'pi': None,
            'pj': None,
            'ei': None,
            'ej': None,
            'infoi': None,
            'infoj': None,
            'weights': None,
            'scale': None,
        }]
    else:
        self._history = None

    # logger
    self.logger = get_logger(self.__class__.__name__, logging.INFO)  # Create a logger for this class
    self.logger.info("ParamBase initialized.")
|
124
|
+
def _generate_polynomial_features(self, X: np.ndarray) -> np.ndarray:
    """
    Expand multivariate input into polynomial features up to ``self.degree``.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input features.

    Returns
    -------
    X_poly : ndarray of shape (n_samples, n_output_features)
        Polynomial features, including the bias column (degree 0) and all
        interaction terms.
    """
    self.logger.info(f"Generating polynomial features of degree")
    n_samples, n_features = X.shape

    # One index-tuple per output column; degree 0 yields the empty tuple,
    # whose product over an empty axis is 1 (the bias term).
    term_indices = [
        comb
        for deg in range(self.degree + 1)
        for comb in combinations_with_replacement(range(n_features), deg)
    ]

    X_poly = np.ones((n_samples, len(term_indices)))
    for col, comb in enumerate(term_indices):
        X_poly[:, col] = np.prod(X[:, comb], axis=1)
    self.logger.info(f"Generated polynomial features shape: {X_poly.shape}")
    return X_poly
|
|
149
|
+
|
|
150
|
+
def _weight_init(self, d: np.ndarray, like: str = 'one') -> np.ndarray:
    """
    Build an initial weight vector matching the length of the input data.

    Parameters
    ----------
    d : np.ndarray
        Input data; only its length is used.
    like : str, optional
        Initialization scheme, 'one' (default) or 'zero'.

    Returns
    -------
    np.ndarray
        Initialized weight vector of ``len(d)`` entries.

    Raises
    ------
    ValueError
        If ``like`` is not a recognized scheme.
    """
    self.logger.info(f"Initializing weights with method: {like}")
    size = len(d)
    if like == 'one':
        return np.ones(size)
    if like == 'zero':
        return np.zeros(size)
    self.logger.error("Invalid weight initialization method.")
    raise ValueError("like must be 'one', 'zero', or 'random'.")
|
|
176
|
+
|
|
177
|
+
def _weighted_least_squares(self, X_poly, y, weights):
    """
    Solve weighted least squares via the normal equations.

    Parameters
    ----------
    X_poly : array-like of shape (n_samples, n_terms)
        Polynomial features matrix.
    y : array-like of shape (n_samples,)
        Target values.
    weights : array-like of shape (n_samples,)
        Sample weights; values below machine epsilon are clipped up.

    Returns
    -------
    array-like
        Estimated coefficients.
    """
    self.logger.info("Solving weighted least squares.")
    eps = np_eps_float()  # Small value to avoid division by zero
    weights = np.clip(weights, eps, None)
    # Row-scale X_poly.T instead of forming the dense n x n diag(weights):
    # same product as X_poly.T @ diag(w), but O(n*p) memory/work, not O(n^2).
    XtW = X_poly.T * weights
    # Tiny ridge term keeps the normal matrix invertible.
    XtWX = XtW @ X_poly + eps * np.eye(X_poly.shape[1])
    XtWy = XtW @ y

    try:
        return np.linalg.solve(XtWX, XtWy)
    except np.linalg.LinAlgError:
        # Fallback to pseudo-inverse for ill-conditioned matrices
        return np.linalg.pinv(XtWX) @ XtWy
|
|
209
|
+
|
|
210
|
+
def _wighted_least_squares_log_reg(self, p, y0, X_poly:np.ndarray, y:np.ndarray, W:np.ndarray, n_features) -> np.ndarray:
    """
    Solve one weighted least-squares (IRLS-style) update for logistic regression.

    Parameters
    ----------
    p : np.ndarray
        Current predicted probabilities.
    y0 : np.ndarray
        Current working response base (linear predictor term).
    X_poly : np.ndarray
        Polynomial features matrix.
    y : np.ndarray
        Binary target values.
    W : np.ndarray
        Weight matrix applied to the normal equations.
    n_features : int
        Number of columns of ``X_poly`` (size of the ridge identity term).

    Returns
    -------
    np.ndarray
        Updated coefficients (also stored on ``self.coefficients``).
    """
    self.logger.info("Solving weighted least squares for logistic regression.")
    # Build the normal-equation terms outside the try-block so the
    # pseudo-inverse fallback can never hit an unbound name.
    XtW = X_poly.T @ W
    # Tiny ridge term guards against a singular normal matrix.
    XtWX = XtW @ X_poly + np_min_float() * np.eye(n_features)
    # Working response; 1e-8 guards p*(1-p) ~ 0 at saturated probabilities.
    XtWy = XtW @ (y0 + (y - p) / (p * (1 - p) + 1e-8))
    try:
        self.coefficients = np.linalg.solve(XtWX, XtWy)
    except np.linalg.LinAlgError:
        # Fallback to pseudo-inverse for singular/ill-conditioned systems.
        self.coefficients = np.linalg.pinv(XtWX) @ XtWy
    return self.coefficients
|
|
237
|
+
|
|
238
|
+
def _data_conversion(self, z: np.ndarray) -> np.ndarray:
    """
    Convert raw data according to ``self.data_form``.

    'a' applies the additive conversion, 'm' the multiplicative one;
    any other value raises ``ValueError``.
    """
    self.logger.info(f"Converting data using form: {self.data_form}")
    converter = DataConversion()
    if self.data_form == 'a':
        return converter._convert_az(z)
    if self.data_form == 'm':
        return converter._convert_mz(z)
    raise ValueError("data_form must be 'a' for additive or 'm' for multiplicative.")
|
|
247
|
+
|
|
248
|
+
def _gnostic_criterion(self, z:np.ndarray, z0:np.ndarray, s) -> tuple: # NOTE can be improved by connecting with GDF
    """Compute the gnostic criterion.

    Parameters
    ----------
    z : np.ndarray
        Input data.
    z0 : np.ndarray
        Reference data.
    s : int or np.ndarray
        Scale parameter for the gnostic criterion.

    Returns
    -------
    tuple
        ``(H, mean(re_norm), hi, hj, fi, fj, pi, pj, ei, ej, infoi, infoj)``.
        Secondary characteristics are ``None`` unless
        ``self.gnostic_characteristics`` is enabled.

    Raises
    ------
    ValueError
        If ``self.mg_loss`` is neither 'hi' nor 'hj' (previously this fell
        through and returned ``None``, producing an opaque unpacking error
        in callers).

    NOTE:
    normalized loss and rentropy are returned.
    """
    self.logger.info("Computing gnostic criterion.")
    if self.mg_loss not in ('hi', 'hj'):
        raise ValueError("mg_loss must be 'hi' or 'hj'.")

    q, q1 = self._compute_q(z, z0, s)
    self.logger.info(f"Computing gnostic criterion for '{self.mg_loss}' loss.")

    # Characteristics common to both losses.
    fi = self.gc._fi(q, q1)
    fj = self.gc._fj(q, q1)
    re = self.gc._rentropy(fi, fj)

    # Default values for optional outputs.
    hi = hj = pi = pj = ei = ej = infoi = infoj = None

    # Primary irrelevance drives the loss H.
    if self.mg_loss == 'hi':
        hi = self.gc._hi(q, q1)
        h_primary = hi
    else:
        hj = self.gc._hj(q, q1)
        h_primary = hj

    if self.gnostic_characteristics:
        # Fill in the full characteristic set.
        if hi is None:
            hi = self.gc._hi(q, q1)
        if hj is None:
            hj = self.gc._hj(q, q1)
        pi = self.gc._idistfun(hi)
        pj = self.gc._jdistfun(hj)
        infoi = self.gc._info_i(pi)
        infoj = self.gc._info_j(pj)
        ei = self.gc._ientropy(fi)
        ej = self.gc._jentropy(fj)
    else:
        # Only the primary irrelevance is reported.
        if self.mg_loss == 'hi':
            hj = None
        else:
            hi = None

    # Min-max normalize rentropy (left unchanged when constant).
    re_norm = (re - np.min(re)) / (np.max(re) - np.min(re)) if np.max(re) != np.min(re) else re
    H = np.sum(h_primary ** 2)
    return H, np.mean(re_norm), hi, hj, fi, fj, pi, pj, ei, ej, infoi, infoj
|
|
314
|
+
|
|
315
|
+
def _compute_q(self, z, z0, s: int = 1):
    """
    For interval use only.

    Compute the gnostic q and q1 characteristics of the ratio ``z / z0``,
    caching the ``GnosticsCharacteristics`` instance on ``self.gc``.
    """
    self.logger.info("Computing q and q1 for gnostic criterion.")
    tiny = np_eps_float()  # Small value to avoid division by zero
    # Replace (near-)zero reference values before dividing.
    safe_ref = np.where(np.abs(z0) < tiny, tiny, z0)
    ratio = z / safe_ref
    self.gc = GnosticsCharacteristics(ratio, verbose=self.verbose)
    return self.gc._get_q_q1(S=s)
|
|
326
|
+
|
|
327
|
+
def _normalize_weights(self, weights):
    """
    Rescale weights so they sum to 1.

    Parameters
    ----------
    weights : np.ndarray
        Weights to be normalized.

    Returns
    -------
    np.ndarray
        Normalized weights; a uniform distribution when the total is zero.
    """
    self.logger.info("Normalizing weights.")
    total = np.sum(weights)
    if total == 0:
        # Degenerate input: fall back to uniform weights.
        return np.ones_like(weights) / len(weights)
    return weights / total
|
|
346
|
+
|
|
347
|
+
def _fit(self, X, y):
    """
    Fit the model to the data.

    Placeholder hook: the base implementation does nothing; concrete
    subclasses override this with their fitting routine.

    Parameters
    ----------
    X : array-like
        Input features.
    y : array-like
        Target values.
    """
    # Intentionally a no-op; subclasses supply the fitting logic.
    pass
|
|
360
|
+
|
|
361
|
+
def _predict(self, X):
    """
    Predict using the fitted model.

    Placeholder hook: the base implementation does nothing; concrete
    subclasses override this with their prediction routine.

    Parameters
    ----------
    X : array-like
        Input features for prediction.

    Returns
    -------
    y_pred : array-like
        Predicted values (``None`` in this base implementation).
    """
    # Intentionally a no-op; subclasses supply the prediction logic.
    pass
|
|
377
|
+
|
|
378
|
+
def _score(self, X, y):
    """
    Compute the score of the model.

    Placeholder hook: the base implementation does nothing; concrete
    subclasses override this with their scoring routine.

    Parameters
    ----------
    X : array-like
        Input features.
    y : array-like
        True values for X.

    Returns
    -------
    score : float
        Score of the model (``None`` in this base implementation).
    """
    # Intentionally a no-op; subclasses supply the scoring logic.
    pass
|
|
396
|
+
|
|
397
|
+
def _sigmoid(self, z):
|
|
398
|
+
"""
|
|
399
|
+
Compute the sigmoid function for logistic regression.
|
|
400
|
+
Parameters
|
|
401
|
+
----------
|
|
402
|
+
z : np.ndarray
|
|
403
|
+
Input array for which to compute the sigmoid function.
|
|
404
|
+
Returns
|
|
405
|
+
-------
|
|
406
|
+
np.ndarray
|
|
407
|
+
Sigmoid of the input array.
|
|
408
|
+
"""
|
|
409
|
+
return 1 / (1 + np.exp(-z))
|
|
410
|
+
|
|
411
|
+
def _gnostic_prob(self, z) -> tuple:
    """
    Compute the gnostic probabilities and characteristics.

    The data are first converted per ``self.data_form``; a first pass at
    the default scale produces ``fi``, which is used to estimate the scale
    parameter when ``self.scale == 'auto'``; the characteristics are then
    recomputed at the chosen scale.

    Parameters
    ----------
    z : np.ndarray
        Input data for which to compute gnostic probabilities.

    Returns
    -------
    tuple
        ``(p, info, re_norm)`` — gnostic probabilities, information, and
        min-max normalized rentropy.
    """
    self.logger.info("Computing gnostic probabilities and characteristics.")
    zz = self._data_conversion(z)
    gc = GnosticsCharacteristics(zz, verbose=self.verbose)

    # First pass at default scale: fi feeds the scale estimation below.
    q, q1 = gc._get_q_q1()
    h = gc._hi(q, q1)
    fi = gc._fi(q, q1)

    # Scale handling: estimate from mean(fi) or use the user-supplied value.
    if self.scale == 'auto':
        scale = ScaleParam(verbose=self.verbose)
        s = scale._gscale_loc(np.mean(fi)) # NOTE this refer to ELDF probability. Can be improved by connecting with GDF and its PDF
    else:
        s = self.scale

    # Second pass at the chosen scale.
    q, q1 = gc._get_q_q1(S=s)
    h = gc._hi(q, q1)
    fi = gc._fi(q, q1)
    fj = gc._fj(q, q1)
    p = gc._idistfun(h)
    info = gc._info_i(p)
    re = gc._rentropy(fi, fj)
    # Min-max normalized rentropy (left unchanged when constant).
    re_norm = (re - np.min(re)) / (np.max(re) - np.min(re)) if np.max(re) != np.min(re) else re
    return p, info, re_norm
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
'''
|
|
2
|
+
ManGo - Machine Gnostics Library
|
|
3
|
+
Copyright (C) 2025 ManGo Team
|
|
4
|
+
|
|
5
|
+
Author: Nirmal Parmar
|
|
6
|
+
'''
|
|
7
|
+
|
|
8
|
+
import numpy as np
|
|
9
|
+
from machinegnostics.magcal import GnosticsCharacteristics, ScaleParam
|
|
10
|
+
import logging
|
|
11
|
+
from machinegnostics.magcal.util.logging import get_logger
|
|
12
|
+
|
|
13
|
+
class GnosticsWeights:
    '''
    Calculates Machine Gnostics weights as per different requirements.

    For internal use only.
    '''
    def __init__(self, verbose: bool = False):
        # INFO-level logging only when verbose; otherwise keep the logger quiet.
        self.logger = get_logger('GnosticsWeights', level=logging.WARNING if not verbose else logging.INFO)
        self.logger.info("GnosticsWeights initialized.")

    def _get_gnostic_weights(self, z):
        """Compute gnostic weights.

        The sample is normalized by its median, a scale is estimated from
        the mean ``fi`` characteristic, and the weights are derived as
        ``(2 / (q + q1))**2`` at that scale.
        """
        self.logger.info("Computing gnostic weights...")
        z0 = np.median(z)
        # Guard against a zero median before dividing (mirrors the safe
        # division used in ParamBase._compute_q).
        eps = np.finfo(float).eps
        if abs(z0) < eps:
            z0 = eps
        zz = z / z0
        gc = GnosticsCharacteristics(R=zz)
        q, q1 = gc._get_q_q1(S=1)
        fi = gc._fi(q, q1)
        scale = ScaleParam()
        s = scale._gscale_loc(np.mean(fi))
        q, q1 = gc._get_q_q1(S=s)
        wt = (2 / (q + q1))**2
        self.logger.info("Gnostic weights computation complete.")
        return wt
|