machinegnostics-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. __init__.py +0 -0
  2. machinegnostics/__init__.py +24 -0
  3. machinegnostics/magcal/__init__.py +37 -0
  4. machinegnostics/magcal/characteristics.py +460 -0
  5. machinegnostics/magcal/criteria_eval.py +268 -0
  6. machinegnostics/magcal/criterion.py +140 -0
  7. machinegnostics/magcal/data_conversion.py +381 -0
  8. machinegnostics/magcal/gcor.py +64 -0
  9. machinegnostics/magcal/gdf/__init__.py +2 -0
  10. machinegnostics/magcal/gdf/base_df.py +39 -0
  11. machinegnostics/magcal/gdf/base_distfunc.py +1202 -0
  12. machinegnostics/magcal/gdf/base_egdf.py +823 -0
  13. machinegnostics/magcal/gdf/base_eldf.py +830 -0
  14. machinegnostics/magcal/gdf/base_qgdf.py +1234 -0
  15. machinegnostics/magcal/gdf/base_qldf.py +1019 -0
  16. machinegnostics/magcal/gdf/cluster_analysis.py +456 -0
  17. machinegnostics/magcal/gdf/data_cluster.py +975 -0
  18. machinegnostics/magcal/gdf/data_intervals.py +853 -0
  19. machinegnostics/magcal/gdf/data_membership.py +536 -0
  20. machinegnostics/magcal/gdf/der_egdf.py +243 -0
  21. machinegnostics/magcal/gdf/distfunc_engine.py +841 -0
  22. machinegnostics/magcal/gdf/egdf.py +324 -0
  23. machinegnostics/magcal/gdf/eldf.py +297 -0
  24. machinegnostics/magcal/gdf/eldf_intv.py +609 -0
  25. machinegnostics/magcal/gdf/eldf_ma.py +627 -0
  26. machinegnostics/magcal/gdf/homogeneity.py +1218 -0
  27. machinegnostics/magcal/gdf/intv_engine.py +1523 -0
  28. machinegnostics/magcal/gdf/marginal_intv_analysis.py +558 -0
  29. machinegnostics/magcal/gdf/qgdf.py +289 -0
  30. machinegnostics/magcal/gdf/qldf.py +296 -0
  31. machinegnostics/magcal/gdf/scedasticity.py +197 -0
  32. machinegnostics/magcal/gdf/wedf.py +181 -0
  33. machinegnostics/magcal/gdf/z0_estimator.py +1047 -0
  34. machinegnostics/magcal/layer_base.py +42 -0
  35. machinegnostics/magcal/layer_history_base.py +74 -0
  36. machinegnostics/magcal/layer_io_process_base.py +238 -0
  37. machinegnostics/magcal/layer_param_base.py +448 -0
  38. machinegnostics/magcal/mg_weights.py +36 -0
  39. machinegnostics/magcal/sample_characteristics.py +532 -0
  40. machinegnostics/magcal/scale_optimization.py +185 -0
  41. machinegnostics/magcal/scale_param.py +313 -0
  42. machinegnostics/magcal/util/__init__.py +0 -0
  43. machinegnostics/magcal/util/dis_docstring.py +18 -0
  44. machinegnostics/magcal/util/logging.py +24 -0
  45. machinegnostics/magcal/util/min_max_float.py +34 -0
  46. machinegnostics/magnet/__init__.py +0 -0
  47. machinegnostics/metrics/__init__.py +28 -0
  48. machinegnostics/metrics/accu.py +61 -0
  49. machinegnostics/metrics/accuracy.py +67 -0
  50. machinegnostics/metrics/auto_correlation.py +183 -0
  51. machinegnostics/metrics/auto_covariance.py +204 -0
  52. machinegnostics/metrics/cls_report.py +130 -0
  53. machinegnostics/metrics/conf_matrix.py +93 -0
  54. machinegnostics/metrics/correlation.py +178 -0
  55. machinegnostics/metrics/cross_variance.py +167 -0
  56. machinegnostics/metrics/divi.py +82 -0
  57. machinegnostics/metrics/evalmet.py +109 -0
  58. machinegnostics/metrics/f1_score.py +128 -0
  59. machinegnostics/metrics/gmmfe.py +108 -0
  60. machinegnostics/metrics/hc.py +141 -0
  61. machinegnostics/metrics/mae.py +72 -0
  62. machinegnostics/metrics/mean.py +117 -0
  63. machinegnostics/metrics/median.py +122 -0
  64. machinegnostics/metrics/mg_r2.py +167 -0
  65. machinegnostics/metrics/mse.py +78 -0
  66. machinegnostics/metrics/precision.py +119 -0
  67. machinegnostics/metrics/r2.py +122 -0
  68. machinegnostics/metrics/recall.py +108 -0
  69. machinegnostics/metrics/rmse.py +77 -0
  70. machinegnostics/metrics/robr2.py +119 -0
  71. machinegnostics/metrics/std.py +144 -0
  72. machinegnostics/metrics/variance.py +101 -0
  73. machinegnostics/models/__init__.py +2 -0
  74. machinegnostics/models/classification/__init__.py +1 -0
  75. machinegnostics/models/classification/layer_history_log_reg.py +121 -0
  76. machinegnostics/models/classification/layer_io_process_log_reg.py +98 -0
  77. machinegnostics/models/classification/layer_mlflow_log_reg.py +107 -0
  78. machinegnostics/models/classification/layer_param_log_reg.py +275 -0
  79. machinegnostics/models/classification/mg_log_reg.py +273 -0
  80. machinegnostics/models/cross_validation.py +118 -0
  81. machinegnostics/models/data_split.py +106 -0
  82. machinegnostics/models/regression/__init__.py +2 -0
  83. machinegnostics/models/regression/layer_histroy_rob_reg.py +139 -0
  84. machinegnostics/models/regression/layer_io_process_rob_rig.py +88 -0
  85. machinegnostics/models/regression/layer_mlflow_rob_reg.py +134 -0
  86. machinegnostics/models/regression/layer_param_rob_reg.py +212 -0
  87. machinegnostics/models/regression/mg_lin_reg.py +253 -0
  88. machinegnostics/models/regression/mg_poly_reg.py +258 -0
  89. machinegnostics-0.0.1.dist-info/METADATA +246 -0
  90. machinegnostics-0.0.1.dist-info/RECORD +93 -0
  91. machinegnostics-0.0.1.dist-info/WHEEL +5 -0
  92. machinegnostics-0.0.1.dist-info/licenses/LICENSE +674 -0
  93. machinegnostics-0.0.1.dist-info/top_level.txt +2 -0
@@ -0,0 +1,185 @@
+ '''
+ ManGo - Machine Gnostics Library
+ Copyright (C) 2025 ManGo Team
+
+ Author: Nirmal Parmar
+ '''
+
+ import numpy as np
+ from scipy.optimize import minimize
+ from typing import Union
+ from machinegnostics.magcal.characteristics import GnosticsCharacteristics
+
+ class ScaleOptimization(GnosticsCharacteristics):
+     """
+     A class to perform scale optimization on a given matrix. This class is for internal use.
+
+     Parameters
+     ----------
+     X : np.ndarray
+         The input matrix for the scale optimization.
+     y : np.ndarray
+         The target values for the optimization.
+     c : str, in {'i', 'j'}
+         The type of scale optimization to perform: 'i' for estimation and 'j' for quantification.
+         Equivalently, c2 ∈ {1, -1}, and h_c is the irrelevance: h_j (quantifying, c2 = 1) or
+         h_i (estimating, c2 = -1).
+     eps : float, optional
+         A small value to avoid division by zero (default is np.finfo(float).eps).
+
+     Attributes
+     ----------
+     q : np.ndarray
+         The input matrix.
+     q1 : np.ndarray
+         The inverse of the input matrix, with protection against division by zero.
+     """
+
+     def __init__(self,
+                  X: np.ndarray,
+                  y: np.ndarray,
+                  c: str,
+                  eps: float = np.finfo(float).eps):
+         """
+         Initializes the ScaleOptimization class.
+
+         Parameters
+         ----------
+         X : np.ndarray
+             The input matrix for the scale optimization.
+         y : np.ndarray
+             The target values for the optimization.
+         c : str, in {'i', 'j'}
+             The type of scale optimization to perform: 'i' for estimation and 'j' for quantification.
+         eps : float, optional
+             A small value to avoid division by zero (default is np.finfo(float).eps).
+         """
+         # Store the inputs; self.c and self.eps are used by the methods below.
+         self.X = X
+         self.y = y
+         self.c = c
+         self.eps = eps
+
+     def _F(self, C, x) -> np.ndarray:
+         """
+         Computes the predicted values based on the coefficients C and input features x.
+
+         Parameters
+         ----------
+         C : np.ndarray
+             Coefficients for the regression model.
+         x : np.ndarray
+             Observed input features.
+
+         Returns
+         -------
+         np.ndarray
+             Predicted values.
+         """
+         return np.dot(x, C)
+
+     def _recompute_q(self, Z, z0, S) -> tuple:
+         """
+         Computes the q values for optimization.
+
+         Parameters
+         ----------
+         Z : np.ndarray
+             Observed target values.
+         z0 : np.ndarray
+             Predicted target values.
+         S : float
+             Scale parameter.
+
+         Returns
+         -------
+         tuple
+             Computed q values and their protected inverses.
+         """
+         q = np.abs(Z / z0) / (S + self.eps)  # eps guards against division by zero
+         q1 = 1 / q
+         q1 = np.where(q1 != 0, q1, np.finfo(float).max)
+         return q, q1
+
+     def _recompute_h(self, q, q1, c: str) -> np.ndarray:
+         """
+         Computes the h values for optimization.
+
+         Parameters
+         ----------
+         q : np.ndarray
+             Computed q values.
+         q1 : np.ndarray
+             Protected inverses of the q values.
+         c : str, in {'i', 'j'}
+             The type of scale optimization to perform: 'i' for estimation and 'j' for quantification.
+
+         Returns
+         -------
+         np.ndarray
+             Computed h values.
+         """
+         if c == 'i':
+             return self._hi(q, q1)  # estimating irrelevance h_i
+         elif c == 'j':
+             return self._hj(q, q1)  # quantifying irrelevance h_j, matching the branch above
+         else:
+             raise ValueError("Invalid value for c. Must be 'i' or 'j'.")
+
+     def _criterion(self, C, S, x, Z):
+         """
+         Computes the criterion function for optimization.
+
+         Parameters
+         ----------
+         C : np.ndarray
+             Coefficients for the regression model.
+         S : float
+             Scale parameter.
+         x : np.ndarray
+             Input features.
+         Z : np.ndarray
+             Target values.
+
+         Returns
+         -------
+         float
+             Computed criterion value.
+         """
+         z0 = self._F(C, x)
+         q, q1 = self._recompute_q(Z, z0, S)
+         h = self._recompute_h(q, q1, self.c)
+         # Criterion value: sum of squared irrelevances (a simple quadratic loss)
+         D_hi = h ** 2
+         return np.sum(D_hi)
+
+     def _optimize(self, x, Z):
+         """
+         Optimizes the scale parameter S and coefficients C.
+
+         Parameters
+         ----------
+         x : np.ndarray
+             Input features.
+         Z : np.ndarray
+             Target values.
+
+         Returns
+         -------
+         tuple
+             Optimized coefficients C and scale parameter S.
+         """
+         # Initial guess for C and S
+         C0 = np.ones(x.shape[1])
+         S0 = 1.0
+
+         # Define the objective function to minimize
+         def objective(params):
+             C = params[:-1]
+             S = np.abs(params[-1])
+             return self._criterion(C, S, x, Z)
+
+         # Initial guess for parameters
+         initial_params = np.concatenate((C0, [S0]))
+
+         # Perform optimization
+         result = minimize(objective, initial_params, method='BFGS')
+
+         # Extract optimized coefficients and scale parameter
+         optimized_params = result.x
+         C_opt = optimized_params[:-1]
+         S_opt = np.abs(optimized_params[-1])  # the objective only ever saw |S|
+
+         return C_opt, S_opt
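
For orientation, a minimal sketch of how this internal class could be exercised on synthetic data. The data values here are illustrative assumptions, and the sketch presumes the inherited `GnosticsCharacteristics` irrelevances (`_hi`/`_hj`) need no further initialization; it is not a documented entry point of the package:

```python
import numpy as np
from machinegnostics.magcal.scale_optimization import ScaleOptimization

# Hypothetical toy problem: recover the coefficients of a 2-feature linear model.
rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.uniform(1, 2, 50)])  # design matrix with intercept
true_C = np.array([2.0, 0.5])
Z = X @ true_C * np.exp(rng.normal(0, 0.01, 50))           # multiplicative noise, Z > 0

opt = ScaleOptimization(X=X, y=Z, c='i')   # 'i': estimating variant
C_opt, S_opt = opt._optimize(X, Z)         # joint fit of coefficients and scale
print(C_opt, S_opt)                        # coefficients near true_C, plus a fitted S
```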
@@ -0,0 +1,313 @@
+ '''
+ ManGo - Machine Gnostics Library
+ Copyright (C) 2025 ManGo Team
+
+ Author: Nirmal Parmar
+
+ ideas:
+ - LocS
+ - GlobS
+ - VarS
+ '''
+ import numpy as np
+ from machinegnostics.magcal import GnosticsCharacteristics
+ from scipy.optimize import minimize_scalar
+ import logging
+ from machinegnostics.magcal.util.logging import get_logger
+
+ class ScaleParam():
+     """
+     A Machine Gnostics class to compute and optimize scale parameters for different gnostic
+     distribution functions.
+
+     This class provides methods to calculate scale parameters used in gnostic analysis, including
+     local scale parameters and variable scale parameters for kernel-based estimations.
+
+     The scale parameter affects the shape and characteristics of gnostic distributions, controlling
+     how the distributions respond to variations in the input data.
+
+     Notes
+     -----
+     The scale parameter is a critical component in Machine Gnostics that influences the behavior
+     of distribution functions, particularly their sensitivity to outliers and their overall shape.
+
+     The class implements multiple scale parameter calculation strategies:
+     - Local scale: Optimizes scale for individual data points
+     - Variable scale: Creates a vector of scale parameters for kernel-based estimation
+     """
+
+     def __init__(self, verbose: bool = False):
+         self.logger = get_logger('ScaleParam', level=logging.INFO if verbose else logging.WARNING)
+         self.logger.info("ScaleParam initialized.")
+
+     def _gscale_loc(self, F):
+         """
+         Calculate the local scale parameter for a given fidelity parameter F.
+
+         This method uses the Newton-Raphson method to solve for the scale parameter that satisfies
+         the relationship between F and the scale. It supports both scalar and array-like inputs.
+
+         Parameters
+         ----------
+         F : float or array-like
+             Input parameter (e.g., fidelity of data) at Scale = 1.
+
+         Returns
+         -------
+         float or ndarray
+             The calculated local scale parameter(s), with the same shape as the input F.
+
+         Notes
+         -----
+         The Newton-Raphson method is used with initial values based on the magnitude of F:
+         - For F < (2/π) * √2/3: initial S = π
+         - For F < 2/π: initial S = 3π/4
+         - For F < (2/π) * √2: initial S = π/2
+         - Otherwise: initial S = π/4
+
+         The method iteratively refines this estimate until convergence.
+         """
+         self.logger.info("Calculating local scale parameter...")
+         m2pi = 2 / np.pi
+         sqrt2 = np.sqrt(2)
+         epsilon = 1e-5
+
+         def _single_scale(f):
+             # Pick a starting point for Newton-Raphson based on the magnitude of f
+             if f < m2pi * sqrt2 / 3:
+                 S = np.pi
+             elif f < m2pi:
+                 S = 3 * np.pi / 4
+             elif f < m2pi * sqrt2:
+                 S = np.pi / 2
+             else:
+                 S = np.pi / 4
+             # Newton-Raphson iteration on g(S) = sin(S) - S*f, with g'(S) = cos(S) - f
+             for _ in range(100):
+                 delta = (np.sin(S) - S * f) / (np.cos(S) - f)
+                 S -= delta
+                 if abs(delta) < epsilon:
+                     break
+             return S * m2pi
+
+         # Support scalar and array-like inputs alike
+         if np.isscalar(F):
+             result = _single_scale(F)
+         else:
+             F = np.asarray(F)
+             result = np.array([_single_scale(f) for f in F])
+         self.logger.info("Local scale parameter calculation complete.")
+         return result
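
Read this way, `_gscale_loc` inverts F = sin(S)/S by Newton-Raphson and returns (2/π)·S. A quick round-trip check of that reading; the test below is a hypothetical sketch against an internal method, not part of the package:

```python
import numpy as np
from machinegnostics.magcal.scale_param import ScaleParam

# Pick a known root S_true, derive the fidelity it implies, and invert it back.
S_true = 1.3                    # any value in (0, pi)
F = np.sin(S_true) / S_true     # fidelity that _gscale_loc should invert
sp = ScaleParam()
S_loc = sp._gscale_loc(F)       # expected: (2/pi) * S_true, up to the 1e-5 tolerance
assert abs(S_loc - (2 / np.pi) * S_true) < 1e-3
```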
+
+
+     # def var_s(self, Z, W=None, S=1):
+     #     """
+     #     Calculates vector of scale parameters for each kernel.
+
+     #     Parameters:
+     #         Z (array-like): Data vector
+     #         W (array-like, optional): Weight vector
+     #         S (float, optional): Scalar scale factor (default is 1)
+
+     #     Returns:
+     #         numpy.ndarray: Scale vector (same length as Z)
+     #     """
+     #     Z = np.asarray(Z).reshape(-1, 1)
+
+     #     if W is None:
+     #         W = np.ones_like(Z) / len(Z)
+     #     else:
+     #         W = np.asarray(W).reshape(-1, 1)
+     #         if len(Z) != len(W):
+     #             raise ValueError("Z and W must be of the same length")
+     #     W = W / np.sum(W)
+
+     #     Sz = np.zeros_like(Z, dtype=float)
+
+     #     for k in range(len(W)):
+     #         V = Z / Z[k]
+     #         V = V ** (2/S) + 1.0 / (V ** (2/S))
+     #         Sz[k] = self._gscale_loc(np.sum(2.0 / V * W))
+
+     #     Sx = S * Sz / np.mean(Sz)
+     #     return Sx
+
+     def var_s(self, Z, W=None, S=1):
+         """
+         Calculate a vector of scale parameters for each kernel in the distribution.
+
+         This method computes individualized scale parameters for each data point, allowing
+         for adaptive scaling in gnostic estimations. It handles numerical edge cases to
+         ensure stability.
+
+         Parameters
+         ----------
+         Z : array-like
+             Data vector containing the values for which to calculate scale parameters.
+         W : array-like, optional
+             Weight vector for each data point. If not provided, uniform weights are used.
+         S : float, optional
+             Base scalar scale factor, default is 1.
+
+         Returns
+         -------
+         ndarray
+             Vector of scale parameters, one for each element in Z.
+
+         Raises
+         ------
+         ValueError
+             If Z and W are provided but have different lengths.
+
+         Notes
+         -----
+         The method calculates relative relationships between data points and applies
+         the local scale parameter calculation for each point. For each data point k,
+         it computes a ratio V of all data points relative to Z[k], then calculates
+         a transformation of this ratio to determine the local scale parameter.
+
+         The implementation includes safeguards against division by zero and handles
+         edge cases to ensure numerical stability. In case of invalid calculations,
+         it falls back to the default scale parameter.
+         """
+         self.logger.info("Calculating local scale parameters...")
+         Z = np.asarray(Z).reshape(-1, 1)
+
+         if W is None:
+             W = np.ones_like(Z) / len(Z)
+         else:
+             W = np.asarray(W).reshape(-1, 1)
+             if len(Z) != len(W):
+                 raise ValueError("Z and W must be of the same length")
+         W = W / np.sum(W)
+
+         Sz = np.zeros_like(Z, dtype=float)
+
+         # Small value to prevent division by zero
+         eps = np.finfo(float).eps * 100
+
+         for k in range(len(W)):
+             # Skip calculation if Z[k] is too close to zero
+             if abs(Z[k]) < eps:
+                 Sz[k] = S  # Use default S value
+                 continue
+
+             # Safe division with epsilon to prevent division by zero
+             V = Z / (Z[k] + (Z[k] == 0) * eps)
+             V = V ** 2 + 1.0 / (V ** 2 + eps)
+
+             # Calculate sum and ensure it's valid
+             sum_val = np.sum(2.0 / V * W)
+             if np.isnan(sum_val) or np.isinf(sum_val):
+                 Sz[k] = S  # Use default S value
+             else:
+                 Sz[k] = self._gscale_loc(sum_val)
+
+         # Check for any remaining NaN values and replace them
+         Sz[np.isnan(Sz)] = S
+         self.logger.info("Local scale parameters calculation complete.")
+         return Sz
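
A short usage sketch of `var_s` on a small positive sample; the data values and the module import path are illustrative assumptions:

```python
import numpy as np
from machinegnostics.magcal.scale_param import ScaleParam

sp = ScaleParam(verbose=True)
Z = np.array([1.0, 1.2, 0.9, 1.1, 5.0])  # last point is a deliberate outlier
Sz = sp.var_s(Z, S=1)                     # one scale parameter per data point
print(Sz.ravel())                         # Sz is a (5, 1) column vector before ravel
```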
+
+     def estimate_global_scale_egdf(self, Fk, Ek, tolerance=0.1):
+         """
+         Estimate the optimal global scale parameter: the minimum S at which fidelity is maximized.
+
+         Parameters
+         ----------
+         Fk : array-like
+             Fidelity values for the data points.
+         Ek : array-like
+             Weighted empirical distribution function values for the data points.
+         tolerance : float, optional
+             Convergence tolerance for fidelity change (default is 0.1).
+
+         Returns
+         -------
+         float
+             The optimal global scale parameter (minimum S where fidelity is maximized).
+
+         Notes
+         -----
+         This function finds the minimum scale parameter S where fidelity is maximized,
+         with early stopping when the fidelity change is less than the specified tolerance.
+         """
+         self.logger.info("Estimating global scale parameter...")
+         Fk = np.asarray(Fk)
+         Ek = np.asarray(Ek)
+
+         if len(Fk) != len(Ek):
+             raise ValueError("Fk and Ek must have the same length.")
+
+         def compute_fidelity(S):
+             """Compute average fidelity for a given S."""
+             # Add small epsilon to prevent division by zero
+             eps = np.finfo(float).eps
+             term1 = (Fk / (Ek + eps)) ** (2 / S)
+             term2 = (Ek / (Fk + eps)) ** (2 / S)
+             fidelities = 2 / (term1 + term2)
+             return np.mean(fidelities)
+
+         # Search through S values from minimum to maximum
+         s_values = np.linspace(0.05, 100, 1000)  # fine grid for an accurate search
+
+         max_fidelity = -np.inf
+         optimal_s = None
+         previous_fidelity = None
+
+         for s in s_values:
+             current_fidelity = compute_fidelity(s)
+
+             # Check convergence condition first
+             if previous_fidelity is not None:
+                 fidelity_change = abs(current_fidelity - previous_fidelity)
+                 if fidelity_change < tolerance:
+                     if optimal_s is not None:
+                         # Converged - return the minimum S at which fidelity was maximized
+                         final_fidelity = compute_fidelity(optimal_s)
+                         self.logger.info(f"Converged at S={optimal_s:.4f} with fidelity={final_fidelity:.4f}")
+                         return optimal_s
+                     else:
+                         # Converged on the first comparison; use the current S
+                         self.logger.info(f"Converged at S={s:.4f} with fidelity={current_fidelity:.4f}")
+                         return s
+
+             # Update maximum fidelity and optimal S (prefer the minimum S for the same fidelity)
+             if current_fidelity > max_fidelity:
+                 max_fidelity = current_fidelity
+                 optimal_s = s
+
+             previous_fidelity = current_fidelity
+
+         # If no convergence was found, return the S with maximum fidelity
+         if optimal_s is not None:
+             final_fidelity = compute_fidelity(optimal_s)
+             self.logger.warning(f"No convergence found. Returning S={optimal_s:.4f} with max fidelity={final_fidelity:.4f}")
+             self.logger.info("Global scale parameter estimation complete.")
+             return optimal_s
+         else:
+             self.logger.error("Failed to find optimal scale parameter.")
+             raise RuntimeError("Failed to find optimal scale parameter.")
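
A minimal sketch of calling the estimator with synthetic fidelity/EDF pairs; the `Fk`/`Ek` values are made-up placeholders rather than the output of a real EGDF fit:

```python
import numpy as np
from machinegnostics.magcal.scale_param import ScaleParam

sp = ScaleParam(verbose=True)
Ek = np.linspace(0.1, 0.9, 9)                           # stand-in weighted EDF values
Fk = Ek + np.random.default_rng(1).normal(0, 0.02, 9)   # fidelities close to Ek
S_opt = sp.estimate_global_scale_egdf(Fk, Ek, tolerance=0.1)
print(S_opt)  # minimum S at which the average fidelity levels off
```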
+
+     # def _gscale_loc(self, F):
+     #     '''
+     #     For internal use only.
+
+     #     Calculates the local scale parameter for a given calculated F at Scale = 1.
+     #     S will be in the same shape as F.
+     #     Solves for the scale parameter using Newton-Raphson.
+     #     '''
+     #     m2pi = 2 / np.pi
+     #     sqrt2 = np.sqrt(2)
+
+     #     if F < m2pi * sqrt2 / 3:
+     #         S = np.pi
+     #     elif F < m2pi:
+     #         S = 3 * np.pi / 4
+     #     elif F < m2pi * sqrt2:
+     #         S = np.pi / 2
+     #     else:
+     #         S = np.pi / 4
+
+     #     epsilon = 1e-5
+     #     for _ in range(100):
+     #         delta = (np.sin(S) - S * F) / (np.cos(S) - F)
+     #         S -= delta
+     #         if abs(delta) < epsilon:
+     #             break
+     #     return S * m2pi
File without changes
@@ -0,0 +1,18 @@
+ def disable_parent_docstring(func):
+     """
+     Decorator to disable (remove) the inherited docstring from a parent class method.
+     After applying this decorator, the function's __doc__ will be set to None.
+
+     Usage:
+
+     ```python
+     @disable_parent_docstring
+     def my_method(self, *args, **kwargs):
+         # Your method implementation here
+         pass
+     ```
+
+     This is useful when you want to override a method from a parent class
+     but do not want to inherit its docstring, allowing you to provide a new one or leave it empty.
+     """
+     func.__doc__ = None
+     return func
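
A small illustration of the decorator inside a class hierarchy; `Base` and `Child` are hypothetical names for this sketch:

```python
from machinegnostics.magcal.util.dis_docstring import disable_parent_docstring

class Base:
    def run(self):
        """Base docstring that we do not want to carry over."""

class Child(Base):
    @disable_parent_docstring
    def run(self):
        # Overridden without advertising Base.run's docstring
        pass

print(Child.run.__doc__)  # None
```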
@@ -0,0 +1,24 @@
+ import logging
+
+ def get_logger(name: str, level: int = logging.WARNING) -> logging.Logger:
+     """
+     Create and configure a logger with the given name and level.
+
+     Args:
+         name (str): Name of the logger, typically `__name__`.
+         level (int): Logging level (e.g., logging.DEBUG, logging.INFO).
+
+     Returns:
+         logging.Logger: Configured logger instance.
+     """
+     logger = logging.getLogger(name)
+     logger.setLevel(level)
+
+     # Attach a stream handler only once; hasHandlers() also checks ancestor loggers
+     if not logger.hasHandlers():
+         handler = logging.StreamHandler()
+         handler.setLevel(level)
+         formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
+         handler.setFormatter(formatter)
+         logger.addHandler(handler)
+
+     return logger
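
Typical usage, matching how `ScaleParam` calls it above:

```python
import logging
from machinegnostics.magcal.util.logging import get_logger

logger = get_logger(__name__, level=logging.INFO)
logger.info("pipeline started")  # emitted: INFO >= INFO
logger.debug("verbose detail")   # suppressed: DEBUG < INFO
```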
@@ -0,0 +1,34 @@
+ import numpy as np
+
+ def np_max_float():
+     """
+     Returns the maximum float value that can be represented in NumPy.
+
+     Returns
+     -------
+     float
+         The maximum float value.
+     """
+     return np.finfo(float).max
+
+ def np_min_float():
+     """
+     Returns the minimum (most negative) float value that can be represented in NumPy.
+
+     Returns
+     -------
+     float
+         The minimum float value.
+     """
+     return np.finfo(float).min
+
+ def np_eps_float():
+     """
+     Returns the machine epsilon for NumPy floats: the difference between 1.0 and the
+     next representable float (not the smallest positive float, which is np.finfo(float).tiny).
+
+     Returns
+     -------
+     float
+         The machine epsilon.
+     """
+     return np.finfo(float).eps
File without changes
@@ -0,0 +1,28 @@
+ # metrics functions
+ from machinegnostics.metrics.mae import mean_absolute_error
+ from machinegnostics.metrics.rmse import root_mean_squared_error
+ from machinegnostics.metrics.mse import mean_squared_error
+ from machinegnostics.metrics.r2 import r2_score, adjusted_r2_score
+ from machinegnostics.metrics.robr2 import robr2
+ from machinegnostics.metrics.gmmfe import gmmfe
+ from machinegnostics.metrics.divi import divI
+ from machinegnostics.metrics.evalmet import evalMet
+ from machinegnostics.metrics.hc import hc
+ from machinegnostics.metrics.f1_score import f1_score
+ from machinegnostics.metrics.precision import precision_score
+ from machinegnostics.metrics.recall import recall_score
+ from machinegnostics.metrics.cls_report import classification_report
+ from machinegnostics.metrics.accuracy import accuracy_score
+ from machinegnostics.metrics.conf_matrix import confusion_matrix
+ from machinegnostics.metrics.variance import variance
+ from machinegnostics.metrics.auto_covariance import auto_covariance
+ from machinegnostics.metrics.cross_variance import cross_covariance
+ from machinegnostics.metrics.correlation import correlation
+ from machinegnostics.metrics.auto_correlation import auto_correlation
+ from machinegnostics.metrics.mean import mean
+ from machinegnostics.metrics.median import median
+ from machinegnostics.metrics.std import std
+
+ # class
+ from machinegnostics.metrics.mg_r2 import EvaluationMetrics
@@ -0,0 +1,61 @@
+ import numpy as np
+
+ def accuracy_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
+     """
+     Computes the classification accuracy.
+
+     The classification accuracy is the ratio of correctly predicted class labels to the total
+     number of predictions. It is a commonly used metric for evaluating the performance of
+     classification models. The accuracy score ranges from 0 to 1, where:
+     - 1 indicates perfect accuracy (all predictions are correct).
+     - 0 indicates no correct predictions.
+
+     Parameters
+     ----------
+     y_true : array-like
+         True class labels. Must be a 1D array-like object (e.g., list, tuple, or numpy array)
+         containing the ground truth labels.
+     y_pred : array-like
+         Predicted class labels. Must be a 1D array-like object (e.g., list, tuple, or numpy array)
+         containing the predicted labels.
+
+     Returns
+     -------
+     float
+         The classification accuracy as a float value between 0 and 1.
+
+     Raises
+     ------
+     ValueError
+         - If `y_true` and `y_pred` have different shapes.
+         - If `y_true` or `y_pred` are empty.
+     TypeError
+         - If `y_true` or `y_pred` are not array-like (e.g., list, tuple, or numpy array).
+
+     Notes
+     -----
+     - The function converts `y_true` and `y_pred` to numpy arrays internally for efficient computation.
+     - The comparison `y_true == y_pred` is performed element-wise, and the mean of the resulting
+       boolean array is computed to determine accuracy.
+     - Because the comparison is label-wise, binary and multi-class labels both work; multi-label
+       (multi-output) classification is not supported.
+     - Made for research purposes; it may not be suitable for production use without further
+       validation and testing.
+     """
+     # Validate input types
+     if not isinstance(y_true, (list, tuple, np.ndarray)):
+         raise TypeError("y_true must be array-like (list, tuple, or numpy array).")
+     if not isinstance(y_pred, (list, tuple, np.ndarray)):
+         raise TypeError("y_pred must be array-like (list, tuple, or numpy array).")
+
+     # Convert to numpy arrays and flatten
+     y_true = np.asarray(y_true).flatten()
+     y_pred = np.asarray(y_pred).flatten()
+
+     # Check for matching shapes
+     if y_true.shape != y_pred.shape:
+         raise ValueError(f"Shape mismatch: y_true shape {y_true.shape} != y_pred shape {y_pred.shape}")
+
+     # Check for empty arrays
+     if y_true.size == 0:
+         raise ValueError("y_true and y_pred must not be empty.")
+
+     return float(np.mean(y_true == y_pred))
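
A quick usage example, importing from the `accu` module shown in this diff (values chosen for illustration):

```python
import numpy as np
from machinegnostics.metrics.accu import accuracy_score

y_true = np.array([1, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 0, 1, 0, 1])
print(accuracy_score(y_true, y_pred))  # 5 of 6 labels match -> 0.8333...
```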