machinegnostics 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- __init__.py +0 -0
- machinegnostics/__init__.py +24 -0
- machinegnostics/magcal/__init__.py +37 -0
- machinegnostics/magcal/characteristics.py +460 -0
- machinegnostics/magcal/criteria_eval.py +268 -0
- machinegnostics/magcal/criterion.py +140 -0
- machinegnostics/magcal/data_conversion.py +381 -0
- machinegnostics/magcal/gcor.py +64 -0
- machinegnostics/magcal/gdf/__init__.py +2 -0
- machinegnostics/magcal/gdf/base_df.py +39 -0
- machinegnostics/magcal/gdf/base_distfunc.py +1202 -0
- machinegnostics/magcal/gdf/base_egdf.py +823 -0
- machinegnostics/magcal/gdf/base_eldf.py +830 -0
- machinegnostics/magcal/gdf/base_qgdf.py +1234 -0
- machinegnostics/magcal/gdf/base_qldf.py +1019 -0
- machinegnostics/magcal/gdf/cluster_analysis.py +456 -0
- machinegnostics/magcal/gdf/data_cluster.py +975 -0
- machinegnostics/magcal/gdf/data_intervals.py +853 -0
- machinegnostics/magcal/gdf/data_membership.py +536 -0
- machinegnostics/magcal/gdf/der_egdf.py +243 -0
- machinegnostics/magcal/gdf/distfunc_engine.py +841 -0
- machinegnostics/magcal/gdf/egdf.py +324 -0
- machinegnostics/magcal/gdf/eldf.py +297 -0
- machinegnostics/magcal/gdf/eldf_intv.py +609 -0
- machinegnostics/magcal/gdf/eldf_ma.py +627 -0
- machinegnostics/magcal/gdf/homogeneity.py +1218 -0
- machinegnostics/magcal/gdf/intv_engine.py +1523 -0
- machinegnostics/magcal/gdf/marginal_intv_analysis.py +558 -0
- machinegnostics/magcal/gdf/qgdf.py +289 -0
- machinegnostics/magcal/gdf/qldf.py +296 -0
- machinegnostics/magcal/gdf/scedasticity.py +197 -0
- machinegnostics/magcal/gdf/wedf.py +181 -0
- machinegnostics/magcal/gdf/z0_estimator.py +1047 -0
- machinegnostics/magcal/layer_base.py +42 -0
- machinegnostics/magcal/layer_history_base.py +74 -0
- machinegnostics/magcal/layer_io_process_base.py +238 -0
- machinegnostics/magcal/layer_param_base.py +448 -0
- machinegnostics/magcal/mg_weights.py +36 -0
- machinegnostics/magcal/sample_characteristics.py +532 -0
- machinegnostics/magcal/scale_optimization.py +185 -0
- machinegnostics/magcal/scale_param.py +313 -0
- machinegnostics/magcal/util/__init__.py +0 -0
- machinegnostics/magcal/util/dis_docstring.py +18 -0
- machinegnostics/magcal/util/logging.py +24 -0
- machinegnostics/magcal/util/min_max_float.py +34 -0
- machinegnostics/magnet/__init__.py +0 -0
- machinegnostics/metrics/__init__.py +28 -0
- machinegnostics/metrics/accu.py +61 -0
- machinegnostics/metrics/accuracy.py +67 -0
- machinegnostics/metrics/auto_correlation.py +183 -0
- machinegnostics/metrics/auto_covariance.py +204 -0
- machinegnostics/metrics/cls_report.py +130 -0
- machinegnostics/metrics/conf_matrix.py +93 -0
- machinegnostics/metrics/correlation.py +178 -0
- machinegnostics/metrics/cross_variance.py +167 -0
- machinegnostics/metrics/divi.py +82 -0
- machinegnostics/metrics/evalmet.py +109 -0
- machinegnostics/metrics/f1_score.py +128 -0
- machinegnostics/metrics/gmmfe.py +108 -0
- machinegnostics/metrics/hc.py +141 -0
- machinegnostics/metrics/mae.py +72 -0
- machinegnostics/metrics/mean.py +117 -0
- machinegnostics/metrics/median.py +122 -0
- machinegnostics/metrics/mg_r2.py +167 -0
- machinegnostics/metrics/mse.py +78 -0
- machinegnostics/metrics/precision.py +119 -0
- machinegnostics/metrics/r2.py +122 -0
- machinegnostics/metrics/recall.py +108 -0
- machinegnostics/metrics/rmse.py +77 -0
- machinegnostics/metrics/robr2.py +119 -0
- machinegnostics/metrics/std.py +144 -0
- machinegnostics/metrics/variance.py +101 -0
- machinegnostics/models/__init__.py +2 -0
- machinegnostics/models/classification/__init__.py +1 -0
- machinegnostics/models/classification/layer_history_log_reg.py +121 -0
- machinegnostics/models/classification/layer_io_process_log_reg.py +98 -0
- machinegnostics/models/classification/layer_mlflow_log_reg.py +107 -0
- machinegnostics/models/classification/layer_param_log_reg.py +275 -0
- machinegnostics/models/classification/mg_log_reg.py +273 -0
- machinegnostics/models/cross_validation.py +118 -0
- machinegnostics/models/data_split.py +106 -0
- machinegnostics/models/regression/__init__.py +2 -0
- machinegnostics/models/regression/layer_histroy_rob_reg.py +139 -0
- machinegnostics/models/regression/layer_io_process_rob_rig.py +88 -0
- machinegnostics/models/regression/layer_mlflow_rob_reg.py +134 -0
- machinegnostics/models/regression/layer_param_rob_reg.py +212 -0
- machinegnostics/models/regression/mg_lin_reg.py +253 -0
- machinegnostics/models/regression/mg_poly_reg.py +258 -0
- machinegnostics-0.0.1.dist-info/METADATA +246 -0
- machinegnostics-0.0.1.dist-info/RECORD +93 -0
- machinegnostics-0.0.1.dist-info/WHEEL +5 -0
- machinegnostics-0.0.1.dist-info/licenses/LICENSE +674 -0
- machinegnostics-0.0.1.dist-info/top_level.txt +2 -0
__init__.py
ADDED
File without changes

machinegnostics/__init__.py
ADDED
@@ -0,0 +1,24 @@
+# quick methods
+from .metrics.mean import mean
+from .metrics.median import median
+from .metrics.std import std
+from .metrics.variance import variance
+from .metrics.auto_covariance import auto_covariance
+from .metrics.cross_variance import cross_covariance
+from .metrics.correlation import correlation
+from .metrics.auto_correlation import auto_correlation
+from .metrics.robr2 import robr2
+from .metrics.rmse import root_mean_squared_error
+from .metrics.recall import recall_score
+from .metrics.precision import precision_score
+from .metrics.r2 import r2_score, adjusted_r2_score
+from .metrics.mse import mean_squared_error
+from .metrics.mae import mean_absolute_error
+from .metrics.hc import hc
+from .metrics.f1_score import f1_score
+from .metrics.gmmfe import gmmfe
+from .metrics.divi import divI
+from .metrics.evalmet import evalMet
+from .metrics.conf_matrix import confusion_matrix
+from .metrics.accuracy import accuracy_score
+from .metrics.cls_report import classification_report
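
The flat namespace above re-exports the quick metrics at the package root. As a rough orientation only, since the call signatures are not part of this diff, a usage sketch assuming 1-D array inputs and the conventional (y_true, y_pred) argument order might look like:

# Hedged usage sketch: the argument conventions below are assumed, not confirmed by this diff.
import numpy as np
import machinegnostics as mg

data = np.array([1.0, 2.0, 2.5, 3.0, 100.0])   # small sample with one outlier
print(mg.mean(data), mg.median(data), mg.std(data))

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
print(mg.mean_squared_error(y_true, y_pred))
print(mg.r2_score(y_true, y_pred))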

machinegnostics/magcal/__init__.py
ADDED
@@ -0,0 +1,37 @@
+# magcal general imports
+from machinegnostics.magcal.criterion import GnosticCriterion
+from machinegnostics.magcal.layer_base import ModelBase
+from machinegnostics.magcal.data_conversion import DataConversion
+from machinegnostics.magcal.characteristics import GnosticsCharacteristics
+from machinegnostics.magcal.scale_param import ScaleParam
+from machinegnostics.magcal.mg_weights import GnosticsWeights
+from machinegnostics.magcal.sample_characteristics import GnosticCharacteristicsSample
+from machinegnostics.magcal.gcor import __gcorrelation
+from machinegnostics.magcal.layer_param_base import ParamBase
+from machinegnostics.magcal.layer_history_base import HistoryBase
+from machinegnostics.magcal.layer_io_process_base import DataProcessLayerBase
+
+# gdf - Gnostic Analytics Models
+from machinegnostics.magcal.gdf.egdf import EGDF
+from machinegnostics.magcal.gdf.eldf import ELDF
+from machinegnostics.magcal.gdf.qgdf import QGDF
+from machinegnostics.magcal.gdf.qldf import QLDF
+from machinegnostics.magcal.gdf.z0_estimator import Z0Estimator
+from machinegnostics.magcal.gdf.homogeneity import DataHomogeneity
+from machinegnostics.magcal.gdf.data_cluster import DataCluster
+from machinegnostics.magcal.gdf.data_membership import DataMembership
+from machinegnostics.magcal.gdf.data_intervals import DataIntervals
+from machinegnostics.magcal.gdf.scedasticity import DataScedasticity
+from machinegnostics.magcal.gdf.marginal_intv_analysis import IntervalAnalysis
+from machinegnostics.magcal.gdf.cluster_analysis import ClusterAnalysis
+
+# g correlation function
+# from machinegnostics.magcal.gmodulus import gmodulus
+# from machinegnostics.magcal.gacov import gautocovariance
+# from machinegnostics.magcal.gvar import gvariance
+# from machinegnostics.magcal.gcov import gcovariance
+# from machinegnostics.magcal.gmed import gmedian
+
+# util
+from machinegnostics.magcal.util.dis_docstring import disable_parent_docstring
+from machinegnostics.magcal.util.min_max_float import np_max_float, np_min_float, np_eps_float
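
Everything above is re-exported flat, so downstream code can pull the distribution functions and helper classes straight from machinegnostics.magcal. A minimal import check, with no constructor calls because the class signatures are not shown in this diff:

# Import smoke test: only names re-exported by machinegnostics/magcal/__init__.py are referenced.
from machinegnostics.magcal import (
    GnosticsCharacteristics,
    GnosticsWeights,
    ScaleParam,
    EGDF, ELDF, QGDF, QLDF,
    Z0Estimator,
    DataHomogeneity,
)

print(GnosticsCharacteristics.__name__, EGDF.__name__)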

machinegnostics/magcal/characteristics.py
ADDED
@@ -0,0 +1,460 @@
+'''
+ManGo - Machine Gnostics Library
+Copyright (C) 2025 ManGo Team
+
+Author: Nirmal Parmar
+'''
+
+import numpy as np
+import logging
+from machinegnostics.magcal.util.logging import get_logger
+
+class GnosticsCharacteristics:
+    """
+    A class containing internal functions for Machine Gnostics (MG) calculations.
+
+    Notes
+    -----
+    The class takes an input matrix R = Z / Z0, where:
+    - Z : Observed data
+    - Z0 : Estimated value
+
+    Internally, it computes:
+    - q = R**(2/S)
+    - q1 = R**(-2/S) (with protection against division by zero and overflow)
+
+    The internal methods (_fi, _fj, _hi, _hj) operate on q and q1 to calculate
+    various gnostic characteristics.
+
+    Methods
+    -------
+    _fi(q, q1)
+        Calculates the estimation weight.
+
+    _fj(q, q1)
+        Calculates the quantification weight.
+
+    _hi(q, q1)
+        Calculates the estimation relevance.
+
+    _hj(q, q1)
+        Calculates the quantification relevance.
+
+    _rentropy(fi, fj)
+        Calculates the residual entropy.
+
+    _ientropy(fi)
+        Calculates the estimating entropy.
+
+    _jentropy(fj)
+        Calculates the quantifying entropy.
+
+    _idistfun(hi)
+        Calculates the estimating distribution function.
+
+    _jdistfun(hj)
+        Calculates the quantifying distribution function.
+
+    _info_i(p_i)
+        Calculates the estimating information.
+
+    _info_j(p_j)
+        Calculates the quantifying information.
+    """
+
+    def __init__(self,
+                 R: np.ndarray,
+                 eps: float = 1e-10,
+                 verbose: bool = False):
+        """
+        Initializes the GnosticsCharacteristics class.
+
+        Parameters
+        ----------
+        R : np.ndarray
+            The input matrix for the gnostics calculations (R = Z / Z0).
+        eps : float, default=1e-10
+            Small constant for numerical stability.
+        verbose : bool, default=False
+            If True, enables debug-level logging.
+        """
+        self.R = R
+        self.eps = eps
+
+        # logger setup
+        self.logger = get_logger(self.__class__.__name__, logging.DEBUG if verbose else logging.WARNING)
+        self.logger.debug(f"{self.__class__.__name__} initialized.")
+
+    def _get_q_q1(self, S: float = 1):
+        """
+        Calculates q and q1 from R = Z / Z0 stored on the instance.
+
+        For internal use only.
+
+        Parameters
+        ----------
+        S : float, optional
+            Scale (shape) parameter; clamped to a minimum of 0.01.
+
+        Returns
+        -------
+        tuple
+            (q, q1) computed characteristic values.
+        """
+        self.logger.info("Calculating q and q1.")
+        # Add small constant to prevent division by zero
+        R_safe = np.abs(self.R) + self.eps
+
+        # avoid overflow in exponentiation
+        S = np.maximum(S, 0.01)  # Ensure S is at least 0.01 to avoid division by zero
+
+        # Calculate exponents
+        exp_pos = 2.0 / S
+        exp_neg = -2.0 / S
+
+        # Use log-space calculation to determine safe limits
+        log_max = np.log(np.finfo(float).max)
+
+        # Safe upper limit calculation in log space
+        # max_safe_value = exp(log_max / 10) to prevent overflow
+        max_safe_log = log_max / 10.0
+        max_safe_value = np.exp(max_safe_log)
+
+        # Clip R_safe to prevent overflow
+        R_safe = np.clip(R_safe, self.eps, max_safe_value)
+
+        # Use log-space calculations for numerical stability
+        log_R = np.log(R_safe)
+
+        # Calculate in log space to avoid overflow
+        log_q = exp_pos * log_R
+        log_q1 = exp_neg * log_R
+
+        # Safe exponential limits
+        max_exp = log_max - 2.0  # Leave some headroom
+        min_exp = -max_exp
+
+        # Clip log values to prevent overflow/underflow
+        log_q = np.clip(log_q, min_exp, max_exp)
+        log_q1 = np.clip(log_q1, min_exp, max_exp)
+
+        # Convert back from log space
+        self.q = np.exp(log_q)
+        self.q1 = np.exp(log_q1)
+
+        # Final safety checks
+        safe_max = np.finfo(float).max / 1e6
+        self.q = np.clip(self.q, self.eps, safe_max)
+        self.q1 = np.clip(self.q1, self.eps, safe_max)
+
+        # Ensure no NaN or Inf values
+        self.q = np.nan_to_num(self.q, nan=self.eps, posinf=safe_max, neginf=self.eps)
+        self.q1 = np.nan_to_num(self.q1, nan=self.eps, posinf=safe_max, neginf=self.eps)
+
+        return self.q, self.q1
+
|
|
157
|
+
"""
|
|
158
|
+
Calculates the estimation weight.
|
|
159
|
+
|
|
160
|
+
Parameters
|
|
161
|
+
----------
|
|
162
|
+
q : np.ndarray or float
|
|
163
|
+
q1 : np.ndarray or float
|
|
164
|
+
|
|
165
|
+
Returns
|
|
166
|
+
-------
|
|
167
|
+
f : np.ndarray or float
|
|
168
|
+
"""
|
|
169
|
+
self.logger.info("Calculating estimation weight fi.")
|
|
170
|
+
if q is None:
|
|
171
|
+
q = self.q
|
|
172
|
+
if q1 is None:
|
|
173
|
+
q1 = self.q1
|
|
174
|
+
|
|
175
|
+
q = np.asarray(q)
|
|
176
|
+
q1 = np.asarray(q1)
|
|
177
|
+
if q.shape != q1.shape:
|
|
178
|
+
raise ValueError("q and q1 must have the same shape")
|
|
179
|
+
f = 2 / (q + q1)
|
|
180
|
+
return f
|
|
181
|
+
|
|
182
|
+
def _fj(self, q=None, q1=None):
|
|
183
|
+
"""
|
|
184
|
+
Calculates the quantification weight.
|
|
185
|
+
|
|
186
|
+
Parameters
|
|
187
|
+
----------
|
|
188
|
+
q : np.ndarray or float
|
|
189
|
+
q1 : np.ndarray or float
|
|
190
|
+
|
|
191
|
+
Returns
|
|
192
|
+
-------
|
|
193
|
+
f : np.ndarray or float
|
|
194
|
+
"""
|
|
195
|
+
self.logger.info("Calculating quantification weight fj.")
|
|
196
|
+
if q is None:
|
|
197
|
+
q = self.q
|
|
198
|
+
if q1 is None:
|
|
199
|
+
q1 = self.q1
|
|
200
|
+
|
|
201
|
+
q = np.asarray(q)
|
|
202
|
+
q1 = np.asarray(q1)
|
|
203
|
+
if q.shape != q1.shape:
|
|
204
|
+
raise ValueError("q and q1 must have the same shape")
|
|
205
|
+
f = (q + q1) / 2
|
|
206
|
+
return f
|
|
207
|
+
|
|
208
|
+
+    def _hi(self, q=None, q1=None):
+        """
+        Calculates the estimation relevance.
+
+        Parameters
+        ----------
+        q : np.ndarray or float
+        q1 : np.ndarray or float
+
+        Returns
+        -------
+        h : np.ndarray or float
+        """
+        self.logger.info("Calculating estimation relevance hi.")
+        if q is None:
+            q = self.q
+        if q1 is None:
+            q1 = self.q1
+
+        q = np.asarray(q)
+        q1 = np.asarray(q1)
+        if q.shape != q1.shape:
+            self.logger.error("q and q1 must have the same shape")
+            raise ValueError("q and q1 must have the same shape")
+
+        # Handle potential overflow/underflow in q and q1
+        q = np.nan_to_num(q, nan=self.eps, posinf=np.finfo(float).max / 1e6)
+        q1 = np.nan_to_num(q1, nan=self.eps, posinf=np.finfo(float).max / 1e6)
+
+        # Calculate numerator and denominator separately
+        numerator = q - q1
+        denominator = q + q1
+
+        # Handle cases where denominator is very small or zero
+        eps_threshold = self.eps * 1000  # Use larger threshold for stability
+        denominator = np.where(np.abs(denominator) < eps_threshold,
+                               eps_threshold * np.sign(denominator),
+                               denominator)
+
+        # Calculate ratio with additional safety
+        with np.errstate(divide='raise', invalid='raise'):
+            try:
+                h = numerator / denominator
+            except (FloatingPointError, RuntimeWarning):
+                # Fallback calculation
+                # When q >> q1 or q1 >> q, handle separately
+                mask_q_large = q > 1000 * q1
+                mask_q1_large = q1 > 1000 * q
+                mask_normal = ~(mask_q_large | mask_q1_large)
+
+                h = np.zeros_like(q)
+                h[mask_q_large] = 1.0    # When q >> q1, h approaches 1
+                h[mask_q1_large] = -1.0  # When q1 >> q, h approaches -1
+                h[mask_normal] = numerator[mask_normal] / denominator[mask_normal]
+
+        # Final clipping and NaN handling
+        h = np.clip(h, -1.0, 1.0)
+        h = np.nan_to_num(h, nan=0.0)
+
+        return h
+
+    def _hj(self, q=None, q1=None):
+        """
+        Calculates the quantification relevance.
+
+        Parameters
+        ----------
+        q : np.ndarray or float
+        q1 : np.ndarray or float
+
+        Returns
+        -------
+        h : np.ndarray or float
+        """
+        self.logger.info("Calculating quantification relevance hj.")
+        if q is None:
+            q = self.q
+        if q1 is None:
+            q1 = self.q1
+
+        q = np.asarray(q)
+        q1 = np.asarray(q1)
+        if q.shape != q1.shape:
+            self.logger.error("q and q1 must have the same shape")
+            raise ValueError("q and q1 must have the same shape")
+        h = (q - q1) / 2
+        return h
+
+    def _rentropy(self, fi, fj):
+        """
+        Calculates the residual entropy.
+
+        Parameters
+        ----------
+        fi : np.ndarray or float
+            Estimation weight.
+        fj : np.ndarray or float
+            Quantification weight.
+
+        Returns
+        -------
+        entropy : np.ndarray or float
+            Residual entropy.
+        """
+        self.logger.info("Calculating residual entropy.")
+        fi = np.asarray(fi)
+        fj = np.asarray(fj)
+        if fi.shape != fj.shape:
+            self.logger.error("fi and fj must have the same shape")
+            raise ValueError("fi and fj must have the same shape")
+        entropy = fj - fi
+        if (entropy < 0).any():  # a negative residual entropy means something is wrong
+            self.logger.error("Entropy cannot be negative")
+            raise ValueError("Entropy cannot be negative")
+        return entropy
+
+    def _ientropy(self, fi):
+        """
+        Calculates the estimating entropy.
+
+        Parameters
+        ----------
+        fi : np.ndarray or float
+            Estimation weight.
+
+        Returns
+        -------
+        entropy : np.ndarray or float
+            Estimating entropy.
+        """
+        self.logger.info("Calculating estimating entropy.")
+        fi = np.asarray(fi)
+        if fi.shape != self.q.shape:
+            self.logger.error("fi and q must have the same shape")
+            raise ValueError("fi and q must have the same shape")
+        entropy = 1 - fi
+        return entropy
+
+    def _jentropy(self, fj):
+        """
+        Calculates the quantifying entropy.
+
+        Parameters
+        ----------
+        fj : np.ndarray or float
+            Quantification weight.
+
+        Returns
+        -------
+        entropy : np.ndarray or float
+            Quantifying entropy.
+        """
+        self.logger.info("Calculating quantifying entropy.")
+        fj = np.asarray(fj)
+        if fj.shape != self.q.shape:
+            self.logger.error("fj and q must have the same shape")
+            raise ValueError("fj and q must have the same shape")
+        entropy = fj - 1
+        return entropy
+
+    def _idistfun(self, hi):
+        """
+        Calculates the estimating distribution function.
+
+        Parameters
+        ----------
+        hi : np.ndarray or float
+            Estimation relevance.
+
+        Returns
+        -------
+        p_i : np.ndarray or float
+            Estimating distribution function value.
+        """
+        self.logger.info("Calculating estimating distribution function.")
+        hi = np.asarray(hi)
+        if hi.shape != self.q.shape:
+            self.logger.error("hi and q must have the same shape")
+            raise ValueError("hi and q must have the same shape")
+        p_i = np.sqrt(np.power((1 - hi) / 2, 2))  # from MGpdf
+        return p_i
+
+    def _jdistfun(self, hj):
+        """
+        Calculates the quantifying distribution function.
+
+        Parameters
+        ----------
+        hj : np.ndarray or float
+            Quantification relevance.
+
+        Returns
+        -------
+        p_j : np.ndarray or float
+            Quantifying distribution function value.
+        """
+        self.logger.info("Calculating quantifying distribution function.")
+        hj = np.asarray(hj)
+        if hj.shape != self.q.shape:
+            self.logger.error("hj and q must have the same shape")
+            raise ValueError("hj and q must have the same shape")
+        p_j = np.sqrt(np.power((1 - hj) / 2, 2))
+        return p_j
+
+    def _info_i(self, p_i):
+        """
+        Calculates the estimating information.
+
+        Parameters
+        ----------
+        p_i : np.ndarray or float
+            Estimating distribution function value.
+
+        Returns
+        -------
+        info : np.ndarray or float
+            Estimating information.
+        """
+        self.logger.info("Calculating estimating information.")
+        p_i = np.asarray(p_i)
+        if p_i.shape != self.q.shape:
+            self.logger.error("p_i and q must have the same shape")
+            raise ValueError("p_i and q must have the same shape")
+        epsilon = 1e-12
+        # avoid log(0)
+        p_i = np.clip(p_i, 0 + epsilon, 1 - epsilon)
+        Ii = -p_i * np.log(p_i + epsilon) - (1 - p_i) * np.log(1 - p_i + epsilon)
+        return Ii
+
+    def _info_j(self, p_j):
+        """
+        Calculates the quantifying information.
+
+        Parameters
+        ----------
+        p_j : np.ndarray or float
+            Quantifying distribution function value.
+
+        Returns
+        -------
+        info : np.ndarray or float
+            Quantifying information.
+        """
+        self.logger.info("Calculating quantifying information.")
+        p_j = np.asarray(p_j)
+        if p_j.shape != self.q.shape:
+            self.logger.error("p_j and q must have the same shape")
+            raise ValueError("p_j and q must have the same shape")
+        epsilon = 1e-12
+        # avoid log(0)
+        p_j = np.clip(p_j, 0 + epsilon, 1 - epsilon)
+        Ij = -p_j * np.log(p_j + epsilon) - (1 - p_j) * np.log(1 - p_j + epsilon)
+        return Ij
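
The characteristics defined above are tightly coupled: since q * q1 = 1 (up to the eps safeguards), fi = 2 / (q + q1) and fj = (q + q1) / 2 are reciprocals, and the weight/relevance pairs satisfy fi**2 + hi**2 = 1 and fj**2 - hj**2 = 1. A short sketch that illustrates these relationships; note these are internal, underscore-prefixed helpers, and calling them directly is shown only for illustration:

# Illustrative sketch only; the identities checked here follow from the formulas
# in characteristics.py, not from any documented public API.
import numpy as np
from machinegnostics.magcal import GnosticsCharacteristics

Z = np.array([1.2, 0.8, 1.0, 2.5, 0.4])
Z0 = 1.0                                   # some central estimate of the data
gc = GnosticsCharacteristics(R=Z / Z0)

q, q1 = gc._get_q_q1(S=1)                  # q = R**(2/S), q1 = R**(-2/S)
fi, fj = gc._fi(), gc._fj()                # estimation / quantification weights
hi, hj = gc._hi(), gc._hj()                # estimation / quantification relevances

print(np.allclose(fi * fj, 1.0, atol=1e-6))         # reciprocal weights
print(np.allclose(fi**2 + hi**2, 1.0, atol=1e-6))   # estimating identity
print(np.allclose(fj**2 - hj**2, 1.0, atol=1e-6))   # quantifying identity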