machinegnostics 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- __init__.py +0 -0
- machinegnostics/__init__.py +24 -0
- machinegnostics/magcal/__init__.py +37 -0
- machinegnostics/magcal/characteristics.py +460 -0
- machinegnostics/magcal/criteria_eval.py +268 -0
- machinegnostics/magcal/criterion.py +140 -0
- machinegnostics/magcal/data_conversion.py +381 -0
- machinegnostics/magcal/gcor.py +64 -0
- machinegnostics/magcal/gdf/__init__.py +2 -0
- machinegnostics/magcal/gdf/base_df.py +39 -0
- machinegnostics/magcal/gdf/base_distfunc.py +1202 -0
- machinegnostics/magcal/gdf/base_egdf.py +823 -0
- machinegnostics/magcal/gdf/base_eldf.py +830 -0
- machinegnostics/magcal/gdf/base_qgdf.py +1234 -0
- machinegnostics/magcal/gdf/base_qldf.py +1019 -0
- machinegnostics/magcal/gdf/cluster_analysis.py +456 -0
- machinegnostics/magcal/gdf/data_cluster.py +975 -0
- machinegnostics/magcal/gdf/data_intervals.py +853 -0
- machinegnostics/magcal/gdf/data_membership.py +536 -0
- machinegnostics/magcal/gdf/der_egdf.py +243 -0
- machinegnostics/magcal/gdf/distfunc_engine.py +841 -0
- machinegnostics/magcal/gdf/egdf.py +324 -0
- machinegnostics/magcal/gdf/eldf.py +297 -0
- machinegnostics/magcal/gdf/eldf_intv.py +609 -0
- machinegnostics/magcal/gdf/eldf_ma.py +627 -0
- machinegnostics/magcal/gdf/homogeneity.py +1218 -0
- machinegnostics/magcal/gdf/intv_engine.py +1523 -0
- machinegnostics/magcal/gdf/marginal_intv_analysis.py +558 -0
- machinegnostics/magcal/gdf/qgdf.py +289 -0
- machinegnostics/magcal/gdf/qldf.py +296 -0
- machinegnostics/magcal/gdf/scedasticity.py +197 -0
- machinegnostics/magcal/gdf/wedf.py +181 -0
- machinegnostics/magcal/gdf/z0_estimator.py +1047 -0
- machinegnostics/magcal/layer_base.py +42 -0
- machinegnostics/magcal/layer_history_base.py +74 -0
- machinegnostics/magcal/layer_io_process_base.py +238 -0
- machinegnostics/magcal/layer_param_base.py +448 -0
- machinegnostics/magcal/mg_weights.py +36 -0
- machinegnostics/magcal/sample_characteristics.py +532 -0
- machinegnostics/magcal/scale_optimization.py +185 -0
- machinegnostics/magcal/scale_param.py +313 -0
- machinegnostics/magcal/util/__init__.py +0 -0
- machinegnostics/magcal/util/dis_docstring.py +18 -0
- machinegnostics/magcal/util/logging.py +24 -0
- machinegnostics/magcal/util/min_max_float.py +34 -0
- machinegnostics/magnet/__init__.py +0 -0
- machinegnostics/metrics/__init__.py +28 -0
- machinegnostics/metrics/accu.py +61 -0
- machinegnostics/metrics/accuracy.py +67 -0
- machinegnostics/metrics/auto_correlation.py +183 -0
- machinegnostics/metrics/auto_covariance.py +204 -0
- machinegnostics/metrics/cls_report.py +130 -0
- machinegnostics/metrics/conf_matrix.py +93 -0
- machinegnostics/metrics/correlation.py +178 -0
- machinegnostics/metrics/cross_variance.py +167 -0
- machinegnostics/metrics/divi.py +82 -0
- machinegnostics/metrics/evalmet.py +109 -0
- machinegnostics/metrics/f1_score.py +128 -0
- machinegnostics/metrics/gmmfe.py +108 -0
- machinegnostics/metrics/hc.py +141 -0
- machinegnostics/metrics/mae.py +72 -0
- machinegnostics/metrics/mean.py +117 -0
- machinegnostics/metrics/median.py +122 -0
- machinegnostics/metrics/mg_r2.py +167 -0
- machinegnostics/metrics/mse.py +78 -0
- machinegnostics/metrics/precision.py +119 -0
- machinegnostics/metrics/r2.py +122 -0
- machinegnostics/metrics/recall.py +108 -0
- machinegnostics/metrics/rmse.py +77 -0
- machinegnostics/metrics/robr2.py +119 -0
- machinegnostics/metrics/std.py +144 -0
- machinegnostics/metrics/variance.py +101 -0
- machinegnostics/models/__init__.py +2 -0
- machinegnostics/models/classification/__init__.py +1 -0
- machinegnostics/models/classification/layer_history_log_reg.py +121 -0
- machinegnostics/models/classification/layer_io_process_log_reg.py +98 -0
- machinegnostics/models/classification/layer_mlflow_log_reg.py +107 -0
- machinegnostics/models/classification/layer_param_log_reg.py +275 -0
- machinegnostics/models/classification/mg_log_reg.py +273 -0
- machinegnostics/models/cross_validation.py +118 -0
- machinegnostics/models/data_split.py +106 -0
- machinegnostics/models/regression/__init__.py +2 -0
- machinegnostics/models/regression/layer_histroy_rob_reg.py +139 -0
- machinegnostics/models/regression/layer_io_process_rob_rig.py +88 -0
- machinegnostics/models/regression/layer_mlflow_rob_reg.py +134 -0
- machinegnostics/models/regression/layer_param_rob_reg.py +212 -0
- machinegnostics/models/regression/mg_lin_reg.py +253 -0
- machinegnostics/models/regression/mg_poly_reg.py +258 -0
- machinegnostics-0.0.1.dist-info/METADATA +246 -0
- machinegnostics-0.0.1.dist-info/RECORD +93 -0
- machinegnostics-0.0.1.dist-info/WHEEL +5 -0
- machinegnostics-0.0.1.dist-info/licenses/LICENSE +674 -0
- machinegnostics-0.0.1.dist-info/top_level.txt +2 -0
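For orientation, the single file diff reproduced below appears to be `machinegnostics/magcal/gdf/intv_engine.py` (the only file in the listing with 1,523 added lines). The following minimal sketch mirrors the doctest embedded in that file's class docstring; it assumes the wheel is installed (for example via pip) and that the import paths shown in the listing are the public ones. It has not been run against this release.

```python
# Minimal usage sketch, based on the docstring example in intv_engine.py below.
import numpy as np
from machinegnostics.magcal import ELDF
from machinegnostics.magcal.gdf.intv_engine import IntveEngine

data = np.array([18, 19, 20, 21, 22])
eldf = ELDF(data, LB=15, UB=25)      # distribution function with explicit bounds
eldf.fit()                           # must be fitted before interval estimation

intve = IntveEngine(eldf, verbose=True)
intve.fit()
print(intve.z0l, intve.z0u)          # tolerance interval boundaries
print(intve.zl, intve.zu)            # typical data interval boundaries
```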
@@ -0,0 +1,1523 @@
"""
Interval Estimation Engine (IntveEngine) - Fresh Implementation with Improved Logic
Core Logic: Extend the data with a single datum and track Z0 variations under the ordering constraint
"""

import numpy as np
import warnings
from typing import Union, Dict, Tuple, Optional, List
from machinegnostics.magcal import EGDF, ELDF
import logging
from machinegnostics.magcal.util.logging import get_logger

class IntveEngine:
    """
    Z0-Based Interval Estimation Engine with Ordering Constraint Validation

    This class implements a novel approach to interval estimation using Z0 (gnostic mode) variations.
    It extends the original data with single datum points across the [LB, UB] range and tracks how
    the Z0 value changes, in order to identify interval boundaries that satisfy the ordering constraint:
    ZL < Z0L < Z0 < Z0U < ZU

    **Core Methodology:**

    1. **Data Extension Strategy**: For each potential datum value in [LB, UB], extend the original
       data with that single datum and recompute the Z0 (gnostic mode).

    2. **Z0 Variation Tracking**: Monitor how Z0 changes as different datum values are added:
       - Z0 decreases when certain datum values are added → the minimum gives Z0L, ZL
       - Z0 increases when other datum values are added → the maximum gives Z0U, ZU

    3. **Interval Identification**:
       - **ZL**: Datum value that produces the minimum Z0 (Z0L)
       - **Z0L**: Minimum Z0 value achieved
       - **ZU**: Datum value that produces the maximum Z0 (Z0U)
       - **Z0U**: Maximum Z0 value achieved
       - **Tolerance Interval**: [Z0L, Z0U] - range of Z0 variations
       - **Typical Data Interval**: [ZL, ZU] - range of datum values causing the extrema

    4. **Ordering Constraint**: Ensures ZL < Z0L < Z0_original < Z0U < ZU for valid intervals

    **Key Features:**

    - **Adaptive Search**: Dense sampling near Z0, sparse sampling toward the boundaries
    - **Convergence Detection**: Early stopping when Z0 variations stabilize
    - **Robust Fallback**: Handles cases where the strict ordering constraint cannot be satisfied
    - **Dual DF Support**: Works with both EGDF and ELDF objects
    - **Comprehensive Validation**: Multiple validation checks and constraint enforcement
    - **Rich Diagnostics**: Detailed search statistics and quality metrics

    **Applications:**

    - Quality control interval estimation
    - Uncertainty quantification in manufacturing
    - Process capability analysis
    - Statistical tolerance design
    - Risk assessment and decision making

    Parameters
    ----------
    df_object : Union[EGDF, ELDF]
        Fitted distribution function object with an available Z0 (gnostic mode).
        Must already be fitted using df_object.fit().

    n_points_per_direction : int, optional (default=1000)
        Number of search points to generate in each direction from Z0.
        Higher values provide more precise interval estimation but increase computation time.
        - Recommended: 500-2000 for most applications
        - Minimum: 50 (automatically enforced)

    dense_zone_fraction : float, optional (default=0.4)
        Fraction of the search range to sample densely near Z0.
        Controls the balance between local precision and boundary exploration.
        - Range: [0.1, 0.8] (automatically clipped)
        - 0.4 means 40% of the range near Z0 gets dense sampling

    dense_points_fraction : float, optional (default=0.7)
        Fraction of the total points to place in the dense zone.
        Higher values focus more search effort near Z0.
        - Range: [0.5, 0.9] (automatically clipped)
        - 0.7 means 70% of points in the dense zone, 30% toward the boundaries

    convergence_window : int, optional (default=15)
        Number of recent points to check for Z0 convergence.
        Enables early stopping when Z0 variations stabilize.
        - Minimum: 5 (automatically enforced)
        - Larger windows are more conservative

    convergence_threshold : float, optional (default=1e-7)
        Standard deviation threshold for detecting Z0 convergence.
        Lower values require more stable convergence before stopping.
        - Typical range: 1e-9 to 1e-5

    min_search_points : int, optional (default=30)
        Minimum number of points to search before checking convergence.
        Prevents premature stopping in early search phases.
        - Minimum: 10 (automatically enforced)

    boundary_margin_factor : float, optional (default=0.001)
        Safety margin from the LB/UB boundaries as a fraction of the range.
        Prevents numerical issues near the boundaries.
        - Minimum: 1e-6 (automatically enforced)
        - 0.001 means a 0.1% margin from each boundary

    extrema_search_tolerance : float, optional (default=1e-6)
        Tolerance for identifying valid extrema that satisfy the ordering constraint.
        Used in numerical comparisons during extrema validation.

    verbose : bool, optional (default=False)
        Enable detailed progress reporting and diagnostic output.
        Useful for debugging and understanding the search process.

    Attributes
    ----------
    zl : float
        Datum value that produces the minimum Z0 (left boundary of the typical data interval).

    z0l : float
        Minimum Z0 value achieved (left boundary of the tolerance interval).

    zu : float
        Datum value that produces the maximum Z0 (right boundary of the typical data interval).

    z0u : float
        Maximum Z0 value achieved (right boundary of the tolerance interval).

    z0 : float
        Original Z0 value from the fitted DF object. Accessible as obj.z0 for convenience.

    tolerance_interval : float
        Width of the tolerance interval (Z0U - Z0L).

    typical_data_interval : float
        Width of the typical data interval (ZU - ZL).

    params : dict
        Comprehensive parameter dictionary containing:
        - Configuration settings
        - Search results and statistics
        - Quality metrics and validation results
        - Timing information

    search_results : dict
        Detailed search tracking with 'lower' and 'upper' direction results:
        - datum_values: List of tested datum values
        - z0_values: Corresponding Z0 values
        - success_flags: Success/failure status for each attempt

    Examples
    --------
    Basic usage with ELDF:

    >>> from machinegnostics.magcal import ELDF
    >>> from machinegnostics.magcal.gdf.intv_engine import IntveEngine
    >>>
    >>> # Create and fit ELDF
    >>> data = np.array([18, 19, 20, 21, 22])
    >>> eldf = ELDF(data, LB=15, UB=25)
    >>> eldf.fit()
    >>>
    >>> # Create and fit interval engine
    >>> intve = IntveEngine(eldf, verbose=True)
    >>> intve.fit(plot=True)
    >>>
    >>> # Access results
    >>> print(f"Z0: {intve.z0}")
    >>> print(f"Tolerance interval: [{intve.z0l:.4f}, {intve.z0u:.4f}]")
    >>> print(f"Typical data interval: [{intve.zl:.4f}, {intve.zu:.4f}]")
    >>>
    >>> # Get complete results dictionary
    >>> intervals = intve.get_intervals()

    Advanced configuration:

    >>> # High-precision search with custom parameters
    >>> intve = IntveEngine(
    ...     eldf,
    ...     n_points_per_direction=2000,
    ...     dense_zone_fraction=0.3,
    ...     convergence_threshold=1e-8,
    ...     verbose=True
    ... )
    >>> intve.fit()

    Working with search results:

    >>> # Access detailed search data
    >>> lower_data = intve.search_results['lower']
    >>> upper_data = intve.search_results['upper']
    >>>
    >>> # Check ordering constraint satisfaction
    >>> ordering_valid = (intve.zl < intve.z0l < intve.z0 < intve.z0u < intve.zu)
    >>> print(f"Ordering constraint satisfied: {ordering_valid}")

    Methods
    -------
    fit(plot=False, update_df_params=True)
        Perform interval estimation with optional plotting and DF parameter updates.

    get_intervals(decimals=6)
        Return interval results as a formatted dictionary.

    plot(figsize=(12, 8), plot_distribution=False, eldf_plot=True)
        Create a visualization of the interval estimation results.

    Notes
    -----
    **Theoretical Foundation:**

    The method is based on the principle that adding specific datum values to a dataset
    causes predictable changes in the Z0 (gnostic mode). By systematically exploring
    these changes, we can identify critical boundaries that define meaningful intervals
    for quality control and process analysis.

    **Ordering Constraint Interpretation:**

    The constraint ZL < Z0L < Z0 < Z0U < ZU ensures that:
    - ZL and ZU represent extreme datum values that still produce meaningful Z0 changes
    - Z0L and Z0U represent the range of Z0 sensitivity
    - The original Z0 lies between these extremes, indicating stability

    **Performance Considerations:**

    - Computation time scales with n_points_per_direction and data size
    - Dense sampling near Z0 is most critical for accuracy
    - Convergence detection can significantly reduce computation time
    - Memory usage is generally modest (< 100 MB for typical problems)

    **Numerical Stability:**

    - Uses adaptive tolerance relaxation for extended DF fitting
    - Implements fallback methods for difficult cases
    - Applies boundary margins to prevent numerical issues
    - Validates all intermediate results

    **Quality Indicators:**

    - ordering_constraint_satisfied: Primary validity indicator
    - search_statistics.success_rate: Measure of numerical stability
    - interval_quality.z0_stability: Measure of Z0 sensitivity
    - fit_time: Performance indicator

    References
    ----------
    Based on the theoretical framework of Machine Gnostics and the principles of
    gnostic mode analysis for industrial quality control applications.

    See Also
    --------
    ELDF : Empirical Log Density Function for univariate data
    EGDF : Empirical Goodness Distribution Function
    Z0Estimator : Z0 estimation utilities
    """

    def __init__(self,
                 df_object: Union[EGDF, ELDF],
                 n_points_per_direction: int = 1000,
                 dense_zone_fraction: float = 0.4,
                 dense_points_fraction: float = 0.7,
                 convergence_window: int = 15,
                 convergence_threshold: float = 1e-7,
                 min_search_points: int = 30,
                 boundary_margin_factor: float = 0.001,
                 extrema_search_tolerance: float = 1e-6,
                 verbose: bool = False):
        """
        Initialize interval estimation engine.

        Parameters:
        -----------
        df_object : EGDF or ELDF
            Fitted distribution function object with known Z0
        n_points_per_direction : int
            Number of search points in each direction from Z0
        dense_zone_fraction : float
            Fraction of range to sample densely near Z0
        dense_points_fraction : float
            Fraction of points to place in dense zone
        convergence_window : int
            Window size for checking Z0 convergence
        convergence_threshold : float
            Threshold for Z0 convergence detection
        min_search_points : int
            Minimum points to search before stopping
        boundary_margin_factor : float
            Safety margin from boundaries as fraction of range
        extrema_search_tolerance : float
            Tolerance for finding valid extrema that satisfy ordering constraint
        verbose : bool
            Enable verbose output
        """

        # Set verbose first
        self.verbose = verbose

        # Configuration
        self.n_points_per_direction = max(n_points_per_direction, 50)
        self.dense_zone_fraction = np.clip(dense_zone_fraction, 0.1, 0.8)
        self.dense_points_fraction = np.clip(dense_points_fraction, 0.5, 0.9)
        self.convergence_window = max(convergence_window, 5)
        self.convergence_threshold = convergence_threshold
        self.min_search_points = max(min_search_points, 10)
        self.boundary_margin_factor = max(boundary_margin_factor, 1e-6)
        self.extrema_search_tolerance = extrema_search_tolerance

        # logger setup
        self.logger = get_logger(self.__class__.__name__, logging.DEBUG if verbose else logging.WARNING)
        self.logger.debug(f"{self.__class__.__name__} initialized::")

        # Initialize params dictionary
        self.params = {}

        # Results
        self._reset_results()

        # Validate and extract properties from DF object
        self._validate_and_extract_properties(df_object)

        # Search tracking
        self.search_results = {
            'lower': {'datum_values': [], 'z0_values': [], 'success_flags': []},
            'upper': {'datum_values': [], 'z0_values': [], 'success_flags': []}
        }

        self._fitted = False

        # Store initialization parameters
        self._store_initialization_params()

        if self.verbose:
            self._print_initialization_info()

    def _store_initialization_params(self):
        """Store initialization parameters in params dictionary."""
        self.logger.info("Storing initialization parameters.")
        self.params.update({
            # Configuration parameters
            'n_points_per_direction': self.n_points_per_direction,
            'dense_zone_fraction': self.dense_zone_fraction,
            'dense_points_fraction': self.dense_points_fraction,
            'convergence_window': self.convergence_window,
            'convergence_threshold': self.convergence_threshold,
            'min_search_points': self.min_search_points,
            'boundary_margin_factor': self.boundary_margin_factor,
            'extrema_search_tolerance': self.extrema_search_tolerance,
            'verbose': self.verbose,

            # DF object information
            'df_type': getattr(self, 'df_type', None),
            'original_data_size': len(getattr(self, 'original_data', [])),
            'LB': getattr(self, 'LB', None),
            'UB': getattr(self, 'UB', None),
            'z0_original': getattr(self, 'z0_original', None),

            # Status
            'fitted': self._fitted,
            'initialization_time': np.datetime64('now')
        })

    def _validate_and_extract_properties(self, df_object):
        """Extract and validate properties from DF object."""
        self.logger.info("Validating and extracting properties from DF object.")

        # Check if object is fitted
        if not hasattr(df_object, '_fitted') or not df_object._fitted:
            self.logger.error("Distribution function object must be fitted first")
            raise ValueError("Distribution function object must be fitted first")

        # Check if Z0 is available
        if not hasattr(df_object, 'z0') or df_object.z0 is None:
            self.logger.error("Z0 (gnostic mode) not available. Fit Z0 first.")
            raise ValueError("Z0 (gnostic mode) not available. Fit Z0 first.")

        # Store reference and extract basic properties
        self.df_object = df_object
        self.original_data = np.array(df_object.data)
        self.LB = float(df_object.LB)
        self.UB = float(df_object.UB)
        self.z0 = float(df_object.z0)

        # Validate bounds
        if self.LB >= self.UB:
            self.logger.error(f"Invalid bounds: LB ({self.LB}) >= UB ({self.UB})")
            raise ValueError(f"Invalid bounds: LB ({self.LB}) >= UB ({self.UB})")

        # Validate Z0 within bounds
        if not (self.LB <= self.z0 <= self.UB):
            self.logger.warning(f"Z0 ({self.z0:.6f}) outside bounds [{self.LB:.6f}, {self.UB:.6f}]")

        # Determine DF type
        if isinstance(df_object, EGDF):
            self.df_type = 'EGDF'
        elif isinstance(df_object, ELDF):
            self.df_type = 'ELDF'
        else:
            class_name = df_object.__class__.__name__
            if 'EGDF' in class_name:
                self.df_type = 'EGDF'
            elif 'ELDF' in class_name:
                self.df_type = 'ELDF'
            else:
                self.logger.error(f"Unsupported distribution type: {class_name}")
                raise ValueError(f"Unsupported distribution type: {class_name}")

        # Extract DF creation parameters
        self._extract_df_parameters()

        # Update params with extracted information
        self.params.update({
            'df_type': self.df_type,
            'original_data_size': len(self.original_data),
            'LB': self.LB,
            'UB': self.UB,
            'z0_original': self.z0,
            'data_range': [float(np.min(self.original_data)), float(np.max(self.original_data))],
            'bounds_range': self.UB - self.LB
        })

    def _extract_df_parameters(self):
        """Extract parameters needed to create new DF instances."""
        self.logger.info("Extracting DF creation parameters.")
        df = self.df_object

        # Safely extract DLB and DUB with validation
        def safe_extract_bound(obj, attr_name, default=None):
            """Safely extract bound with validation."""
            try:
                value = getattr(obj, attr_name, default)
                if value is None:
                    return None
                # Convert to float and validate
                value = float(value)
                if not np.isfinite(value):
                    self.logger.warning(f"{attr_name} is not finite ({value}), using None")
                    return None
                return value
            except (AttributeError, TypeError, ValueError) as e:
                self.logger.warning(f"Could not extract {attr_name}: {e}, using None")
                return None

        # Extract bounds safely
        self.DLB = safe_extract_bound(df, 'DLB')
        self.DUB = safe_extract_bound(df, 'DUB')

        # Common parameters with safe extraction
        self.weights = getattr(df, 'weights', None)
        self.data_form = getattr(df, 'data_form', 'a')
        self.homogeneous = getattr(df, 'homogeneous', True)
        self.tolerance = getattr(df, 'tolerance', 1e-9)
        self.max_data_size = getattr(df, 'max_data_size', 1000)

        # EGDF specific parameters
        if self.df_type == 'EGDF':
            self.S_opt = getattr(df, 'S_opt', 'auto')
            self.wedf = getattr(df, 'wedf', True)
            self.opt_method = getattr(df, 'opt_method', 'L-BFGS-B')

        # Store extracted parameters in params
        self.params.update({
            'DLB': self.DLB,
            'DUB': self.DUB,
            'data_form': self.data_form,
            'homogeneous': self.homogeneous,
            'tolerance': self.tolerance,
            'max_data_size': self.max_data_size,
            'has_weights': self.weights is not None,
            'weights_shape': np.array(self.weights).shape if self.weights is not None else None
        })

        if self.df_type == 'EGDF':
            self.params.update({
                'S_opt': self.S_opt,
                'wedf': self.wedf,
                'opt_method': self.opt_method
            })

        self.logger.info(f"Extracted parameters:")
        self.logger.info(f"  DLB: {self.DLB}")
        self.logger.info(f"  DUB: {self.DUB}")
        self.logger.info(f"  Data form: {self.data_form}")
        self.logger.info(f"  Homogeneous: {self.homogeneous}")

    def _reset_results(self):
        """Reset all results to initial state."""
        self.logger.info("Resetting results to initial state.")
        self.zl = None   # Datum value where Z0 is minimum
        self.z0l = None  # Minimum Z0 value
        self.zu = None   # Datum value where Z0 is maximum
        self.z0u = None  # Maximum Z0 value
        self.tolerance_interval = None
        self.typical_data_interval = None

        # Reset results in params
        if hasattr(self, 'params'):
            self.params.update({
                'ZL': None,
                'Z0L': None,
                'ZU': None,
                'Z0U': None,
                'tolerance_interval': None,
                'typical_data_interval': None,
                'tolerance_interval_width': None,
                'typical_data_interval_width': None,
                'fitted': False,
                'fit_time': None,
                'search_statistics': None,
                'ordering_validation': None
            })

    def _print_initialization_info(self):
        """Print initialization information."""
        self.logger.info(f"IntveEngine Initialized:")
        self.logger.info(f"  Type: {self.df_type}")
        self.logger.info(f"  Data size: {len(self.original_data)}")
        self.logger.info(f"  Bounds: [{self.LB:.6f}, {self.UB:.6f}]")
        self.logger.info(f"  Original Z0: {self.z0:.6f}")
        self.logger.info(f"  Search points per direction: {self.n_points_per_direction}")
        self.logger.info(f"  Dense zone: {self.dense_zone_fraction:.1%} of range")
        self.logger.info(f"  Extrema search tolerance: {self.extrema_search_tolerance}")

    def fit(self, plot: bool = False, update_df_params: bool = True) -> 'IntveEngine':
        """
        Perform interval estimation with improved extrema detection.

        Parameters:
        -----------
        plot : bool
            Whether to plot results after fitting
        update_df_params : bool
            Whether to update the original DF object's params with interval results

        Returns:
        --------
        self : IntveEngine
            Fitted engine instance
        """
        self.logger.info("Starting Z0-based interval estimation with ordering constraint...")

        if self.verbose:
            self.logger.info(f"\nStarting Z0-based interval estimation with ordering constraint...")

        # Record start time
        import time
        start_time = time.time()

        try:
            # Reset results
            self._reset_results()
            self.search_results = {
                'lower': {'datum_values': [], 'z0_values': [], 'success_flags': []},
                'upper': {'datum_values': [], 'z0_values': [], 'success_flags': []}
            }

            # Test extension capability first
            self.logger.info("Testing data extension capability...")
            self._test_extension_capability()

            # Search lower interval: Z0 → LB
            self.logger.info(f"Searching lower interval (Z0 → LB)...")
            self._search_interval('lower')

            # Search upper interval: Z0 → UB
            self.logger.info(f"Searching upper interval (Z0 → UB)...")
            self._search_interval('upper')

            # Analyze results with improved extrema detection
            self.logger.info("Analyzing search results and extracting intervals with ordering constraint...")
            self._analyze_and_extract_intervals_with_ordering()

            # Record end time and update status
            end_time = time.time()
            self._fitted = True

            # Update params with results and statistics
            self.logger.info("Updating parameters with results and statistics...")
            self._update_params_with_results(end_time - start_time)

            # Update original DF object params if requested
            if update_df_params:
                self.logger.info("Updating original DF object parameters with interval results...")
                self._update_df_object_params()

            if self.verbose:
                self.logger.info("Interval estimation completed successfully.")
                self._print_results()

            if plot:
                self.logger.info("Plotting results...")
                self.plot()

            return self

        except Exception as e:
            error_msg = f"Interval estimation failed: {str(e)}"
            if self.verbose:
                self.logger.error(error_msg)
                self._print_debug_info()
            raise RuntimeError(error_msg) from e

    def _search_interval(self, direction: str):
        """
        Search interval in specified direction.

        Parameters:
        -----------
        direction : str
            'lower' for Z0→LB search, 'upper' for Z0→UB search
        """
        self.logger.info(f"Searching interval in {direction} direction.")
        # Generate search points for this direction
        search_points = self._generate_search_points(direction)

        if len(search_points) == 0:
            self.logger.info(f"  No valid search points for {direction} direction")
            return

        bound_str = "LB" if direction == 'lower' else "UB"
        bound_val = self.LB if direction == 'lower' else self.UB
        self.logger.info(f"  Generated {len(search_points)} points toward {bound_str} ({bound_val:.6f})")

        # Search each point
        self.logger.info(f"  Starting search in {direction} direction...")
        successful_fits = 0
        for i, datum in enumerate(search_points):

            try:
                # Compute Z0 with extended datum
                z0_new = self._compute_z0_with_extended_datum(datum)

                # Store successful result
                self.search_results[direction]['datum_values'].append(datum)
                self.search_results[direction]['z0_values'].append(z0_new)
                self.search_results[direction]['success_flags'].append(True)
                successful_fits += 1

                # Progress reporting
                if self.verbose and (i + 1) % max(1, len(search_points) // 5) == 0:
                    progress = ((i + 1) / len(search_points)) * 100
                    self.logger.info(f"  Progress: {progress:.1f}% | Datum: {datum:.6f} | Z0: {z0_new:.6f}")

                # Check for early convergence
                if self._check_convergence(direction) and i >= self.min_search_points:
                    self.logger.info(f"  Early convergence detected after {i+1} points")
                    break

            except Exception as e:
                # Try simple approach for failed cases
                self.logger.warning(f"  Failed at datum {datum:.6f}: {str(e)}. Trying simple approach...")
                try:
                    # Compute Z0 with simple extended datum
                    self.logger.info(f"  Trying simple approach for datum {datum:.6f}")
                    z0_new = self._compute_z0_with_extended_datum_simple(datum)

                    # Store successful result
                    self.search_results[direction]['datum_values'].append(datum)
                    self.search_results[direction]['z0_values'].append(z0_new)
                    self.search_results[direction]['success_flags'].append(True)
                    successful_fits += 1

                except Exception as e2:
                    # Store failed result
                    self.search_results[direction]['datum_values'].append(datum)
                    self.search_results[direction]['z0_values'].append(np.nan)
                    self.search_results[direction]['success_flags'].append(False)

                    if self.verbose and i < 3:  # Show first few errors
                        self.logger.warning(f"  Failed at datum {datum:.6f}: {str(e2)}")

        if self.verbose:
            self.logger.info(f"  {direction.capitalize()} search completed: {successful_fits}/{len(search_points)} successful")

    def _analyze_and_extract_intervals_with_ordering(self):
        """
        Analyze search results and extract interval parameters with ordering constraint.

        Ensures that: ZL < Z0L < Z0 < Z0U < ZU
        If initial extrema don't satisfy this, search for valid alternatives.
        """
        self.logger.info("Analyzing search results with ordering constraint...")

        # Collect all successful results
        all_datum_values = []
        all_z0_values = []

        for direction in ['lower', 'upper']:
            data = self.search_results[direction]
            for datum, z0, success in zip(data['datum_values'], data['z0_values'], data['success_flags']):
                if success and not np.isnan(z0):
                    all_datum_values.append(datum)
                    all_z0_values.append(z0)

        if len(all_z0_values) == 0:
            self.logger.error("No successful fits found. Cannot determine intervals.")
            raise RuntimeError("No successful fits found. Cannot determine intervals.")

        all_datum_values = np.array(all_datum_values)
        all_z0_values = np.array(all_z0_values)

        if self.verbose:
            self.logger.info(f"  Valid results: {len(all_z0_values)}")
            self.logger.info(f"  Z0 range: [{np.min(all_z0_values):.6f}, {np.max(all_z0_values):.6f}]")
            self.logger.info(f"  Datum range: [{np.min(all_datum_values):.6f}, {np.max(all_datum_values):.6f}]")

        # Find initial extrema
        min_z0_idx = np.argmin(all_z0_values)
        max_z0_idx = np.argmax(all_z0_values)

        initial_zl = float(all_datum_values[min_z0_idx])
        initial_z0l = float(all_z0_values[min_z0_idx])
        initial_zu = float(all_datum_values[max_z0_idx])
        initial_z0u = float(all_z0_values[max_z0_idx])

        if self.verbose:
            self.logger.info(f"  Initial extrema:")
            self.logger.info(f"    ZL = {initial_zl:.6f}, Z0L = {initial_z0l:.6f}")
            self.logger.info(f"    ZU = {initial_zu:.6f}, Z0U = {initial_z0u:.6f}")

        # Check ordering constraint: ZL < Z0L < Z0 < Z0U < ZU
        ordering_valid = (initial_zl < initial_z0l < self.z0 < initial_z0u < initial_zu)

        if ordering_valid:
            if self.verbose:
                self.logger.info(f"  ✓ Ordering constraint satisfied: ZL < Z0L < Z0 < Z0U < ZU")

            self.zl = initial_zl
            self.z0l = initial_z0l
            self.zu = initial_zu
            self.z0u = initial_z0u

        else:
            if self.verbose:
                self.logger.info(f"  ✗ Ordering constraint violated. Searching for valid extrema...")
                self.logger.info(f"    Current: {initial_zl:.6f} < {initial_z0l:.6f} < {self.z0:.6f} < {initial_z0u:.6f} < {initial_zu:.6f}")

            # Find valid extrema that satisfy ordering constraint
            valid_extrema = self._find_valid_extrema_with_ordering(all_datum_values, all_z0_values)

            if valid_extrema is None:
                # Fallback: use best available extrema with warning
                if self.verbose:
                    self.logger.warning(f"  ⚠ No valid extrema found satisfying ordering constraint. Using best available.")

                self.zl = initial_zl
                self.z0l = initial_z0l
                self.zu = initial_zu
                self.z0u = initial_z0u
            else:
                self.zl, self.z0l, self.zu, self.z0u = valid_extrema
                if self.verbose:
                    self.logger.info(f"  ✓ Found valid extrema:")
                    self.logger.info(f"    ZL = {self.zl:.6f}, Z0L = {self.z0l:.6f}")
                    self.logger.info(f"    ZU = {self.zu:.6f}, Z0U = {self.z0u:.6f}")

        # Compute interval widths
        self.typical_data_interval = self.zu - self.zl
        self.tolerance_interval = self.z0u - self.z0l

        # Final validation
        final_ordering_valid = (self.zl < self.z0l < self.z0 < self.z0u < self.zu)

        if self.verbose:
            self.logger.info(f"  Final ordering check: {'✓ VALID' if final_ordering_valid else '✗ INVALID'}")
            self.logger.info(f"  Critical points:")
            self.logger.info(f"    ZL = {self.zl:.6f}, Z0L = {self.z0l:.6f}")
            self.logger.info(f"    Z0 = {self.z0:.6f}")
            self.logger.info(f"    Z0U = {self.z0u:.6f}, ZU = {self.zu:.6f}")

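    # Ordering-constraint illustration (hypothetical numbers, for reading the logs
    # above): with an original Z0 of 20.0, a valid result could look like
    #   ZL=18.7 < Z0L=19.8 < Z0=20.0 < Z0U=20.2 < ZU=21.3,
    # i.e. the extreme datum values bracket the Z0 extrema, which bracket Z0 itself.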
    def _find_valid_extrema_with_ordering(self, datum_values: np.ndarray, z0_values: np.ndarray) -> Optional[Tuple[float, float, float, float]]:
        """
        Find extrema that satisfy the ordering constraint: ZL < Z0L < Z0 < Z0U < ZU

        Parameters:
        -----------
        datum_values : np.ndarray
            Array of datum values
        z0_values : np.ndarray
            Array of corresponding Z0 values

        Returns:
        --------
        Optional[Tuple[float, float, float, float]]
            Valid (zl, z0l, zu, z0u) or None if not found
        """
        self.logger.info("Searching for valid extrema satisfying ordering constraint...")

        # Separate lower and upper search results
        lower_mask = datum_values < self.z0
        upper_mask = datum_values > self.z0

        lower_datum = datum_values[lower_mask]
        lower_z0 = z0_values[lower_mask]
        upper_datum = datum_values[upper_mask]
        upper_z0 = z0_values[upper_mask]

        if len(lower_datum) == 0 or len(upper_datum) == 0:
            self.logger.warning(f"  ✗ Insufficient data for both sides of Z0")
            return None

        # Find multiple minima and maxima candidates
        lower_sorted_idx = np.argsort(lower_z0)
        upper_sorted_idx = np.argsort(upper_z0)

        # Try different combinations of extrema
        n_candidates = min(5, len(lower_sorted_idx), len(upper_sorted_idx))

        for i in range(n_candidates):
            for j in range(n_candidates):
                # Try i-th minimum and j-th maximum
                min_idx = lower_sorted_idx[i]
                max_idx = upper_sorted_idx[-(j+1)]  # j-th from the end (highest)

                candidate_zl = float(lower_datum[min_idx])
                candidate_z0l = float(lower_z0[min_idx])
                candidate_zu = float(upper_datum[max_idx])
                candidate_z0u = float(upper_z0[max_idx])

                # Check ordering constraint
                if (candidate_zl < candidate_z0l < self.z0 < candidate_z0u < candidate_zu):
                    self.logger.info(f"  ✓ Found valid combination (min_rank={i+1}, max_rank={j+1})")
                    self.logger.info(f"    {candidate_zl:.6f} < {candidate_z0l:.6f} < {self.z0:.6f} < {candidate_z0u:.6f} < {candidate_zu:.6f}")

                    return (candidate_zl, candidate_z0l, candidate_zu, candidate_z0u)

        # If no valid combination found, try relaxed search
        self.logger.warning(f"  No strict extrema found. Trying relaxed search...")

        return self._find_extrema_relaxed_search(datum_values, z0_values)

    def _find_extrema_relaxed_search(self, datum_values: np.ndarray, z0_values: np.ndarray) -> Optional[Tuple[float, float, float, float]]:
        """
        Relaxed search for extrema when strict extrema don't satisfy ordering.

        Parameters:
        -----------
        datum_values : np.ndarray
            Array of datum values
        z0_values : np.ndarray
            Array of corresponding Z0 values

        Returns:
        --------
        Optional[Tuple[float, float, float, float]]
            Valid (zl, z0l, zu, z0u) or None if not found
        """
        self.logger.info("Performing relaxed search for extrema...")
        # Split data into lower and upper regions
        lower_mask = datum_values < self.z0
        upper_mask = datum_values > self.z0

        lower_datum = datum_values[lower_mask]
        lower_z0 = z0_values[lower_mask]
        upper_datum = datum_values[upper_mask]
        upper_z0 = z0_values[upper_mask]

        if len(lower_datum) == 0 or len(upper_datum) == 0:
            return None

        # For lower region: find datum that gives Z0 < Z0_original and ZL < Z0L
        valid_lower_mask = (lower_z0 < self.z0) & (lower_datum < lower_z0)
        if np.any(valid_lower_mask):
            valid_lower_datum = lower_datum[valid_lower_mask]
            valid_lower_z0 = lower_z0[valid_lower_mask]

            # Choose the one with minimum Z0
            min_idx = np.argmin(valid_lower_z0)
            candidate_zl = valid_lower_datum[min_idx]
            candidate_z0l = valid_lower_z0[min_idx]
        else:
            # Fallback: use extrema even if ordering is violated
            min_idx = np.argmin(lower_z0)
            candidate_zl = lower_datum[min_idx]
            candidate_z0l = lower_z0[min_idx]

        # For upper region: find datum that gives Z0 > Z0_original and ZU > Z0U
        valid_upper_mask = (upper_z0 > self.z0) & (upper_datum > upper_z0)
        if np.any(valid_upper_mask):
            valid_upper_datum = upper_datum[valid_upper_mask]
            valid_upper_z0 = upper_z0[valid_upper_mask]

            # Choose the one with maximum Z0
            max_idx = np.argmax(valid_upper_z0)
            candidate_zu = valid_upper_datum[max_idx]
            candidate_z0u = valid_upper_z0[max_idx]
        else:
            # Fallback: use extrema even if ordering is violated
            max_idx = np.argmax(upper_z0)
            candidate_zu = upper_datum[max_idx]
            candidate_z0u = upper_z0[max_idx]

        if self.verbose:
            ordering_check = (candidate_zl < candidate_z0l < self.z0 < candidate_z0u < candidate_zu)
            self.logger.info(f"  Relaxed search result: {'✓ VALID' if ordering_check else '✗ INVALID'}")
            self.logger.info(f"    {candidate_zl:.6f} < {candidate_z0l:.6f} < {self.z0:.6f} < {candidate_z0u:.6f} < {candidate_zu:.6f}")

        return (candidate_zl, candidate_z0l, candidate_zu, candidate_z0u)

    def _generate_search_points(self, direction: str) -> np.ndarray:
        """Generate search points for given direction with dense sampling near Z0."""
        self.logger.info(f"Generating search points in the {direction} direction...")

        if direction == 'lower':
            self.logger.info("  Generating points toward LB...")
            # Search from Z0 toward LB
            full_range = self.z0 - self.LB
            if full_range <= 0:
                return np.array([])

            # Apply safety margin
            margin = full_range * self.boundary_margin_factor
            search_start = self.z0
            search_end = self.LB + margin

            if search_start <= search_end:
                return np.array([])

            # Dense zone: near Z0
            dense_range = full_range * self.dense_zone_fraction
            dense_start = max(search_end, search_start - dense_range)

            # Generate points
            n_dense = int(self.n_points_per_direction * self.dense_points_fraction)
            n_sparse = self.n_points_per_direction - n_dense

            # Dense points (linear spacing)
            dense_points = np.linspace(search_start, dense_start, n_dense + 1)[1:]  # Exclude Z0

            # Sparse points (logarithmic spacing toward boundary)
            if n_sparse > 0 and dense_start > search_end:
                # Logarithmic ratios for smooth transition
                log_space = np.logspace(0, 2, n_sparse + 1)[1:]  # [10^0, 10^2] range
                log_ratios = (log_space - 1) / (100 - 1)  # Normalize to [0, 1]
                sparse_points = search_end + log_ratios * (dense_start - search_end)
            else:
                sparse_points = np.array([])

            # Combine and sort (Z0 → LB direction)
            all_points = np.concatenate([dense_points, sparse_points])
            return np.sort(all_points)[::-1]  # Descending order

        else:  # upper direction
            self.logger.info("  Generating points toward UB...")
            # Search from Z0 toward UB
            full_range = self.UB - self.z0
            if full_range <= 0:
                return np.array([])

            # Apply safety margin
            margin = full_range * self.boundary_margin_factor
            search_start = self.z0
            search_end = self.UB - margin

            if search_start >= search_end:
                return np.array([])

            # Dense zone: near Z0
            dense_range = full_range * self.dense_zone_fraction
            dense_end = min(search_end, search_start + dense_range)

            # Generate points
            n_dense = int(self.n_points_per_direction * self.dense_points_fraction)
            n_sparse = self.n_points_per_direction - n_dense

            # Dense points (linear spacing)
            dense_points = np.linspace(search_start, dense_end, n_dense + 1)[1:]  # Exclude Z0

            # Sparse points (logarithmic spacing toward boundary)
            if n_sparse > 0 and dense_end < search_end:
                # Logarithmic ratios for smooth transition
                log_space = np.logspace(0, 2, n_sparse + 1)[1:]  # [10^0, 10^2] range
                log_ratios = (log_space - 1) / (100 - 1)  # Normalize to [0, 1]
                sparse_points = dense_end + log_ratios * (search_end - dense_end)
            else:
                sparse_points = np.array([])

            # Combine and sort (Z0 → UB direction)
            all_points = np.concatenate([dense_points, sparse_points])
            return np.sort(all_points)  # Ascending order

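    # Worked example of the split above, using the documented defaults
    # (n_points_per_direction=1000, dense_points_fraction=0.7, dense_zone_fraction=0.4):
    # roughly 700 linearly spaced points cover the 40% of the range nearest Z0
    # (Z0 itself is excluded), and the remaining ~300 points are log-spaced from the
    # edge of that dense zone out to the boundary margin near LB or UB.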
    def _check_convergence(self, direction: str) -> bool:
        """Check if Z0 values have converged in recent window."""

        self.logger.info(f"  Checking convergence in {direction} direction...")

        z0_values = self.search_results[direction]['z0_values']
        success_flags = self.search_results[direction]['success_flags']

        # Get successful Z0 values
        valid_z0 = [z0 for z0, success in zip(z0_values, success_flags)
                    if success and not np.isnan(z0)]

        if len(valid_z0) < self.convergence_window:
            return False

        # Check recent window for convergence
        recent_z0 = valid_z0[-self.convergence_window:]
        z0_std = np.std(recent_z0)

        return z0_std < self.convergence_threshold

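    # With the defaults (convergence_window=15, convergence_threshold=1e-7), the check
    # above returns True once at least 15 successful Z0 evaluations exist and the
    # standard deviation of the most recent 15 falls below 1e-7; _search_interval
    # additionally requires min_search_points before it acts on this signal.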
    def _compute_z0_with_extended_datum(self, datum: float) -> float:
        """Compute Z0 for data extended with given datum."""

        self.logger.info(f"  Computing Z0 with extended datum: {datum:.6f}")

        # Create extended data
        extended_data = np.append(self.original_data, datum)

        # Handle weights if present
        extended_weights = None
        if self.weights is not None:
            extended_weights = np.append(self.weights, 1.0)

        # Create new DF instance with extended data
        df_extended = self._create_df_instance(extended_data, extended_weights)

        # Fit and extract Z0
        df_extended.fit(plot=False)

        if not hasattr(df_extended, 'z0') or df_extended.z0 is None:
            self.logger.error("Z0 not computed for extended DF")
            raise ValueError("Z0 not computed for extended DF")

        return float(df_extended.z0)

    def _compute_z0_with_extended_datum_simple(self, datum: float) -> float:
        """Compute Z0 with minimal parameters (fallback method)."""

        self.logger.info(f"  (Simple) Computing Z0 with extended datum: {datum:.6f}")

        # Create extended data
        extended_data = np.append(self.original_data, datum)

        # Use minimal parameters
        minimal_params = {
            'data': extended_data,
            'LB': self.LB,
            'UB': self.UB,
            'n_points': min(200, len(extended_data) * 3),
            'verbose': False,
            'tolerance': self.tolerance * 10,  # Relaxed tolerance
            'catch': False,
            'flush': False
        }

        # Create DF with minimal parameters
        if self.df_type == 'EGDF':
            df_extended = EGDF(**minimal_params)
        else:  # ELDF
            df_extended = ELDF(**minimal_params)

        # Fit and extract Z0
        df_extended.fit(plot=False)

        if not hasattr(df_extended, 'z0') or df_extended.z0 is None:
            self.logger.error("Z0 not computed for extended DF (simple)")
            raise ValueError("Z0 not computed for extended DF (simple)")

        return float(df_extended.z0)

    def _create_df_instance(self, data: np.ndarray, weights: Optional[np.ndarray] = None):
        """Create DF instance with given data using original parameters."""

        self.logger.info("Creating DF instance with extended data...")

        # Use adaptive n_points for efficiency
        n_points = min(400, len(data) * 4)

        # Relaxed tolerance for extended DF fitting
        extended_tolerance = self.tolerance * 5

        # Build parameters carefully
        common_params = {
            'data': data,
            'LB': self.LB,
            'UB': self.UB,
            'tolerance': extended_tolerance,
            'data_form': self.data_form,
            'n_points': n_points,
            'homogeneous': self.homogeneous,
            'verbose': False,
            'max_data_size': self.max_data_size,
            'catch': False,
            'flush': False
        }

        # Only add DLB/DUB if they are valid
        if self.DLB is not None:
            common_params['DLB'] = self.DLB
        if self.DUB is not None:
            common_params['DUB'] = self.DUB

        # Only add weights if provided
        if weights is not None:
            common_params['weights'] = weights

        if self.df_type == 'EGDF':
            return EGDF(
                S=self.S_opt,
                wedf=self.wedf,
                opt_method=self.opt_method,
                **common_params
            )
        else:  # ELDF
            return ELDF(**common_params)

    def _test_extension_capability(self):
        """Test if data can be extended successfully."""
        self.logger.info("Testing data extension capability...")

        # Try a small extension near Z0
        test_datum = self.z0 + 0.01 * (self.UB - self.z0)

        try:
            test_z0 = self._compute_z0_with_extended_datum(test_datum)
            self.logger.info(f"  Extension test successful: Z0_new = {test_z0:.6f}")
        except Exception as e:
            self.logger.error(f"  First extension test failed: {str(e)}")
            self.logger.info(f"  First extension test failed, trying simpler approach...")

            # Try with minimal parameters
            try:
                test_z0 = self._compute_z0_with_extended_datum_simple(test_datum)
                self.logger.info(f"  Simple extension test successful: Z0_new = {test_z0:.6f}")
            except Exception as e2:
                self.logger.error(f"  Simple extension test failed: {str(e2)}")
                raise RuntimeError(f"Cannot extend data: {str(e2)}")

def _update_params_with_results(self, fit_time: float):
|
|
1128
|
+
"""Update params dictionary with fitting results and statistics."""
|
|
1129
|
+
|
|
1130
|
+
self.logger.info("Updating parameters with fitting results and statistics...")
|
|
1131
|
+
|
|
1132
|
+
# Search statistics
|
|
1133
|
+
lower_success = sum(self.search_results['lower']['success_flags'])
|
|
1134
|
+
upper_success = sum(self.search_results['upper']['success_flags'])
|
|
1135
|
+
lower_total = len(self.search_results['lower']['datum_values'])
|
|
1136
|
+
upper_total = len(self.search_results['upper']['datum_values'])
|
|
1137
|
+
total_success = lower_success + upper_success
|
|
1138
|
+
total_attempts = lower_total + upper_total
|
|
1139
|
+
|
|
1140
|
+
# Ordering validation
|
|
1141
|
+
ordering_valid = (self.zl < self.z0l < self.z0 < self.z0u < self.zu)
|
|
1142
|
+
|
|
1143
|
+
# self z0
|
|
1144
|
+
self.z0 = self.z0
|
|
1145
|
+
|
|
1146
|
+
# Update params with complete results
|
|
1147
|
+
self.params.update({
|
|
1148
|
+
# Core interval results
|
|
1149
|
+
'ZL': float(self.zl),
|
|
1150
|
+
'Z0L': float(self.z0l),
|
|
1151
|
+
'Z0': float(self.z0),
|
|
1152
|
+
'Z0U': float(self.z0u),
|
|
1153
|
+
'ZU': float(self.zu),
|
|
1154
|
+
|
|
1155
|
+
# Interval measures
|
|
1156
|
+
'tolerance_interval': [float(self.z0l), float(self.z0u)],
|
|
1157
|
+
'typical_data_interval': [float(self.zl), float(self.zu)],
|
|
1158
|
+
'tolerance_interval_width': float(self.tolerance_interval),
|
|
1159
|
+
'typical_data_interval_width': float(self.typical_data_interval),
|
|
1160
|
+
|
|
1161
|
+
# Ordering validation
|
|
1162
|
+
'ordering_validation': {
|
|
1163
|
+
'constraint_satisfied': ordering_valid,
|
|
1164
|
+
'constraint_formula': 'ZL < Z0L < Z0 < Z0U < ZU',
|
|
1165
|
+
'values': [self.zl, self.z0l, self.z0, self.z0u, self.zu],
|
|
1166
|
+
'differences': [
|
|
1167
|
+
self.z0l - self.zl,
|
|
1168
|
+
self.z0 - self.z0l,
|
|
1169
|
+
self.z0u - self.z0,
|
|
1170
|
+
self.zu - self.z0u
|
|
1171
|
+
]
|
|
1172
|
+
},
|
|
1173
|
+
|
|
1174
|
+
# Relative measures
|
|
1175
|
+
'tolerance_to_bounds_ratio': self.tolerance_interval / (self.UB - self.LB),
|
|
1176
|
+
'typical_to_bounds_ratio': self.typical_data_interval / (self.UB - self.LB),
|
|
1177
|
+
'typical_to_tolerance_ratio': self.typical_data_interval / self.tolerance_interval if self.tolerance_interval > 0 else np.inf,
|
|
1178
|
+
|
|
1179
|
+
# Data coverage
|
|
1180
|
+
'data_within_tolerance': self._count_data_in_interval(self.z0l, self.z0u),
|
|
1181
|
+
'data_within_typical': self._count_data_in_interval(self.zl, self.zu),
|
|
1182
|
+
'data_within_tolerance_fraction': self._count_data_in_interval(self.z0l, self.z0u) / len(self.original_data),
|
|
1183
|
+
'data_within_typical_fraction': self._count_data_in_interval(self.zl, self.zu) / len(self.original_data),
|
|
1184
|
+
|
|
1185
|
+
# Search statistics
|
|
1186
|
+
'search_statistics': {
|
|
1187
|
+
'total_attempts': total_attempts,
|
|
1188
|
+
'total_successful': total_success,
|
|
1189
|
+
'success_rate': total_success / total_attempts if total_attempts > 0 else 0,
|
|
1190
|
+
'lower_search': {
|
|
1191
|
+
'attempts': lower_total,
|
|
1192
|
+
'successful': lower_success,
|
|
1193
|
+
'success_rate': lower_success / lower_total if lower_total > 0 else 0
|
|
1194
|
+
},
|
|
1195
|
+
'upper_search': {
|
|
1196
|
+
'attempts': upper_total,
|
|
1197
|
+
'successful': upper_success,
|
|
1198
|
+
'success_rate': upper_success / upper_total if upper_total > 0 else 0
|
|
1199
|
+
}
|
|
1200
|
+
},
|
|
1201
|
+
|
|
1202
|
+
# Timing and status
|
|
1203
|
+
'fitted': True,
|
|
1204
|
+
'fit_time': fit_time,
|
|
1205
|
+
'fit_timestamp': np.datetime64('now'),
|
|
1206
|
+
|
|
1207
|
+
# Quality metrics
|
|
1208
|
+
'interval_quality': {
|
|
1209
|
+
'z0_variation_range': self.z0u - self.z0l,
|
|
1210
|
+
'datum_variation_range': self.zu - self.zl,
|
|
1211
|
+
'z0_stability': 1.0 - (self.z0u - self.z0l) / abs(self.z0) if self.z0 != 0 else 1.0,
|
|
1212
|
+
'interval_symmetry': abs((self.zu + self.zl) / 2 - self.z0) / (self.UB - self.LB),
|
|
1213
|
+
'ordering_constraint_satisfied': ordering_valid
|
|
1214
|
+
}
|
|
1215
|
+
})
|
|
1216
|
+
|
|
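The chained comparison zl < z0l < z0 < z0u < zu encodes the ordering constraint in a single expression, and the derived ratios are plain arithmetic on the interval endpoints and data bounds. A small, self-contained check with made-up numbers (they are illustrative only, not engine output):

# Illustrative numbers only; not output from the package.
zl, z0l, z0, z0u, zu = 0.8, 0.9, 1.0, 1.1, 1.25
LB, UB = 0.0, 2.0

ordering_valid = zl < z0l < z0 < z0u < zu        # True
tolerance_width = z0u - z0l                      # ~0.20
typical_width = zu - zl                          # ~0.45

print(ordering_valid)                            # True
print(tolerance_width / (UB - LB))               # tolerance_to_bounds_ratio, ~0.10
print(typical_width / tolerance_width)           # typical_to_tolerance_ratio, ~2.25
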
    def _count_data_in_interval(self, lower: float, upper: float) -> int:
        """Count how many data points fall within the given interval."""
        self.logger.info(f"Counting data points in interval [{lower:.6f}, {upper:.6f}]...")
        return np.sum((self.original_data >= lower) & (self.original_data <= upper))

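The count is a standard NumPy idiom: each comparison yields an element-wise boolean array, & intersects them, and summing the mask counts the True entries. For example:

import numpy as np

data = np.array([0.2, 0.5, 0.9, 1.4, 2.1])
mask = (data >= 0.5) & (data <= 1.5)   # [False, True, True, True, False]
print(int(np.sum(mask)))               # 3
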
    def _update_df_object_params(self):
        """Update the original DF object's params with interval results."""

        self.logger.info("Updating original DF object parameters with interval results...")

        if not hasattr(self.df_object, 'params'):
            self.df_object.params = {}

        # Create interval-specific parameter dictionary
        interval_params = {
            'interval_estimation': {
                # Core results
                'ZL': self.zl,
                'Z0L': self.z0l,
                'Z0U': self.z0u,
                'ZU': self.zu,
                'tolerance_interval': [self.z0l, self.z0u],
                'typical_data_interval': [self.zl, self.zu],
                'tolerance_interval_width': self.tolerance_interval,
                'typical_data_interval_width': self.typical_data_interval,

                # Ordering validation
                'ordering_constraint_satisfied': (self.zl < self.z0l < self.z0 < self.z0u < self.zu),

                # Summary statistics
                'data_coverage': {
                    'tolerance_count': self._count_data_in_interval(self.z0l, self.z0u),
                    'typical_count': self._count_data_in_interval(self.zl, self.zu),
                    'tolerance_fraction': self._count_data_in_interval(self.z0l, self.z0u) / len(self.original_data),
                    'typical_fraction': self._count_data_in_interval(self.zl, self.zu) / len(self.original_data),
                },

                # Method information
                'method': 'Z0-based interval estimation with ordering constraint',
                'engine_type': 'IntveEngine',
                'search_points_per_direction': self.n_points_per_direction,
                'successful_fits': sum(self.search_results['lower']['success_flags']) + sum(self.search_results['upper']['success_flags']),
                'fit_timestamp': str(np.datetime64('now'))
            }
        }

        # Update DF object params
        self.df_object.params.update(interval_params)

        self.logger.info(f"Updated {self.df_type} object params with interval estimation results")

    def _print_results(self):
        """Print formatted results."""
        self.logger.info(f"\n{'='*70}")
        self.logger.info(f"Z0-BASED INTERVAL ESTIMATION RESULTS - ({self.df_type})")
        self.logger.info(f"{'='*70}")

        self.logger.info(f"Original Configuration:")
        self.logger.info(f" Data size: {len(self.original_data)}")
        self.logger.info(f" Bounds: [{self.LB:.6f}, {self.UB:.6f}]")
        self.logger.info(f" Original Z0: {self.z0:.6f}")
        self.logger.info(f"")

        # Ordering constraint check
        ordering_valid = (self.zl < self.z0l < self.z0 < self.z0u < self.zu)
        self.logger.info(f"Ordering Constraint: ZL < Z0L < Z0 < Z0U < ZU")
        self.logger.info(f" Status: {'✓ SATISFIED' if ordering_valid else '✗ VIOLATED'}")
        self.logger.info(f" Values: {self.zl:.6f} < {self.z0l:.6f} < {self.z0:.6f} < {self.z0u:.6f} < {self.zu:.6f}")

        if not ordering_valid:
            self.logger.info(f" ⚠ Warning: Ordering constraint not satisfied. Results may be suboptimal.")
        self.logger.info(f"")

        self.logger.info(f"Critical Points:")
        self.logger.info(f" ZL (datum for min Z0): {self.zl:.6f}")
        self.logger.info(f" Z0L (minimum Z0): {self.z0l:.6f}")
        self.logger.info(f" Z0 (original): {self.z0:.6f}")
        self.logger.info(f" Z0U (maximum Z0): {self.z0u:.6f}")
        self.logger.info(f" ZU (datum for max Z0): {self.zu:.6f}")
        self.logger.info(f"")

        self.logger.info(f"Intervals:")
        self.logger.info(f" Typical Data: [{self.zl:.6f}, {self.zu:.6f}] (width: {self.typical_data_interval:.6f})")
        self.logger.info(f" Tolerance: [{self.z0l:.6f}, {self.z0u:.6f}] (width: {self.tolerance_interval:.6f})")
        self.logger.info(f"")

        # Data coverage
        tol_count = self._count_data_in_interval(self.z0l, self.z0u)
        typ_count = self._count_data_in_interval(self.zl, self.zu)
        total_data = len(self.original_data)

        self.logger.info(f"Data Coverage:")
        self.logger.info(f" Within tolerance interval: {tol_count}/{total_data} ({tol_count/total_data:.1%})")
        self.logger.info(f" Within typical data interval: {typ_count}/{total_data} ({typ_count/total_data:.1%})")
        self.logger.info(f"")

        # Search summary
        lower_success = sum(self.search_results['lower']['success_flags'])
        upper_success = sum(self.search_results['upper']['success_flags'])
        lower_total = len(self.search_results['lower']['datum_values'])
        upper_total = len(self.search_results['upper']['datum_values'])

        self.logger.info(f"Search Summary:")
        self.logger.info(f" Lower direction: {lower_success}/{lower_total} successful")
        self.logger.info(f" Upper direction: {upper_success}/{upper_total} successful")
        self.logger.info(f" Total valid fits: {lower_success + upper_success}")
        self.logger.info(f" Fit time: {self.params.get('fit_time', 0):.3f} seconds")
        self.logger.info(f"{'='*70}")

    def _print_debug_info(self):
        """Print debug information when fitting fails."""
        self.logger.info(' Fitting failed or produced invalid results. Debug information:')
        self.logger.info(f"Original data: {self.original_data}")
        self.logger.info(f"Data stats: mean={np.mean(self.original_data):.6f}, std={np.std(self.original_data):.6f}")
        self.logger.info(f"Bounds: LB={self.LB:.6f}, UB={self.UB:.6f}")
        self.logger.info(f"Z0: {self.z0:.6f}")
        self.logger.info(f"DLB: {self.DLB}, DUB: {self.DUB}")

        # Show search results summary
        for direction in ['lower', 'upper']:
            data = self.search_results[direction]
            if len(data['datum_values']) > 0:
                success_count = sum(data['success_flags'])
                total_count = len(data['datum_values'])
                self.logger.info(f"{direction.capitalize()} search: {success_count}/{total_count} successful")

    def get_intervals(self, decimals: int = 6) -> Dict[str, float]:
        """Get interval results as dictionary."""
        self.logger.info("Retrieving interval results as dictionary...")

        if not self._fitted:
            raise RuntimeError("Must fit before getting intervals")

        return {
            'ZL': round(self.zl, decimals),
            'Z0L': round(self.z0l, decimals),
            'Z0': round(self.z0, decimals),
            'Z0U': round(self.z0u, decimals),
            'ZU': round(self.zu, decimals),
            'typical_data_interval': round(self.typical_data_interval, decimals),
            'tolerance_interval': round(self.tolerance_interval, decimals),
            'LB': round(self.LB, decimals),
            'UB': round(self.UB, decimals),
            'ordering_constraint_satisfied': (self.zl < self.z0l < self.z0 < self.z0u < self.zu)
        }

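get_intervals is the read-out for a completed fit: it raises if called before fitting and returns the rounded endpoints, widths, bounds, and the ordering flag. A consumption sketch; the dictionary literal below stands in for a real engine.get_intervals(decimals=4) call on a fitted engine instance, and its numbers are purely illustrative:

# Stand-in for: intervals = engine.get_intervals(decimals=4)
intervals = {
    'ZL': 0.8123, 'Z0L': 0.9011, 'Z0': 1.0005, 'Z0U': 1.0998, 'ZU': 1.2531,
    'typical_data_interval': 0.4408, 'tolerance_interval': 0.1987,
    'LB': 0.0, 'UB': 2.0, 'ordering_constraint_satisfied': True,
}
if not intervals['ordering_constraint_satisfied']:
    print("Ordering constraint ZL < Z0L < Z0 < Z0U < ZU violated")
print(f"tolerance width: {intervals['tolerance_interval']}, "
      f"typical width: {intervals['typical_data_interval']}")
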
    def plot(self, figsize: Tuple[int, int] = (12, 8)):
        """Plot interval estimation results."""
        self.logger.info("Plotting interval estimation results...")

        if not self._fitted:
            self.logger.error("Must fit before plotting")
            raise RuntimeError("Must fit before plotting")

        try:
            import matplotlib.pyplot as plt
        except ImportError:
            self.logger.error("matplotlib required for plotting")
            raise ImportError("matplotlib required for plotting")

        # Create main Z0 variation plot
        self._plot_z0_variation(figsize)

        # Create distribution plot if requested
        # if plot_distribution:
        #     self._plot_distribution_with_intervals(figsize, eldf_plot)

    def _plot_z0_variation(self, figsize: Tuple[int, int] = (12, 8)):
        """Plot Z0 variation with improved legend and ordering validation."""
        self.logger.info("Plotting Z0 variation...")
        import matplotlib.pyplot as plt

        # Collect valid data points
        datum_vals, z0_vals, colors = [], [], []

        # Lower search (blue)
        lower_data = self.search_results['lower']
        for datum, z0, success in zip(lower_data['datum_values'], lower_data['z0_values'], lower_data['success_flags']):
            if success and not np.isnan(z0):
                datum_vals.append(datum)
                z0_vals.append(z0)
                colors.append('blue')

        # Upper search (red)
        upper_data = self.search_results['upper']
        for datum, z0, success in zip(upper_data['datum_values'], upper_data['z0_values'], upper_data['success_flags']):
            if success and not np.isnan(z0):
                datum_vals.append(datum)
                z0_vals.append(z0)
                colors.append('red')

        if len(datum_vals) == 0:
            self.logger.info("No valid data for plotting")
            return

        # Create plot
        fig, ax = plt.subplots(1, 1, figsize=figsize)

        # Scatter points by search direction
        datum_vals = np.array(datum_vals)
        z0_vals = np.array(z0_vals)
        colors = np.array(colors)

        blue_mask = colors == 'blue'
        red_mask = colors == 'red'

        if np.any(blue_mask):
            ax.scatter(datum_vals[blue_mask], z0_vals[blue_mask],
                       c='blue', alpha=0.6, s=20, label='Lower Search (Z0→LB)')
        if np.any(red_mask):
            ax.scatter(datum_vals[red_mask], z0_vals[red_mask],
                       c='red', alpha=0.6, s=20, label='Upper Search (Z0→UB)')

        # Smooth curve if enough points
        if len(datum_vals) > 20:
            sort_idx = np.argsort(datum_vals)
            ax.plot(datum_vals[sort_idx], z0_vals[sort_idx], 'k-',
                    alpha=0.4, linewidth=1.5, label='Z0 Variation Curve')

        # Critical points
        ax.scatter([self.zl], [self.z0l], marker='v', s=150, color='purple',
                   edgecolor='black', linewidth=2, zorder=10,
                   label=f'ZL,Z0L ({self.zl:.4f},{self.z0l:.4f})')
        ax.scatter([self.z0], [self.z0], marker='s', s=150, color='green',
                   edgecolor='black', linewidth=2, zorder=10,
                   label=f'Z0 ({self.z0:.4f})')
        ax.scatter([self.zu], [self.z0u], marker='^', s=150, color='orange',
                   edgecolor='black', linewidth=2, zorder=10,
                   label=f'Z0U, ZU ({self.z0u:.4f},{self.zu:.4f})')

        # Reference lines
        ax.axvline(x=self.zl, color='purple', linestyle='--', alpha=0.7, linewidth=1)
        ax.axvline(x=self.z0, color='green', linestyle='-', alpha=0.8, linewidth=2)
        ax.axvline(x=self.zu, color='orange', linestyle='--', alpha=0.7, linewidth=1)
        ax.axhline(y=self.z0l, color='purple', linestyle=':', alpha=0.7, linewidth=1)
        ax.axhline(y=self.z0u, color='orange', linestyle=':', alpha=0.7, linewidth=1)

        # Add interval information and ordering status to legend
        ordering_valid = (self.zl < self.z0l < self.z0 < self.z0u < self.zu)
        ordering_status = "✓ VALID" if ordering_valid else "✗ INVALID"

        tol_interval_str = f"Tolerance Interval: [{self.z0l:.4f}, {self.z0u:.4f}]"
        typ_interval_str = f"Typical Data Interval: [{self.zl:.4f}, {self.zu:.4f}]"
        ordering_str = f"Ordering Constraint: {ordering_status}"

        # Create invisible plot points for legend entries
        ax.plot([], [], ' ', label=tol_interval_str, color='lightgreen', alpha=0.7)
        ax.plot([], [], ' ', label=typ_interval_str, color='lightblue', alpha=0.7)
        ax.plot([], [], ' ', label=ordering_str, color='red' if not ordering_valid else 'green', alpha=0.7)

        # Labels and formatting
        ax.set_xlabel('Datum Value', fontsize=12, fontweight='bold')
        ax.set_ylabel('Z0 Value', fontsize=12, fontweight='bold')

        title = f'Z0-Based Interval Estimation ({self.df_type})'
        if not ordering_valid:
            title += ' - ⚠ Ordering Constraint Violated'

        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=10)
        ax.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

        if self.verbose:
            # Print summary
            self.logger.info(f"\nZ0 Variation Plot Summary:")
            self.logger.info(f" Total valid points: {len(datum_vals)}")
            self.logger.info(f" Typical data interval: [{self.zl:.6f}, {self.zu:.6f}] (width: {self.typical_data_interval:.6f})")
            self.logger.info(f" Tolerance interval: [{self.z0l:.6f}, {self.z0u:.6f}] (width: {self.tolerance_interval:.6f})")
            self.logger.info(f" Ordering constraint: {'✓ SATISFIED' if ordering_valid else '✗ VIOLATED'}")

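The interval strings reach the legend through a common matplotlib idiom: plotting empty arrays with a blank format string creates an artist that draws nothing but still contributes a legend line. A minimal, self-contained illustration of that idiom:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.scatter([1, 2, 3], [1, 4, 9], label='data')
# Empty data + ' ' format: nothing is drawn, but the label appears in the legend.
ax.plot([], [], ' ', label='Tolerance Interval: [0.90, 1.10]')
ax.legend()
plt.show()
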
    # def _plot_distribution_with_intervals(self, figsize: Tuple[int, int] = (12, 8),
    #                                       eldf_plot: bool = True):
    #     """Plot ELDF/PDF distribution with interval markers and filled areas."""

    #     import matplotlib.pyplot as plt

    #     # Create figure
    #     fig, ax = plt.subplots(1, 1, figsize=figsize)

    #     # Get x range for plotting (slightly beyond bounds)
    #     x_margin = (self.UB - self.LB) * 0.05
    #     x_min = self.LB - x_margin
    #     x_max = self.UB + x_margin
    #     x = np.linspace(x_min, x_max, 1000)

    #     # Compute and plot ELDF or PDF
    #     if eldf_plot:
    #         try:
    #             y = self.df_object.eldf(x)
    #             ax.plot(x, y, 'k-', linewidth=2, label=f'{self.df_type} Function', alpha=0.8)
    #             y_label = f'{self.df_type} Value'
    #             plot_title = f'{self.df_type} with Intervals'
    #         except Exception as e:
    #             print(f"Could not compute ELDF: {e}")
    #             return
    #     else:
    #         try:
    #             y = self.df_object.pdf(x)
    #             ax.plot(x, y, 'k-', linewidth=2, label='PDF', alpha=0.8)
    #             y_label = 'Probability Density'
    #             plot_title = 'PDF with Intervals'
    #         except Exception as e:
    #             print(f"Could not compute PDF: {e}")
    #             return