hdim-opt 1.3.1.tar.gz → 1.3.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/PKG-INFO +4 -3
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/README.md +2 -1
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/__init__.py +8 -8
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/hyperellipsoid_sampling.py +10 -14
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/quasar_helpers.py +11 -14
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/quasar_optimization.py +31 -33
- hdim_opt-1.3.2/hdim_opt/sobol_sensitivity.py +185 -0
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/waveform_analysis.py +1 -1
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt.egg-info/PKG-INFO +4 -3
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt.egg-info/SOURCES.txt +0 -1
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/pyproject.toml +2 -2
- hdim_opt-1.3.1/hdim_opt/sobol_sampling.py +0 -29
- hdim_opt-1.3.1/hdim_opt/sobol_sensitivity.py +0 -126
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/test_functions.py +0 -0
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt.egg-info/dependency_links.txt +0 -0
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt.egg-info/requires.txt +0 -0
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt.egg-info/top_level.txt +0 -0
- {hdim_opt-1.3.1 → hdim_opt-1.3.2}/setup.cfg +0 -0
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/PKG-INFO

@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: hdim_opt
-Version: 1.3.1
-Summary: High-dimensional …
+Version: 1.3.2
+Summary: High-dimensional numerical optimization and sampling toolkit for complex, non-differentiable problems.
 Author-email: Julian Soltes <jsoltes@regis.edu>
 License: MIT
 Project-URL: Homepage, https://github.com/jgsoltes/hdim_opt
@@ -42,13 +42,14 @@ Requires-Dist: SALib; extra == "sensitivity"
 
 # hdim-opt: High-Dimensional Optimization Toolkit
 
-
+Modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm, HDS exploitative QMC sampler, Sobol sensitivity analysis, signal waveform decomposition, and data transformations.
 
 All core functions, listed below, are single-line executable and require three essential parameters: [obj_function, bounds, n_samples].
 * **quasar**: QUASAR optimization for high-dimensional, non-differentiable problems.
 * **hyperellipsoid**: Generate a non-uniform Hyperellipsoid Density sequence, to focus sample distributions.
 * **sobol**: Generate a uniform Sobol sequence (via SciPy).
 * **sensitivity**: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
+* **isotropize**: Isotropizes the input matrix.
 * **waveform**: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
 
 ---
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/README.md

@@ -1,12 +1,13 @@
 # hdim-opt: High-Dimensional Optimization Toolkit
 
-
+Modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm, HDS exploitative QMC sampler, Sobol sensitivity analysis, signal waveform decomposition, and data transformations.
 
 All core functions, listed below, are single-line executable and require three essential parameters: [obj_function, bounds, n_samples].
 * **quasar**: QUASAR optimization for high-dimensional, non-differentiable problems.
 * **hyperellipsoid**: Generate a non-uniform Hyperellipsoid Density sequence, to focus sample distributions.
 * **sobol**: Generate a uniform Sobol sequence (via SciPy).
 * **sensitivity**: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
+* **isotropize**: Isotropizes the input matrix.
 * **waveform**: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
 
 ---
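Taken together, the README's three-parameter pattern maps onto calls like the following minimal sketch; the sphere objective is a hypothetical stand-in, and the keyword names are taken from the function definitions that appear later in this diff:

```python
import numpy as np
from hdim_opt import quasar, sobol, hyperellipsoid

# hypothetical objective: 10-D sphere function (global minimum 0 at the origin)
def sphere(x):
    return float(np.sum(np.asarray(x) ** 2))

bounds = [(-5.0, 5.0)] * 10

# QUASAR optimization (keyword names per the optimize() docstring in this diff)
result = quasar(sphere, bounds, maxiter=100, seed=42)

# uniform Sobol sequence via SciPy (sobol_sample() in sobol_sensitivity.py)
sobol_points = sobol(n_samples=2**8, bounds=bounds, seed=42)

# non-uniform Hyperellipsoid Density sequence (sample() in hyperellipsoid_sampling.py)
hds_points = hyperellipsoid(n_samples=2**8, bounds=bounds)
```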
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/__init__.py

@@ -7,12 +7,12 @@ Functions:
 - hyperellipsoid: Generate a non-uniform hyperellipsoid density sequence.
 - sobol: Generate a uniform Sobol sequence (via SciPy).
 - sensitivity: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
-- …
-- …
+- isotropize/deisotropize: Isotropize the input matrix using ZCA.
+- waveform: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
 
 Modules:
 - test_functions: Contains test functions for local optimization testing.
-- waveform_analysis: Contains pulse generation functions.
+- waveform_analysis: Contains pulse signal generation functions.
 
 Example Usage:
@@ -38,17 +38,17 @@ Example Usage:
 """
 
 # package version
-__version__ = "1.3.1"
-__all__ = ['quasar', …
+__version__ = "1.3.2"
+__all__ = ['quasar','hyperellipsoid','sensitivity','waveform','sobol','isotropize','deisotropize',
+           'test_functions','quasar_helpers','waveform_analysis'] # available for star imports
 
 # import core components
 from .quasar_optimization import optimize as quasar
 from .hyperellipsoid_sampling import sample as hyperellipsoid
-from .sobol_sampling import sobol_sample as sobol
+from .sobol_sensitivity import sobol_sample as sobol
 from .sobol_sensitivity import sens_analysis as sensitivity
+from .quasar_helpers import isotropize, deisotropize
 from .waveform_analysis import analyze_waveform as waveform
-from .quasar_helpers import isotropize
-from .quasar_helpers import deisotropize
 from . import test_functions
 from . import quasar_helpers
 from . import waveform_analysis
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/hyperellipsoid_sampling.py

@@ -1,14 +1,15 @@
-# global
+# global epsilon
 epsilon = 1e-16
+import numpy as np
 
 ### misc helper functions
 
-def sample_hypersphere(n_dimensions, radius, n_samples_in_sphere, radius_qmc_sequence):
+def sample_hypersphere(n_dimensions, radius,
+                       n_samples_in_sphere, radius_qmc_sequence):
     '''
     Objective:
     - Samples unit hyperspheres using Marsaglia polar vectors scaled by a QMC sequence.
     '''
-    import numpy as np
 
     # generate normal distribution (for angular direction)
     samples = np.random.normal(size=(n_samples_in_sphere, n_dimensions))
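For context on the docstring above: normalizing Gaussian vectors yields uniformly distributed directions on the unit hypersphere (the Marsaglia construction), and scaling radii as u**(1/d) makes the density uniform over the ball's volume. A self-contained sketch of that technique, not the package's exact code (which draws the radii from a QMC sequence instead of a pseudo-random generator):

```python
import numpy as np

def uniform_ball_samples(n_samples, n_dimensions, radius=1.0, seed=None):
    rng = np.random.default_rng(seed)

    # normalized Gaussian vectors are uniform directions on the unit hypersphere
    directions = rng.normal(size=(n_samples, n_dimensions))
    directions /= np.linalg.norm(directions, axis=1, keepdims=True)

    # u**(1/d) radii give uniform density over the ball's volume
    radii = radius * rng.uniform(size=(n_samples, 1)) ** (1.0 / n_dimensions)
    return directions * radii
```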
@@ -28,14 +29,15 @@ def sample_hypersphere(n_dimensions, radius, n_samples_in_sphere, radius_qmc_sequence):
 
     return samples
 
-def sample_hyperellipsoid(n_dimensions, n_samples_in_ellipsoid, origin, pca_components, pca_variances, scaling_factor, radius_qmc_sequence=None):
+def sample_hyperellipsoid(n_dimensions, n_samples_in_ellipsoid,
+                          origin, pca_components, pca_variances,
+                          scaling_factor, radius_qmc_sequence=None):
     '''
     Objective:
     - Generates samples inside the hyperellipsoid.
     - Calls the function to sample unit hyperspheres.
     - Transforms the hyperspherical samples to the ellipsoid axes defined using the PCA variances.
     '''
-    import numpy as np
 
     # generate samples in unit hypersphere
     unit_sphere_samples = sample_hypersphere(n_dimensions, 1.0, n_samples_in_ellipsoid, radius_qmc_sequence)
@@ -61,7 +63,6 @@ def sample_in_voids(existing_samples, n_to_fill, bounds_min, bounds_max,
     '''
     from sklearn.neighbors import BallTree
     from sklearn.random_projection import GaussianRandomProjection
-    import numpy as np
     from scipy import stats
     import time
 
@@ -146,14 +147,14 @@ def sample_in_voids(existing_samples, n_to_fill, bounds_min, bounds_max,
 
     return new_samples
 
-def fit_pca_for_cluster(cluster_samples, current_origin, initial_samples_std, n_dimensions):
+def fit_pca_for_cluster(cluster_samples, current_origin,
+                        initial_samples_std, n_dimensions):
     '''
     Performs PCA on a single cluster's samples or returns a default,
     called in parallel.
     '''
 
     from sklearn.decomposition import PCA
-    import numpy as np
 
     # extract shape
     n_cluster_samples = len(cluster_samples)
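The docstring says fit_pca_for_cluster is "called in parallel", and sample() imports joblib's Parallel and delayed. A hedged sketch of what that dispatch plausibly looks like; cluster_groups and origins are hypothetical names for the per-cluster inputs:

```python
from joblib import Parallel, delayed

# hypothetical dispatch: one PCA fit per cluster, fanned out across cores
pca_results = Parallel(n_jobs=-1)(
    delayed(fit_pca_for_cluster)(cluster_samples, origin,
                                 initial_samples_std, n_dimensions)
    for cluster_samples, origin in zip(cluster_groups, origins)
)
```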
@@ -174,7 +175,7 @@ def fit_pca_for_cluster(cluster_samples, current_origin, initial_samples_std, n_dimensions):
                 'variances': fixed_variance}
 
 
-
+### main sampling function
 
 def sample(n_samples, bounds,
            weights=None, normalize=False,
@@ -209,7 +210,6 @@ def sample(n_samples, bounds,
 
     # imports
     try:
-        import numpy as np
         import pandas as pd
        from scipy import stats
         from joblib import Parallel, delayed
@@ -524,7 +524,6 @@ def sample(n_samples, bounds,
 
 
     ### PCA for n_dim > 2:
-
     if normalize:
         data_to_plot_raw = initial_samples
     else:
@@ -548,8 +547,6 @@ def sample(n_samples, bounds,
     xlabel_str = f'Dimension 0'
     ylabel_str = f'Dimension 1'
 
-    # dark visualization parameters for better sample visuals
-
     # samples
     fig, ax = plt.subplots(1,2,figsize=(9,5))
 
@@ -568,7 +565,6 @@ def sample(n_samples, bounds,
     ax[0].set_title(title_str, fontsize=14)
     ax[0].set_xlabel(xlabel_str)
     ax[0].set_ylabel(ylabel_str)
-    # ax[0].axis(False)
     ax[0].legend(loc=(0.7,0.87), fontsize=8)
 
     # plot histograms
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/quasar_helpers.py

@@ -1,23 +1,21 @@
 # global imports
-import pandas as pd
-from sklearn.preprocessing import StandardScaler
 import numpy as np
 epsilon = 1e-16
 
 def isotropize(data):
     '''
     Objective:
-    - …
-    - …
+    - Isotropizes the input matrix using Zero-Phase Component Analysis (ZCA).
+    - Maintains original parameter orientation while removing correlations.
+    - 'deisotropize' function inverse transforms to the original parameter space.
     '''
     from scipy.linalg import eigh
-
     # convert to array
     X = np.array(data)
 
     # standard scaling (mean = 0, var = 1)
     mean = np.mean(X, axis=0)
-    stdev = np.std(X, axis=0) + epsilon #
+    stdev = np.std(X, axis=0) + epsilon # add epsilon to avoid div0
     X_centered = (X - mean) / stdev
 
     # eigen-decomposition of the correlation matrix
@@ -25,19 +23,18 @@ def isotropize(data):
     eigenvalues, eigenvectors = eigh(cov) # eigh is more stable for symmetric matrices like covariance
 
     # ZCA whitening matrix: W_zca = U @ diag(1/sqrt(lambda)) @ U.T
-    …
-    …
-    …
-    …
+    diag_inv_sqrt = np.diag(1.0 / np.sqrt(np.maximum(eigenvalues, epsilon))) # use maximum to avoid div0
+    W_zca = eigenvectors @ diag_inv_sqrt @ eigenvectors.T # whitening matrix
+    W_zca_inv = (eigenvectors * np.sqrt(np.maximum(eigenvalues, epsilon))) @ eigenvectors.T # save for deisotropization
+
     # transform: y = X_centered @ W_zca.T
-    data_iso = np.dot(X_centered, W_zca.T)
+    data_iso = np.dot(X_centered, W_zca) # no .T needed because W_zca is symmetric
 
     # store parameters for deisotropization
     params = {
         'mean': mean,
         'stdev': stdev,
-        '…
-        'W_zca_inv': eigenvectors @ np.diag(np.sqrt(eigenvalues + epsilon)) @ eigenvectors.T
+        'W_zca_inv': W_zca_inv
     }
     return data_iso, params
 
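The algebra in this hunk can be sanity-checked end to end: W_zca = U diag(1/sqrt(lambda)) U.T whitens the standardized data (covariance ~ identity), and W_zca_inv = U diag(sqrt(lambda)) U.T undoes it. A quick round-trip check, assuming the isotropize/deisotropize pair exported by the package behaves as defined above:

```python
import numpy as np
from hdim_opt import isotropize, deisotropize

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 3)) @ rng.normal(size=(3, 3))  # correlated 3-D data

X_iso, params = isotropize(X)

# whitened covariance should be approximately the identity matrix
print(np.round(np.cov(X_iso, rowvar=False), 2))

# inverse transform should recover the original data (up to the epsilon guards)
X_back = deisotropize(X_iso, params)
print(np.allclose(X, X_back, atol=1e-6))
```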
@@ -50,7 +47,7 @@ def deisotropize(data_iso, params):
     # inverse scaling: X = (X_centered * std) + mean
     data_original = (data_centered * params['stdev']) + params['mean']
     return data_original
-
+
 ############## CONSTRAINTS ##############
 def apply_penalty(fitnesses, solutions, constraints, constraint_penalty, vectorized):
     '''
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/quasar_optimization.py

@@ -164,7 +164,6 @@ def evolve_generation(obj_function, population, fitnesses, best_solution,
 
     # greedy elitism selection
     selection_indices = trial_fitnesses < fitnesses
-
     new_population = np.where(selection_indices[np.newaxis, :], trial_vectors, population)
     new_fitnesses = np.where(selection_indices, trial_fitnesses, fitnesses)
 
@@ -263,7 +262,7 @@ def evolve_generation(obj_function, population, fitnesses, best_solution,
 
     return new_population, new_fitnesses
 
-def asym_reinit(population, current_fitnesses, bounds, reinit_method, seed, vectorized):
+def asym_reinit(population, current_fitnesses, bounds, reinit_method, seed, generation, vectorized):
     '''
     Objective:
     - Reinitializes the worst 33% solutions in the population.
@@ -340,7 +339,7 @@ def asym_reinit(population, current_fitnesses, bounds, reinit_method, seed, vectorized):
     elif reinit_method == 'sobol':
 
         # generate sobol samples
-        sobol_sampler = stats.qmc.Sobol(d=dimensions, seed=seed)
+        sobol_sampler = stats.qmc.Sobol(d=dimensions, seed=seed+generation)
         sobol_samples_unit = sobol_sampler.random(n=num_to_replace)
 
         bounds_low = bounds[:, 0]
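The seed+generation offset matters because a scrambled Sobol sampler is deterministic in its seed: reconstructing it with the same seed every generation would reinject the identical points. A quick demonstration with SciPy:

```python
import numpy as np
from scipy import stats

seed = 42
a = stats.qmc.Sobol(d=3, seed=seed).random(4)
b = stats.qmc.Sobol(d=3, seed=seed).random(4)
print(np.array_equal(a, b))   # True: same seed, identical draws

c = stats.qmc.Sobol(d=3, seed=seed + 1).random(4)
print(np.array_equal(a, c))   # False: offset seed, fresh scrambling
```

Offsetting by the generation index keeps the run reproducible for a fixed seed while varying the reinitialization points across generations.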
@@ -367,7 +366,6 @@ def asym_reinit(population, current_fitnesses, bounds, reinit_method, seed, vectorized):
 
 
 # main optimize function
-
 def optimize(func, bounds, args=(),
              init='sobol', popsize=None, maxiter=100,
              entangle_rate=0.33, polish=True, polish_minimizer=None,
@@ -392,24 +390,24 @@ def optimize(func, bounds, args=(),
     - kwargs: Dictionary of keyword arguments for the objective function.
 
     - init: Initial population sampling method.
-        - Defaults to 'sobol' …
+        - Defaults to 'sobol' (recommended power-of-2 population sizes for maximum uniformity).
         - Existing options are:
-            - 'sobol': Sobol (…
-            - 'hds': Hyperellipsoid …
-            - 'lhs': Latin Hypercube (uniform …
-            - 'random': Random sampling (uniform).
+            - 'sobol': Sobol (spatially uniform; power of 2 population sizes recommended).
+            - 'hds': Hyperellipsoid (non-uniform; density weights 'hds_weights' recommended).
+            - 'lhs': Latin Hypercube (uniform across each dimension).
+            - 'random': Random sampling (quasi-uniform).
         - custom population (N x D matrix).
-    - popsize: Number of solution vectors to evolve (default 10 * n_dimensions).
-        - Recommended to be a power of 2 for Sobol initialization.
+    - popsize: Number of solution vectors to evolve (default is next power of 2 of 10 * n_dimensions).
     - maxiter: Number of generations to evolve (default 100).
 
     - entangle_rate: Probability of solutions using the local Spooky-Best mutation strategy.
-        - Defaults to 0.33
+        - Defaults to 0.33; each mutation strategy is applied equally.
         - Higher implies more exploitation.
+
     - polish: Boolean to implement final polishing step, using SciPy.optimize.minimize.
-    - polish_minimizer: …
+    - polish_minimizer: Name of Scipy minimization function to polish with.
         - Defaults to 'Powell' minimization, or 'SLSQP' if 'constraints' parameter is provided.
-        - …
+        - Accepts all minimizers ('L-BFGS-B', ...).
 
     - patience: Number of generations without improvement before early convergence.
     - tolerance: Target objective function value for early convergence.
@@ -446,7 +444,7 @@ def optimize(func, bounds, args=(),
 
     - workers: Number of workers / jobs / cores to use.
         - Default is 1. Set to -1 to use all available.
-        - If workers != 1, constraint & objective functions must be imported from external module
+        - If workers != 1, constraint & objective functions must be imported from external module for pickling.
     - seed: Random seed for deterministic & reproducible results.
 
     Outputs:
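Pulling the docstring options together, a hedged call sketch; the Rastrigin objective is a stand-in, and the parameter names follow the docstring above:

```python
import numpy as np
from hdim_opt import quasar

def rastrigin(x):
    x = np.asarray(x)
    return 10 * x.size + float(np.sum(x**2 - 10 * np.cos(2 * np.pi * x)))

bounds = [(-5.12, 5.12)] * 20
result = quasar(rastrigin, bounds,
                init='sobol',                 # power-of-2 popsize recommended
                popsize=256, maxiter=200,
                entangle_rate=0.33,           # share of Spooky-Best mutations
                polish=True, polish_minimizer='Powell',
                patience=30, seed=7)
```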
@@ -529,8 +527,8 @@ def optimize(func, bounds, args=(),
         raise ValueError("Initial sampler must be one of ['sobol','random','hds','lhs'], or a custom population.")
 
     # patience error
-    if patience …
-        raise ValueError('Patience must be > …
+    if patience < 1:
+        raise ValueError('Patience must be > 0 generations.')
 
 
    ################################# INITIAL POPULATION #################################
@@ -654,7 +652,7 @@ def optimize(func, bounds, args=(),
         else:
             reinit_proba = 0.0
         if np.random.rand() < reinit_proba:
-            population = asym_reinit(population, current_fitnesses, bounds, reinitialization, seed, vectorized=vectorized)
+            population = asym_reinit(population, current_fitnesses, bounds, reinitialization, seed, generation, vectorized=vectorized)
 
         # clip population to bounds
         if vectorized:
@@ -664,19 +662,19 @@ def optimize(func, bounds, args=(),
 
         # add to population history
         if verbose:
-            … (13 removed lines; content not preserved in this diff view)
+            # determine which solutions to sample
+            if popsize <= num_to_plot:
+                indices_to_sample = np.arange(popsize)
+            else:
+                indices_to_sample = np.random.choice(popsize, num_to_plot, replace=False)
+
+            if vectorized:
+                sampled_population = population[:, indices_to_sample].T.copy()
+            else:
+                sampled_population = population[indices_to_sample].copy()
+
+            pop_history.append(sampled_population)
+            best_history.append(best_solution.copy())
 
         # print generation status
         if verbose:
@@ -684,9 +682,9 @@ def optimize(func, bounds, args=(),
             print(f' Gen. {generation+1}/{maxiter} | f(x)={best_fitness:.2e} | stdev={stdev:.2e} | reinit={reinit_proba:.2f}')
 
         # patience for early convergence
-        if …
+        if last_improvement_gen >= patience:
             if verbose:
-                print(f'Early convergence: …
+                print(f'Early convergence: patience exceeded ({patience}).')
             break
         if best_fitness <= tolerance:
             if verbose:
hdim_opt-1.3.2/hdim_opt/sobol_sensitivity.py (new file)

@@ -0,0 +1,185 @@
+import numpy as np
+from scipy import stats
+
+### sensitivity analysis
+def sens_analysis(func, bounds, n_samples=2**7,
+                  args=None, kwargs=None,
+                  param_names=None, calc_second_order=True,
+                  log_scale=False, num_to_plot=10, verbose=True):
+    '''
+    Objective:
+    - Perform global Sobol sensitivity analysis on the objective function.
+    - Utilizes the SALib package.
+    Inputs:
+    - func: Objective function (Problem) to analyze.
+    - bounds: Parameter space bounds, as an array of tuples.
+    - n_samples: Number of Sobol samples to generate.
+    - kwargs: Keyword arguments (dictionary) for objective function.
+    - param_names: Optional parameter names for each dimension.
+    - calc_second_order: Boolean to calculate second-order interactions. Disable to improve computation speed.
+    - log_scale: Boolean to log-scale plots.
+    - num_to_plot: Number of dimensions to plot.
+    - verbose: Boolean to display plots.
+    Outputs:
+    - Si: Matrix of first- and total-order sensitivity indices and confidences.
+    - S2_matrix: Matrix of second-order interactions.
+    '''
+
+    ### imports
+    try:
+        import numpy as np
+        from SALib.sample import sobol as sobol_sample
+        from SALib.analyze import sobol as sobol_analyze
+        import pandas as pd
+        import time
+    except ImportError as e:
+        raise ImportError(f'Sensitivity analysis requires dependencies: (SALib, pandas).') from e
+
+    ### extract inputs
+    start_time = time.time()
+    bounds = np.array(bounds)
+    n_params = bounds.shape[0]
+    if param_names == None:
+        param_names = range(0,n_params)
+    elif len(param_names) != len(bounds):
+        raise ValueError('Length of param_names does not match length of bounds.')
+
+    ### define problem for SALib
+    problem = {
+        'num_vars': n_params,
+        'names': param_names,
+        'bounds' : bounds
+    }
+
+    ### generate samples
+    if verbose:
+        print(f'Generating Sobol samples (N={n_samples:,.0f}, D={n_params}).')
+    param_values = sobol_sample.sample(problem, n_samples, calc_second_order=calc_second_order)
+
+    ### args / kwargs for the objective function
+    if args is None:
+        args = []
+    if kwargs is None:
+        kwargs = {}
+    def wrapped_func(x_samples):
+        return func(x_samples, *args, **kwargs)
+
+    ### evaluate samples
+    # vectorized evaluation
+    n_expected = param_values.shape[0]
+    try:
+        values = wrapped_func(param_values)
+        values = np.asarray(values).flatten()
+        if values.shape[0] != n_expected:
+            raise ValueError('Non-vectorized objective function.')
+
+    # loop-based evaluation
+    except ValueError as e:
+        if verbose:
+            print(f'Non-vectorized objective function; loop-based evaluation.')
+        values = np.array([wrapped_func(sample) for sample in param_values])
+
+    # run sensitivity analysis
+    print('Running sensitivity analysis.')
+    Si = sobol_analyze.analyze(problem, values, calc_second_order=calc_second_order, print_to_console=False)
+
+    # create Si output dataframe
+    Si_keys = ['S1', 'S1_conf', 'ST', 'ST_conf']
+    Si_filtered = {k: Si[k] for k in Si_keys if k in Si} # filter for output
+    Si_df = pd.DataFrame(Si_filtered, index=param_names)
+
+    # create S2 output dataframe
+    if calc_second_order:
+        S2_matrix = Si['S2']
+        S2_df = pd.DataFrame(S2_matrix, index=param_names, columns=param_names)
+        S2_df = S2_df.fillna(S2_df.T)
+    else:
+        S2_df = pd.DataFrame()
+
+    ### end of calculations
+    end_time = time.time()
+    run_time = end_time - start_time
+    if verbose:
+        num_to_plot = np.minimum(num_to_plot, n_params)
+        print(f'\nRun time: {run_time:.2f}s')
+        # plotting imports
+        try:
+            import matplotlib.pyplot as plt
+            import seaborn as sns
+        except ImportError as e:
+            raise ImportError(f'Plotting requires dependencies: (matplotlib, seaborn).') from e
+
+        # sort by S1
+        sort_idx = np.argsort(Si['S1'])
+        s1_sorted = Si['S1'][sort_idx][-num_to_plot:]
+        st_sorted = Si['ST'][sort_idx][-num_to_plot:]
+        s1_conf_sorted = Si['S1_conf'][sort_idx][-num_to_plot:]
+        st_conf_sorted = Si['ST_conf'][sort_idx][-num_to_plot:]
+        names_sorted = [np.array(param_names)[i] for i in sort_idx][-num_to_plot:]
+        index = np.arange(len(names_sorted))
+
+        ### plot 1: first-order (S1) and total-order (ST) indices
+        fig, ax = plt.subplots(1,1,figsize=(9, 7))
+
+        bar_width = 0.35
+        ax.barh(index + bar_width/2, s1_sorted, bar_width, xerr=s1_conf_sorted,
+                label='First-order ($S_1$)',
+                alpha=1,
+                capsize=2.5)
+        ax.set_yticks(index)
+        ax.set_yticklabels(names_sorted)
+
+        ax.barh(index - bar_width/2, st_sorted, bar_width,
+                xerr=st_conf_sorted,
+                label='Total-order ($S_T$)',
+                alpha=0.75,
+                capsize=2.5)
+        if log_scale:
+            ax.set_xscale('log')
+        ax.set_title('Sensitivity Indices ($S_1$, $S_T$)')
+        ax.legend()
+        plt.tight_layout()
+        plt.show()
+
+        if calc_second_order:
+            ### plot 2: heatmap of second order indices
+            s2_plot, ax = plt.subplots(1,1,figsize=(9, 7))
+
+            top_idx_to_show = sort_idx[-num_to_plot:]
+            S2_filtered = S2_df.iloc[top_idx_to_show, top_idx_to_show]
+            mask_filtered = np.tril(np.ones_like(S2_filtered, dtype=bool))
+            sns.heatmap(data=S2_filtered, mask=mask_filtered, annot=True, vmin=0.0, fmt='.2f')
+            ax.set_title('Second-order Interactions ($S_2$)')
+            ax.invert_yaxis()
+            plt.tight_layout()
+            plt.show()
+
+    return Si_df, S2_df
+
+### sobol sampling
+def sobol_sample(n_samples, bounds, normalize=False, seed=None):
+    '''
+    Objective:
+    - Generates a uniform scrambled Sobol sample sequence.
+    Inputs:
+    - n_samples: Number of samples to generate.
+    - bounds: Range to sample over.
+    - normalize: Boolean, if True keeps samples normalized to [0,1].
+    - seed: Random seed.
+    Outputs:
+    - sobol_sequence: Sobol sample sequence.
+    '''
+
+    # clean bounds & n_dimensions
+    bounds = np.array(bounds)
+    n_dimensions = bounds.shape[0]
+
+    sobol_sampler = stats.qmc.Sobol(d=n_dimensions, scramble=True, seed=seed)
+    sobol_samples_unit = sobol_sampler.random(n=n_samples)
+
+    if not normalize:
+        sobol_sequence = stats.qmc.scale(sobol_samples_unit, bounds[:, 0], bounds[:, 1])
+    else:
+        sobol_sequence = sobol_samples_unit
+
+    return sobol_sequence
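A quick exercise of the new sens_analysis on the Ishigami function, the standard benchmark for Sobol indices; it is written vectorized over an (n, 3) sample matrix so the fast path in the try block applies. The expected pattern: x1 and x2 carry most of the first-order variance, and the x1/x3 interaction dominates S2:

```python
import numpy as np
from hdim_opt import sensitivity  # alias for sens_analysis, per __init__.py above

def ishigami(X, a=7.0, b=0.1):
    # vectorized: X has shape (n_samples, 3)
    x1, x2, x3 = X[:, 0], X[:, 1], X[:, 2]
    return np.sin(x1) + a * np.sin(x2)**2 + b * x3**4 * np.sin(x1)

bounds = [(-np.pi, np.pi)] * 3
Si_df, S2_df = sensitivity(ishigami, bounds, n_samples=2**10,
                           param_names=['x1', 'x2', 'x3'],
                           calc_second_order=True, verbose=False)
print(Si_df)
```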
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt/waveform_analysis.py

@@ -406,7 +406,7 @@ def plot_diagnostic_dashboard(temporal, spectral, metrics):
     ax_cum.plot(f_mhz, spectral['energy'], color='gold', linewidth=2)
     ax_cum.fill_between(f_mhz, spectral['energy'], color='gold', alpha=0.2)
     ax_cum.axvline(metrics['bw_90_hz'], color='red', linestyle='--',
-                   label=f'90% Band: {metrics['bw_90_hz']:.…
+                   label=f'90% Band: {metrics['bw_90_hz']:.1e} Hz')
     ax_cum.set_title('Cumulative Energy')
     ax_cum.set_ylabel('Normalized Energy')
     ax_cum.set_xlabel('Frequency (MHz)')
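One compatibility note on the label above: both the old and new line nest single quotes (metrics['bw_90_hz']) inside a single-quoted f-string, which is valid syntax only on Python 3.12+ (PEP 701); earlier interpreters raise a SyntaxError here.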
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/hdim_opt.egg-info/PKG-INFO

Same two hunks as PKG-INFO above (@@ -1,7 +1,7 @@ and @@ -42,13 +42,14 @@): the egg-info copy of the metadata picks up the identical version, summary, description, and isotropize changes.
{hdim_opt-1.3.1 → hdim_opt-1.3.2}/pyproject.toml

@@ -6,8 +6,8 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "hdim_opt"
-version = "1.3.1"
-description = "High-dimensional …"
+version = "1.3.2" # match version in __init__.py
+description = "High-dimensional numerical optimization and sampling toolkit for complex, non-differentiable problems."
 readme = {file = "README.md", content-type = "text/markdown"}
 
 authors = [
hdim_opt-1.3.1/hdim_opt/sobol_sampling.py (deleted; sobol_sample now lives in sobol_sensitivity.py)

@@ -1,29 +0,0 @@
-from scipy import stats
-import numpy as np
-
-def sobol_sample(n_samples, bounds, normalize=False, seed=None):
-    '''
-    Objective:
-    - Generates a uniform scrambled Sobol sample sequence.
-    Inputs:
-    - n_samples: Number of samples to generate.
-    - bounds: Range to sample over.
-    - normalize: Boolean, if True keeps samples normalized to [0,1].
-    - seed: Random seed.
-    Outputs:
-    - sobol_sequence: Sobol sample sequence.
-    '''
-
-    # clean bounds & n_dimensions
-    bounds = np.array(bounds)
-    n_dimensions = bounds.shape[0]
-
-    sobol_sampler = stats.qmc.Sobol(d=n_dimensions, scramble=True, seed=seed)
-    sobol_samples_unit = sobol_sampler.random(n=n_samples)
-
-    if not normalize:
-        sobol_sequence = stats.qmc.scale(sobol_samples_unit, bounds[:, 0], bounds[:, 1])
-    else:
-        sobol_sequence = sobol_samples_unit
-
-    return sobol_sequence
hdim_opt-1.3.1/hdim_opt/sobol_sensitivity.py (old version deleted; rewritten as the new sobol_sensitivity.py above)

@@ -1,126 +0,0 @@
-def sens_analysis(func, bounds, n_samples=2**7,
-                  kwargs=None, param_names=None,
-                  verbose=True, log_scale=True):
-    '''
-    Objective:
-    - Perform Sobol sensitivity analysis on the vectorized objective function.
-    Inputs:
-    - func: Objective function (Problem) to analyze.
-    - bounds: Parameter space bounds, as an array of tuples.
-    - n_samples: Number of Sobol samples to generate.
-    - kwargs: Keyword arguments (dictionary) for objective function.
-    - param_names: Optional parameter names for each dimension.
-    - verbose: Boolean to display plots.
-    - log_scale: Boolean to log-scale plots.
-    Outputs:
-    - Si: Full sensitivity indices and confidences.
-    - S2_matrix: Matrix of S2 relationship sensitivity indices.
-    '''
-
-    # imports
-    try:
-        import numpy as np
-        from SALib.sample import sobol as sobol_sample
-        from SALib.analyze import sobol as sobol_analyze
-        import pandas as pd
-        from functools import partial
-        import time
-    except ImportError as e:
-        raise ImportError(f'Sensitivity analysis requires dependencies: (SALib, pandas, functools).') from e
-    start_time = time.time()
-
-    # define input parameters and their ranges
-    bounds = np.array(bounds)
-    n_params = bounds.shape[0]
-    if param_names == None:
-        param_names = range(0,n_params)
-
-    # define problem
-    problem = {
-        'num_vars': n_params,
-        'names': param_names,
-        'bounds' : bounds
-    }
-
-    # generate samples
-    if verbose:
-        print(f'Generating {n_samples:,.0f} Sobol samples for sensitivity analysis.')
-    param_values = sobol_sample.sample(problem, n_samples)
-
-    # kwargs for the objective function
-    if kwargs:
-        func = partial(func, **kwargs)
-
-    # evaluate the samples
-    values = func(param_values)
-
-    # running sensitivity analysis
-    print('Running sensitivity analysis.')
-    Si = sobol_analyze.analyze(problem, values, calc_second_order=True, print_to_console=False)
-
-    # calculate S2 sensitivities
-    # convert S2 indices to dataframe to process easier
-    S2_matrix = Si['S2']
-    S2_df = pd.DataFrame(S2_matrix, index=param_names, columns=param_names)
-    S2_df = S2_df.fillna(S2_df.T)
-    mask = np.tril(np.ones_like(S2_df, dtype=bool))
-
-    end_time = time.time()
-    run_time = end_time - start_time
-    if verbose:
-        print(f'\nRun time: {run_time:.2f}s')
-        # import
-        try:
-            import matplotlib.pyplot as plt
-            import seaborn as sns
-        except ImportError as e:
-            raise ImportError(f'Plotting requires dependencies: (matplotlib, seaborn).') from e
-
-        # sort by S1 values
-        sort_idx = np.argsort(Si['S1'])
-        s1_sorted = Si['S1'][sort_idx]
-        st_sorted = Si['ST'][sort_idx]
-        s1_conf_sorted = Si['S1_conf'][sort_idx]
-        st_conf_sorted = Si['ST_conf'][sort_idx]
-        names_sorted = [np.array(param_names)[i] for i in sort_idx]
-
-
-        bar_width = 0.35
-        index = np.arange(n_params)
-
-        # plot 1: first-order (S1) and total-order (ST) indices
-        sens_plot, axs = plt.subplots(2,1,figsize=(9, 13))
-
-        # define bar width and positions
-        bar_width = 0.35
-        index = np.arange(n_params)
-
-        # plot S1 (first order) sensitivities
-        axs[0].barh(index - bar_width/2, s1_sorted, bar_width,
-                    xerr=s1_conf_sorted,
-                    label='First-order ($S_1$)',
-                    alpha=1,
-                    capsize=2.5)
-
-        axs[0].barh(index + bar_width/2, st_sorted, bar_width,
-                    xerr=st_conf_sorted,
-                    label='Total-order ($S_T$)',
-                    alpha=0.75,
-                    capsize=2.5)
-
-        axs[0].set_title('Sensitivity Indices ($S_1$, $S_T$)')
-        if log_scale:
-            axs[0].set_xscale('log')
-
-        axs[0].set_yticks(index)
-        axs[0].set_yticklabels(names_sorted)
-        axs[0].legend()
-
-        # plot 2: heatmap of second order indices
-        sns.heatmap(data=S2_df, mask=mask, cbar_kws={'label': 'Second-order Index ($S_2$)'},ax=axs[1]) # magma
-        axs[1].set_title('Second-order Interactions ($S_2$)')
-        axs[1].invert_yaxis()
-        sens_plot.tight_layout()
-        plt.show()
-
-    return Si, S2_matrix
Files without changes: hdim_opt/test_functions.py, hdim_opt.egg-info/dependency_links.txt, hdim_opt.egg-info/requires.txt, hdim_opt.egg-info/top_level.txt, setup.cfg.