hdim-opt 1.2.3__tar.gz → 1.3.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,19 +1,33 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hdim_opt
3
- Version: 1.2.3
4
- Summary: Optimization toolkit for high-dimensional, non-differentiable problems.
3
+ Version: 1.3.1
4
+ Summary: High-dimensional global optimization and sampling toolkit for complex and non-differentiable problems.
5
5
  Author-email: Julian Soltes <jsoltes@regis.edu>
6
6
  License: MIT
7
+ Project-URL: Homepage, https://github.com/jgsoltes/hdim_opt
7
8
  Project-URL: Repository, https://github.com/jgsoltes/hdim_opt
8
- Keywords: optimization,high-dimensional,sampling,QUASAR,HDS
9
+ Project-URL: Issues, https://github.com/jgsoltes/hdim_opt/issues
10
+ Project-URL: Changelog, https://github.com/jgsoltes/hdim_opt/releases
11
+ Keywords: optimization,high-dimensional,sampling,QUASAR,hyperellipsoid,evolutionary-algorithm,non-differentiable,global-optimization,stochastic-optimization,black-box-optimization
12
+ Classifier: Development Status :: 5 - Production/Stable
9
13
  Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
10
19
  Classifier: License :: OSI Approved :: MIT License
11
20
  Classifier: Operating System :: OS Independent
12
21
  Classifier: Intended Audience :: Science/Research
13
22
  Classifier: Intended Audience :: Developers
23
+ Classifier: Intended Audience :: Education
24
+ Classifier: Natural Language :: English
14
25
  Classifier: Topic :: Scientific/Engineering :: Mathematics
15
26
  Classifier: Topic :: Scientific/Engineering :: Physics
16
27
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
29
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
30
+ Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
17
31
  Requires-Python: >=3.8
18
32
  Description-Content-Type: text/markdown
19
33
  Requires-Dist: numpy
@@ -28,13 +42,14 @@ Requires-Dist: SALib; extra == "sensitivity"
28
42
 
29
43
  # hdim-opt: High-Dimensional Optimization Toolkit
30
44
 
31
- A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm and HDS exploitative QMC sampler.
45
+ A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm, HDS exploitative QMC sampler, Sobol sensitivity analysis, and signal waveform decomposition.
32
46
 
33
47
  All core functions, listed below, are single-line executable and require three essential parameters: [obj_function, bounds, n_samples].
34
48
  * **quasar**: QUASAR optimization for high-dimensional, non-differentiable problems.
35
- * **hds**: Generate an exploitative HDS sequence, to distribute samples in focused regions.
49
+ * **hyperellipsoid**: Generate a non-uniform Hyperellipsoid Density sequence, to focus sample distributions.
36
50
  * **sobol**: Generate a uniform Sobol sequence (via SciPy).
37
51
  * **sensitivity**: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
52
+ * **waveform**: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
38
53
 
39
54
  ---
40
55
 
@@ -52,15 +67,21 @@ pip install hdim_opt
52
67
  import hdim_opt as h
53
68
 
54
69
  # Parameter Space
55
- n_dimensions = 100
70
+ n_dimensions = 30
56
71
  bounds = [(-100,100)] * n_dimensions
57
72
  n_samples = 1000
58
73
  obj_func = h.test_functions.rastrigin
74
+ time, pulse = h.waveform_analysis.e1_waveform()
59
75
 
76
+ # Functions
60
77
  solution, fitness = h.quasar(obj_func, bounds)
61
78
  sens_matrix = h.sensitivity(obj_func, bounds)
62
- hds_samples = h.hds(n_samples, bounds)
79
+
80
+ hds_samples = h.hyperellipsoid(n_samples, bounds)
63
81
  sobol_samples = h.sobol(n_samples, bounds)
82
+ isotropic_samples = h.isotropize(sobol_samples)
83
+
84
+ signal_data = h.waveform(x=time,y=pulse)
64
85
  ```
65
86
 
66
87
  ## QUASAR Optimizer
@@ -68,7 +89,7 @@ sobol_samples = h.sobol(n_samples, bounds)
68
89
 
69
90
  * Benefit: Significant improvements in convergence speed and solution quality compared to contemporary optimizers. (Reference: [https://arxiv.org/abs/2511.13843]).
70
91
 
71
- ## HDS Sampler (Hyperellipsoid Density Sampling)
72
- **HDS** is a non-uniform Quasi-Monte Carlo sampling method, specifically designed to exploit promising regions of the search space.
92
+ ## HDS Sampler
93
+ **HDS** (Hyperellipsoid Density Sampling) is a non-uniform Quasi-Monte Carlo sampling method, specifically designed to exploit promising regions of the search space.
73
94
 
74
95
  * Benefit: Provides control over the sample distribution. Results in higher average optimization solution quality when used for population initialization compared to uniform QMC methods. (Reference: [https://arxiv.org/abs/2511.07836]).
@@ -1,12 +1,13 @@
1
1
  # hdim-opt: High-Dimensional Optimization Toolkit
2
2
 
3
- A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm and HDS exploitative QMC sampler.
3
+ A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm, HDS exploitative QMC sampler, Sobol sensitivity analysis, and signal waveform decomposition.
4
4
 
5
5
  All core functions, listed below, are single-line executable and require three essential parameters: [obj_function, bounds, n_samples].
6
6
  * **quasar**: QUASAR optimization for high-dimensional, non-differentiable problems.
7
- * **hds**: Generate an exploitative HDS sequence, to distribute samples in focused regions.
7
+ * **hyperellipsoid**: Generate a non-uniform Hyperellipsoid Density sequence, to focus sample distributions.
8
8
  * **sobol**: Generate a uniform Sobol sequence (via SciPy).
9
9
  * **sensitivity**: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
10
+ * **waveform**: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
10
11
 
11
12
  ---
12
13
 
@@ -24,15 +25,21 @@ pip install hdim_opt
24
25
  import hdim_opt as h
25
26
 
26
27
  # Parameter Space
27
- n_dimensions = 100
28
+ n_dimensions = 30
28
29
  bounds = [(-100,100)] * n_dimensions
29
30
  n_samples = 1000
30
31
  obj_func = h.test_functions.rastrigin
32
+ time, pulse = h.waveform_analysis.e1_waveform()
31
33
 
34
+ # Functions
32
35
  solution, fitness = h.quasar(obj_func, bounds)
33
36
  sens_matrix = h.sensitivity(obj_func, bounds)
34
- hds_samples = h.hds(n_samples, bounds)
37
+
38
+ hds_samples = h.hyperellipsoid(n_samples, bounds)
35
39
  sobol_samples = h.sobol(n_samples, bounds)
40
+ isotropic_samples = h.isotropize(sobol_samples)
41
+
42
+ signal_data = h.waveform(x=time,y=pulse)
36
43
  ```
37
44
 
38
45
  ## QUASAR Optimizer
@@ -40,7 +47,7 @@ sobol_samples = h.sobol(n_samples, bounds)
40
47
 
41
48
  * Benefit: Significant improvements in convergence speed and solution quality compared to contemporary optimizers. (Reference: [https://arxiv.org/abs/2511.13843]).
42
49
 
43
- ## HDS Sampler (Hyperellipsoid Density Sampling)
44
- **HDS** is a non-uniform Quasi-Monte Carlo sampling method, specifically designed to exploit promising regions of the search space.
50
+ ## HDS Sampler
51
+ **HDS** (Hyperellipsoid Density Sampling) is a non-uniform Quasi-Monte Carlo sampling method, specifically designed to exploit promising regions of the search space.
45
52
 
46
53
  * Benefit: Provides control over the sample distribution. Results in higher average optimization solution quality when used for population initialization compared to uniform QMC methods. (Reference: [https://arxiv.org/abs/2511.07836]).
@@ -0,0 +1,54 @@
1
+ """
2
+
3
+ # hdim-opt: High-Dimensional Optimization Toolkit
4
+
5
+ Functions:
6
+ - quasar: QUASAR optimization for high-dimensional, non-differentiable problems.
7
+ - hyperellipsoid: Generate a non-uniform hyperellipsoid density sequence.
8
+ - sobol: Generate a uniform Sobol sequence (via SciPy).
9
+ - sensitivity: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
10
+ - waveform: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary (work in progress).
11
+ - isotropize: Isotropize the input matrix using zero-phase component analysis.
12
+
13
+ Modules:
14
+ - test_functions: Contains test functions for local optimization testing.
15
+ - waveform_analysis: Contains pulse generation functions.
16
+
17
+ Example Usage:
18
+
19
+ # Import
20
+ >>> import hdim_opt as h
21
+
22
+ # Parameter Space
23
+ >>> n_dimensions = 30
24
+ >>> bounds = [(-100,100)] * n_dimensions
25
+ >>> n_samples = 1000
26
+ >>> obj_func = h.test_functions.rastrigin
27
+ >>> time, pulse = h.waveform_analysis.e1_waveform()
28
+
29
+ # Functions
30
+ >>> solution, fitness = h.quasar(obj_func, bounds)
31
+ >>> sens_matrix = h.sensitivity(obj_func, bounds)
32
+
33
+ >>> hds_samples = h.hyperellipsoid(n_samples, bounds)
34
+ >>> sobol_samples = h.sobol(n_samples, bounds)
35
+ >>> isotropic_samples = h.isotropize(sobol_samples)
36
+
37
+ >>> signal_data = h.waveform(x=time,y=pulse)
38
+ """
39
+
40
+ # package version
41
+ __version__ = "1.3.1"
42
+ __all__ = ['quasar', 'hyperellipsoid', 'sobol', 'sensitivity', 'test_functions', 'quasar_helpers','waveform'] # available for star imports
43
+
44
+ # import core components
45
+ from .quasar_optimization import optimize as quasar
46
+ from .hyperellipsoid_sampling import sample as hyperellipsoid
47
+ from .sobol_sampling import sobol_sample as sobol
48
+ from .sobol_sensitivity import sens_analysis as sensitivity
49
+ from .waveform_analysis import analyze_waveform as waveform
50
+ from .quasar_helpers import isotropize
51
+ from .quasar_helpers import deisotropize
52
+ from . import test_functions
53
+ from . import quasar_helpers
54
+ from . import waveform_analysis
@@ -1,5 +1,5 @@
1
1
  # global epslion
2
- epsilon = 1e-12
2
+ epsilon = 1e-16
3
3
 
4
4
  ### misc helper functions
5
5
 
@@ -179,6 +179,7 @@ def fit_pca_for_cluster(cluster_samples, current_origin, initial_samples_std, n_
179
179
  def sample(n_samples, bounds,
180
180
  weights=None, normalize=False,
181
181
  n_ellipsoids=None, n_initial_clusters=None, n_initial_qmc=None,
182
+ ellipsoid_scaling_factor=None,
182
183
  seed=None, plot_dendrogram=False, verbose=False):
183
184
  '''
184
185
  Objective:
@@ -367,11 +368,12 @@ def sample(n_samples, bounds,
367
368
  confidence_level = 0.9999 # captures 99.99% of cluster's samples
368
369
 
369
370
  # critical value (the statistical radius squared)
370
- chi2_critical_value = stats.chi2.ppf(confidence_level, df=n_dimensions)
371
- baseline_factor = 0.55 - 0.01*np.log(n_dimensions) # empirically derived to resample out-of-bounds points
372
-
373
- # square root as the scaling factor (Mahalanobis distance)
374
- ellipsoid_scaling_factor = baseline_factor * np.sqrt(chi2_critical_value)
371
+ if ellipsoid_scaling_factor == None:
372
+ chi2_critical_value = stats.chi2.ppf(confidence_level, df=n_dimensions)
373
+ baseline_factor = 0.55 - 0.01*np.log(n_dimensions) # empirically derived to resample out-of-bounds points
374
+
375
+ # square root as the scaling factor (Mahalanobis distance)
376
+ ellipsoid_scaling_factor = baseline_factor * np.sqrt(chi2_critical_value)
375
377
 
376
378
  # QMC sequence for radius scaling
377
379
  radius_qmc_sampler = stats.qmc.Sobol(d=1, seed=seed+1) # offset seed from initial qmc
@@ -472,10 +474,8 @@ def sample(n_samples, bounds,
472
474
  print(' - number of samples:', len(hds_sequence))
473
475
  print(f' - sample generation time: {sample_generation_time:.2f}')
474
476
  print(f' - number of hyperellipsoids: {n_hyperellipsoids}')
475
- print(f' - number of initial QMC: {n_initial_qmc}')
476
- print(f' - number of initial clusters: {n_initial_clusters}')
477
477
  if weights:
478
- print(f' - gaussian weights: {weights}')
478
+ print(f' - weights: {weights}')
479
479
 
480
480
  # generate a sobol sequence for comparison
481
481
  sobol_sampler = stats.qmc.Sobol(d=n_dimensions, seed=seed+2) # offset seed to be different from initial qmc
@@ -492,10 +492,8 @@ def sample(n_samples, bounds,
492
492
  sobol_std = np.std(sobol_samples)
493
493
 
494
494
  print('\nstats:')
495
- print(f' - mean HDS: {hds_mean:.2f}')
496
- print(f' - mean comparison QMC: {sobol_mean:.2f}')
497
- print(f' - stdev HDS: {hds_std:.2f}')
498
- print(f' - stdev comparison QMC: {sobol_std:.2f}\n')
495
+ print(f' - HDS mean: {hds_mean:.2f}')
496
+ print(f' - HDS stdev: {hds_std:.2f}\n')
499
497
 
500
498
  # dendrogram of centroids
501
499
  if plot_dendrogram:
@@ -1,6 +1,55 @@
1
1
  # global imports
2
+ import pandas as pd
3
+ from sklearn.preprocessing import StandardScaler
2
4
  import numpy as np
3
- epsilon = 1e-12
5
+ epsilon = 1e-16
6
+
7
+ def isotropize(data):
8
+ '''
9
+ Objective:
10
+ - Converts data to isotropic space using Zero-Phase Component Analysis (ZCA).
11
+ - Maintains original feature orientation while removing correlations.
12
+ '''
13
+ from scipy.linalg import eigh
14
+
15
+ # convert to array
16
+ X = np.array(data)
17
+
18
+ # standard scaling (mean = 0, var = 1)
19
+ mean = np.mean(X, axis=0)
20
+ stdev = np.std(X, axis=0) + epsilon # Add epsilon to avoid div0
21
+ X_centered = (X - mean) / stdev
22
+
23
+ # eigen-decomposition of the correlation matrix
24
+ cov = np.cov(X_centered, rowvar=False) + np.eye(X_centered.shape[1]) * epsilon
25
+ eigenvalues, eigenvectors = eigh(cov) # eigh is more stable for symmetric matrices like covariance
26
+
27
+ # ZCA whitening matrix: W_zca = U @ diag(1/sqrt(lambda)) @ U.T
28
+ # transforms data to identity covariance while minimizing rotation
29
+ diag_inv_sqrt = np.diag(1.0 / np.sqrt(eigenvalues + epsilon))
30
+ W_zca = eigenvectors @ diag_inv_sqrt @ eigenvectors.T
31
+
32
+ # transform: y = X_centered @ W_zca.T
33
+ data_iso = np.dot(X_centered, W_zca.T)
34
+
35
+ # store parameters for deisotropization
36
+ params = {
37
+ 'mean': mean,
38
+ 'stdev': stdev,
39
+ 'W_zca': W_zca,
40
+ 'W_zca_inv': eigenvectors @ np.diag(np.sqrt(eigenvalues + epsilon)) @ eigenvectors.T
41
+ }
42
+ return data_iso, params
43
+
44
+ def deisotropize(data_iso, params):
45
+ '''De-isotropize data to its original parameter space via inverse ZCA.'''
46
+
47
+ # inverse ZCA: X_centered = y @ W_zca_inv.T
48
+ data_centered = np.dot(data_iso, params['W_zca_inv'].T)
49
+
50
+ # inverse scaling: X = (X_centered * std) + mean
51
+ data_original = (data_centered * params['stdev']) + params['mean']
52
+ return data_original
4
53
 
5
54
  ############## CONSTRAINTS ##############
6
55
  def apply_penalty(fitnesses, solutions, constraints, constraint_penalty, vectorized):
@@ -32,7 +32,7 @@ def initialize_population(popsize, bounds, init, hds_weights, seed, verbose):
32
32
 
33
33
  # generate samples
34
34
  if verbose:
35
- print(f'Initializing: Hyperellipsoid pop. (N={popsize}, D={n_dimensions}).')
35
+ print(f'Initializing: Hyperellipsoid (N={popsize}, D={n_dimensions}).')
36
36
  initial_population = hds.sample(popsize, bounds, weights=hds_weights,
37
37
  seed=seed, verbose=False)
38
38
 
@@ -371,10 +371,10 @@ def asym_reinit(population, current_fitnesses, bounds, reinit_method, seed, vect
371
371
  def optimize(func, bounds, args=(),
372
372
  init='sobol', popsize=None, maxiter=100,
373
373
  entangle_rate=0.33, polish=True, polish_minimizer=None,
374
- patience=np.inf, vectorized=False,
375
- hds_weights=None, kwargs={},
374
+ patience=np.inf, tolerance=-np.inf, vectorized=False,
375
+ kwargs={},
376
376
  constraints=None, constraint_penalty=1e9,
377
- reinitialization_method='covariance',
377
+ reinitialization='covariance', hds_weights=None,
378
378
  verbose=True, plot_solutions=True, num_to_plot=10, plot_contour=True,
379
379
  workers=1, seed=None
380
380
  ):
@@ -382,7 +382,6 @@ def optimize(func, bounds, args=(),
382
382
  Objective:
383
383
  - Finds the optimal solution for a given objective function.
384
384
  - Designed for non-differentiable, high-dimensional problems.
385
- - For explorative problems chance reinitialization_method to '
386
385
  - Test functions available for local testing, called as hdim_opt.test_functions.function_name.
387
386
  - Existing test functions: [rastrigin, ackley, sinusoid, sphere, shubert].
388
387
 
@@ -413,11 +412,10 @@ def optimize(func, bounds, args=(),
413
412
  - Recommended to place constraints in objective function to use Powell.
414
413
 
415
414
  - patience: Number of generations without improvement before early convergence.
415
+ - tolerance: Target objective function value for early convergence.
416
416
  - vectorized: Boolean to accept vectorized (N,D) objective functions
417
- - Extremely efficient, highly recommended whenever possible.
417
+ - Highly efficient, recommended when possible.
418
418
 
419
- - hds_weights: Optional weights for hyperellipsoid density sampling initialization.
420
- - {0 : {'center' : center, 'std': stdev}, 1: {...} }
421
419
  - kwargs: Dictionary of keyword arguments for the objective function.
422
420
 
423
421
  - constraints: Dictionary of constraints to penalize.
@@ -433,11 +431,13 @@ def optimize(func, bounds, args=(),
433
431
  }
434
432
  - constraint_penalty: Penalty applied to each constraint violated, defaults to 1e12.
435
433
 
436
- - reinitialization_method: Type of re-sampling to use in the asymptotic reinitialization.
434
+ - reinitialization: Type of re-sampling to use in the asymptotic reinitialization.
437
435
  - Options are ['covariance', 'sobol'].
438
- - 'covariance' (exploitative) is default for most problems.
436
+ - 'covariance' (exploitative) is default for N > D problems.
439
437
  - 'sobol' (explorative) is optional, for high exploration and faster computation.
440
438
  - None to disable reinitialization calculations.
439
+ - hds_weights: Optional weights for hyperellipsoid density sampling initialization.
440
+ - {0 : {'center' : center, 'std': stdev}, 1: {...} }
441
441
 
442
442
  - verbose: Displays prints and plots.
443
443
  - Mutation factor distribution shown with hdim_opt.test_functions.plot_mutations()
@@ -499,10 +499,7 @@ def optimize(func, bounds, args=(),
499
499
  # ensure bounds is array; shape (n_dimensions,2)
500
500
  bounds = np.array(bounds)
501
501
  n_dimensions = bounds.shape[0]
502
-
503
- if n_dimensions == 1:
504
- reinitialization = False
505
-
502
+
506
503
  # if init is not a string, assume it is a custom population
507
504
  if not isinstance(init, str):
508
505
  popsize = init.shape[0]
@@ -516,7 +513,11 @@ def optimize(func, bounds, args=(),
516
513
  # ensure integers
517
514
  popsize, maxiter = int(popsize), int(maxiter)
518
515
 
516
+ # map to sobol if N < D
517
+ if n_dimensions == 1:
518
+ reinitialization = None
519
519
 
520
+
520
521
  ################################# INPUT ERRORS #################################
521
522
 
522
523
  # entangle rate error
@@ -537,11 +538,11 @@ def optimize(func, bounds, args=(),
537
538
  # generate initial population
538
539
  initial_population = initialize_population(popsize, bounds, init, hds_weights, seed, verbose)
539
540
  if verbose:
540
- if reinitialization_method not in ['sobol', 'covariance', None]:
541
- print("reinitialization_method must be one of ['covariance', 'sobol', None].")
541
+ if reinitialization not in ['sobol', 'covariance', None]:
542
+ print("reinitialization must be one of ['covariance', 'sobol', None].")
542
543
  print(f'\nEvolving (None):')
543
544
  else:
544
- print(f'\nEvolving ({reinitialization_method}):')
545
+ print(f'\nEvolving ({reinitialization}):')
545
546
 
546
547
  # match differential evolution conventions
547
548
  if vectorized:
@@ -648,12 +649,12 @@ def optimize(func, bounds, args=(),
648
649
  # apply asymptotic covariance reinitialization to population
649
650
  final_proba = 0.33
650
651
  decay_generation = 0.33
651
- if reinitialization_method in ['sobol','covariance']:
652
+ if (reinitialization in ['sobol','covariance']):
652
653
  reinit_proba = np.e**((np.log(final_proba)/(decay_generation*maxiter))*generation)
653
654
  else:
654
655
  reinit_proba = 0.0
655
656
  if np.random.rand() < reinit_proba:
656
- population = asym_reinit(population, current_fitnesses, bounds, reinitialization_method, seed, vectorized=vectorized)
657
+ population = asym_reinit(population, current_fitnesses, bounds, reinitialization, seed, vectorized=vectorized)
657
658
 
658
659
  # clip population to bounds
659
660
  if vectorized:
@@ -685,9 +686,12 @@ def optimize(func, bounds, args=(),
685
686
  # patience for early convergence
686
687
  if (generation - last_improvement_gen) > patience:
687
688
  if verbose:
688
- print(f'\nEarly convergence: number of generations without improvement exceeds patience ({patience}).')
689
+ print(f'Early convergence: number of generations without improvement exceeds patience ({patience}).')
690
+ break
691
+ if best_fitness <= tolerance:
692
+ if verbose:
693
+ print(f'Early convergence: f(x) below tolerance ({tolerance:.2e}).')
689
694
  break
690
-
691
695
 
692
696
  ################################# POLISH #################################
693
697
 
@@ -703,7 +707,7 @@ def optimize(func, bounds, args=(),
703
707
  maxiter=maxiter, vectorized=vectorized, constraints=constraints,
704
708
  args=args, kwargs=kwargs,
705
709
  polish_minimizer=polish_minimizer, verbose=verbose
706
- )
710
+ )
707
711
 
708
712
 
709
713
  ################################# VERBOSE #################################
@@ -4,7 +4,7 @@ import numpy as np
4
4
  def sobol_sample(n_samples, bounds, normalize=False, seed=None):
5
5
  '''
6
6
  Objective:
7
- - Generate a uniform scrambled Sobol sample sequence.
7
+ - Generates a uniform scrambled Sobol sample sequence.
8
8
  Inputs:
9
9
  - n_samples: Number of samples to generate.
10
10
  - bounds: Range to sample over.
@@ -1,4 +1,4 @@
1
- def sens_analysis(func, bounds, n_samples=None,
1
+ def sens_analysis(func, bounds, n_samples=2**7,
2
2
  kwargs=None, param_names=None,
3
3
  verbose=True, log_scale=True):
4
4
  '''
@@ -24,9 +24,10 @@ def sens_analysis(func, bounds, n_samples=None,
24
24
  from SALib.analyze import sobol as sobol_analyze
25
25
  import pandas as pd
26
26
  from functools import partial
27
+ import time
27
28
  except ImportError as e:
28
29
  raise ImportError(f'Sensitivity analysis requires dependencies: (SALib, pandas, functools).') from e
29
-
30
+ start_time = time.time()
30
31
 
31
32
  # define input parameters and their ranges
32
33
  bounds = np.array(bounds)
@@ -34,10 +35,6 @@ def sens_analysis(func, bounds, n_samples=None,
34
35
  if param_names == None:
35
36
  param_names = range(0,n_params)
36
37
 
37
- # scale default n_samples by dimension (power of 2)
38
- if n_samples == None:
39
- n_samples = int(2**np.ceil(np.log2(10*n_params)))
40
-
41
38
  # define problem
42
39
  problem = {
43
40
  'num_vars': n_params,
@@ -68,14 +65,29 @@ def sens_analysis(func, bounds, n_samples=None,
68
65
  S2_df = S2_df.fillna(S2_df.T)
69
66
  mask = np.tril(np.ones_like(S2_df, dtype=bool))
70
67
 
68
+ end_time = time.time()
69
+ run_time = end_time - start_time
71
70
  if verbose:
71
+ print(f'\nRun time: {run_time:.2f}s')
72
72
  # import
73
73
  try:
74
74
  import matplotlib.pyplot as plt
75
75
  import seaborn as sns
76
76
  except ImportError as e:
77
77
  raise ImportError(f'Plotting requires dependencies: (matplotlib, seaborn).') from e
78
-
78
+
79
+ # sort by S1 values
80
+ sort_idx = np.argsort(Si['S1'])
81
+ s1_sorted = Si['S1'][sort_idx]
82
+ st_sorted = Si['ST'][sort_idx]
83
+ s1_conf_sorted = Si['S1_conf'][sort_idx]
84
+ st_conf_sorted = Si['ST_conf'][sort_idx]
85
+ names_sorted = [np.array(param_names)[i] for i in sort_idx]
86
+
87
+
88
+ bar_width = 0.35
89
+ index = np.arange(n_params)
90
+
79
91
  # plot 1: first-order (S1) and total-order (ST) indices
80
92
  sens_plot, axs = plt.subplots(2,1,figsize=(9, 13))
81
93
 
@@ -84,35 +96,27 @@ def sens_analysis(func, bounds, n_samples=None,
84
96
  index = np.arange(n_params)
85
97
 
86
98
  # plot S1 (first order) sensitivities
87
- axs[0].barh(index - bar_width/2, Si['S1'], bar_width,
88
- xerr=Si['S1_conf'],
89
- label='First-order ($S_1$)',
90
- # color='cornflowerblue',
91
- alpha=1,
92
- # ecolor='lightgray',
93
- capsize=2.5)
94
- # edgecolor='black')
99
+ axs[0].barh(index - bar_width/2, s1_sorted, bar_width,
100
+ xerr=s1_conf_sorted,
101
+ label='First-order ($S_1$)',
102
+ alpha=1,
103
+ capsize=2.5)
95
104
 
96
- # plot ST (total order) sensitivities
97
- axs[0].barh(index + bar_width/2, Si['ST'], bar_width,
98
- xerr=Si['ST_conf'],
99
- label='Total-order ($S_T$)',
100
- # color='violet',
101
- # ecolor='lightgray',
102
- alpha=0.75,
103
- capsize=2.5)
104
- # edgecolor='black')
105
+ axs[0].barh(index + bar_width/2, st_sorted, bar_width,
106
+ xerr=st_conf_sorted,
107
+ label='Total-order ($S_T$)',
108
+ alpha=0.75,
109
+ capsize=2.5)
110
+
105
111
  axs[0].set_title('Sensitivity Indices ($S_1$, $S_T$)')
106
112
  if log_scale:
107
113
  axs[0].set_xscale('log')
108
- axs[0].set_xlabel('Sensitivity Index')
109
- axs[0].set_ylabel('Parameter')
110
- axs[0].legend(loc='upper right')
111
- axs[0].grid(False)
114
+
112
115
  axs[0].set_yticks(index)
113
- axs[0].set_yticklabels(param_names, ha='right')
116
+ axs[0].set_yticklabels(names_sorted)
117
+ axs[0].legend()
114
118
 
115
- # heatmap of second order indices
119
+ # plot 2: heatmap of second order indices
116
120
  sns.heatmap(data=S2_df, mask=mask, cbar_kws={'label': 'Second-order Index ($S_2$)'},ax=axs[1]) # magma
117
121
  axs[1].set_title('Second-order Interactions ($S_2$)')
118
122
  axs[1].invert_yaxis()
@@ -0,0 +1,449 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ import matplotlib.pyplot as plt
4
+ from scipy import signal
5
+
6
+ # constants
7
+ epsilon = 1e-12 # to avoid mathematical singularities
8
+
9
+ # visualization parameters
10
+ plt.rcParams['figure.facecolor'] = 'black'
11
+ plt.rcParams['axes.facecolor'] = 'black'
12
+ plt.rcParams['text.color'] = 'white'
13
+ plt.rcParams['axes.labelcolor'] = 'white'
14
+ plt.rcParams['xtick.color'] = 'white'
15
+ plt.rcParams['ytick.color'] = 'white'
16
+ plt.rcParams['axes.edgecolor'] = 'white'
17
+ plt.rcParams['grid.color'] = 'white'
18
+ plt.rcParams['lines.color'] = 'white'
19
+
20
def apply_noise(signal, noise):
    '''
    Adds zero-mean Gaussian noise to a waveform.

    Inputs:
    - signal: Waveform amplitude array.
      (NOTE(review): the parameter name shadows the module-level
      `from scipy import signal` inside this function -- confusing but harmless.)
    - noise: Noise level as a fraction of the waveform's amplitude standard
      deviation (e.g. 0.01 => sigma is 1% of std(|signal|)); 0 disables.

    Outputs:
    - Waveform array with additive Gaussian noise.
    '''

    # fast path: skip the random draw entirely when no noise is requested,
    # so the global RNG state is not consumed by a no-op
    if noise == 0:
        return signal

    # noise sigma scales with the spread of the discrete amplitudes
    stdev = np.std(np.abs(signal))
    signal_noisy = signal + noise * np.random.normal(loc=0, scale=stdev, size=signal.shape)

    return signal_noisy
26
+
27
def e1_waveform(peak_Vm=50e3, rise_s=5e-9, decay_s=200e-9,
                sample_Hz=10e9, duration_s=1e-6, noise=0.0):
    '''
    Objective:
    - Generates time-domain double exponential H-EMP E1 waveform, per MIL-STD / IEC specifications.
    - Pulse waveform: E(t) = E0 * k * (exp(-alpha*t) - exp(-beta*t))

    Inputs:
    - peak_Vm: Target EMP peak amplitude (V/m).
    - rise_s: Target EMP 10%-90% rise time (s).
    - decay_s: Target decay time of the pulse, i.e. the FWHM (s).
    - sample_Hz: Sampling rate of pulse (Hz).
    - duration_s: Duration of pulse (s).
    - noise: Optional Gaussian noise level (fraction of amplitude std; 0 disables).

    Outputs:
    - time_s: Time array of pulse (s).
    - E_t: Electric field amplitude array at each time (V/m).

    '''

    dt_s = 1 / sample_Hz
    time_s = np.arange(0, duration_s, dt_s)

    # alpha controls the decay and broadband shape,
    # derived from the target FWHM: alpha = ln(2) / FWHM
    alpha = np.log(2) / decay_s

    # beta controls the rise time and high-frequency content; the textbook
    # approximation is beta ~= 2.2 / rise_time (s^-1) -- the 2.0035 constant
    # here appears to be a refined empirical fit (TODO confirm with author)
    beta = 2.0035 / rise_s

    # calculate time of pulse peak for k_norm calculation
    t_peak_s = np.log(beta / alpha) / (beta - alpha)

    # 'k_norm' factor normalizes the pulse peak to the requested peak_Vm
    denominator = (np.exp(-alpha * t_peak_s) - np.exp(-beta * t_peak_s))
    if np.isclose(denominator, 0):
        # degenerate case (alpha ~= beta): fall back to the raw amplitude
        k_norm = peak_Vm
    else:
        k_norm = peak_Vm / denominator

    # generate the E1 waveform
    E_t = k_norm * (np.exp(-alpha * time_s) - np.exp(-beta * time_s))

    # adding random gaussian noise
    E_t = apply_noise(E_t, noise)

    return time_s, E_t
74
+
75
def e2_waveform(E_peak=100, tr_us=1.5, tf_ms=1.0, sample_rate=1e6, duration_s=0.01, noise=0.0):
    '''
    Generates the intermediate-time E2 H-EMP pulse waveform (lightning-like
    double exponential).

    Inputs:
    - E_peak: Nominal field amplitude (V/m).
    - tr_us: Rise-time constant (microseconds).
    - tf_ms: Fall-time constant (milliseconds).
    - sample_rate: Sampling rate (Hz).
    - duration_s: Pulse duration (s).
    - noise: Optional Gaussian noise level (0 disables).

    Outputs:
    - (time array in seconds, field amplitude array in V/m)
    '''

    # decay and rise rates from the millisecond / microsecond time constants
    decay_rate = 1 / (tf_ms * 1e-3)
    rise_rate = 1 / (tr_us * 1e-6)

    time_s = np.arange(0, duration_s, 1 / sample_rate)

    # double exponential with a fixed 1.1 amplitude correction factor
    field = E_peak * 1.1 * (np.exp(-decay_rate * time_s) - np.exp(-rise_rate * time_s))

    # optional additive gaussian noise
    field = apply_noise(field, noise)

    return time_s, field
86
+
87
def e3_waveform(E_peak=40, t_peak_s=10, sample_rate=10, duration_s=500, noise=0.0):
    '''
    Generates E3 H-EMP pulse waveform (geostorm-like; magnetohydrodynamic),
    based on a simplified IEC 61000-2-9 E3 double exponential.

    Inputs:
    - E_peak: Peak field amplitude (V/m). The waveform is normalized so its
      maximum equals this value.
    - t_peak_s: Unused; retained for backward compatibility. The peak of the
      fixed 20 s / 120 s double exponential occurs at t = 24*ln(6) ~= 43 s.
    - sample_rate: Sampling rate (Hz).
    - duration_s: Pulse duration (s).
    - noise: Optional Gaussian noise level (0 disables).

    Outputs:
    - t: Time array (s).
    - E_t: Field amplitude array (V/m).
    '''
    t = np.arange(0, duration_s, 1/sample_rate)

    # simplified IEC 61000-2-9 E3 shape with fixed 120 s decay and 20 s rise
    shape = np.exp(-t/120) - np.exp(-t/20)

    # the unit shape peaks at t* = 24*ln(6), where it evaluates to
    # (5/6) * 6**(-0.2) ~= 0.582; dividing by this normalizes the pulse
    # maximum to E_peak (previously the peak was only ~0.58 * E_peak,
    # contradicting the parameter's meaning)
    peak_factor = (5.0 / 6.0) * 6.0 ** (-0.2)
    E_t = (E_peak / peak_factor) * shape

    # adding random gaussian noise
    E_t = apply_noise(E_t, noise)

    return t, E_t
98
+
99
def calculate_rise_time(time_array, pulse_array):
    '''
    Objective:
    - Calculates the 10%-90% rise time of a pulse waveform via linear
      interpolation of the threshold crossings on the rising side.

    Inputs:
    - time_array: Pulse time array.
    - pulse_array: Pulse waveform array (same length as time_array).

    Outputs:
    - rise_time: 10%-90% rise time (same units as time_array).
    - t_90_percent: Time at the rising 90%-of-peak crossing.
    - t_10_percent: Time at the rising 10%-of-peak crossing.
    Returns (None, None, None) if either threshold is never reached, so
    callers that unpack three values do not raise (previously a bare None
    was returned, which broke tuple unpacking).
    '''

    peak_amplitude = np.max(pulse_array)

    # 10% and 90% amplitude thresholds
    threshold_10_percent = 0.1 * peak_amplitude
    threshold_90_percent = 0.9 * peak_amplitude

    crossings = []
    for threshold in (threshold_10_percent, threshold_90_percent):
        # first sample at or above the threshold (rising side)
        idx = np.where(pulse_array >= threshold)[0]
        if len(idx) == 0:
            return None, None, None
        i = idx[0]

        if i == 0:
            # waveform starts at/above the threshold; previously this path
            # interpolated with index -1 (the *last* sample), which is wrong
            crossings.append(time_array[0])
        else:
            # linear interpolation between the bracketing samples
            crossings.append(time_array[i-1] + (threshold - pulse_array[i-1]) *
                             (time_array[i] - time_array[i-1]) /
                             (pulse_array[i] - pulse_array[i-1]))

    t_10_percent, t_90_percent = crossings

    # rise time is the gap between the two rising-side crossings
    rise_time = t_90_percent - t_10_percent

    return rise_time, t_90_percent, t_10_percent
148
+
149
def calculate_fwhm(time_array, pulse_array):
    '''
    Objective:
    - Calculates the full width at half maximum (FWHM) of a pulse waveform
      via linear interpolation of the half-maximum crossings.

    Inputs:
    - time_array: Pulse time array.
    - pulse_array: Pulse waveform array (same length as time_array).

    Outputs:
    - fwhm: Full-width half-max (same units as time_array).
    - t_fwhm2: Time at the decaying-side half-max crossing.
    - t_fwhm1: Time at the rising-side half-max crossing.
    Returns (None, None, None) if the waveform never reaches half max
    (previously this case raised IndexError).
    '''

    # peak and half-maximum level
    peak_amplitude = np.max(pulse_array)
    half_max = peak_amplitude / 2.0

    # samples at or above half max
    indices_above_half_max = np.where(pulse_array >= half_max)[0]
    if len(indices_above_half_max) == 0:
        return None, None, None

    # first and last samples above half max bracket the two crossings
    idx1 = indices_above_half_max[0]
    idx2 = indices_above_half_max[-1]

    # first FWHM crossing point (rising side)
    if idx1 == 0:  # pulse starts above half_max
        t_fwhm1 = time_array[idx1]
    else:
        # interpolate between samples idx1-1 and idx1
        t_fwhm1 = time_array[idx1-1] + (half_max - pulse_array[idx1-1]) * \
                  (time_array[idx1] - time_array[idx1-1]) / \
                  (pulse_array[idx1] - pulse_array[idx1-1])

    # second FWHM crossing point (decaying side)
    if idx2 == len(pulse_array) - 1:  # pulse ends above half_max
        t_fwhm2 = time_array[idx2]
    else:
        # interpolate between samples idx2 and idx2+1
        t_fwhm2 = time_array[idx2] + (half_max - pulse_array[idx2]) * \
                  (time_array[idx2+1] - time_array[idx2]) / \
                  (pulse_array[idx2+1] - pulse_array[idx2])

    # width between the rising and decaying crossings
    fwhm = t_fwhm2 - t_fwhm1

    return fwhm, t_fwhm2, t_fwhm1
198
+
199
def apply_shielding(f, shielding_dB, rolloff_hf=500e6, rolloff_lf=1e3):
    '''
    Complex shielding transfer function combining a flat attenuation with
    real-world LF (magnetic penetration) and HF (leakage) rolloffs.

    Inputs:
    - f: Frequency array (Hz).
    - shielding_dB: Nominal flat shielding effectiveness (dB).
    - rolloff_hf: High-frequency rolloff corner (Hz).
    - rolloff_lf: Low-frequency rolloff corner (Hz).

    Outputs:
    - Complex transfer function H(f), same shape as f.
    '''

    # flat attenuation converted from dB to linear voltage gain
    attenuation = 10 ** (-shielding_dB / 20)

    # single-pole low-pass: shielding effectiveness degrades above the HF corner
    leakage_hf = 1 / (1 + 1j * (f / rolloff_hf))

    # high-pass-like magnetic term: effectiveness degrades below the LF corner
    # (the tiny offset avoids division by zero at f = 0)
    magnetic_lf = 1 / (1 + (rolloff_lf / (f + 1e-12)))

    return attenuation * leakage_hf * magnetic_lf
217
+
218
def analyze_waveform(x=None, y=None, sample_rate=None, domain='time', method='complex',
                     tf_function=None, tf_kwargs=None, noise=0.0, verbose=True):
    '''
    Decomposes & analyzes the given signal waveform.

    Inputs:
    - x: Time array (s) if domain='time', or frequency array (Hz) otherwise.
    - y: Signal amplitude array matching x.
    - sample_rate: Sampling rate (Hz); inferred from x when omitted.
    - domain: 'time' (or 't') for time-domain input; anything else => 'freq'.
    - method: 'complex' (full FFT) or 'real' (rFFT) transform convention.
    - tf_function: Optional transfer function applied to y before analysis.
    - tf_kwargs: Keyword arguments forwarded to tf_function.
    - noise: Optional Gaussian noise level (0 disables).
    - verbose: If True, renders the diagnostic dashboard plot.

    Outputs:
    - temporal: dict with 'time_s' and 'amplitude' arrays.
    - spectral: dict with 'freq', 'signal', 'esd', and cumulative 'energy'
      arrays over the non-negative frequencies.
    - metrics: dict of scalar results.
    (Previously documented as DataFrames; plain dicts are what is returned.)
    '''

    # clean / normalize inputs
    x = np.array(x)
    y = np.array(y).flatten()
    domain = 'time' if domain.lower() in ['t', 'time'] else 'freq'

    # previously an unknown method fell through and raised NameError later
    if method not in ('complex', 'real'):
        raise ValueError("method must be 'complex' or 'real'")

    # apply transfer function in the input domain
    if tf_function:
        y = apply_transfer_function(x, y, tf_function, domain=domain, **(tf_kwargs or {}))

    # apply gaussian noise
    y = apply_noise(y, noise)

    # obtain both time- and frequency-domain representations
    if domain == 'time':
        n = len(y)
        fs = 1 / (x[1] - x[0])  # sample rate from the time step
        if method == 'complex':
            y_f = np.fft.fft(y) / n
            freqs = np.fft.fftfreq(n, 1/fs)
        else:  # 'real'
            y_f = np.fft.rfft(y) / n
            freqs = np.fft.rfftfreq(n, 1/fs)
        y_t, t = y, x
    else:
        # assume Nyquist ~= max frequency when no sample rate is given
        fs = sample_rate if sample_rate else x[len(x)//2] * 2
        if method == 'complex':
            y_t = np.fft.ifft(y).real
        else:  # 'real': irfft reconstructs 2*(len(y)-1) time samples
            y_t = np.fft.irfft(y).real
        # undo the forward 1/n normalization using the *reconstructed*
        # length; previously len(y) was used, which is the wrong factor
        # for irfft and also produced a time axis that mismatched y_t,
        # breaking the trapezoid integral below
        y_t = y_t * len(y_t)
        t = np.arange(len(y_t)) / fs
        y_f, freqs = y, x

    # keep the non-negative half of the spectrum
    mask = freqs >= 0
    f_pos = freqs[mask]
    yf_pos = y_f[mask]

    # energy spectral density; the factor 2 folds in the negative frequencies
    # (previously this line was computed twice)
    esd = (np.abs(yf_pos)**2) * (2 / fs)
    esd[0] = esd[0] / 2  # DC bin only exists once

    # for rFFT the last bin (Nyquist) also only exists once
    if method == 'real' and len(esd) > 0:
        esd[-1] = esd[-1] / 2

    # cumulative energy over frequency
    df_freq = f_pos[1] - f_pos[0]
    cumul_energy = np.cumsum(esd) * df_freq
    total_energy = cumul_energy[-1]

    # time-domain view
    temporal = {
        'time_s': t,
        'amplitude': y_t
    }

    # frequency-domain view
    spectral = {
        'freq': f_pos,
        'signal': yf_pos,
        'esd': esd,
        'energy': cumul_energy
    }

    # max gradient (per-sample difference scaled to a time derivative)
    dv_dt = np.diff(y_t) * fs

    # action integral: integral of amplitude squared over time
    # (np.trapezoid requires NumPy >= 2.0; older NumPy spells it np.trapz)
    action_integral = np.trapezoid(y_t**2, t)

    # rise time and FWHM of the rectified waveform, reported in ns
    rise_s, _, _ = calculate_rise_time(t, np.abs(y_t))
    rise_ns = 1e9 * rise_s
    fwhm_s, _, _ = calculate_fwhm(t, np.abs(y_t))
    fwhm_ns = 1e9 * fwhm_s

    # scalar metrics
    metrics = {
        'peak_t': np.max(np.abs(y_t)),
        'peak_f': np.max(np.abs(yf_pos)),
        'total_energy': total_energy,
        'action_integral': action_integral,
        'max_dv_dt': np.max(np.abs(dv_dt)),
        'bw_90_hz': f_pos[np.where(cumul_energy >= 0.9 * total_energy)[0][0]],
        'center_freq_hz': np.sum(f_pos * esd) / (total_energy + 1e-12),
        'papr_db': 10 * np.log10(np.max(y_t**2) / np.mean(y_t**2)),
        'sample_rate': fs,
        'rise90_ns': rise_ns,
        'fwhm_ns': fwhm_ns,
    }

    # plot
    if verbose:
        plot_diagnostic_dashboard(temporal, spectral, metrics)

    return temporal, spectral, metrics
329
+
330
def apply_transfer_function(x, y, tf_function, domain='freq', **kwargs):
    '''
    Applies a transfer function to a signal in either domain.

    Inputs:
    - x: Frequency array (domain='freq') or time array (domain='time').
    - y: Input signal array.
    - tf_function: Callable evaluated on x, yielding H(f) or h(t).
    - domain: 'freq' multiplies by H(f); 'time' convolves with h(t).
    - **kwargs: Extra arguments forwarded to tf_function.

    Outputs:
    - The filtered signal array.

    Raises:
    - ValueError: If domain is neither 'freq' nor 'time'.
    '''

    if domain == 'time':
        # impulse response sampled on the time grid, applied by same-length
        # convolution and scaled by the sample spacing dt
        impulse_response = tf_function(x, **kwargs)
        dt = x[1] - x[0]
        filtered = np.convolve(y, impulse_response, mode='same')
        filtered = filtered * dt
        return filtered

    if domain == 'freq':
        # frequency response evaluated on |f| (handles negative bins),
        # applied as a pointwise multiplication
        freq_response = tf_function(np.abs(x), **kwargs)
        return y * freq_response

    raise ValueError('Unrecognized signal domain.')
350
+
351
def plot_diagnostic_dashboard(temporal, spectral, metrics):
    '''
    6-plot diagnostic dashboard:
        Time Domain       |  Frequency Magnitude
        Phase Spectrum    |  Energy Spectral Density (ESD)
        Cumulative Energy |  Spectrogram

    Inputs:
    - temporal: dict with 'time_s' and 'amplitude' arrays.
    - spectral: dict with 'freq', 'signal', 'esd', and 'energy' arrays.
    - metrics: dict of scalars; reads 'peak_t', 'bw_90_hz', 'sample_rate'.
    '''

    # 3 rows, 2 columns
    fig = plt.figure(figsize=(16, 20))
    gs = fig.add_gridspec(3, 2, hspace=0.3, wspace=0.3)

    ax_time = fig.add_subplot(gs[0, 0])
    ax_freq = fig.add_subplot(gs[0, 1])
    ax_phase = fig.add_subplot(gs[1, 0])
    ax_esd = fig.add_subplot(gs[1, 1])
    ax_cum = fig.add_subplot(gs[2, 0])
    ax_spec = fig.add_subplot(gs[2, 1])

    # mask for positive frequencies to ensure dimensions match
    pos_mask = spectral['freq'] >= 0
    f_mhz = spectral['freq'][pos_mask] / 1e6
    mag_pos = np.abs(spectral['signal'])[pos_mask]

    # time domain
    # (f-strings use double quotes: nested same-type quotes inside an
    # f-string are a SyntaxError before Python 3.12, and this package
    # declares support for Python >= 3.8)
    ax_time.plot(temporal['time_s']*1e6, temporal['amplitude'], color='cyan',
                 label=f"Peak: {metrics['peak_t']:.2f}")
    ax_time.set_title('Time-Domain Signal')
    ax_time.set_xlabel(r'Time ($\mu s$)')
    ax_time.set_ylabel('Amplitude')
    ax_time.legend()

    # frequency domain (log magnitude, positive frequencies)
    ax_freq.semilogy(f_mhz, mag_pos, color='violet')
    ax_freq.set_title('Frequency-Domain Signal')
    ax_freq.set_xlabel('Frequency (MHz)')
    max_f = metrics['sample_rate'] / 2e6  # Nyquist in MHz; x-limit never exceeds it
    ax_freq.set_xlim(0, min(1000, max_f))
    ax_freq.grid(alpha=0.2, which='both')

    # phase spectrum
    ax_phase.plot(f_mhz, np.angle(spectral['signal'])[pos_mask], color='limegreen', linewidth=0.5)
    ax_phase.set_title('Phase Spectrum')
    ax_phase.set_xlabel('Frequency (MHz)')
    ax_phase.set_ylabel('Phase (rad)')
    ax_phase.set_xlim(ax_freq.get_xlim())

    # energy spectral density (ESD)
    ax_esd.semilogy(f_mhz, spectral['esd'], color='gold')
    ax_esd.set_title('Energy Spectral Density (ESD)')
    ax_esd.set_ylabel(r'$V^2 \cdot s / Hz$')
    ax_esd.set_xlabel('Frequency (MHz)')
    ax_esd.set_xlim(ax_freq.get_xlim())

    # cumulative energy distribution
    ax_cum.plot(f_mhz, spectral['energy'], color='gold', linewidth=2)
    ax_cum.fill_between(f_mhz, spectral['energy'], color='gold', alpha=0.2)
    # convert bw_90_hz (Hz) to MHz so it matches the axis; previously the
    # marker was drawn at the raw Hz value and the label printed Hz as "MHz"
    bw_90_mhz = metrics['bw_90_hz'] / 1e6
    ax_cum.axvline(bw_90_mhz, color='red', linestyle='--',
                   label=f"90% Band: {bw_90_mhz:.1f} MHz")
    ax_cum.set_title('Cumulative Energy')
    ax_cum.set_ylabel('Normalized Energy')
    ax_cum.set_xlabel('Frequency (MHz)')
    ax_cum.set_xlim(ax_freq.get_xlim())
    ax_cum.legend(fontsize='small')

    # spectrogram; amplitude forced real for scipy
    sample_rate = metrics['sample_rate']
    y_signal = np.real(temporal['amplitude'])

    # window sized to ~1% of the record, biased slightly shorter
    window_duration_s = 0.01*(temporal['time_s'].max() - temporal['time_s'].min())
    raw_n = (1/1.15) * (window_duration_s * sample_rate)

    # snap the window length to the nearest power of 2, floor of 16 samples
    if raw_n <= 1:
        nperseg = 16
    else:
        nperseg = max(int(2**np.round(np.log2(raw_n))), 16)

    # 50% overlap is always strictly less than the segment length
    noverlap = nperseg // 2
    f, t_spec, Sxx = signal.spectrogram(y_signal, fs=sample_rate, window='hann',
                                        nperseg=nperseg, noverlap=noverlap)

    im = ax_spec.pcolormesh(t_spec*1e6, f, 10*np.log10(Sxx + epsilon),
                            shading='gouraud', cmap='plasma')

    ax_spec.set_title('Spectrogram')
    ax_spec.set_yscale('log')
    # NOTE(review): the upper limit divides max frequency by the sample rate
    # (~0.5 Hz) while the axis is in Hz -- looks unintended, but preserved
    # as-is pending confirmation
    ax_spec.set_ylim(np.abs(spectral['freq']).min()+epsilon, np.abs(spectral['freq'].max()/sample_rate))
    cbar = fig.colorbar(im, ax=ax_spec)
    cbar.set_label('Power/Frequency (dB/Hz)')

    plt.show()
@@ -1,19 +1,33 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hdim_opt
3
- Version: 1.2.3
4
- Summary: Optimization toolkit for high-dimensional, non-differentiable problems.
3
+ Version: 1.3.1
4
+ Summary: High-dimensional global optimization and sampling toolkit for complex and non-differentiable problems.
5
5
  Author-email: Julian Soltes <jsoltes@regis.edu>
6
6
  License: MIT
7
+ Project-URL: Homepage, https://github.com/jgsoltes/hdim_opt
7
8
  Project-URL: Repository, https://github.com/jgsoltes/hdim_opt
8
- Keywords: optimization,high-dimensional,sampling,QUASAR,HDS
9
+ Project-URL: Issues, https://github.com/jgsoltes/hdim_opt/issues
10
+ Project-URL: Changelog, https://github.com/jgsoltes/hdim_opt/releases
11
+ Keywords: optimization,high-dimensional,sampling,QUASAR,hyperellipsoid,evolutionary-algorithm,non-differentiable,global-optimization,stochastic-optimization,black-box-optimization
12
+ Classifier: Development Status :: 5 - Production/Stable
9
13
  Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
10
19
  Classifier: License :: OSI Approved :: MIT License
11
20
  Classifier: Operating System :: OS Independent
12
21
  Classifier: Intended Audience :: Science/Research
13
22
  Classifier: Intended Audience :: Developers
23
+ Classifier: Intended Audience :: Education
24
+ Classifier: Natural Language :: English
14
25
  Classifier: Topic :: Scientific/Engineering :: Mathematics
15
26
  Classifier: Topic :: Scientific/Engineering :: Physics
16
27
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
28
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
29
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
30
+ Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
17
31
  Requires-Python: >=3.8
18
32
  Description-Content-Type: text/markdown
19
33
  Requires-Dist: numpy
@@ -28,13 +42,14 @@ Requires-Dist: SALib; extra == "sensitivity"
28
42
 
29
43
  # hdim-opt: High-Dimensional Optimization Toolkit
30
44
 
31
- A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm and HDS exploitative QMC sampler.
45
+ A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm, HDS exploitative QMC sampler, Sobol sensitivity analysis, and signal waveform decomposition.
32
46
 
33
47
  All core functions, listed below, are single-line executable and require three essential parameters: [obj_function, bounds, n_samples].
34
48
  * **quasar**: QUASAR optimization for high-dimensional, non-differentiable problems.
35
- * **hds**: Generate an exploitative HDS sequence, to distribute samples in focused regions.
49
+ * **hyperellipsoid**: Generate a non-uniform Hyperellipsoid Density sequence, to focus sample distributions.
36
50
  * **sobol**: Generate a uniform Sobol sequence (via SciPy).
37
51
  * **sensitivity**: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
52
+ * **waveform**: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
38
53
 
39
54
  ---
40
55
 
@@ -52,15 +67,21 @@ pip install hdim_opt
52
67
  import hdim_opt as h
53
68
 
54
69
  # Parameter Space
55
- n_dimensions = 100
70
+ n_dimensions = 30
56
71
  bounds = [(-100,100)] * n_dimensions
57
72
  n_samples = 1000
58
73
  obj_func = h.test_functions.rastrigin
74
+ time, pulse = h.waveform_analysis.e1_waveform()
59
75
 
76
+ # Functions
60
77
  solution, fitness = h.quasar(obj_func, bounds)
61
78
  sens_matrix = h.sensitivity(obj_func, bounds)
62
- hds_samples = h.hds(n_samples, bounds)
79
+
80
+ hds_samples = h.hyperellipsoid(n_samples, bounds)
63
81
  sobol_samples = h.sobol(n_samples, bounds)
82
+ isotropic_samples = h.isotropize(sobol_samples)
83
+
84
+ signal_data = h.waveform(x=time,y=pulse)
64
85
  ```
65
86
 
66
87
  ## QUASAR Optimizer
@@ -68,7 +89,7 @@ sobol_samples = h.sobol(n_samples, bounds)
68
89
 
69
90
  * Benefit: Significant improvements in convergence speed and solution quality compared to contemporary optimizers. (Reference: [https://arxiv.org/abs/2511.13843]).
70
91
 
71
- ## HDS Sampler (Hyperellipsoid Density Sampling)
72
- **HDS** is a non-uniform Quasi-Monte Carlo sampling method, specifically designed to exploit promising regions of the search space.
92
+ ## HDS Sampler
93
+ **HDS** (Hyperellipsoid Density Sampling) is a non-uniform Quasi-Monte Carlo sampling method, specifically designed to exploit promising regions of the search space.
73
94
 
74
95
  * Benefit: Provides control over the sample distribution. Results in higher average optimization solution quality when used for population initialization compared to uniform QMC methods. (Reference: [https://arxiv.org/abs/2511.07836]).
@@ -7,6 +7,7 @@ hdim_opt/quasar_optimization.py
7
7
  hdim_opt/sobol_sampling.py
8
8
  hdim_opt/sobol_sensitivity.py
9
9
  hdim_opt/test_functions.py
10
+ hdim_opt/waveform_analysis.py
10
11
  hdim_opt.egg-info/PKG-INFO
11
12
  hdim_opt.egg-info/SOURCES.txt
12
13
  hdim_opt.egg-info/dependency_links.txt
@@ -0,0 +1,76 @@
1
+ # pyproject.toml
2
+
3
+ [build-system]
4
+ requires = ["setuptools>=61.0.0", "wheel"]
5
+ build-backend = "setuptools.build_meta"
6
+
7
+ [project]
8
+ name = "hdim_opt"
9
+ version = "1.3.1" # match __version__ in __init__.py
10
+ description = "High-dimensional global optimization and sampling toolkit for complex and non-differentiable problems."
11
+ readme = {file = "README.md", content-type = "text/markdown"}
12
+
13
+ authors = [
14
+ {name="Julian Soltes", email="jsoltes@regis.edu"}
15
+ ]
16
+
17
+ license = { text = "MIT" }
18
+ requires-python = ">=3.8"
19
+ keywords = [
20
+ "optimization",
21
+ "high-dimensional",
22
+ "sampling",
23
+ "QUASAR",
24
+ "hyperellipsoid",
25
+ "evolutionary-algorithm",
26
+ "non-differentiable",
27
+ "global-optimization",
28
+ "stochastic-optimization",
29
+ "black-box-optimization"
30
+ ]
31
+ classifiers = [
32
+ "Development Status :: 5 - Production/Stable",
33
+ "Programming Language :: Python :: 3",
34
+ "Programming Language :: Python :: 3.8",
35
+ "Programming Language :: Python :: 3.9",
36
+ "Programming Language :: Python :: 3.10",
37
+ "Programming Language :: Python :: 3.11",
38
+ "Programming Language :: Python :: 3.12",
39
+ "License :: OSI Approved :: MIT License",
40
+ "Operating System :: OS Independent",
41
+ "Intended Audience :: Science/Research",
42
+ "Intended Audience :: Developers",
43
+ "Intended Audience :: Education",
44
+ "Natural Language :: English",
45
+ "Topic :: Scientific/Engineering :: Mathematics",
46
+ "Topic :: Scientific/Engineering :: Physics",
47
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
48
+ "Topic :: Scientific/Engineering :: Information Analysis",
49
+ "Topic :: Software Development :: Libraries :: Python Modules",
50
+ "Topic :: Scientific/Engineering :: Bio-Informatics"
51
+ ]
52
+ dependencies = ["numpy", "scipy"]
53
+
54
+ [project.optional-dependencies]
55
+ hds = [
56
+ # required by HDS `pip install hdim_opt[hds]`
57
+ "pandas",
58
+ "scikit-learn",
59
+ "joblib"
60
+ ]
61
+
62
+ sensitivity = [
63
+ # required by sensitivity `pip install hdim_opt[sensitivity]`
64
+ "pandas",
65
+ "SALib"
66
+ ]
67
+
68
+ [project.urls]
69
+ Homepage = "https://github.com/jgsoltes/hdim_opt"
70
+ Repository = "https://github.com/jgsoltes/hdim_opt"
71
+ Issues = "https://github.com/jgsoltes/hdim_opt/issues"
72
+ Changelog = "https://github.com/jgsoltes/hdim_opt/releases"
73
+
74
+ [tool.setuptools.packages.find]
75
+ where = ["."]
76
+ include = ["hdim_opt*"] # find hdim_opt package
@@ -1,13 +0,0 @@
1
- # hdim_opt/__init__.py
2
-
3
- # package version
4
- __version__ = "1.2.3"
5
- __all__ = ['quasar', 'hds', 'sobol', 'sensitivity', 'test_functions', 'quasar_helpers'] # available for star imports
6
-
7
- # import core components
8
- from .quasar_optimization import optimize as quasar
9
- from .hyperellipsoid_sampling import sample as hds
10
- from .sobol_sampling import sobol_sample as sobol
11
- from .sobol_sensitivity import sens_analysis as sensitivity
12
- from . import test_functions
13
- from . import quasar_helpers
@@ -1,51 +0,0 @@
1
- # pyproject.toml
2
-
3
- [build-system]
4
- requires = ["setuptools>=61.0.0", "wheel"]
5
- build-backend = "setuptools.build_meta"
6
-
7
- [project]
8
- name = "hdim_opt"
9
- version = "1.2.3" # match __version__ in __init__.py
10
- description = "Optimization toolkit for high-dimensional, non-differentiable problems."
11
- readme = {file = "README.md", content-type = "text/markdown"}
12
-
13
- authors = [
14
- {name="Julian Soltes", email="jsoltes@regis.edu"}
15
- ]
16
-
17
- license = { text = "MIT" }
18
- requires-python = ">=3.8"
19
- keywords = ["optimization", "high-dimensional", "sampling", "QUASAR", "HDS"]
20
- classifiers = [
21
- "Programming Language :: Python :: 3",
22
- "License :: OSI Approved :: MIT License",
23
- "Operating System :: OS Independent",
24
- "Intended Audience :: Science/Research",
25
- "Intended Audience :: Developers",
26
- "Topic :: Scientific/Engineering :: Mathematics",
27
- "Topic :: Scientific/Engineering :: Physics",
28
- "Topic :: Scientific/Engineering :: Artificial Intelligence"
29
- ]
30
- dependencies = ["numpy", "scipy"]
31
-
32
- [project.optional-dependencies]
33
- hds = [
34
- # required by HDS `pip install hdim_opt[hds]`
35
- "pandas",
36
- "scikit-learn",
37
- "joblib"
38
- ]
39
-
40
- sensitivity = [
41
- # required by sensitivity `pip install hdim_opt[sensitivity]`
42
- "pandas",
43
- "SALib"
44
- ]
45
-
46
- [project.urls]
47
- Repository = "https://github.com/jgsoltes/hdim_opt"
48
-
49
- [tool.setuptools.packages.find]
50
- where = ["."]
51
- include = ["hdim_opt*"] # find hdim_opt package
File without changes