hdim-opt 1.2.3__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hdim_opt/__init__.py CHANGED
@@ -1,7 +1,43 @@
1
- # hdim_opt/__init__.py
1
+ """
2
+
3
+ # hdim-opt: High-Dimensional Optimization Toolkit
4
+
5
+ Functions:
6
+ - quasar: QUASAR optimization for high-dimensional, non-differentiable problems.
7
+ - hds: Generate an exploitative HDS sequence, to distribute samples in focused regions.
8
+ - sobol: Generate a uniform Sobol sequence (via SciPy).
9
+ - sensitivity: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
10
+ - waveform: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
11
+
12
+ Modules:
13
+ - test_functions: Contains test functions for local optimization testing.
14
+ - waveform_analysis: Contains pulse generation functions.
15
+
16
+ Example Usage:
17
+
18
+ # Import
19
+ >>> import hdim_opt as h
20
+
21
+ # Parameter Space
22
+ >>> n_dimensions = 30
23
+ >>> bounds = [(-100,100)] * n_dimensions
24
+ >>> n_samples = 1000
25
+ >>> obj_func = h.test_functions.rastrigin
26
+ >>> time, pulse = h.waveform_analysis.e1_waveform()
27
+
28
+ # Functions
29
+ >>> solution, fitness = h.quasar(obj_func, bounds)
30
+ >>> sens_matrix = h.sensitivity(obj_func, bounds)
31
+
32
+ >>> hds_samples = h.hds(n_samples, bounds)
33
+ >>> sobol_samples = h.sobol(n_samples, bounds)
34
+ >>> isotropic_samples = h.isotropize(sobol_samples)
35
+
36
+ >>> signal_data = h.waveform(x=time,y=pulse)
37
+ """
2
38
 
3
39
  # package version
4
- __version__ = "1.2.3"
40
+ __version__ = "1.3.0"
5
41
  __all__ = ['quasar', 'hds', 'sobol', 'sensitivity', 'test_functions', 'quasar_helpers'] # available for star imports
6
42
 
7
43
  # import core components
@@ -9,5 +45,9 @@ from .quasar_optimization import optimize as quasar
9
45
  from .hyperellipsoid_sampling import sample as hds
10
46
  from .sobol_sampling import sobol_sample as sobol
11
47
  from .sobol_sensitivity import sens_analysis as sensitivity
48
+ from .waveform_analysis import analyze_waveform as waveform
49
+ from .quasar_helpers import isotropize
50
+ from .quasar_helpers import deisotropize
12
51
  from . import test_functions
13
- from . import quasar_helpers
52
+ from . import quasar_helpers
53
+ from . import waveform_analysis
@@ -472,10 +472,8 @@ def sample(n_samples, bounds,
472
472
  print(' - number of samples:', len(hds_sequence))
473
473
  print(f' - sample generation time: {sample_generation_time:.2f}')
474
474
  print(f' - number of hyperellipsoids: {n_hyperellipsoids}')
475
- print(f' - number of initial QMC: {n_initial_qmc}')
476
- print(f' - number of initial clusters: {n_initial_clusters}')
477
475
  if weights:
478
- print(f' - gaussian weights: {weights}')
476
+ print(f' - weights: {weights}')
479
477
 
480
478
  # generate a sobol sequence for comparison
481
479
  sobol_sampler = stats.qmc.Sobol(d=n_dimensions, seed=seed+2) # offset seed to be different from initial qmc
@@ -492,10 +490,8 @@ def sample(n_samples, bounds,
492
490
  sobol_std = np.std(sobol_samples)
493
491
 
494
492
  print('\nstats:')
495
- print(f' - mean HDS: {hds_mean:.2f}')
496
- print(f' - mean comparison QMC: {sobol_mean:.2f}')
497
- print(f' - stdev HDS: {hds_std:.2f}')
498
- print(f' - stdev comparison QMC: {sobol_std:.2f}\n')
493
+ print(f' - HDS mean: {hds_mean:.2f}')
494
+ print(f' - HDS stdev: {hds_std:.2f}\n')
499
495
 
500
496
  # dendrogram of centroids
501
497
  if plot_dendrogram:
@@ -1,7 +1,56 @@
1
1
  # global imports
2
+ import pandas as pd
3
+ from sklearn.preprocessing import StandardScaler
2
4
  import numpy as np
5
+ from scipy.linalg import cholesky, solve_triangular
3
6
  epsilon = 1e-12
4
7
 
8
def isotropize(data):
    '''
    Transform data into an isotropic (whitened) space.

    Centers each column to zero mean, scales to unit variance, then removes
    cross-correlations via a Cholesky whitening transform. Promotes
    optimization stability by making the search space spherical.

    Inputs:
    - data: 2D array-like of samples, shape (n_samples, n_dims).
    Outputs:
    - data_iso: Isotropized data, same shape as the input.
    - params: Dict with 'mean', 'stdev', and Cholesky factor 'L', as required
      by deisotropize() to invert the transform.
    '''
    eps = 1e-12  # numerical guard for singular covariance / zero variance

    # convert to a float array
    X = np.asarray(data, dtype=float)

    # standard scaling (mean = 0, var = 1); a constant column would otherwise
    # divide by zero, so clamp its stdev to 1 (it stays centered at 0)
    mean = np.mean(X, axis=0)
    stdev = np.std(X, axis=0)
    stdev = np.where(stdev < eps, 1.0, stdev)
    X_centered = (X - mean) / stdev

    # whitening parameters: jitter the diagonal so the Cholesky factorization
    # succeeds even when the sample covariance is rank-deficient
    n_dims = X_centered.shape[1]
    cov = np.cov(X_centered, rowvar=False) + np.eye(n_dims) * eps
    L = cholesky(cov, lower=True)

    # transform Y = X_centered @ (L^-1).T, via a triangular solve (no inverse)
    data_iso = solve_triangular(L, X_centered.T, lower=True).T

    # store parameters for deisotropization
    params = {
        'mean': mean,
        'stdev': stdev,
        'L': L
    }
    return data_iso, params
43
+
44
def deisotropize(data_iso, params):
    '''Map isotropized samples back to the original parameter space.

    Inverts the whitening and scaling applied by isotropize(), using the
    stored 'L' (Cholesky factor), 'stdev', and 'mean' parameters.
    '''
    # undo the whitening first: X_centered = Y @ L.T
    unwhitened = np.dot(data_iso, params['L'].T)

    # then undo the standard scaling: X = X_centered * stdev + mean
    return unwhitened * params['stdev'] + params['mean']
53
+
5
54
  ############## CONSTRAINTS ##############
6
55
  def apply_penalty(fitnesses, solutions, constraints, constraint_penalty, vectorized):
7
56
  '''
@@ -32,7 +32,7 @@ def initialize_population(popsize, bounds, init, hds_weights, seed, verbose):
32
32
 
33
33
  # generate samples
34
34
  if verbose:
35
- print(f'Initializing: Hyperellipsoid pop. (N={popsize}, D={n_dimensions}).')
35
+ print(f'Initializing: Hyperellipsoid (N={popsize}, D={n_dimensions}).')
36
36
  initial_population = hds.sample(popsize, bounds, weights=hds_weights,
37
37
  seed=seed, verbose=False)
38
38
 
@@ -75,7 +75,19 @@ def sens_analysis(func, bounds, n_samples=None,
75
75
  import seaborn as sns
76
76
  except ImportError as e:
77
77
  raise ImportError(f'Plotting requires dependencies: (matplotlib, seaborn).') from e
78
-
78
+
79
+ # sort by S1 values
80
+ sort_idx = np.argsort(Si['S1'])
81
+ s1_sorted = Si['S1'][sort_idx]
82
+ st_sorted = Si['ST'][sort_idx]
83
+ s1_conf_sorted = Si['S1_conf'][sort_idx]
84
+ st_conf_sorted = Si['ST_conf'][sort_idx]
85
+ names_sorted = [np.array(param_names)[i] for i in sort_idx]
86
+
87
+
88
+ bar_width = 0.35
89
+ index = np.arange(n_params)
90
+
79
91
  # plot 1: first-order (S1) and total-order (ST) indices
80
92
  sens_plot, axs = plt.subplots(2,1,figsize=(9, 13))
81
93
 
@@ -84,35 +96,26 @@ def sens_analysis(func, bounds, n_samples=None,
84
96
  index = np.arange(n_params)
85
97
 
86
98
  # plot S1 (first order) sensitivities
87
- axs[0].barh(index - bar_width/2, Si['S1'], bar_width,
88
- xerr=Si['S1_conf'],
89
- label='First-order ($S_1$)',
90
- # color='cornflowerblue',
91
- alpha=1,
92
- # ecolor='lightgray',
93
- capsize=2.5)
94
- # edgecolor='black')
99
+ axs[0].barh(index - bar_width/2, s1_sorted, bar_width,
100
+ xerr=s1_conf_sorted,
101
+ label='First-order ($S_1$)',
102
+ alpha=1,
103
+ capsize=2.5)
95
104
 
96
- # plot ST (total order) sensitivities
97
- axs[0].barh(index + bar_width/2, Si['ST'], bar_width,
98
- xerr=Si['ST_conf'],
99
- label='Total-order ($S_T$)',
100
- # color='violet',
101
- # ecolor='lightgray',
102
- alpha=0.75,
103
- capsize=2.5)
104
- # edgecolor='black')
105
+ axs[0].barh(index + bar_width/2, st_sorted, bar_width,
106
+ xerr=st_conf_sorted,
107
+ label='Total-order ($S_T$)',
108
+ alpha=0.75,
109
+ capsize=2.5)
110
+
105
111
  axs[0].set_title('Sensitivity Indices ($S_1$, $S_T$)')
106
112
  if log_scale:
107
113
  axs[0].set_xscale('log')
108
- axs[0].set_xlabel('Sensitivity Index')
109
- axs[0].set_ylabel('Parameter')
110
- axs[0].legend(loc='upper right')
111
- axs[0].grid(False)
114
+
112
115
  axs[0].set_yticks(index)
113
- axs[0].set_yticklabels(param_names, ha='right')
116
+ axs[0].set_yticklabels(names_sorted)
114
117
 
115
- # heatmap of second order indices
118
+ # plot 2: heatmap of second order indices
116
119
  sns.heatmap(data=S2_df, mask=mask, cbar_kws={'label': 'Second-order Index ($S_2$)'},ax=axs[1]) # magma
117
120
  axs[1].set_title('Second-order Interactions ($S_2$)')
118
121
  axs[1].invert_yaxis()
@@ -0,0 +1,446 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ import matplotlib.pyplot as plt
4
+ from scipy import signal
5
+
6
+ # constants
7
+ epsilon = 1e-12 # to avoid mathematical singularities
8
+
9
+ # visualization parameters
10
+ plt.rcParams['figure.facecolor'] = 'black'
11
+ plt.rcParams['axes.facecolor'] = 'black'
12
+ plt.rcParams['text.color'] = 'white'
13
+ plt.rcParams['axes.labelcolor'] = 'white'
14
+ plt.rcParams['xtick.color'] = 'white'
15
+ plt.rcParams['ytick.color'] = 'white'
16
+ plt.rcParams['axes.edgecolor'] = 'white'
17
+ plt.rcParams['grid.color'] = 'white'
18
+ plt.rcParams['lines.color'] = 'white'
19
+
20
def apply_noise(signal, noise):
    '''Add zero-mean gaussian noise scaled by the signal's amplitude spread.

    The perturbation magnitude is `noise` times the standard deviation of
    |signal|; a noise level of 0.0 returns the signal unchanged.
    '''
    amplitude_spread = np.std(np.abs(signal))
    perturbation = np.random.normal(loc=0, scale=amplitude_spread, size=signal.shape)
    return signal + noise * perturbation
26
+
27
def e1_waveform(peak_Vm=50e3, rise_s=5e-9, decay_s=200e-9,
                sample_Hz=10e9, duration_s=1e-6, noise=0.0):
    '''
    Objective:
    - Generates time-domain double exponential H-EMP E1 waveform, per MIL-STD / IEC specifications.
    - Pulse waveform: E(t) = k * (exp(-alpha*t) - exp(-beta*t))

    Inputs:
    - peak_Vm: Target EMP peak amplitude (V/m).
    - rise_s: Target EMP rise time (s).
    - decay_s: Target decay time of the pulse, i.e. its FWHM (s).
    - sample_Hz: Sampling rate of the pulse (Hz).
    - duration_s: Duration of the pulse (s).
    - noise: Gaussian noise level passed to apply_noise (0.0 = clean).

    Outputs:
    - time_s: Time array of pulse (s).
    - E_t: Electric field amplitude array at each time (V/m).
    '''

    dt_s = 1 / sample_Hz
    time_s = np.arange(0, duration_s, dt_s)

    # alpha controls the decay and broadband shape;
    # derived from the target FWHM: alpha = ln(2) / FWHM
    alpha = np.log(2) / decay_s

    # beta controls the rise time and high-frequency content.
    # NOTE(review): the usual 10%-90% approximation is beta ~= 2.2 / rise_time;
    # 2.0035 is kept as-is to preserve the existing waveform shape — confirm intent.
    beta = 2.0035 / rise_s

    # time of the pulse peak, needed to normalize the amplitude
    t_peak_s = np.log(beta / alpha) / (beta - alpha)

    # 'k_norm' factor normalizes the pulse peak to peak_Vm
    denominator = (np.exp(-alpha * t_peak_s) - np.exp(-beta * t_peak_s))
    if np.isclose(denominator, 0):
        # degenerate case (alpha ~= beta): fall back to the raw peak value
        k_norm = peak_Vm
    else:
        k_norm = peak_Vm / denominator

    # generate the E1 waveform
    E_t = k_norm * (np.exp(-alpha * time_s) - np.exp(-beta * time_s))

    # optionally add random gaussian noise
    E_t = apply_noise(E_t, noise)

    return time_s, E_t
74
+
75
def e2_waveform(E_peak=100, tr_us=1.5, tf_ms=1.0, sample_rate=1e6, duration_s=0.01, noise=0.0):
    '''Generates E2 H-EMP pulse waveform (lightning-like).'''
    t = np.arange(0, duration_s, 1 / sample_rate)

    # decay constant from the fall time (ms); rise constant from the rise time (us)
    alpha = 1 / (tf_ms * 1e-3)
    beta = 1 / (tr_us * 1e-6)

    # double exponential with a 1.1 factor on the peak target
    E_t = 1.1 * E_peak * (np.exp(-alpha * t) - np.exp(-beta * t))

    # optionally add random gaussian noise
    E_t = apply_noise(E_t, noise)
    return t, E_t
86
+
87
def e3_waveform(E_peak=40, t_peak_s=10, sample_rate=10, duration_s=500, noise=0.0):
    '''Generates E3 H-EMP pulse waveform (geostorm-like; magnetohydrodynamic).'''
    t = np.arange(0, duration_s, 1 / sample_rate)

    # simplified IEC 61000-2-9 E3 shape: slow 120 s decay minus a 20 s rise
    # NOTE: t_peak_s is currently unused by this simplified model
    E_t = E_peak * (np.exp(-t / 120) - np.exp(-t / 20))

    # optionally add random gaussian noise
    E_t = apply_noise(E_t, noise)
    return t, E_t
98
+
99
def calculate_rise_time(time_array, pulse_array):
    '''
    Objective:
    - Calculates the 10%-90% rise time of a double exponential pulse waveform.

    Inputs:
    - time_array: Pulse time array (s).
    - pulse_array: Pulse waveform array (V/m).

    Outputs:
    - rise_time: Rise time (10%-90%) of the pulse waveform.
    - t_90_percent: Time at the rising 90% amplitude crossing.
    - t_10_percent: Time at the rising 10% amplitude crossing.
    - Returns None if either threshold is never crossed.
    '''

    peak_amplitude = np.max(pulse_array)

    # calculate 10% and 90% thresholds
    threshold_10_percent = 0.1 * peak_amplitude
    threshold_90_percent = 0.9 * peak_amplitude

    def _first_crossing(threshold):
        '''Interpolated time of the first rising crossing of `threshold`.'''
        idx = np.where(pulse_array >= threshold)[0]
        if len(idx) == 0:
            return None
        i = idx[0]
        if i == 0:
            # pulse already starts at/above the threshold; there is no earlier
            # sample to interpolate against (the old code wrapped to index -1,
            # interpolating against the LAST sample and dividing by zero on
            # flat signals)
            return time_array[0]
        # linear interpolation between samples i-1 and i
        return time_array[i-1] + (threshold - pulse_array[i-1]) * \
               (time_array[i] - time_array[i-1]) / \
               (pulse_array[i] - pulse_array[i-1])

    t_10_percent = _first_crossing(threshold_10_percent)
    t_90_percent = _first_crossing(threshold_90_percent)
    if t_10_percent is None or t_90_percent is None:
        return None

    # calculate risetime
    rise_time = t_90_percent - t_10_percent

    return rise_time, t_90_percent, t_10_percent
148
+
149
def calculate_fwhm(time_array, pulse_array):
    '''
    Objective:
    - Calculates the full width at half maximum (FWHM) of a double exponential pulse waveform.

    Inputs:
    - time_array: Pulse time array (s).
    - pulse_array: Pulse waveform array (V/m).

    Outputs:
    - fwhm: Full-width half-max of the pulse waveform.
    - t_fwhm2: Time at the decaying half-max crossing.
    - t_fwhm1: Time at the rising half-max crossing.
    '''

    # half of the pulse's peak value
    half_max = np.max(pulse_array) / 2.0

    # samples at or above half max; the first/last mark the crossing region
    above = np.where(pulse_array >= half_max)[0]
    idx1, idx2 = above[0], above[-1]

    # rising-side crossing
    if idx1 == 0:
        # waveform already starts above half max
        t_fwhm1 = time_array[idx1]
    else:
        # linear interpolation between samples idx1-1 and idx1
        dt_dv = (time_array[idx1] - time_array[idx1-1]) / (pulse_array[idx1] - pulse_array[idx1-1])
        t_fwhm1 = time_array[idx1-1] + (half_max - pulse_array[idx1-1]) * dt_dv

    # decaying-side crossing
    if idx2 == len(pulse_array) - 1:
        # waveform ends above half max
        t_fwhm2 = time_array[idx2]
    else:
        # linear interpolation between samples idx2 and idx2+1
        dt_dv = (time_array[idx2+1] - time_array[idx2]) / (pulse_array[idx2+1] - pulse_array[idx2])
        t_fwhm2 = time_array[idx2] + (half_max - pulse_array[idx2]) * dt_dv

    # width between the two crossings
    return t_fwhm2 - t_fwhm1, t_fwhm2, t_fwhm1
198
+
199
def apply_shielding(f, shielding_dB, rolloff_hf=500e6, rolloff_lf=1e3):
    '''
    Complex shielding transfer function combining a flat dB attenuation with
    real-world LF (magnetic) and HF (leakage) rolloff physics.

    Inputs:
    - f: Frequency array (Hz).
    - shielding_dB: Nominal shielding effectiveness (dB).
    - rolloff_hf: High-frequency corner (Hz).
    - rolloff_lf: Low-frequency corner (Hz).
    '''
    # flat linear gain from the dB attenuation figure
    flat_gain = 10 ** (-shielding_dB / 20)

    # single-pole high-frequency rolloff (leakage)
    hf_term = 1 / (1 + 1j * (f / rolloff_hf))

    # low-frequency rolloff (magnetic); tiny offset avoids divide-by-zero at DC
    lf_term = 1 / (1 + (rolloff_lf / (f + 1e-12)))

    # total complex shielding function
    return flat_gain * hf_term * lf_term
217
+
218
def analyze_waveform(x=None, y=None, sample_rate=None, domain='time', method='complex',
                     tf_function=None, tf_kwargs=None, noise=0.0, verbose=True):
    '''
    Decomposes & analyzes the given signal waveform.

    Inputs:
    - x: Time array (s) when domain='time', else a frequency array (Hz).
    - y: Signal amplitude array matching x.
    - sample_rate: Sampling rate (Hz); inferred from x when omitted.
    - domain: 'time' (or 't') for time-domain input; anything else is treated as frequency-domain.
    - method: 'complex' (full FFT) or 'real' (rFFT) transform convention.
    - tf_function / tf_kwargs: Optional transfer function applied to y before analysis.
    - noise: Gaussian noise level passed to apply_noise (0.0 = clean).
    - verbose: When True, renders the diagnostic dashboard plot.

    Outputs:
    - temporal: Dict of time-domain data ('time_s', 'amplitude').
    - spectral: Dict of positive frequency-domain data ('freq', 'signal', 'esd', 'energy').
    - metrics: Dict of scalar results (peaks, energy, bandwidth, rise/FWHM times, ...).
    '''

    # clean inputs
    x = np.array(x)
    y = np.array(y).flatten()
    domain = 'time' if domain.lower() in ['t', 'time'] else 'freq'
    if method not in ('complex', 'real'):
        # the old code silently fell through to a NameError here
        raise ValueError("method must be 'complex' or 'real'")

    # apply transfer function
    if tf_function:
        y = apply_transfer_function(x, y, tf_function, domain=domain, **(tf_kwargs or {}))

    # apply gaussian noise
    y = apply_noise(y, noise)

    # obtain both domains, transforming from whichever was supplied
    n = len(y)
    if domain == 'time':
        fs = 1 / (x[1] - x[0])
        if method == 'complex':
            y_f = np.fft.fft(y) / n
            freqs = np.fft.fftfreq(n, 1/fs)
        else:  # 'real'
            y_f = np.fft.rfft(y) / n
            freqs = np.fft.rfftfreq(n, 1/fs)
        y_t, t = y, x
    else:
        # NOTE(review): when sample_rate is omitted, fs is inferred from the
        # midpoint of the frequency axis — confirm this matches the caller's layout
        fs = sample_rate if sample_rate else x[len(x)//2] * 2
        if method == 'complex':
            y_t = np.fft.ifft(y).real * n
        else:  # 'real'
            y_t = np.fft.irfft(y).real * n
        t = np.arange(0, n) / fs
        y_f, freqs = y, x

    # positive half of frequencies
    mask = freqs >= 0
    f_pos = freqs[mask]
    yf_pos = y_f[mask]

    # energy spectral density (one-sided, so doubled); DC bin only exists once
    esd = (np.abs(yf_pos)**2) * (2 / fs)
    esd[0] = esd[0] / 2

    # if real, the last bin (nyquist) also only exists once
    if method == 'real' and len(esd) > 0:
        esd[-1] = esd[-1] / 2

    df_freq = f_pos[1] - f_pos[0]
    cumul_energy = np.cumsum(esd) * df_freq
    total_energy = cumul_energy[-1]

    # time-domain summary
    temporal = {
        'time_s': t,
        'amplitude': y_t
    }

    # frequency-domain summary
    spectral = {
        'freq': f_pos,
        'signal': yf_pos,
        'esd': esd,
        'energy': cumul_energy
    }

    # max gradient (per-sample finite difference scaled by the sample rate)
    dv_dt = np.diff(y_t) * fs

    # action integral: integral of y^2 dt
    action_integral = np.trapezoid(y_t**2, t)

    # rise time / FWHM of the envelope
    rise_s, _, _ = calculate_rise_time(t, np.abs(y_t))
    rise_ns = 1e9 * rise_s
    fwhm_s, _, _ = calculate_fwhm(t, np.abs(y_t))
    fwhm_ns = 1e9 * fwhm_s

    # scalar metrics
    metrics = {
        'peak_t': np.max(np.abs(y_t)),
        'peak_f': np.max(np.abs(yf_pos)),
        'total_energy': total_energy,
        'action_integral': action_integral,
        'max_dv_dt': np.max(np.abs(dv_dt)),
        'bw_90_hz': f_pos[np.where(cumul_energy >= 0.9 * total_energy)[0][0]],
        'center_freq_hz': np.sum(f_pos * esd) / (total_energy + 1e-12),
        'papr_db': 10 * np.log10(np.max(y_t**2) / np.mean(y_t**2)),
        'sample_rate': fs,
        'rise90_ns': rise_ns,
        'fwhm_ns': fwhm_ns,
    }

    # diagnostic dashboard plot
    if verbose:
        plot_diagnostic_dashboard(temporal, spectral, metrics)

    return temporal, spectral, metrics
329
+
330
def apply_transfer_function(x, y, tf_function, domain='freq', **kwargs):
    '''
    Apply a transfer function to a signal in either domain.

    x: Time or frequency array.
    y: Input signal.
    tf_function: The transfer function.
    domain: 'freq' (multiply by H(f)) or 'time' (convolve with h(t)).
    **kwargs: Arguments passed to the transfer function.
    '''
    if domain == 'freq':
        # frequency domain: pointwise multiplication by H(|f|)
        return y * tf_function(np.abs(x), **kwargs)

    if domain == 'time':
        # time domain: convolution with the impulse response, scaled by dt
        impulse_response = tf_function(x, **kwargs)
        convolved = np.convolve(y, impulse_response, mode='same')
        return convolved * (x[1] - x[0])

    raise ValueError('Unrecognized signal domain.')
350
+
351
def plot_diagnostic_dashboard(temporal, spectral, metrics):
    '''
    6-plot diagnostic dashboard:
        Time Domain       | Frequency Magnitude
        Phase Spectrum    | Energy Spectral Density (ESD)
        Cumulative Energy | Spectrogram
    '''

    # 3 rows, 2 columns
    fig = plt.figure(figsize=(16, 20))
    gs = fig.add_gridspec(3, 2, hspace=0.3, wspace=0.3)

    ax_time = fig.add_subplot(gs[0, 0])
    ax_freq = fig.add_subplot(gs[0, 1])
    ax_phase = fig.add_subplot(gs[1, 0])
    ax_esd = fig.add_subplot(gs[1, 1])
    ax_cum = fig.add_subplot(gs[2, 0])
    ax_spec = fig.add_subplot(gs[2, 1])

    # mask for positive frequencies to ensure dimensions match
    pos_mask = spectral['freq'] >= 0
    f_mhz = spectral['freq'][pos_mask] / 1e6
    mag_pos = np.abs(spectral['signal'])[pos_mask]

    # time domain
    # (format() instead of nested same-quote f-string: the latter is a
    # SyntaxError on Python < 3.12)
    peak_label = 'Peak: {:.2f}'.format(metrics['peak_t'])
    ax_time.plot(temporal['time_s']*1e6, temporal['amplitude'], color='cyan', label=peak_label)
    ax_time.set_title('Time-Domain Signal')
    ax_time.set_xlabel(r'Time ($\mu s$)')
    ax_time.set_ylabel('Amplitude')
    ax_time.legend()

    # frequency domain; use the masked magnitude so x/y lengths always match
    ax_freq.semilogy(f_mhz, mag_pos, color='violet')
    ax_freq.set_title('Frequency-Domain Signal')
    ax_freq.set_xlabel('Frequency (MHz)')
    max_f = metrics['sample_rate'] / 2e6  # nyquist in MHz — upper display limit
    ax_freq.set_xlim(0, min(1000, max_f))
    ax_freq.grid(alpha=0.2, which='both')

    # phase spectrum
    ax_phase.plot(f_mhz, np.angle(spectral['signal'][pos_mask]), color='limegreen', linewidth=0.5)
    ax_phase.set_title('Phase Spectrum')
    ax_phase.set_xlabel('Frequency (MHz)')
    ax_phase.set_ylabel('Phase (rad)')
    ax_phase.set_xlim(ax_freq.get_xlim())

    # energy spectral density (ESD)
    ax_esd.semilogy(f_mhz, spectral['esd'][pos_mask], color='gold')
    ax_esd.set_title('Energy Spectral Density (ESD)')
    ax_esd.set_ylabel(r'$V^2 \cdot s / Hz$')
    ax_esd.set_xlabel('Frequency (MHz)')
    ax_esd.set_xlim(ax_freq.get_xlim())

    # cumulative energy distribution; convert the 90% bandwidth to MHz so both
    # the marker position and its label agree with the axis units
    energy_pos = spectral['energy'][pos_mask]
    bw_mhz = metrics['bw_90_hz'] / 1e6
    ax_cum.plot(f_mhz, energy_pos, color='gold', linewidth=2)
    ax_cum.fill_between(f_mhz, energy_pos, color='gold', alpha=0.2)
    ax_cum.axvline(bw_mhz, color='red', linestyle='--',
                   label='90% Band: {:.1f} MHz'.format(bw_mhz))
    ax_cum.set_title('Cumulative Energy')
    ax_cum.set_ylabel('Normalized Energy')
    ax_cum.set_xlabel('Frequency (MHz)')
    ax_cum.set_xlim(ax_freq.get_xlim())
    ax_cum.legend(fontsize='small')

    # spectrogram (amplitude must be real)
    sample_rate = metrics['sample_rate']
    y_signal = np.real(temporal['amplitude'])

    # window ~1% of the record, snapped to a power of two with a floor of 16
    window_duration_s = 0.01 * (temporal['time_s'].max() - temporal['time_s'].min())
    raw_n = (1 / 1.15) * (window_duration_s * sample_rate)
    if raw_n <= 1:
        nperseg = 16  # minimum window
    else:
        # round to the nearest power of 2, but never below the minimum
        nperseg = max(int(2 ** np.round(np.log2(raw_n))), 16)

    # 50% overlap; always less than nperseg
    noverlap = nperseg // 2
    f, t_spec, Sxx = signal.spectrogram(y_signal, fs=sample_rate, window='hann',
                                        nperseg=nperseg, noverlap=noverlap)

    im = ax_spec.pcolormesh(t_spec*1e6, f, 10*np.log10(Sxx + epsilon),
                            shading='gouraud', cmap='plasma')

    ax_spec.set_yscale('log')
    # NOTE(review): the upper ylim divides a frequency by sample_rate, giving a
    # normalized (<= 0.5) value on an axis otherwise in Hz — confirm intent
    ax_spec.set_ylim(np.abs(spectral['freq']).min() + epsilon, np.abs(spectral['freq'].max() / sample_rate))

    plt.show()
@@ -1,11 +1,11 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: hdim_opt
3
- Version: 1.2.3
3
+ Version: 1.3.0
4
4
  Summary: Optimization toolkit for high-dimensional, non-differentiable problems.
5
5
  Author-email: Julian Soltes <jsoltes@regis.edu>
6
6
  License: MIT
7
7
  Project-URL: Repository, https://github.com/jgsoltes/hdim_opt
8
- Keywords: optimization,high-dimensional,sampling,QUASAR,HDS
8
+ Keywords: optimization,high-dimensional,sampling,QUASAR,hyperellipsoid
9
9
  Classifier: Programming Language :: Python :: 3
10
10
  Classifier: License :: OSI Approved :: MIT License
11
11
  Classifier: Operating System :: OS Independent
@@ -28,13 +28,14 @@ Requires-Dist: SALib; extra == "sensitivity"
28
28
 
29
29
  # hdim-opt: High-Dimensional Optimization Toolkit
30
30
 
31
- A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm and HDS exploitative QMC sampler.
31
+ A modern optimization package to accelerate convergence in complex, high-dimensional problems. Includes the QUASAR evolutionary algorithm, HDS exploitative QMC sampler, Sobol sensitivity analysis, and signal waveform decomposition.
32
32
 
33
33
  All core functions, listed below, are single-line executable and require three essential parameters: [obj_function, bounds, n_samples].
34
34
  * **quasar**: QUASAR optimization for high-dimensional, non-differentiable problems.
35
35
  * **hds**: Generate an exploitative HDS sequence, to distribute samples in focused regions.
36
36
  * **sobol**: Generate a uniform Sobol sequence (via SciPy).
37
37
  * **sensitivity**: Perform Sobol sensitivity analysis to measure each variable's importance on objective function results (via SALib).
38
+ * **waveform**: Decompose the input waveform array (handles time- and frequency-domain via FFT / IFFT) into a diagnostic summary.
38
39
 
39
40
  ---
40
41
 
@@ -52,15 +53,21 @@ pip install hdim_opt
52
53
  import hdim_opt as h
53
54
 
54
55
  # Parameter Space
55
- n_dimensions = 100
56
+ n_dimensions = 30
56
57
  bounds = [(-100,100)] * n_dimensions
57
58
  n_samples = 1000
58
59
  obj_func = h.test_functions.rastrigin
60
+ time, pulse = h.waveform_analysis.e1_waveform()
59
61
 
62
+ # Functions
60
63
  solution, fitness = h.quasar(obj_func, bounds)
61
64
  sens_matrix = h.sensitivity(obj_func, bounds)
65
+
62
66
  hds_samples = h.hds(n_samples, bounds)
63
67
  sobol_samples = h.sobol(n_samples, bounds)
68
+ isotropic_samples = h.isotropize(sobol_samples)
69
+
70
+ signal_data = h.waveform(x=time,y=pulse)
64
71
  ```
65
72
 
66
73
  ## QUASAR Optimizer
@@ -0,0 +1,12 @@
1
+ hdim_opt/__init__.py,sha256=CeLrf9egFUScaSirq3gkT8WXcvw0F58L3b_i6oT8Egs,1894
2
+ hdim_opt/hyperellipsoid_sampling.py,sha256=WYlFJxeLKUf8frrl1vM4b28WRJ8zpneKn1NitSPtQMI,24356
3
+ hdim_opt/quasar_helpers.py,sha256=zcoBggHfc6XhZFlJFJy6p0MGVsQwHa6nMudWauT5Vzo,16255
4
+ hdim_opt/quasar_optimization.py,sha256=U7a4ZbgsNJ24V7sff710D7LGYsUWE0CVNoZCmjahZmE,32329
5
+ hdim_opt/sobol_sampling.py,sha256=Xe_Zzs13xMxCben17gT85lFsoV-GKVOAAgi7lMxnlBI,912
6
+ hdim_opt/sobol_sensitivity.py,sha256=HmwqNSDvofX5u1hOayLgjMZOdjQdOHYHzrOLxMTfhhc,4390
7
+ hdim_opt/test_functions.py,sha256=RqjKYIiwAqWplGUsH4oPHLBrVdnLRyw7f0dJX5iyJ4g,2821
8
+ hdim_opt/waveform_analysis.py,sha256=EV87W4D1rJ31x2X5M2_uFaCFDLhhnMwIft-0djCNXoI,15083
9
+ hdim_opt-1.3.0.dist-info/METADATA,sha256=2HJSVNmCZ_zYYrkMHNYo2s53UR3eLQXmi2t-J77IaeQ,3481
10
+ hdim_opt-1.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
11
+ hdim_opt-1.3.0.dist-info/top_level.txt,sha256=1KtWo9tEfEK3GC8D43cwVsC8yVG2Kc-9pl0hhcDjw4o,9
12
+ hdim_opt-1.3.0.dist-info/RECORD,,
@@ -1,11 +0,0 @@
1
- hdim_opt/__init__.py,sha256=ydO-uyV2qPK9pRTUJXKmmiQ-aUjleHpptXQG1SabgfA,477
2
- hdim_opt/hyperellipsoid_sampling.py,sha256=c34JkVciZbdAXjdfNjfC4h5NsrT2CD7Epsxpef5a1xY,24625
3
- hdim_opt/quasar_helpers.py,sha256=zTgar2EuWs4MLSLEO7HRcP7At1xbXLP3q4Gg7-GrggQ,14799
4
- hdim_opt/quasar_optimization.py,sha256=6OSjh49MyHzxK1UY9YIgQ2a45VOAqNQLG7cccisFWxM,32334
5
- hdim_opt/sobol_sampling.py,sha256=Xe_Zzs13xMxCben17gT85lFsoV-GKVOAAgi7lMxnlBI,912
6
- hdim_opt/sobol_sensitivity.py,sha256=1ebeDSTmcLn03_MKDGiyJJ7r_ZSNCq2AKNcTX-hI23A,4384
7
- hdim_opt/test_functions.py,sha256=RqjKYIiwAqWplGUsH4oPHLBrVdnLRyw7f0dJX5iyJ4g,2821
8
- hdim_opt-1.2.3.dist-info/METADATA,sha256=enzCv-miFQOJvCVtw9mdE1JKXq1FU7s-HO6AGNVZffQ,3130
9
- hdim_opt-1.2.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
10
- hdim_opt-1.2.3.dist-info/top_level.txt,sha256=1KtWo9tEfEK3GC8D43cwVsC8yVG2Kc-9pl0hhcDjw4o,9
11
- hdim_opt-1.2.3.dist-info/RECORD,,