voly 0.0.129__py3-none-any.whl → 0.0.130__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- voly/client.py +46 -0
- voly/core/hd.py +347 -0
- {voly-0.0.129.dist-info → voly-0.0.130.dist-info}/METADATA +1 -1
- {voly-0.0.129.dist-info → voly-0.0.130.dist-info}/RECORD +7 -7
- {voly-0.0.129.dist-info → voly-0.0.130.dist-info}/WHEEL +0 -0
- {voly-0.0.129.dist-info → voly-0.0.130.dist-info}/licenses/LICENSE +0 -0
- {voly-0.0.129.dist-info → voly-0.0.130.dist-info}/top_level.txt +0 -0
voly/client.py
CHANGED
@@ -368,3 +368,49 @@ class VolyClient:
             'x_surface': x_surface,
             'moments': moments
         }
+
+    @staticmethod
+    def get_garch_hd_surface(model_results: pd.DataFrame,
+                             df_hist: pd.DataFrame,
+                             domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
+                             return_domain: str = 'log_moneyness',
+                             n_fits: int = 400,
+                             simulations: int = 5000,
+                             window_length: int = 365,
+                             variate_parameters: bool = True,
+                             bandwidth: float = 0.15) -> Dict[str, Any]:
+        """
+        Generate historical density using GARCH(1,1) model and Monte Carlo simulation.
+
+        This method implements the approach from SPD Trading, using:
+        1. GARCH(1,1) model fit with sliding windows
+        2. Monte Carlo simulation with innovation resampling
+        3. Kernel density estimation of terminal prices
+
+        Parameters:
+            model_results: DataFrame with model parameters and maturities
+            df_hist: DataFrame with historical price data
+            domain_params: Tuple of (min, max, num_points) for x-domain
+            return_domain: Domain for x-axis values ('log_moneyness', 'moneyness', 'returns', 'strikes')
+            n_fits: Number of sliding windows for GARCH parameter estimation
+            simulations: Number of Monte Carlo simulations
+            window_length: Length of each sliding window for GARCH estimation
+            variate_parameters: Whether to vary GARCH parameters between simulations
+            bandwidth: Bandwidth for KDE of final density
+
+        Returns:
+            Dictionary containing pdf_surface, cdf_surface, x_surface, and moments
+        """
+        logger.info("Calculating GARCH historical density surface")
+
+        return get_garch_hd_surface(
+            model_results=model_results,
+            df_hist=df_hist,
+            domain_params=domain_params,
+            return_domain=return_domain,
+            n_fits=n_fits,
+            simulations=simulations,
+            window_length=window_length,
+            variate_parameters=variate_parameters,
+            bandwidth=bandwidth
+        )
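The new client method appears to be a thin pass-through to the `get_garch_hd_surface` function added to voly/core/hd.py in this same release. A minimal usage sketch follows; it is illustrative only, assumes `VolyClient` is importable from the package root, assumes the optional `arch` dependency is installed, and uses synthetic stand-ins for the `model_results` and `df_hist` inputs that the client's fitting and data utilities would normally produce.

```python
# Illustrative usage sketch (not from the diff); synthetic inputs stand in for real
# fit results and exchange history. Requires the `arch` package for the GARCH fits.
import numpy as np
import pandas as pd
from voly import VolyClient  # assumption: VolyClient is re-exported at the package root

# Synthetic daily close prices; in practice df_hist comes from the data utilities.
rng = np.random.default_rng(1)
df_hist = pd.DataFrame({'close': 100 * np.exp(np.cumsum(rng.normal(0, 0.02, 1200)))})

# Minimal stand-in for model_results: one maturity with spot 's', years 't', rate 'r'.
model_results = pd.DataFrame(
    {'s': [df_hist['close'].iloc[-1]], 't': [30 / 365.25], 'r': [0.0]},
    index=['30d'],
)

garch_hd = VolyClient.get_garch_hd_surface(
    model_results=model_results,
    df_hist=df_hist,
    n_fits=100,            # fewer sliding windows than the default, to keep the run short
    window_length=250,
    simulations=1000,
    return_domain='log_moneyness',
)
print(garch_hd['moments'])  # one row of density moments per maturity
```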
voly/core/hd.py
CHANGED
@@ -205,3 +205,350 @@ def get_hd_surface(model_results: pd.DataFrame,
     moments = pd.DataFrame(all_moments).T
 
     return pdf_surface, cdf_surface, x_surface, moments
+
+
+
+import numpy as np
+import pandas as pd
+from scipy import stats
+from typing import Dict, List, Tuple, Optional, Union, Any
+from voly.utils.logger import logger, catch_exception
+from voly.exceptions import VolyError
+
+
+class GARCHModel:
+    """
+    GARCH(1,1) model for volatility modeling and simulation.
+
+    Fits a GARCH(1,1) model to historical returns and simulates future paths
+    for historical density estimation.
+    """
+
+    def __init__(self,
+                 data: np.ndarray,
+                 data_name: str,
+                 n_fits: int = 400,
+                 window_length: int = 365,
+                 z_h: float = 0.1):
+        """
+        Initialize the GARCH model.
+
+        Args:
+            data: Array of log returns
+            data_name: Identifier for the dataset
+            n_fits: Number of sliding windows to use for parameter estimation
+            window_length: Length of each sliding window
+            z_h: Bandwidth factor for kernel density estimation of innovations
+        """
+        self.data = data
+        self.data_name = data_name
+        self.n_fits = n_fits
+        self.window_length = window_length
+        self.z_h = z_h
+
+        # Parameters to be created during fitting and simulation
+        self.parameters = None
+        self.e_process = None
+        self.z_process = None
+        self.sigma2_process = None
+        self.z_dens = None
+        self.simulated_log_returns = None
+        self.simulated_tau_mu = None
+
+    def fit(self):
+        """
+        Fit GARCH(1,1) model to historical data using sliding windows.
+
+        For each window, estimates parameters (ω, α, β) and extracts innovations.
+        """
+        from arch import arch_model
+
+        if len(self.data) < self.window_length + self.n_fits:
+            raise VolyError(
+                f"Not enough data points. Need at least {self.window_length + self.n_fits}, got {len(self.data)}")
+
+        start = self.window_length + self.n_fits
+        end = self.n_fits
+
+        parameters = np.zeros((self.n_fits, 4))
+        z_process = []
+        e_process = []
+        sigma2_process = []
+
+        logger.info(f"Fitting GARCH model with {self.n_fits} windows...")
+
+        for i in range(self.n_fits):
+            window = self.data[end - i - 1:start - i - 1]
+            data = window - np.mean(window)
+
+            model = arch_model(data, vol='GARCH', p=1, q=1)
+            GARCH_fit = model.fit(disp='off')
+
+            mu, omega, alpha, beta = [
+                GARCH_fit.params["mu"],
+                GARCH_fit.params["omega"],
+                GARCH_fit.params["alpha[1]"],
+                GARCH_fit.params["beta[1]"],
+            ]
+            parameters[i, :] = [mu, omega, alpha, beta]
+
+            if i == 0:
+                sigma2_tm1 = omega / (1 - alpha - beta)
+            else:
+                sigma2_tm1 = sigma2_process[-1]
+
+            e_t = data.tolist()[-1]  # last observed log-return
+            e_tm1 = data.tolist()[-2]  # previous observed log-return
+            sigma2_t = omega + alpha * e_tm1 ** 2 + beta * sigma2_tm1
+            z_t = e_t / np.sqrt(sigma2_t)
+
+            e_process.append(e_t)
+            z_process.append(z_t)
+            sigma2_process.append(sigma2_t)
+
+        self.parameters = parameters
+        self.e_process = e_process
+        self.z_process = z_process
+        self.sigma2_process = sigma2_process
+
+        # Kernel density estimation for innovations
+        z_dens_x = np.linspace(min(self.z_process), max(self.z_process), 500)
+        h_dyn = self.z_h * (np.max(z_process) - np.min(z_process))
+
+        # Use scipy's gaussian_kde for innovation distribution
+        kde = stats.gaussian_kde(np.array(z_process), bw_method=h_dyn)
+        z_dens_y = kde(z_dens_x)
+
+        self.z_dens = {"x": z_dens_x, "y": z_dens_y}
+
+        logger.info("GARCH model fitting complete")
+
+    def _GARCH_simulate(self, pars, horizon):
+        """
+        Simulate a single GARCH path to specified horizon.
+
+        Args:
+            pars: Tuple of (mu, omega, alpha, beta)
+            horizon: Number of steps to simulate
+
+        Returns:
+            Tuple of (sigma2_process, e_process) of simulated values
+        """
+        mu, omega, alpha, beta = pars
+        burnin = horizon * 2
+        sigma2 = [omega / (1 - alpha - beta)]
+        e = [self.data.tolist()[-1] - mu]  # last observed log-return mean adjusted
+
+        # Convert density to probability weights
+        weights = self.z_dens["y"] / np.sum(self.z_dens["y"])
+
+        for _ in range(horizon + burnin):
+            sigma2_tp1 = omega + alpha * e[-1] ** 2 + beta * sigma2[-1]
+            # Sample from the estimated innovation distribution
+            z_tp1 = np.random.choice(self.z_dens["x"], 1, p=weights)[0]
+            e_tp1 = z_tp1 * np.sqrt(sigma2_tp1)
+            sigma2.append(sigma2_tp1)
+            e.append(e_tp1)
+
+        return sigma2[-horizon:], e[-horizon:]
+
+    def _variate_pars(self, pars, bounds):
+        """
+        Add variation to GARCH parameters for simulation uncertainty.
+
+        Args:
+            pars: Array of mean parameters [mu, omega, alpha, beta]
+            bounds: Standard deviation bounds for parameters
+
+        Returns:
+            Array of slightly varied parameters
+        """
+        new_pars = []
+        for i, (par, bound) in enumerate(zip(pars, bounds)):
+            var = bound ** 2 / self.n_fits
+            new_par = np.random.normal(par, var, 1)[0]
+            if (new_par <= 0) and (i >= 1):
+                new_par = 0.01
+            new_pars.append(new_par)
+        return new_pars
+
+    def simulate_paths(self, horizon, simulations=5000, variate_parameters=True):
+        """
+        Simulate multiple GARCH paths using Monte Carlo.
+
+        Args:
+            horizon: Number of steps to simulate (days)
+            simulations: Number of Monte Carlo simulations
+            variate_parameters: Whether to add variation to GARCH parameters
+
+        Returns:
+            Tuple of (simulated_log_returns, simulated_tau_mu)
+        """
+        if self.parameters is None:
+            self.fit()
+
+        pars = np.mean(self.parameters, axis=0).tolist()  # [mu, omega, alpha, beta]
+        bounds = np.std(self.parameters, axis=0).tolist()
+
+        logger.info(f"Simulating {simulations} GARCH paths for {horizon} steps...")
+        logger.info(f"GARCH parameters: mu={pars[0]:.6f}, omega={pars[1]:.6f}, alpha={pars[2]:.6f}, beta={pars[3]:.6f}")
+
+        np.random.seed(42)  # For reproducibility
+
+        new_pars = pars.copy()  # start with unchanged parameters
+        simulated_log_returns = np.zeros(simulations)
+        simulated_tau_mu = np.zeros(simulations)
+
+        for i in range(simulations):
+            if ((i + 1) % (simulations // 10) == 0):
+                logger.info(f"Simulation progress: {i + 1}/{simulations}")
+
+            if ((i + 1) % (simulations // 20) == 0) and variate_parameters:
+                new_pars = self._variate_pars(pars, bounds)
+
+            sigma2, e = self._GARCH_simulate(new_pars, horizon)
+            simulated_log_returns[i] = np.sum(e)  # Sum log returns over horizon
+            simulated_tau_mu[i] = horizon * pars[0]  # Total drift
+
+        self.simulated_log_returns = simulated_log_returns
+        self.simulated_tau_mu = simulated_tau_mu
+
+        return simulated_log_returns, simulated_tau_mu
+
+
+@catch_exception
+def get_garch_hd_surface(model_results: pd.DataFrame,
+                         df_hist: pd.DataFrame,
+                         domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
+                         return_domain: str = 'log_moneyness',
+                         n_fits: int = 400,
+                         simulations: int = 5000,
+                         window_length: int = 365,
+                         variate_parameters: bool = True,
+                         bandwidth: float = 0.15) -> Dict[str, Any]:
+    """
+    Generate historical density surface using GARCH(1,1) model and Monte Carlo simulation.
+
+    Parameters:
+        model_results: DataFrame with model parameters and maturities
+        df_hist: DataFrame with historical price data (must have 'close' column)
+        domain_params: Tuple of (min, max, num_points) for x-domain
+        return_domain: Domain for x-axis values ('log_moneyness', 'moneyness', 'returns', 'strikes')
+        n_fits: Number of sliding windows for GARCH parameter estimation
+        simulations: Number of Monte Carlo simulations
+        window_length: Length of each sliding window for GARCH estimation
+        variate_parameters: Whether to vary GARCH parameters between simulations
+        bandwidth: Bandwidth for kernel density estimation of final density
+
+    Returns:
+        Dictionary containing pdf_surface, cdf_surface, x_surface, and moments
+    """
+    # Check if required columns are present
+    required_columns = ['s', 't', 'r']
+    missing_columns = [col for col in required_columns if col not in model_results.columns]
+    if missing_columns:
+        raise VolyError(f"Required columns missing in model_results: {missing_columns}")
+
+    # Calculate log returns from price history
+    log_returns = np.log(df_hist['close'] / df_hist['close'].shift(1)) * 100
+    log_returns = log_returns.dropna().values
+
+    pdf_surface = {}
+    cdf_surface = {}
+    x_surface = {}
+    all_moments = {}
+
+    # Process each maturity
+    for i in model_results.index:
+        # Get parameters for this maturity
+        s = model_results.loc[i, 's']  # Current spot price
+        r = model_results.loc[i, 'r']  # Risk-free rate
+        t = model_results.loc[i, 't']  # Time to maturity in years
+
+        tau_day = int(t * 365.25)  # Convert years to days
+
+        logger.info(f"Processing GARCH HD for maturity {i} (t={t:.4f} years, {tau_day} days)")
+
+        # Initialize GARCH model
+        garch_model = GARCHModel(
+            data=log_returns,
+            data_name=str(i),
+            n_fits=min(n_fits, len(log_returns) // 3),  # Ensure we have enough data
+            window_length=min(window_length, len(log_returns) // 3),
+            z_h=0.1
+        )
+
+        # Simulate paths
+        simulated_log_returns, simulated_tau_mu = garch_model.simulate_paths(
+            horizon=tau_day,
+            simulations=simulations,
+            variate_parameters=variate_parameters
+        )
+
+        # Convert to terminal prices
+        simulated_prices = s * np.exp(simulated_log_returns / 100 + simulated_tau_mu / 100)
+
+        # Convert to moneyness domain
+        simulated_moneyness = s / simulated_prices
+
+        # Get x domain grid based on requested return_domain
+        LM = np.linspace(domain_params[0], domain_params[1], domain_params[2])
+        M = np.exp(LM)  # Moneyness
+        R = M - 1  # Returns
+        K = s / M  # Strike prices
+
+        # Perform kernel density estimation in moneyness domain
+        kde = stats.gaussian_kde(simulated_moneyness, bw_method=bandwidth)
+        pdf_m = kde(M)
+
+        # Ensure density integrates to 1
+        dx = LM[1] - LM[0]
+        total_area = np.sum(pdf_m * dx)
+        pdf_m = pdf_m / total_area
+
+        # Transform to other domains as needed
+        pdf_lm = pdf_m * M  # Transform to log-moneyness domain
+        pdf_k = pdf_lm / K  # Transform to strike domain
+        pdf_r = pdf_lm / (1 + R)  # Transform to returns domain
+
+        # Calculate CDF
+        cdf = np.cumsum(pdf_lm) * dx
+        cdf = np.minimum(cdf / cdf[-1], 1.0)  # Normalize and cap at 1.0
+
+        # Select appropriate domain for return
+        if return_domain == 'log_moneyness':
+            x = LM
+            pdf = pdf_lm
+            moments = get_all_moments(x, pdf)
+        elif return_domain == 'moneyness':
+            x = M
+            pdf = pdf_m
+            moments = get_all_moments(x, pdf)
+        elif return_domain == 'returns':
+            x = R
+            pdf = pdf_r
+            moments = get_all_moments(x, pdf)
+        elif return_domain == 'strikes':
+            x = K
+            pdf = pdf_k
+            moments = get_all_moments(x, pdf)
+        else:
+            raise VolyError(f"Unsupported return_domain: {return_domain}")
+
+        # Store results
+        pdf_surface[i] = pdf
+        cdf_surface[i] = cdf
+        x_surface[i] = x
+        all_moments[i] = moments
+
+    # Create DataFrame with moments
+    moments = pd.DataFrame(all_moments).T
+
+    logger.info("GARCH historical density calculation complete")
+
+    return {
+        'pdf_surface': pdf_surface,
+        'cdf_surface': cdf_surface,
+        'x_surface': x_surface,
+        'moments': moments
+    }
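For orientation, the per-maturity calculation that `GARCHModel` and `get_garch_hd_surface` perform can be condensed as in the sketch below. It is illustrative only: the fixed GARCH(1,1) parameters and the Student-t innovation pool are hypothetical stand-ins for the values the module estimates from its sliding-window fits with the `arch` package.

```python
# Condensed sketch of the simulation-and-density step (hypothetical parameters, not fitted).
import numpy as np
from scipy import stats

np.random.seed(0)

# Hypothetical per-day GARCH(1,1) parameters for returns quoted in percent.
mu, omega, alpha, beta = 0.05, 0.02, 0.08, 0.90
# Hypothetical pool of standardized innovations to resample from (the module builds
# this pool from the residuals of its sliding-window GARCH fits).
z_pool = np.random.standard_t(df=5, size=400) / np.sqrt(5 / 3)

spot, horizon, n_sims = 100.0, 30, 2000
terminal_log_returns = np.zeros(n_sims)

for j in range(n_sims):
    sigma2 = omega / (1 - alpha - beta)   # start at the unconditional variance
    e, total = 0.0, 0.0
    for _ in range(horizon):
        sigma2 = omega + alpha * e ** 2 + beta * sigma2  # GARCH(1,1) variance recursion
        e = np.random.choice(z_pool) * np.sqrt(sigma2)   # resampled innovation times vol
        total += e
    terminal_log_returns[j] = total + horizon * mu       # add the drift back

# Terminal prices, moneyness, and a KDE-based density on a log-moneyness grid,
# mirroring the shape of the calculation in get_garch_hd_surface.
prices = spot * np.exp(terminal_log_returns / 100)       # returns were in percent
moneyness = spot / prices
kde = stats.gaussian_kde(moneyness, bw_method=0.15)

LM = np.linspace(-1.5, 1.5, 1000)
M = np.exp(LM)
dx = LM[1] - LM[0]
pdf_m = kde(M)
pdf_m /= np.sum(pdf_m * dx)   # normalize on the grid, as the module does
pdf_lm = pdf_m * M            # change of variables: moneyness -> log-moneyness
cdf = np.cumsum(pdf_lm) * dx
cdf = np.minimum(cdf / cdf[-1], 1.0)
```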
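The density transforms in the new code (`pdf_lm = pdf_m * M`, `pdf_k = pdf_lm / K`, `pdf_r = pdf_lm / (1 + R)`) are the standard change-of-variable identities for `M = exp(LM)`, `K = s / M` and `R = M - 1`. A small self-contained check that each transformed density still integrates to one on its own grid (not package code; the log-normal density is an arbitrary example):

```python
# Numerical check of the change-of-variable identities used above.
import numpy as np
from scipy import stats
from scipy.integrate import trapezoid

s = 100.0
LM = np.linspace(-1.5, 1.5, 1000)   # log-moneyness grid
M = np.exp(LM)                      # moneyness
K = s / M                           # strikes
R = M - 1                           # simple returns

pdf_m = stats.lognorm(s=0.3).pdf(M)   # any smooth density in the moneyness domain
pdf_lm = pdf_m * M                    # moneyness -> log-moneyness
pdf_k = pdf_lm / K                    # log-moneyness -> strikes
pdf_r = pdf_lm / (1 + R)              # log-moneyness -> returns

print(trapezoid(pdf_m, M))    # ~1
print(trapezoid(pdf_lm, LM))  # ~1
print(-trapezoid(pdf_k, K))   # ~1 (K decreases along the grid, hence the sign flip)
print(trapezoid(pdf_r, R))    # ~1
```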
{voly-0.0.129.dist-info → voly-0.0.130.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 voly/__init__.py,sha256=8xyDk7rFCn_MOD5hxuv5cxxKZvBVRiSIM7TgaMPpwpw,211
-voly/client.py,sha256=
+voly/client.py,sha256=yjElxRwONsyjhYaoKQ6dpqlVJ4vppXlshE0r171ng6I,15575
 voly/exceptions.py,sha256=PBsbn1vNMvKcCJwwJ4lBO6glD85jo1h2qiEmD7ArAjs,92
 voly/formulas.py,sha256=G_soRiPwQlHy6milOAj6TdmBWr-fNZpMvm0joXAMZ90,10767
 voly/models.py,sha256=o-pHujGfr5Gn8ItckMzLI4Q8yaX9FQaV8UjCxv2zgTY,3364
@@ -7,13 +7,13 @@ voly/core/__init__.py,sha256=bu6fS2I1Pj9fPPnl-zY3L7NqrZSY5Zy6NY2uMUvdhKs,183
 voly/core/charts.py,sha256=E21OZB5lTY4YL2flgaFJ6s5g3_ExtAQT2zryZZxLPyM,12735
 voly/core/data.py,sha256=pDeuYhP0GX4RbtlqByvsE3rfHcIkix0BU5MLW8sKIeI,8935
 voly/core/fit.py,sha256=Tb9eeG7e_2dQTcqt6aqEwFrZdy6jR9rSNqe6tzOdVhQ,9245
-voly/core/hd.py,sha256=
+voly/core/hd.py,sha256=EgecHfptfOR39DlKQMJDxiDwJIvGIMTNPZxhnS7OayI,19957
 voly/core/interpolate.py,sha256=JkK172-FXyhesW3hY4pEeuJWG3Bugq7QZXbeKoRpLuo,5305
 voly/core/rnd.py,sha256=0VE77lxesx_BPAO46QwKpcauZNaHnPTiDhmRbSURn3c,10022
 voly/utils/__init__.py,sha256=E05mWatyC-PDOsCxQV1p5Xi1IgpOomxrNURyCx_gB-w,200
 voly/utils/logger.py,sha256=4-_2bVJmq17Q0d7Rd2mPg1AeR8gxv6EPvcmBDMFWcSM,1744
-voly-0.0.
-voly-0.0.
-voly-0.0.
-voly-0.0.
-voly-0.0.
+voly-0.0.130.dist-info/licenses/LICENSE,sha256=wcHIVbE12jfcBOai_wqBKY6xvNQU5E909xL1zZNq_2Q,1065
+voly-0.0.130.dist-info/METADATA,sha256=d7uTEFck6kosnM29aDW0jyg8g8CgPNo9PD_a7yZABVw,4115
+voly-0.0.130.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
+voly-0.0.130.dist-info/top_level.txt,sha256=ZfLw2sSxF-LrKAkgGjOmeTcw6_gD-30zvtdEY5W4B7c,5
+voly-0.0.130.dist-info/RECORD,,
{voly-0.0.129.dist-info → voly-0.0.130.dist-info}/WHEEL
File without changes

{voly-0.0.129.dist-info → voly-0.0.130.dist-info}/licenses/LICENSE
File without changes

{voly-0.0.129.dist-info → voly-0.0.130.dist-info}/top_level.txt
File without changes