metacountregressor 0.1.304__py3-none-any.whl → 0.1.310__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their public registry. The information is provided for informational purposes only.
metacountregressor/helperprocess.py

@@ -4,7 +4,7 @@ import pandas as pd
  import csv
  import matplotlib.pyplot as plt
  from scipy import stats as st
- from sklearn.preprocessing import StandardScaler
+ from sklearn.preprocessing import StandardScaler, MinMaxScaler
  import os
  import shutil
  plt.style.use('https://github.com/dhaitz/matplotlib-stylesheets/raw/master/pitayasmoothie-dark.mplstyle')
@@ -413,6 +413,10 @@ def transform_dataframe(df, config):
  # Apply custom function
  data = data.apply(settings['apply_func'])
  output_df[column] = data
+ elif settings['type'] == 'normalized':
+ # Normalize the column
+ scaler = MinMaxScaler
+ output_df[column] = scaler.fit_transform(df[[column]]).flatten()

  elif settings['type'] == 'none':
  # Leave the column unchanged
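
Note on the new 'normalized' branch: as released, scaler = MinMaxScaler binds the class itself rather than an instance, so the fit_transform call on the next line would raise a TypeError. A minimal sketch of the intended transform (the column name is illustrative):

    from sklearn.preprocessing import MinMaxScaler
    import pandas as pd

    df = pd.DataFrame({'AADT': [120.0, 450.0, 900.0]})  # hypothetical column
    scaler = MinMaxScaler()                             # instantiate before use
    normalized = scaler.fit_transform(df[['AADT']]).flatten()  # 2-D in, 1-D out: [0., 0.423, 1.]
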
@@ -447,7 +451,7 @@ def guess_column_type(column_name, series):
  # Otherwise, fallback to continuous standardization
  return {
  'type': 'continuous',
- 'apply_func': (lambda x: (x - series.mean()) / series.std()) # Z-Score Standardization
+ 'apply_func': (lambda x: ((x - series.mean()) / series.std()) + abs(((series - series.mean()) / series.std()).min()) + 0.001)
  }
  else:
  # Default fallback (leave the column unchanged)
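
The replacement apply_func is a shifted z-score: after standardizing, it adds the absolute value of the smallest standardized value plus 0.001, so every transformed value is strictly positive (which matters if a later step takes logs or fits a count model). A worked example of the same arithmetic:

    import pandas as pd

    series = pd.Series([2.0, 4.0, 6.0])
    z = (series - series.mean()) / series.std()  # plain z-scores: [-1.0, 0.0, 1.0]
    shifted = z + abs(z.min()) + 0.001           # [0.001, 1.001, 2.001], all strictly positive
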
metacountregressor/main.py

@@ -187,7 +187,7 @@ def main(args, **kwargs):
  a_des, df = helperprocess.set_up_analyst_constraints(df, model_terms)
  # some example argument, these are defualt so the following line is just for claritity
  args = {'algorithm': 'hs', 'test_percentage': 0.15, 'test_complexity': 6, 'instance_number': 1,
- 'val_percentage': 0.15, 'obj_1': 'bic', '_obj_2': 'RMSE_TEST', "MAX_TIME": 6, 'desicions':a_des}
+ 'val_percentage': 0.15, 'obj_1': 'bic', '_obj_2': 'RMSE_TEST', "MAX_TIME": 600, 'desicions':a_des, 'is_multi': 1}
  # Fit the model with metacountregressor
  # Step 5: Transform the dataset based on the configuration
  #data_new = helperprocess.transform_dataframe(dataset, config)
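
These argument changes raise the search budget (MAX_TIME from 6 to 600) and switch on multi-objective mode, so both obj_1 ('bic') and _obj_2 ('RMSE_TEST') are tracked. A minimal sketch of how the dictionary feeds the classes changed later in this diff; the exact wiring inside main() is an assumption:

    obj = ObjectiveFunction(X, y, **args)  # consumes obj_1, _obj_2, is_multi, MAX_TIME, ...
    solver = HarmonySearch(obj, **args)    # 'algorithm': 'hs'; stamps obj.algorithm = 'hs'
    # A SimulatedAnnealing or DifferentialEvolution solver would tag 'sa' or 'de' instead.
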
metacountregressor/metaheuristics.py

@@ -405,6 +405,7 @@ class DifferentialEvolution(object):
  """

  def __init__(self, objective_function, **kwargs):
+ objective_function.algorithm = 'de'
  self._obj_fun = objective_function
  if self._obj_fun._obj_1 is None:
  print('no objective found, automatically selecting BIC')
@@ -421,6 +422,7 @@ class DifferentialEvolution(object):
  self.iter = kwargs.get('_max_iter', 10000)
  self.cr = kwargs.get('_crossover_perc') or kwargs.get('_cr', 0.2)
  self.instance_number = str(kwargs.get('instance_number', 1))
+ self.instance_number = objective_function.instance_number
  self.get_directory()

  self._population = list()
@@ -780,7 +782,7 @@ class SimulatedAnnealing(object):
  """

  def __init__(self, objective_function, **kwargs):
-
+ objective_function.algorithm = 'sa'
  self._STEPS_PER_TEMP = int(kwargs.get('STEPS_PER_TEMP', 2)) or int(kwargs.get('_ts', 2))
  self._INITAL_ACCEPT_RATE = float(kwargs.get('INTL_ACPT', 0.5))
  self._NUM_INITIAL_SLNS = int(kwargs.get('_num_intl_slns', 20))
@@ -1247,6 +1249,7 @@ class HarmonySearch(object):
  """
  Initialize HS with the specified objective function. Note that this objective function must implement ObjectiveFunctionInterface.
  """
+ objective_function.algorithm = 'hs'
  self._obj_fun = objective_function
  ## NEW CODE, TRYING TO EXCTACT OUT THE PARAMATERS
  self._hms = kwargs.get('_hms', 20)
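
The three metaheuristics hunks above apply one pattern: each constructor stamps the shared objective with its own algorithm code ('de', 'sa', 'hs'), so downstream reporting, such as the table caption built in solution.py, can name the algorithm that found a model. A minimal sketch of the pattern, with hypothetical class names:

    class Objective:
        algorithm = None            # set by whichever solver takes ownership

    class Solver:
        CODE = 'hs'                 # 'de' and 'sa' solvers use their own codes
        def __init__(self, objective_function, **kwargs):
            objective_function.algorithm = self.CODE  # tag before storing the reference
            self._obj_fun = objective_function
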
metacountregressor/solution.py

@@ -30,6 +30,7 @@ from scipy.special import gammaln
  from sklearn.metrics import mean_absolute_error as MAE
  from sklearn.metrics import mean_squared_error as MSPE
  from statsmodels.tools.numdiff import approx_fprime, approx_hess
+
  from sklearn.preprocessing import StandardScaler, MinMaxScaler
  from texttable import Texttable
  import time
@@ -123,9 +124,10 @@ class ObjectiveFunction(object):

  def __init__(self, x_data, y_data, **kwargs):
  self.gbl_best = 1000000.0
+ self.run_numerical_hessian = kwargs.get('r_nu_hess', False)
  self.run_bootstrap = kwargs.get('run_bootstrap', False)
  self.linear_regression = kwargs.get('linear_model', False)
- self.reg_penalty = 1
+ self.reg_penalty = kwargs.get('reg_penalty',1)
  self.power_up_ll = False
  self.nb_parma = 1
  self.bic = None
@@ -176,7 +178,6 @@ class ObjectiveFunction(object):
  self._max_imp = kwargs.get('_max_imp', 90000000)
  self._WIC = kwargs.get("WIC",10000) # Number of Iterations without Multiobjective Improvement #tod chuck into solution
  self._panels = None
- self.is_multi = True
  self.method_ll = 'Nelder-Mead-BFGS'

  self.method_ll = 'L-BFGS-B' # alternatives 'BFGS_2', 'BFGS
@@ -187,7 +188,7 @@ class ObjectiveFunction(object):
  self.MP = 0
  # Nelder-Mead-BFGS

- self._max_characteristics = kwargs.get('_max_vars', 30)
+ self._max_characteristics = kwargs.get('_max_vars', 90)

  self.beta_dict = dict
  if 'model_terms' in kwargs:
@@ -454,7 +455,7 @@ class ObjectiveFunction(object):
  self._transformations = kwargs.get('_transformations', ["no", "log", "sqrt", "arcsinh", "nil"])
  # self._distribution = ['triangular', 'uniform', 'normal', 'ln_normal', 'tn_normal', 'lindley']

- self._distribution = kwargs.get('_distributions', ['triangular', 'uniform', 'normal', 'ln_normal', 'tn_normal'])
+ self._distribution = kwargs.get('_distributions', ['triangular', 'uniform', 'normal', 'tn_normal'])

  if self.G is not None:
  #TODO need to handle this for groups
@@ -612,11 +613,12 @@ class ObjectiveFunction(object):
  Function to for proceccing testing, and finding a suitable initial coefficient (linear intercept)
  """
  if hard_code:
+ # Grouped Terrs TODO
  manual_fit_spec = {
  'fixed_terms': ['Constant', 'US', 'RSMS', 'MCV'],
  'rdm_terms': ['RSHS:normal', 'AADT:normal', 'Curve50:normal'],
  'rdm_cor_terms': [],
- 'grouped_terms': [],
+ 'group_rdm': [],
  'hetro_in_means': [],
  'transformations': ['no', 'log', 'log', 'no', 'no', 'no', 'no'],
  'dispersion': 1
@@ -638,7 +640,7 @@ class ObjectiveFunction(object):
  'fixed_terms': ['const'],
  'rdm_terms': [],
  'rdm_cor_terms': [],
- 'grouped_terms': [],
+ 'group_rdm': [],
  'hetro_in_means': [],
  'transformations': ['no'],
  'dispersion': 1
@@ -1157,16 +1159,16 @@ class ObjectiveFunction(object):
  x, 2) for x in self.pvalues]
  signif_list = self.pvalue_asterix_add(self.pvalues)
  if model == 1:
-
- #self.coeff_[-1] = 1/np.exp(self.coeff_[-1])
+ # raise to the exponential
+ self.coeff_[-1] = np.maximum([np.exp(self.coeff_[-1]),2])
  if self.no_extra_param:
  self.coeff_ = np.append(self.coeff_, self.nb_parma)
  self.stderr = np.append(self.stderr, 0.00001)
  self.zvalues = np.append(self.zvalues, 50)

- elif self.coeff_[-1] < 0.25:
+ #elif self.coeff_[-1] < 0.25:
  #print(self.coeff_[-1], 'Warning Check Dispersion')
- print(f'dispession is para,aters {np.exp(self.coeff_[-1])}')
+ #print(f'dispession is para,aters {np.exp(self.coeff_[-1])}')
  #self.coeff_[-1] = np.exp(self.coeff_[-1]) # min possible value for negbinom

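Note on the new dispersion line: np.maximum requires two operands, so np.maximum([np.exp(self.coeff_[-1]), 2]) as released raises a TypeError. Judging from the comment and the nearby "min possible value for negbinom" note, the intent appears to be back-transforming the dispersion coefficient and flooring it; a hedged sketch of that reading, not the released code:

    import numpy as np
    # Assumed intent: exponentiate the log-dispersion estimate and keep it at least 2.
    coeff_last = np.maximum(np.exp(coeff_last), 2)
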
@@ -1226,6 +1228,7 @@ class ObjectiveFunction(object):
  if model is not None:
  caption_parts = []
  if self.algorithm is not None:
+
  caption_parts.append(
  f"{self._model_type_codes[model]} model found through the {self.algorithm} algorithm.")

@@ -1236,7 +1239,8 @@ class ObjectiveFunction(object):
  caption_parts.append(f"Log-Likelihood: {self.round_with_padding(self.log_lik, 2)}")

  if solution is not None:
- caption_parts.append(f"{self._obj_2}: {self.round_with_padding(solution[self._obj_2], 2)}")
+ if self.is_multi:
+ caption_parts.append(f"{self._obj_2}: {self.round_with_padding(solution[self._obj_2], 2)}")

  caption = " ".join(caption_parts)
  # print(latextable.draw_latex(table, caption=caption, caption_above = True))
@@ -5056,11 +5060,12 @@ class ObjectiveFunction(object):
  proba_ = proba_n.sum(axis =1)

  """""
- betas_last = betas[-1]
+ main_disper = self.get_dispersion_paramaters(betas, dispersion)
+

  # print(betas_last)
  proba_, proba_n = self.prob_obs_draws_all_at_once(
- eVd, np.atleast_3d(y), betas_last, dispersion)
+ eVd, np.atleast_3d(y), main_disper, dispersion)
  # self._prob_product_against_panels()

  # print(top_stats)
@@ -5600,13 +5605,42 @@ class ObjectiveFunction(object):
  return covariance


+ # Numerical Hessian (finite differences)
+ def numerical_hessian_post(self, f, theta, epsilon=1e-5):
+ n = len(theta)
+ hessian = np.zeros((n, n))
+ for i in range(n):
+ for j in range(n):
+ theta_ij_plus = theta.copy()
+ theta_ij_minus = theta.copy()
+ theta_ij_plus[i] += epsilon
+ theta_ij_plus[j] += epsilon
+ theta_ij_minus[i] += epsilon
+ theta_ij_minus[j] -= epsilon
+
+ f_ij_plus = f(theta_ij_plus)
+ f_ij_minus = f(theta_ij_minus)
+ f_original = f(theta)
+
+ hessian[i, j] = (f_ij_plus - 2 * f_original + f_ij_minus) / (epsilon ** 2)
+ return hessian
+
+
  def _post_fit_ll_aic_bic(self, optim_res, verbose=1, robust=False, simple_fit=True, is_dispersion=0):
  # sample_size = len(self._x_data) - len(optim_res['x']) -1
  sample_size = len(self._x_data)
  convergence = optim_res['success']
  coeff_ = optim_res['x']
  penalty = 0
+ stderr_opg = None
+ if self.run_numerical_hessian:
+
+ stderr_opg = self.stderr
+

+
+
+
  if 'hess_inv' in optim_res:
  covariance = self._robust_covariance(optim_res['hess_inv'], optim_res['grad_n']) \
  if robust else optim_res['hess_inv']
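
A caution on the added numerical_hessian_post: its stencil (f(theta+eps*e_i+eps*e_j) - 2f(theta) + f(theta+eps*e_i-eps*e_j)) / eps**2 is not the standard second difference. For i = j the two perturbed points collapse to theta+2*eps*e_i and theta, and for i != j the first-order gradient terms do not cancel, so the entries are not second derivatives away from a flat point. The textbook four-point central difference, shown here as a reference sketch rather than the package's method (statsmodels' approx_hess, already imported in this file, implements the same idea), is:

    import numpy as np

    def central_diff_hessian(f, theta, eps=1e-5):
        # H[i, j] approximates d^2 f / (d theta_i d theta_j) via four evaluations.
        theta = np.asarray(theta, dtype=float)
        n = theta.size
        H = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                tpp = theta.copy(); tpp[i] += eps; tpp[j] += eps
                tpm = theta.copy(); tpm[i] += eps; tpm[j] -= eps
                tmp = theta.copy(); tmp[i] -= eps; tmp[j] += eps
                tmm = theta.copy(); tmm[i] -= eps; tmm[j] -= eps
                H[i, j] = (f(tpp) - f(tpm) - f(tmp) + f(tmm)) / (4 * eps ** 2)
        return H

    # f(x, y) = x**2 * y: expects H = [[2y, 2x], [2x, 0]] = [[4, 6], [6, 0]] at (3, 2).
    H = central_diff_hessian(lambda t: t[0] ** 2 * t[1], [3.0, 2.0])
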
@@ -5615,9 +5649,11 @@ class ObjectiveFunction(object):
  covariance = self.handle_covariance(covariance)
  covariance = np.clip(covariance, 0, None)
  stderr = np.sqrt(np.diag(covariance))
- # stderr = [if np.abs(optim_res['x'][i]) >.1 else min(np.abs(optim_res['x'][i]/1.5), stderr[i]) for i in range(len(optim_res['x']))]
- # stderr = [if np.abs(optim_res['x'][i]) > 0.1 else min(np.abs(optim_res['x'][i]/1.5), stderr[i]) for i in range(len(optim_res['x']))]
- # stderr = [np.min(np.abs(optim_res['x'][i]/random.uniform(1.8, 3)), stderr[i]) if i > len(self.none_handler(self.fixed_fit)) and np.abs(optim_res['x'][i] > 0.2) else stderr[i] for i in range(len(optim_res['x']))]
+ if stderr_opg is not None:
+ stderr = np.minimum(stderr, stderr_opg)
+
+
+
  if is_dispersion:
  stderr[-1] = random.uniform(0.001, 0.005)

@@ -5910,6 +5946,9 @@ class ObjectiveFunction(object):
  else:
  self.draws = 0

+ def hessian_loglik_function(self, params, *args):
+ return self._loglik_gradient(params, *args)
+
  def _run_optimization(self, XX, y, dispersion, initial_params, bounds, tol, mod):
  """
  Run the optimization process with draws logic and update the Solution object.
@@ -5936,10 +5975,10 @@ class ObjectiveFunction(object):

  # Optimization method and options
  method = self.method_ll if bounds is None else 'L-BFGS-B'
- print('updataing methods')
+

  #method = 'Nelder-Mead-BFGS'
- options = {'gtol': tol['gtol'], 'ftol': tol['ftol'], 'maxiter': 4000}
+ options = {'gtol': tol['gtol'], 'ftol': tol['ftol'], 'maxiter': 20000}
  args=(
  X, y, draws, X, Xr, self.batch_size, self.grad_yes, self.hess_yes, dispersion, 0, False, 0,
  self.rdm_cor_fit, None, None, draws_grouped, XG, mod
@@ -5954,9 +5993,38 @@ class ObjectiveFunction(object):
  ),
  method=method,
  bounds=bounds,
- tol=tol.get('ftol', 1e-8), # Use 'ftol' as the default tolerance
+ tol=tol.get('ftol', 1e-6), # Use 'ftol' as the default tolerance
  options=options
  )
+ if optimization_result.message == 'NaN result encountered.':
+ optimization_result = self._minimize(self._loglik_gradient,
+ initial_params,
+ args=(
+ X, y, draws, X, Xr, self.batch_size, self.grad_yes, self.hess_yes, dispersion, 0, False, 0,
+ self.rdm_cor_fit, None, None, draws_grouped, XG, mod
+ ),
+ method='Nelder-Mead-BFGS',
+ bounds=bounds,
+ tol=tol.get('ftol', 1e-4), # Use 'ftol' as the default tolerance
+ options=options
+ )
+
+
+ if self.run_numerical_hessian:
+ std_errors = self.bootstrap_std_dev(
+ initial_params=optimization_result.x,
+ XX=XX,
+ y=y,
+ dispersion=dispersion,
+ bounds=bounds,
+ tol=tol,
+ mod=mod,
+ n_bootstraps=5
+ )
+ self.stderr = std_errors
+
+
+

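The first added block is a fallback: when L-BFGS-B aborts with 'NaN result encountered.', the fit is retried with the package's derivative-free 'Nelder-Mead-BFGS' method at a looser tolerance. The second block, gated by run_numerical_hessian, actually estimates standard errors from a handful of bootstrap refits. A minimal sketch of the retry pattern with plain SciPy (method names here are standard SciPy ones, not the package's custom hybrid):

    import numpy as np
    from scipy.optimize import minimize

    def loss(beta):  # stand-in objective; the package minimizes a negative log-likelihood
        return np.sum((beta - 1.0) ** 2)

    x0 = np.zeros(3)
    res = minimize(loss, x0, method='L-BFGS-B', tol=1e-6)
    if not res.success and 'NaN' in str(res.message):
        # a gradient-based step produced NaNs; retry derivative-free from the same start
        res = minimize(loss, x0, method='Nelder-Mead', tol=1e-4)
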
@@ -6030,8 +6098,8 @@ class ObjectiveFunction(object):
  ),
  method=self.method_ll,
  bounds=bounds,
- tol=tol.get('ftol', 1e-8), # Use 'ftol' as the default tolerance
- options={'gtol': tol['gtol'], 'ftol': tol['ftol'], 'maxiter': 2000}
+ tol=tol.get('ftol', 1e-6), # Use 'ftol' as the default tolerance
+ options={'gtol': tol['gtol'], 'ftol': tol['ftol'], 'maxiter': 200}
  )

  # Store the parameter estimates from this bootstrap iteration
@@ -6120,6 +6188,7 @@ class ObjectiveFunction(object):
  # Validation metrics if test data is available (in-sample and out-of-sample MAE)
  in_sample_mae = None
  out_sample_mae = None
+ out_sample_validation = None
  if self.is_multi and XX_test is not None:
  in_sample_mae = self.validation(
  optimization_result['x'], y, XX, dispersion=dispersion, model_nature=mod, testing=0
@@ -6127,13 +6196,17 @@ class ObjectiveFunction(object):
  out_sample_mae = self.validation(
  optimization_result['x'], y_test, XX_test, dispersion=dispersion, model_nature=mod
  )
+ if self.val_percentage > 0:
+ out_sample_validation = self.validation(
+ optimization_result['x'], y_test, XX_test, dispersion=dispersion, model_nature=mod, testing=1
+ )

- return log_ll, aic, bic, stderr, zvalues, pvalue_alt, in_sample_mae, out_sample_mae
+ return log_ll, aic, bic, stderr, zvalues, pvalue_alt, in_sample_mae, out_sample_mae, out_sample_validation

  else:
  # Optimization failed, return None for all metrics
  print("Optimization failed.")
- return None, None, None, None, None, None, None, None
+ return None, None, None, None, None, None, None, None, None
  def _prepare_data_and_bounds(self, mod, dispersion):
  """Prepare the data matrices, bounds, and initial parameters."""
  # Prepare data matrices
@@ -6223,8 +6296,9 @@ class ObjectiveFunction(object):

  # Dispersion adds one additional parameter if enabled
  dispersion_param = 1 if dispersion > 0 else 0
- return sum(self.get_num_params()) + dispersion_param
- #return k + kr + kg + kh + dispersion_param
+ total = sum(self.get_num_params()) + dispersion_param
+ return total
+
  def _build_initial_params(self, num_coefficients, dispersion):
  """
  Build the initial parameter array for optimization.
@@ -6237,11 +6311,11 @@ class ObjectiveFunction(object):
  Initial parameter array.
  """
  # Generate random initial coefficients
- initial_params = np.random.uniform(-.1, 0.1, size=num_coefficients)
+ initial_params = np.random.uniform(0.0000, 0.01, size=num_coefficients)
  parma_sum = sum(self.get_num_params()[:2])


- initial_params[parma_sum:-dispersion] =0.5
+ initial_params[parma_sum:-dispersion] =0.0001

  # Add dispersion parameter if applicable
  if dispersion > 0:
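
The initialisation change moves the random starting coefficients from roughly symmetric around zero (-0.1 to 0.1) to tiny non-negative values (0 to 0.01), and starts the block after the first two parameter groups (apparently the random-parameter spreads) at 0.0001 instead of 0.5. With the exponential mean function used by Poisson/NB count models, near-zero starts keep the fitted means close to 1, which avoids overflow in early likelihood evaluations. A sketch of the effect (dimensions are illustrative):

    import numpy as np

    k = 8
    init = np.random.uniform(0.0, 0.01, size=k)  # new scheme: tiny, non-negative starts
    X = np.random.randn(100, k)
    mu = np.exp(X @ init)                        # stays near 1, so log-likelihoods remain finite
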
@@ -6250,7 +6324,7 @@ class ObjectiveFunction(object):

  return initial_params

- def fitRegression(self, mod, dispersion=0, maxiter=4000, batch_size=None, num_hess=False, **kwargs):
+ def fitRegression(self, mod, dispersion=0, maxiter=20000, batch_size=None, num_hess=False, **kwargs):
  """
  Fits a Poisson regression, NB regression (dispersion=1), or GP regression (dispersion=2).

@@ -6292,10 +6366,10 @@ class ObjectiveFunction(object):
  )

  # Post-process results
- log_lik, aic, bic, stderr, zvalues, pvalues, in_sample_mae, out_sample_mae = self._postprocess_results(
+ log_lik, aic, bic, stderr, zvalues, pvalues, in_sample_mae, out_sample_mae, out_sample_val = self._postprocess_results(
  optimization_result, XX, XX_test, y, mod.get('y_test'), dispersion, mod
  )
-
+
  # Extract other outputs
  betas = optimization_result['x'] if optimization_result is not None else None
  is_halton = Xr is not None and Xr.size > 0 # Halton draws used if `Xr` is not empty
@@ -6324,10 +6398,14 @@ class ObjectiveFunction(object):

  # Add metrics to solution object
  sol = Solution() # Assuming Solution is the appropriate class to store results
+
  sol.add_objective(
  bic=bic,
  aic=aic,
  loglik=log_ll,
+ TRAIN=in_sample_mae,
+ TEST=out_sample_mae,
+ VAL=out_sample_val,
  num_parm=paramNum,
  GOF=other_measures
  )
@@ -7344,7 +7422,7 @@ class ObjectiveFunction(object):
  obj_1[self._obj_1] = 10 ** 100

  else:
- print('The model did not converge')
+ print('Did not converge')
  obj_1[self._obj_1] = 10 ** 100

  self.significant = 3
metacountregressor-0.1.310.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: metacountregressor
- Version: 0.1.304
+ Version: 0.1.310
  Summary: Extensive Testing for Estimation of Data Count Models
  Home-page: https://github.com/zahern/CountDataEstimation
  Author: Zeke Ahern
metacountregressor-0.1.310.dist-info/RECORD

@@ -3,18 +3,18 @@ metacountregressor/_device_cust.py,sha256=759fnKmTYccJm4Lpi9_1reurh6OB9d6q9soPR0
  metacountregressor/app_main.py,sha256=vY3GczTbGbBRalbzMkl_9jVW7RMgEOc6z2Dr1IZJv9c,10014
  metacountregressor/data_split_helper.py,sha256=M2fIMdIO8znUaYhx5wlacRyNWdQjNYu1z1wkE-kFUYU,3373
  metacountregressor/halton.py,sha256=jhovA45UBoZYU9g-hl6Lb2sBIx_ZBTNdPrpgkzR9fng,9463
- metacountregressor/helperprocess.py,sha256=ufdB6BcCIYN6btWdxyFlRCReuYEbVh6es1sdLsd8RTg,25917
- metacountregressor/main.py,sha256=xfpKN2w0kePHp_Q2HOPjtG15PLEN1L3sEnDw1PHBquw,23668
+ metacountregressor/helperprocess.py,sha256=8PFxX3KTsWH0MlfhniDzKQOJQ63LmJ0eg6cYhQP_fRA,26162
+ metacountregressor/main.py,sha256=tGOm8DdbdyDf316qIxDAre6l6GzfJIWYNYIBaSeIemI,23685
  metacountregressor/main_old.py,sha256=eTS4ygq27MnU-dZ_j983Ucb-D5XfbVF8OJQK2hVVLZc,24123
- metacountregressor/metaheuristics.py,sha256=bDyK5K4SFboWjaUIDlBR0H04a64XabXO_74SX0q_nsk,106813
+ metacountregressor/metaheuristics.py,sha256=P0Xjlvhp1cEwZFACrqeeets6x8BK7F2iDyu1OfS4bog,107010
  metacountregressor/pareto_file.py,sha256=whySaoPAUWYjyI8zo0hwAOa3rFk6SIUlHSpqZiLur0k,23096
  metacountregressor/pareto_logger__plot.py,sha256=mEU2QN4wmsM7t39GJ_XhJ_jjsdl09JOmG0U2jICrAkI,30037
  metacountregressor/setup.py,sha256=5UcQCCLR8Fm5odA3MX78WwahavxFq4mVD6oq0IuQvAY,936
  metacountregressor/single_objective_finder.py,sha256=jVG7GJBqzSP4_riYr-kMMKy_LE3SlGmKMunNhHYxgRg,8011
- metacountregressor/solution.py,sha256=j1jzIKn74LNvxjGyVJn9CUrUpvugM6I2duGRXTCOA3A,317500
+ metacountregressor/solution.py,sha256=h-sHkShA-P5oWJuowCRt0J2wixcxCgoL_tlyM2NCBxQ,319693
  metacountregressor/test_generated_paper2.py,sha256=pwOoRzl1jJIIOUAAvbkT6HmmTQ81mwpsshn9SLdKOg8,3927
- metacountregressor-0.1.304.dist-info/licenses/LICENSE.txt,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- metacountregressor-0.1.304.dist-info/METADATA,sha256=-Yn7gguicyi13SlwYXvUjrcC0gJPBw320AtbAjtk27M,23581
- metacountregressor-0.1.304.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
- metacountregressor-0.1.304.dist-info/top_level.txt,sha256=zGG7UC5WIpr76gsFUpwJ4En2aCcoNTONBaS3OewwjR0,19
- metacountregressor-0.1.304.dist-info/RECORD,,
+ metacountregressor-0.1.310.dist-info/licenses/LICENSE.txt,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ metacountregressor-0.1.310.dist-info/METADATA,sha256=ahasic-4LXNYf9FJiny3p97mdTCrFtsEISCm_J-1FAA,23581
+ metacountregressor-0.1.310.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
+ metacountregressor-0.1.310.dist-info/top_level.txt,sha256=zGG7UC5WIpr76gsFUpwJ4En2aCcoNTONBaS3OewwjR0,19
+ metacountregressor-0.1.310.dist-info/RECORD,,
metacountregressor-0.1.310.dist-info/WHEEL

@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (77.0.3)
+ Generator: setuptools (78.0.2)
  Root-Is-Purelib: true
  Tag: py3-none-any