metacountregressor 0.1.231__tar.gz → 0.1.234__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {metacountregressor-0.1.231/metacountregressor.egg-info → metacountregressor-0.1.234}/PKG-INFO +1 -1
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/solution.py +53 -87
- {metacountregressor-0.1.231 → metacountregressor-0.1.234/metacountregressor.egg-info}/PKG-INFO +1 -1
- metacountregressor-0.1.234/version.txt +1 -0
- metacountregressor-0.1.231/version.txt +0 -1
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/LICENSE.txt +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/MANIFEST.in +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/README.md +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/README.rst +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/__init__.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/_device_cust.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/app_main.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/data_split_helper.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/halton.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/helperprocess.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/main.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/main_old.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/metaheuristics.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/pareto_file.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/pareto_logger__plot.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/setup.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/single_objective_finder.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor/test_generated_paper2.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor.egg-info/SOURCES.txt +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor.egg-info/dependency_links.txt +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor.egg-info/not-zip-safe +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor.egg-info/requires.txt +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/metacountregressor.egg-info/top_level.txt +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/setup.cfg +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/setup.py +0 -0
- {metacountregressor-0.1.231 → metacountregressor-0.1.234}/tests/test.py +0 -0
metacountregressor/solution.py  (+53 -87)

@@ -121,7 +121,7 @@ class ObjectiveFunction(object):
     """

     def __init__(self, x_data, y_data, **kwargs):
-
+        self.linear_regression = kwargs.get('linear_model', False)
         self.reg_penalty = 0
         self.power_up_ll = False
         self.nb_parma = 1
@@ -164,13 +164,13 @@ class ObjectiveFunction(object):
         self.generated_sln = set()
         self.ave_mae = 0
         # defalt paramaters for hs #TODO unpack into harmony search class
-        self.algorithm = 'hs' # 'sa' 'de' also avialable
+        self.algorithm = kwargs.get('algorithm', 'hs') # 'sa' 'de' also avialable
         self._hms = 20
-        self._max_time = kwargs.get('_max_time'
-        self._hmcr = kwargs.get('_hmcr'
+        self._max_time = kwargs.get('_max_time', 0.8* 60 * 60 * 24)
+        self._hmcr = kwargs.get('_hmcr', .5)
         self._par = 0.3 #dont think this gets useted
         self._mpai = 1
-        self._max_imp =
+        self._max_imp = kwargs.get('_max_imp', 90000000)
         self._WIC = kwargs.get("WIC",10000) # Number of Iterations without Multiobjective Improvement #tod chuck into solution
         self._panels = None
         self.is_multi = True
@@ -184,7 +184,7 @@ class ObjectiveFunction(object):
         self.MP = 0
         # Nelder-Mead-BFGS

-        self._max_characteristics = kwargs.get('_max_vars',
+        self._max_characteristics = kwargs.get('_max_vars', 30)

         self.beta_dict = dict
         if 'model_terms' in kwargs:
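Taken together, the hunks above turn several previously hard-coded settings into constructor keyword arguments, alongside the new linear_model switch. A minimal usage sketch assuming only the parameter names and defaults visible in this diff; the data arguments are placeholders (consult the package README for the expected container types), and the _max_time default of 0.8 * 60 * 60 * 24 presumably denotes seconds:

    import numpy as np
    from metacountregressor.solution import ObjectiveFunction

    x_data = np.random.rand(100, 4)        # placeholder design matrix
    y_data = np.random.poisson(2.0, 100)   # placeholder count outcomes

    obj = ObjectiveFunction(
        x_data, y_data,
        linear_model=False,   # new: score by mean squared error instead of a count GLM
        algorithm='sa',       # previously fixed to 'hs'; 'sa' and 'de' also available
        _max_time=60 * 60,    # search budget (default 0.8 * 60 * 60 * 24)
        _hmcr=0.5,            # harmony-search memory considering rate (default .5)
        _max_imp=90000000,    # iteration cap (default shown in the diff)
        _max_vars=30,         # maximum number of characteristics (default 30)
    )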
@@ -789,11 +789,7 @@ class ObjectiveFunction(object):
        data_names = list(set(b + a))

        print(data_names)
-
-        # explainer = shap.TreeExplainer(rf)
-        # shap_values = explainer.shap_values(self._x_data)
-        # shap.initjs()
-        # dis = shap.force_plot(explainer.expected_value, shap_values[0,:], self._x_data.iloc[0,:], matplotlib = True)
+

        return data_names

@@ -813,18 +809,18 @@ class ObjectiveFunction(object):
    def get_dispersion_paramaters(self, betas, dispersion):

        if dispersion == 0:
-            return None
+            return None
        elif dispersion == 2 or dispersion == 1:
            if self.no_extra_param:
-                return self.nb_parma
-            return betas[-1]
+                return self.nb_parma
+            return betas[-1]

        elif dispersion == 3:
-            return
+            return betas[-1]
        elif dispersion == 4:
-            return betas[-1]
+            return betas[-1]
        elif dispersion == 'poisson_lognormal':
-            return betas[-1]
+            return betas[-1]

    def reset_pvalue_conditions(self):
        self.initial_sig = .5 # pass the test of a single model
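For readers skimming the hunk above, the post-change behaviour of get_dispersion_paramaters can be summarised in a standalone sketch; the dispersion codes and return values are exactly those in the new lines, while nb_parma, no_extra_param and betas stand in for the instance attributes and coefficient vector:

    def dispersion_parameter_sketch(betas, dispersion, nb_parma=1, no_extra_param=False):
        # Poisson (0) has no dispersion parameter.
        if dispersion == 0:
            return None
        # NB / GP (1 or 2): fixed value when no extra parameter is estimated,
        # otherwise the last coefficient holds the dispersion term.
        if dispersion in (1, 2):
            return nb_parma if no_extra_param else betas[-1]
        # Codes 3, 4 and 'poisson_lognormal' now all return the last
        # coefficient; previously code 3 hit a bare return (None).
        if dispersion in (3, 4, 'poisson_lognormal'):
            return betas[-1]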
@@ -1410,20 +1406,7 @@ class ObjectiveFunction(object):


    def poisson_mean_get_dispersion(self, betas, X, y):
-        '''
-        eVy = self._loglik_gradient(betas, X, y, None, X, None, None, False, False, dispersion=0,
-                                    return_EV=True,
-                                    zi_list=None, draws_grouped=None, Xgroup=None)
-
-        print('trying thi instead')

-        '''
-
-        '''
-        nb_model = sm.GLM(y_long, x_long, family=sm.families.NegativeBinomial()).fit()
-        gamma = nb_model.scale
-
-        '''
        #poisson way
        try:
            num_panels, num_obs, num_features = X.shape # Dimensions of x
@@ -1547,9 +1530,7 @@ class ObjectiveFunction(object):
            y = np.tile(y, self.Ndraws).ravel()
            eVy = eVy.ravel()

-
-            # eVy_avg = np.mean(eVy, axis = (1,2))
-            # mspe1 = np.nan_to_num(MSPE(np.squeeze(y_avg), np.squeeze(eVy_avg)), nan=100000, posinf=100000)
+
            eVy = np.nan_to_num(eVy, nan=100000, posinf=100000)
            eVy = np.clip(eVy, None, 1000)
            mae = np.nan_to_num(MAE(np.squeeze(y), np.squeeze(eVy)), nan=100000, posinf=100000)
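The surviving lines illustrate the guard pattern applied before scoring: NaN or infinite predictions are replaced with a large sentinel and capped before the mean absolute error is taken. A self-contained version of the same pattern, using the plain numpy definition of MAE in place of the package's imported MAE helper, with the sentinel and cap values shown in the diff:

    import numpy as np

    def guarded_mae(y, eVy, sentinel=100000, cap=1000):
        # Degenerate predictions become a large but finite penalty
        # instead of propagating NaN/inf into the fitness value.
        eVy = np.nan_to_num(eVy, nan=sentinel, posinf=sentinel)
        eVy = np.clip(eVy, None, cap)
        mae = np.mean(np.abs(np.squeeze(y) - np.squeeze(eVy)))
        return np.nan_to_num(mae, nan=sentinel, posinf=sentinel)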
@@ -3921,9 +3902,9 @@ class ObjectiveFunction(object):

        return penalty, b_gam

-    def eXB_calc(self, params_main, Xd, offset, dispersion,
+    def eXB_calc(self, params_main, Xd, offset, dispersion, linear = False):

-
+
        if dispersion:
            eta= np.dot(Xd, params_main)[:, :, None] + np.array(offset[:, :, :])

@@ -3933,20 +3914,10 @@ class ObjectiveFunction(object):
            eta = np.dot(Xd, params_main)[:, :, None] + np.array(offset[:, :, :])
            eta = np.array(eta)

-            # eta = np.float64(eta)
-            # eta = np.dot(Xd, params_main)+offset[:,:,0]
-            # eta2 = np.dot(Xd, params_main)[:,:,None]+np.array(offset[:,:,:])

-            if
-
-
-            get_lindley = 0.01
-            eps_i = self.my_lindley(Xd, get_lindley)
-            eVd = eps_i * np.exp(np.clip(eta, 0, EXP_UPPER_LIMIT)).ravel()
-            # Vd = self.my_lindley(np.exp(np.clip(eta, 0, EXP_UPPER_LIMIT)), get_lindley)
-
-            # eVd = np.exp(np.clip(eta, 0, EXP_UPPER_LIMIT))
-            # eVd = self.my_lindley(np.exp(np.clip(eta, None, EXP_UPPER_LIMIT)), 1) #todo grab param
+            if linear:
+                eta = eta.astype('float')
+                return eta


        else:
|
|
|
4079
4050
|
# proba_r = self._nonlog_nbin_lindley(y, eVd, fa, ba)
|
|
4080
4051
|
|
|
4081
4052
|
elif dispersion == 'poisson_lognormal':
|
|
4082
|
-
sig
|
|
4053
|
+
sig = self.get_dispersion_paramaters(betas, dispersion)
|
|
4083
4054
|
store = list()
|
|
4084
4055
|
for i in range(len(y)):
|
|
4085
4056
|
store.append(self.poisson_lognormal_pmf(
|
|
@@ -4127,7 +4098,7 @@ class ObjectiveFunction(object):

        # if alpha < 0:
        #     alpha = np.abs(alpha)
-        sig
+        sig = self.get_dispersion_paramaters(betas, dispersion)

        if model_nature is not None:
            if 'XH' in model_nature:
@@ -4152,15 +4123,7 @@ class ObjectiveFunction(object):
            gr_f, gr_u, gr_s = np.zeros((N, Kf)), np.zeros(
                (N, Kr + Kc)), np.zeros((N, Kchol + Kr_b))

-
-
-            q = omeg / (1 + omeg)
-            d_beta = (y + 1) / (eVd + y + 1) - q / (1 - q)
-
-            gr_e = d_beta * (proba_n[:, None, :]).sum(axis=2)
-            for i in len(y):
-                if y[i] == 0:
-                    gr_e[i] = 0
+

        if self.is_dispersion(dispersion) and not self.no_extra_param:
            gr_d = np.zeros((N, 1))
@@ -4329,9 +4292,8 @@ class ObjectiveFunction(object):
        # print('check this')
        if dispersion == 0:
            grad_n = self._concat_gradients((gr_f, gr_u, gr_s, gr_h, gr_hs)) / Rlik # (N,K)
-
-
-                (gr_f, gr_u, gr_s, gr_e)) / Rlik # (N,K)
+
+
        else:
            if self.no_extra_param:
                grad_n = self._concat_gradients(
@@ -4426,17 +4388,9 @@ class ObjectiveFunction(object):

            return der, grad_n

-
-            b_gam, l_gam = self.get_dispersion_paramaters(betas, dispersion)
-            ravel_me = self.my_lindley(y, l_gam)
-            der = self.nbl_score(y, Xd, betas, b_gam, l_gam)
-            print('00lol')
-            # der = -self.NB_score_lindley(betas, y, eVd, Xd, 0, obs_specific)
-            # if both:
-            # grad_n = -self.NB_score_lindley(betas, y, eVd, Xd, 0, True)
-            # return der, grad_n
+
        elif dispersion == 'poisson_lognormal':
-            sig
+            sig= self.get_dispersion_paramaters(betas, dispersion)
            der, grad_n = self.poisson_lognormal_glm_score(betas, y, Xd, sig)
            return der, grad_n

@@ -4666,30 +4620,31 @@ class ObjectiveFunction(object):
            betas = np.array(betas)
            Bf = betas[0:Kf] # Fixed betas

-            main_disper
-                betas, dispersion)
-
-            if lindley_disp <= 0:
-                penalty += 1
-                penalty += - lindley_disp
-                lindley_disp = 0
+            main_disper = self.get_dispersion_paramaters(
+                betas, dispersion)
+

-            eVd = self.eXB_calc(Bf, Xd, offset, main_disper,
+            eVd = self.eXB_calc(Bf, Xd, offset, main_disper, kwargs.get('linear_model'))

            if return_EV is True:
                return eVd

-
-
-            # self.lam = eVd
+

            if self.is_dispersion(dispersion):
                penalty, main_disper = self._penalty_dispersion(dispersion, main_disper, eVd, y, penalty,
                                                                model_nature)

                betas[-1] = main_disper
+
+            if kwargs.get('linear_model'):
+                # LINEAR MODEL PROCESS
+                mse = np.mean((y - eVd) ** 2)
+                return mse
+
+            ### GLM PROCESS ########
            llf_main = self.loglik_obs(
-                y, eVd, dispersion, main_disper,
+                y, eVd, dispersion, main_disper, None, betas)

            llf_main = np.clip(llf_main, log_lik_min, log_lik_max)

@@ -4725,7 +4680,9 @@ class ObjectiveFunction(object):
            else:

                return (-loglik + penalty)*self.minimize_scaler
-
+        ### ELSE WE HAVE DRAW DO THE DRAWS CODE ####
+        ## ELSE DRAWS ####
+        #############################################
        self.n_obs = len(y) * self.Ndraws #todo is this problematic
        penalty += self._penalty_betas(
            betas, dispersion, penalty, float(len(y) / 10.0))
@@ -4916,7 +4873,14 @@ class ObjectiveFunction(object):
            betas_hetro_sd = None

        Vdr = dev.cust_einsum("njk,nkr -> njr", Xdr, Br) # (N,P,R)
-
+        if kwargs.get('linear_model'):
+            ### LINEAR MODEL WAY #######
+            eVd = np.clip(
+                Vdf[:, :, None] + Vdr + Vdh + dev.np.array(offset), None, EXP_UPPER_LIMIT)
+            mse = np.mean((y - eVd) ** 2)
+            return mse
+
+        ##### GLM WAY #####
        eVd = dev.np.exp(np.clip(
            Vdf[:, :, None] + Vdr + Vdh + dev.np.array(offset), None, EXP_UPPER_LIMIT))
        if dispersion == 3:
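Across the preceding hunks the same switch appears in both the fixed-parameter path and the random-draws path: with linear_model set, the fitness returned to the metaheuristic is the mean squared error of the (unexponentiated) linear predictor, otherwise it remains the penalised negative log-likelihood of the count model. A schematic sketch of that switch; loglik_obs, penalty and minimize_scaler stand in for the package's own pieces:

    import numpy as np

    def fitness_sketch(y, eVd, penalty, loglik_obs, linear_model=False, minimize_scaler=1.0):
        if linear_model:
            # LINEAR MODEL PROCESS: score by mean squared error.
            return np.mean((y - eVd) ** 2)
        # GLM PROCESS: sum per-observation log-likelihoods, add the penalty,
        # and return a scaled minimisation target.
        loglik = np.sum(loglik_obs(y, eVd))
        return (-loglik + penalty) * minimize_scaler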
@@ -5034,7 +4998,7 @@ class ObjectiveFunction(object):
    def print_chol_mat(self, betas):
        print(self.chol_mat)
        self.get_br_and_bstd(betas)
-
+


    def regularise_l2(self, betas, backwards = False):
@@ -5574,6 +5538,8 @@ class ObjectiveFunction(object):
        """
        Fits a poisson regression given data and outcomes if dispersion is not declared
        if declared, fits a NB (dispersion = 1) regression or GP (disperions = 2)
+
+        #TODO lineraregression
        Inputs:
        X - array. Design matrix
        y - array. Observed outcomes
metacountregressor-0.1.234/version.txt  (added)

@@ -0,0 +1 @@
+0.1.234
metacountregressor-0.1.231/version.txt  (removed)

@@ -1 +0,0 @@
-0.1.231