icol 0.9.7.tar.gz → 0.10.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {icol-0.9.7 → icol-0.10.1}/PKG-INFO +1 -1
- {icol-0.9.7 → icol-0.10.1}/icol/logistic_icol.py +35 -8
- {icol-0.9.7 → icol-0.10.1}/icol.egg-info/PKG-INFO +1 -1
- {icol-0.9.7 → icol-0.10.1}/pyproject.toml +1 -1
- {icol-0.9.7 → icol-0.10.1}/LICENSE +0 -0
- {icol-0.9.7 → icol-0.10.1}/README.md +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol/__init__.py +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol/feature_expansion.py +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol/icol.py +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol.egg-info/SOURCES.txt +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol.egg-info/dependency_links.txt +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol.egg-info/requires.txt +0 -0
- {icol-0.9.7 → icol-0.10.1}/icol.egg-info/top_level.txt +0 -0
- {icol-0.9.7 → icol-0.10.1}/setup.cfg +0 -0
- {icol-0.9.7 → icol-0.10.1}/tests/test_icl.py +0 -0
{icol-0.9.7 → icol-0.10.1}/icol/logistic_icol.py

@@ -84,10 +84,10 @@ class generalised_SIS:
         chosen = np.array(chosen, dtype=int)
         return scores[chosen], chosen
 
-class LOGISTIC_LASSO:
+class LOGISTIC_ADALASSO:
     def __init__(self, log_c_lo=-4, log_c_hi=3, c_num=100, solver="saga",
                  class_weight=None, max_iter=5000, tol=1e-4, eps_nnz=1e-12,
-                 clp=np.infty, random_state=None):
+                 clp=np.infty, random_state=None, gamma=1):
         self.log_c_lo = log_c_lo
         self.log_c_hi = log_c_hi
         self.c_num= c_num
@@ -99,6 +99,7 @@ class LOGISTIC_LASSO:
         self.eps_nnz = eps_nnz
         self.random_state = random_state
         self.clp = clp
+        self.gamma = gamma
 
         self.models = np.array([LogisticRegression(C=c,
             solver=self.solver, class_weight=self.class_weight,
@@ -126,10 +127,30 @@ class LOGISTIC_LASSO:
 
     def fit(self, X, y, d, feature_names=None, verbose=False):
         self.feature_names = ['X_{0}'.format(i) for i in range(X.shape[1])] if feature_names is None else feature_names
+
+        nonancols = np.isnan(X).sum(axis=0)==0
+        noinfcols = np.isinf(X).sum(axis=0)==0
+        valcols = np.logical_and(nonancols, noinfcols)
+        if np.abs(self.gamma) <= 1e-10:
+            beta_hat = np.ones(X.shape[1])
+            w_hat = np.ones(X.shape[1])
+            X_star_star = X.copy()
+        else:
+            X_valcols = X[:, valcols]
+            LR = LogisticRegression(penalty=None, fit_intercept=False, random_state=self.random_state)
+            LR.fit(X_valcols, y)
+            beta_hat = LR.coef_
+
+            w_hat = 1/np.power(np.abs(beta_hat), self.gamma)
+            X_star_star = np.zeros_like(X_valcols)
+            for j in range(X_star_star.shape[1]): # vectorise
+                X_j = X_valcols[:, j]/w_hat[j]
+                X_star_star[:, j] = X_j
+
         best_idx = 0
         for i, model in enumerate(self.models):
             if verbose: print('Fitting model {0} of {1} with C={2} and has '.format(i, len(self.models), model.C), end='')
-            model.fit(X, y)
+            model.fit(X_star_star, y)
             nnz = self._count_nnz(model.coef_)
             if verbose: print('{0} nonzero terms'.format(nnz))
             if nnz<=d:
@@ -138,9 +159,17 @@ class LOGISTIC_LASSO:
                 break
 
         self.model_idx = best_idx
-        self.model = self.models[self.model_idx]
-        self.coef_ = self.model.coef_.ravel()
         self.coef_idx_ = np.arange(len(self.coef_))[np.abs(np.ravel(self.coef_)) > self.eps_nnz]
+
+        beta_hat_star_star = self.models[self.model_idx].coef_.ravel()
+        beta_hat_star_n_valcol = np.array([beta_hat_star_star[j]/w_hat[j] for j in range(len(beta_hat_star_star))])
+        beta_hat_star_n = np.zeros(X.shape[1])
+        beta_hat_star_n[valcols] = beta_hat_star_n_valcol
+
+        self.coef_ = beta_hat_star_n
+        self.model = self.models[self.model_idx]
+        self.model.coef_ = self.coef_
+
         return self
 
     def _count_nnz(self, coef):
@@ -149,9 +178,7 @@ class LOGISTIC_LASSO:
         ))
 
     def __str__(self):
-
-        params_str = ", ".join(f"{k}={params[k]!r}" for k in sorted(params))
-        return f"LogisticLasso({params_str})"
+        return f"Logistic{0}Lasso".format("Ada" if np.abs(self.gamma)<=1e-10 else '')
 
     def decision_function(self, X):
         return np.dot(X, self.model.coef_.ravel())
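For orientation, the substantive change in logistic_icol.py is that the former LOGISTIC_LASSO class becomes LOGISTIC_ADALASSO and gains a gamma parameter implementing adaptive-lasso-style reweighting: a pilot unpenalised logistic fit produces beta_hat, each NaN/inf-free column of X is divided by w_j = 1/|beta_hat_j|**gamma before the L1 path is searched, and the winning coefficients are divided by the same weights to map them back to the original scale; |gamma| <= 1e-10 switches the reweighting off. The snippet below is a minimal standalone sketch of that reweighting recipe using plain scikit-learn, not a call into icol itself; the dataset, the single C value, and the solver settings are assumptions for illustration, and penalty=None assumes a recent scikit-learn (1.2+).

```python
# Standalone sketch of the adaptive-lasso reweighting that the new
# LOGISTIC_ADALASSO.fit introduces (the gamma / w_hat / X_star_star steps in
# the diff above). Illustration only -- it does not import icol, and the
# dataset, C value and solver settings are assumptions.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=300, n_features=20, n_informative=4,
                           random_state=0)
gamma = 1.0

# 1) Pilot fit without penalty to obtain initial coefficients beta_hat.
pilot = LogisticRegression(penalty=None, fit_intercept=False, max_iter=5000)
pilot.fit(X, y)
beta_hat = pilot.coef_.ravel()

# 2) Adaptive weights w_j = 1/|beta_hat_j|**gamma; divide column j by w_j.
w_hat = 1.0 / np.power(np.abs(beta_hat), gamma)
X_star = X / w_hat                      # broadcasts over columns

# 3) L1-penalised logistic fit on the rescaled design (a single point of the
#    C grid that icol would sweep).
lasso = LogisticRegression(penalty="l1", solver="saga", C=0.1,
                           fit_intercept=False, max_iter=5000)
lasso.fit(X_star, y)

# 4) Undo the rescaling so the coefficients refer to the original columns.
coef = lasso.coef_.ravel() / w_hat
selected = np.flatnonzero(np.abs(coef) > 1e-12)
print("selected features:", selected)
```

Unlike the single fixed C above, the class itself builds a grid of c_num values between 10**log_c_lo and 10**log_c_hi and, as the fit loop in the diff shows, stops at the first model on that grid with at most d nonzero coefficients.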