scikit-survival 0.23.1__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. scikit_survival-0.23.1.dist-info/COPYING +674 -0
  2. scikit_survival-0.23.1.dist-info/METADATA +888 -0
  3. scikit_survival-0.23.1.dist-info/RECORD +55 -0
  4. scikit_survival-0.23.1.dist-info/WHEEL +5 -0
  5. scikit_survival-0.23.1.dist-info/top_level.txt +1 -0
  6. sksurv/__init__.py +138 -0
  7. sksurv/base.py +103 -0
  8. sksurv/bintrees/__init__.py +15 -0
  9. sksurv/bintrees/_binarytrees.cp313-win_amd64.pyd +0 -0
  10. sksurv/column.py +201 -0
  11. sksurv/compare.py +123 -0
  12. sksurv/datasets/__init__.py +10 -0
  13. sksurv/datasets/base.py +436 -0
  14. sksurv/datasets/data/GBSG2.arff +700 -0
  15. sksurv/datasets/data/actg320.arff +1169 -0
  16. sksurv/datasets/data/breast_cancer_GSE7390-metastasis.arff +283 -0
  17. sksurv/datasets/data/flchain.arff +7887 -0
  18. sksurv/datasets/data/veteran.arff +148 -0
  19. sksurv/datasets/data/whas500.arff +520 -0
  20. sksurv/ensemble/__init__.py +2 -0
  21. sksurv/ensemble/_coxph_loss.cp313-win_amd64.pyd +0 -0
  22. sksurv/ensemble/boosting.py +1610 -0
  23. sksurv/ensemble/forest.py +947 -0
  24. sksurv/ensemble/survival_loss.py +151 -0
  25. sksurv/exceptions.py +18 -0
  26. sksurv/functions.py +114 -0
  27. sksurv/io/__init__.py +2 -0
  28. sksurv/io/arffread.py +58 -0
  29. sksurv/io/arffwrite.py +145 -0
  30. sksurv/kernels/__init__.py +1 -0
  31. sksurv/kernels/_clinical_kernel.cp313-win_amd64.pyd +0 -0
  32. sksurv/kernels/clinical.py +328 -0
  33. sksurv/linear_model/__init__.py +3 -0
  34. sksurv/linear_model/_coxnet.cp313-win_amd64.pyd +0 -0
  35. sksurv/linear_model/aft.py +205 -0
  36. sksurv/linear_model/coxnet.py +543 -0
  37. sksurv/linear_model/coxph.py +618 -0
  38. sksurv/meta/__init__.py +4 -0
  39. sksurv/meta/base.py +35 -0
  40. sksurv/meta/ensemble_selection.py +642 -0
  41. sksurv/meta/stacking.py +349 -0
  42. sksurv/metrics.py +996 -0
  43. sksurv/nonparametric.py +588 -0
  44. sksurv/preprocessing.py +155 -0
  45. sksurv/svm/__init__.py +11 -0
  46. sksurv/svm/_minlip.cp313-win_amd64.pyd +0 -0
  47. sksurv/svm/_prsvm.cp313-win_amd64.pyd +0 -0
  48. sksurv/svm/minlip.py +606 -0
  49. sksurv/svm/naive_survival_svm.py +221 -0
  50. sksurv/svm/survival_svm.py +1228 -0
  51. sksurv/testing.py +108 -0
  52. sksurv/tree/__init__.py +1 -0
  53. sksurv/tree/_criterion.cp313-win_amd64.pyd +0 -0
  54. sksurv/tree/tree.py +703 -0
  55. sksurv/util.py +333 -0
@@ -0,0 +1,543 @@
1
+ # This program is free software: you can redistribute it and/or modify
2
+ # it under the terms of the GNU General Public License as published by
3
+ # the Free Software Foundation, either version 3 of the License, or
4
+ # (at your option) any later version.
5
+ #
6
+ # This program is distributed in the hope that it will be useful,
7
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
8
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9
+ # GNU General Public License for more details.
10
+ #
11
+ # You should have received a copy of the GNU General Public License
12
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
13
+ import numbers
14
+ import warnings
15
+
16
+ import numpy as np
17
+ from sklearn.base import BaseEstimator
18
+ from sklearn.exceptions import ConvergenceWarning
19
+ from sklearn.preprocessing import normalize as f_normalize
20
+ from sklearn.utils._param_validation import Interval, StrOptions
21
+ from sklearn.utils.validation import assert_all_finite, check_is_fitted, check_non_negative, column_or_1d
22
+
23
+ from ..base import SurvivalAnalysisMixin
24
+ from ..util import check_array_survival
25
+ from ._coxnet import call_fit_coxnet
26
+ from .coxph import BreslowEstimator
27
+
28
# Public API of this module: only the estimator class is exported.
__all__ = ["CoxnetSurvivalAnalysis"]
29
+
30
+
31
class CoxnetSurvivalAnalysis(BaseEstimator, SurvivalAnalysisMixin):
    """Cox's proportional hazards model with elastic net penalty.

    See the :ref:`User Guide </user_guide/coxnet.ipynb>` and [1]_ for further description.

    Parameters
    ----------
    n_alphas : int, optional, default: 100
        Number of alphas along the regularization path.

    alphas : array-like or None, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically.

    alpha_min_ratio : float or { "auto" }, optional, default: "auto"
        Determines minimum alpha of the regularization path
        if ``alphas`` is ``None``. The smallest value for alpha
        is computed as the fraction of the data derived maximum
        alpha (i.e. the smallest value for which all
        coefficients are zero).

        If set to "auto", the value will depend on the
        sample size relative to the number of features.
        If ``n_samples > n_features``, the default value is 0.0001
        If ``n_samples <= n_features``, 0.01 is the default value.

    l1_ratio : float, optional, default: 0.5
        The ElasticNet mixing parameter, with ``0 < l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty.
        For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.

    penalty_factor : array-like or None, optional
        Separate penalty factors can be applied to each coefficient.
        This is a number that multiplies alpha to allow differential
        shrinkage. Can be 0 for some variables, which implies no shrinkage,
        and that variable is always included in the model.
        Default is 1 for all variables.

        Note: the penalty factors are internally rescaled to sum to
        `n_features`, and the alphas sequence will reflect this change.

    normalize : boolean, optional, default: False
        If True, the features X will be normalized before optimization by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.

    copy_X : boolean, optional, default: True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional, default: 1e-7
        The tolerance for the optimization: optimization continues
        until all updates are smaller than ``tol``.

    max_iter : int, optional, default: 100000
        The maximum number of iterations.

    verbose : bool, optional, default: False
        Whether to print additional information during optimization.

    fit_baseline_model : bool, optional, default: False
        Whether to estimate baseline survival function
        and baseline cumulative hazard function for each alpha.
        If enabled, :meth:`predict_cumulative_hazard_function` and
        :meth:`predict_survival_function` can be used to obtain
        predicted cumulative hazard function and survival function.

    Attributes
    ----------
    alphas_ : ndarray, shape=(n_alphas,)
        The actual sequence of alpha values used.

    alpha_min_ratio_ : float
        The inferred value of alpha_min_ratio.

    penalty_factor_ : ndarray, shape=(n_features,)
        The actual penalty factors used.

    coef_ : ndarray, shape=(n_features, n_alphas)
        Matrix of coefficients.

    offset_ : ndarray, shape=(n_alphas,)
        Bias term to account for non-centered features.

    deviance_ratio_ : ndarray, shape=(n_alphas,)
        The fraction of (null) deviance explained.

    n_features_in_ : int
        Number of features seen during ``fit``.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during ``fit``. Defined only when `X`
        has feature names that are all strings.

    unique_times_ : array of shape = (n_unique_times,)
        Unique time points.

    References
    ----------
    .. [1] Simon N, Friedman J, Hastie T, Tibshirani R.
           Regularization paths for Cox’s proportional hazards model via coordinate descent.
           Journal of statistical software. 2011 Mar;39(5):1.
    """

    # Declarative hyper-parameter constraints consumed by
    # `BaseEstimator._validate_params()` (invoked from `_check_params`).
    _parameter_constraints: dict = {
        "n_alphas": [Interval(numbers.Integral, 1, None, closed="left")],
        "alphas": ["array-like", None],
        "alpha_min_ratio": [Interval(numbers.Real, 0, None, closed="neither"), StrOptions({"auto"})],
        "l1_ratio": [Interval(numbers.Real, 0.0, 1.0, closed="right")],
        "penalty_factor": ["array-like", None],
        "normalize": ["boolean"],
        "copy_X": ["boolean"],
        "tol": [Interval(numbers.Real, 0, None, closed="left")],
        "max_iter": [Interval(numbers.Integral, 1, None, closed="left")],
        "verbose": ["verbose"],
        "fit_baseline_model": ["boolean"],
    }
150
+
151
    def __init__(
        self,
        *,
        n_alphas=100,
        alphas=None,
        alpha_min_ratio="auto",
        l1_ratio=0.5,
        penalty_factor=None,
        normalize=False,
        copy_X=True,
        tol=1e-7,
        max_iter=100000,
        verbose=False,
        fit_baseline_model=False,
    ):
        # Per scikit-learn convention, hyper-parameters are stored verbatim;
        # validation happens later in `_check_params` (called from `fit`).
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.alpha_min_ratio = alpha_min_ratio
        self.l1_ratio = l1_ratio
        self.penalty_factor = penalty_factor
        self.normalize = normalize
        self.copy_X = copy_X
        self.tol = tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.fit_baseline_model = fit_baseline_model

        # Tuple of per-alpha BreslowEstimator instances, populated by `fit`
        # only when `fit_baseline_model` is enabled; otherwise stays None.
        self._baseline_models = None
179
+
180
+ def _pre_fit(self, X, y):
181
+ X = self._validate_data(X, ensure_min_samples=2, dtype=np.float64, copy=self.copy_X)
182
+ event, time = check_array_survival(X, y)
183
+ # center feature matrix
184
+ X_offset = np.average(X, axis=0)
185
+ X -= X_offset
186
+ if self.normalize:
187
+ X, X_scale = f_normalize(X, copy=False, axis=0, return_norm=True)
188
+ else:
189
+ X_scale = np.ones(X.shape[1], dtype=X.dtype)
190
+
191
+ # sort descending
192
+ o = np.argsort(-time, kind="mergesort")
193
+ X = np.asfortranarray(X[o, :])
194
+ event_num = event[o].astype(np.uint8)
195
+ time = time[o].astype(np.float64)
196
+ return X, event_num, time, X_offset, X_scale
197
+
198
+ def _check_penalty_factor(self, n_features):
199
+ if self.penalty_factor is None:
200
+ penalty_factor = np.ones(n_features, dtype=np.float64)
201
+ else:
202
+ pf = column_or_1d(self.penalty_factor, warn=True)
203
+ if pf.shape[0] != n_features:
204
+ raise ValueError(
205
+ f"penalty_factor must be array of length n_features ({n_features}), but got {pf.shape[0]}"
206
+ )
207
+ assert_all_finite(pf, input_name="penalty_factor")
208
+ check_non_negative(pf, "penalty_factor")
209
+ penalty_factor = pf * n_features / pf.sum()
210
+ return penalty_factor
211
+
212
+ def _check_alphas(self):
213
+ create_path = self.alphas is None
214
+ if create_path:
215
+ if self.n_alphas <= 0:
216
+ raise ValueError("n_alphas must be a positive integer")
217
+
218
+ alphas = np.empty(int(self.n_alphas), dtype=np.float64)
219
+ else:
220
+ alphas = column_or_1d(self.alphas, warn=True)
221
+ assert_all_finite(alphas, input_name="alphas")
222
+ check_non_negative(alphas, "alphas")
223
+ return alphas, create_path
224
+
225
+ def _check_alpha_min_ratio(self, n_samples, n_features):
226
+ alpha_min_ratio = self.alpha_min_ratio
227
+ if isinstance(alpha_min_ratio, str) and self.alpha_min_ratio == "auto":
228
+ if n_samples > n_features:
229
+ alpha_min_ratio = 0.0001
230
+ else:
231
+ alpha_min_ratio = 0.01
232
+
233
+ return alpha_min_ratio
234
+
235
+ def _check_params(self, n_samples, n_features):
236
+ self._validate_params()
237
+
238
+ penalty_factor = self._check_penalty_factor(n_features)
239
+
240
+ alphas, create_path = self._check_alphas()
241
+
242
+ if self.max_iter <= 0:
243
+ raise ValueError("max_iter must be a positive integer")
244
+
245
+ alpha_min_ratio = self._check_alpha_min_ratio(n_samples, n_features)
246
+
247
+ return create_path, alphas.astype(np.float64), penalty_factor.astype(np.float64), alpha_min_ratio
248
+
249
    def fit(self, X, y):
        """Fit estimator.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Data matrix

        y : structured array, shape = (n_samples,)
            A structured array containing the binary event indicator
            as first field, and time of event or time of censoring as
            second field.

        Returns
        -------
        self
        """
        # Validate, center/scale, and sort the training data by descending time.
        X, event_num, time, X_offset, X_scale = self._pre_fit(X, y)
        create_path, alphas, penalty, alpha_min_ratio = self._check_params(*X.shape)

        # Solve the penalized Cox model along the alpha path in the
        # compiled extension.
        coef, alphas, deviance_ratio, n_iter = call_fit_coxnet(
            X,
            time,
            event_num,
            penalty,
            alphas,
            create_path,
            alpha_min_ratio,
            self.l1_ratio,
            int(self.max_iter),
            self.tol,
            self.verbose,
        )
        # Sanity check: the solver must not produce NaN/inf coefficients.
        assert np.isfinite(coef).all()

        # An all-zero solution means the penalty wiped out every feature.
        if np.all(np.absolute(coef) < np.finfo(float).eps):
            warnings.warn("all coefficients are zero, consider decreasing alpha.", stacklevel=2)

        # Hitting the iteration budget indicates the solver may not have converged.
        if n_iter >= self.max_iter:
            warnings.warn(
                "Optimization terminated early, you might want"
                f" to increase the number of iterations (max_iter={self.max_iter}).",
                category=ConvergenceWarning,
                stacklevel=2,
            )

        # Undo the normalization applied in `_pre_fit` so coefficients refer
        # to the original feature scale.
        coef /= X_scale[:, np.newaxis]

        if self.fit_baseline_model:
            # Fit one Breslow baseline estimator per alpha on the path, using
            # the linear predictors of the (centered) training data.
            predictions = np.dot(X, coef)
            self._baseline_models = tuple(
                BreslowEstimator().fit(predictions[:, i], event_num, time) for i in range(coef.shape[1])
            )
        else:
            self._baseline_models = None

        self.alphas_ = alphas
        self.alpha_min_ratio_ = alpha_min_ratio
        self.penalty_factor_ = penalty
        self.coef_ = coef
        self.deviance_ratio_ = deviance_ratio
        # Offset compensating for the centering of X (used by `predict`).
        self.offset_ = np.dot(X_offset, coef)
        return self
312
+
313
+ def _get_coef(self, alpha):
314
+ check_is_fitted(self, "coef_")
315
+
316
+ if alpha is None:
317
+ coef = self.coef_[:, -1], self.offset_[-1]
318
+ else:
319
+ coef = self._interpolate_coefficients(alpha)
320
+ return coef
321
+
322
    def _interpolate_coefficients(self, alpha):
        """Interpolate coefficients by calculating the weighted average of coefficient vectors corresponding to
        neighbors of alpha in the list of alphas constructed during training."""
        # NOTE(review): this lookup assumes self.alphas_ is sorted in
        # decreasing order — confirm against how the path is built in `fit`.
        exact = False
        coef_idx = None
        for i, val in enumerate(self.alphas_):
            if val > alpha:
                # Track the most recent grid point that is larger than alpha.
                coef_idx = i
            elif alpha - val < np.finfo(float).eps:
                # alpha coincides with this grid point (within machine epsilon).
                coef_idx = i
                exact = True
                break

        if coef_idx is None:
            # alpha exceeds every grid point: fall back to the first entry.
            coef = self.coef_[:, 0]
            offset = self.offset_[0]
        elif exact or coef_idx == len(self.alphas_) - 1:
            # Exact match, or alpha lies beyond the last grid point.
            coef = self.coef_[:, coef_idx]
            offset = self.offset_[coef_idx]
        else:
            # Linear interpolation between the two neighboring grid points.
            a1 = self.alphas_[coef_idx + 1]
            a2 = self.alphas_[coef_idx]
            frac = (alpha - a1) / (a2 - a1)
            coef = frac * self.coef_[:, coef_idx] + (1.0 - frac) * self.coef_[:, coef_idx + 1]
            offset = frac * self.offset_[coef_idx] + (1.0 - frac) * self.offset_[coef_idx + 1]

        return coef, offset
350
+
351
+ def predict(self, X, alpha=None):
352
+ """The linear predictor of the model.
353
+
354
+ Parameters
355
+ ----------
356
+ X : array-like, shape = (n_samples, n_features)
357
+ Test data of which to calculate log-likelihood from
358
+
359
+ alpha : float, optional
360
+ Constant that multiplies the penalty terms. If the same alpha was used during training, exact
361
+ coefficients are used, otherwise coefficients are interpolated from the closest alpha values that
362
+ were used during training. If set to ``None``, the last alpha in the solution path is used.
363
+
364
+ Returns
365
+ -------
366
+ T : array, shape = (n_samples,)
367
+ The predicted decision function
368
+ """
369
+ X = self._validate_data(X, reset=False)
370
+ coef, offset = self._get_coef(alpha)
371
+ return np.dot(X, coef) - offset
372
+
373
+ def _get_baseline_model(self, alpha):
374
+ check_is_fitted(self, "coef_")
375
+ if self._baseline_models is None:
376
+ raise ValueError("`fit` must be called with the fit_baseline_model option set to True.")
377
+
378
+ if alpha is None:
379
+ baseline_model = self._baseline_models[-1]
380
+ else:
381
+ is_close = np.isclose(alpha, self.alphas_)
382
+ if is_close.any():
383
+ idx = np.flatnonzero(is_close)[0]
384
+ baseline_model = self._baseline_models[idx]
385
+ else:
386
+ raise ValueError(f"alpha must be one value of alphas_: {self.alphas_}")
387
+
388
+ return baseline_model
389
+
390
+ def predict_cumulative_hazard_function(self, X, alpha=None, return_array=False):
391
+ """Predict cumulative hazard function.
392
+
393
+ Only available if :meth:`fit` has been called with `fit_baseline_model = True`.
394
+
395
+ The cumulative hazard function for an individual
396
+ with feature vector :math:`x_\\alpha` is defined as
397
+
398
+ .. math::
399
+
400
+ H(t \\mid x_\\alpha) = \\exp(x_\\alpha^\\top \\beta) H_0(t) ,
401
+
402
+ where :math:`H_0(t)` is the baseline hazard function,
403
+ estimated by Breslow's estimator.
404
+
405
+ Parameters
406
+ ----------
407
+ X : array-like, shape = (n_samples, n_features)
408
+ Data matrix.
409
+
410
+ alpha : float, optional
411
+ Constant that multiplies the penalty terms. The same alpha as used during training
412
+ must be specified. If set to ``None``, the last alpha in the solution path is used.
413
+
414
+ return_array : boolean, default: False
415
+ If set, return an array with the cumulative hazard rate
416
+ for each `self.unique_times_`, otherwise an array of
417
+ :class:`sksurv.functions.StepFunction`.
418
+
419
+ Returns
420
+ -------
421
+ cum_hazard : ndarray
422
+ If `return_array` is set, an array with the cumulative hazard rate
423
+ for each `self.unique_times_`, otherwise an array of length `n_samples`
424
+ of :class:`sksurv.functions.StepFunction` instances will be returned.
425
+
426
+ Examples
427
+ --------
428
+ >>> import matplotlib.pyplot as plt
429
+ >>> from sksurv.datasets import load_breast_cancer
430
+ >>> from sksurv.preprocessing import OneHotEncoder
431
+ >>> from sksurv.linear_model import CoxnetSurvivalAnalysis
432
+
433
+ Load and prepare the data.
434
+
435
+ >>> X, y = load_breast_cancer()
436
+ >>> X = OneHotEncoder().fit_transform(X)
437
+
438
+ Fit the model.
439
+
440
+ >>> estimator = CoxnetSurvivalAnalysis(l1_ratio=0.99, fit_baseline_model=True)
441
+ >>> estimator.fit(X, y)
442
+
443
+ Estimate the cumulative hazard function for one sample and the five highest alpha.
444
+
445
+ >>> chf_funcs = {}
446
+ >>> for alpha in estimator.alphas_[:5]:
447
+ ... chf_funcs[alpha] = estimator.predict_cumulative_hazard_function(
448
+ ... X.iloc[:1], alpha=alpha)
449
+ ...
450
+
451
+ Plot the estimated cumulative hazard functions.
452
+
453
+ >>> for alpha, chf_alpha in chf_funcs.items():
454
+ ... for fn in chf_alpha:
455
+ ... plt.step(fn.x, fn(fn.x), where="post",
456
+ ... label=f"alpha = {alpha:.3f}")
457
+ ...
458
+ >>> plt.ylim(0, 1)
459
+ >>> plt.legend()
460
+ >>> plt.show()
461
+ """
462
+ baseline_model = self._get_baseline_model(alpha)
463
+ return self._predict_cumulative_hazard_function(baseline_model, self.predict(X, alpha=alpha), return_array)
464
+
465
+ def predict_survival_function(self, X, alpha=None, return_array=False):
466
+ """Predict survival function.
467
+
468
+ Only available if :meth:`fit` has been called with `fit_baseline_model = True`.
469
+
470
+ The survival function for an individual
471
+ with feature vector :math:`x_\\alpha` is defined as
472
+
473
+ .. math::
474
+
475
+ S(t \\mid x_\\alpha) = S_0(t)^{\\exp(x_\\alpha^\\top \\beta)} ,
476
+
477
+ where :math:`S_0(t)` is the baseline survival function,
478
+ estimated by Breslow's estimator.
479
+
480
+ Parameters
481
+ ----------
482
+ X : array-like, shape = (n_samples, n_features)
483
+ Data matrix.
484
+
485
+ alpha : float, optional
486
+ Constant that multiplies the penalty terms. The same alpha as used during training
487
+ must be specified. If set to ``None``, the last alpha in the solution path is used.
488
+
489
+ return_array : boolean, default: False
490
+ If set, return an array with the probability
491
+ of survival for each `self.unique_times_`,
492
+ otherwise an array of :class:`sksurv.functions.StepFunction`.
493
+
494
+ Returns
495
+ -------
496
+ survival : ndarray
497
+ If `return_array` is set, an array with the probability of
498
+ survival for each `self.unique_times_`, otherwise an array of
499
+ length `n_samples` of :class:`sksurv.functions.StepFunction`
500
+ instances will be returned.
501
+
502
+ Examples
503
+ --------
504
+ >>> import matplotlib.pyplot as plt
505
+ >>> from sksurv.datasets import load_breast_cancer
506
+ >>> from sksurv.preprocessing import OneHotEncoder
507
+ >>> from sksurv.linear_model import CoxnetSurvivalAnalysis
508
+
509
+ Load and prepare the data.
510
+
511
+ >>> X, y = load_breast_cancer()
512
+ >>> X = OneHotEncoder().fit_transform(X)
513
+
514
+ Fit the model.
515
+
516
+ >>> estimator = CoxnetSurvivalAnalysis(l1_ratio=0.99, fit_baseline_model=True)
517
+ >>> estimator.fit(X, y)
518
+
519
+ Estimate the survival function for one sample and the five highest alpha.
520
+
521
+ >>> surv_funcs = {}
522
+ >>> for alpha in estimator.alphas_[:5]:
523
+ ... surv_funcs[alpha] = estimator.predict_survival_function(
524
+ ... X.iloc[:1], alpha=alpha)
525
+ ...
526
+
527
+ Plot the estimated survival functions.
528
+
529
+ >>> for alpha, surv_alpha in surv_funcs.items():
530
+ ... for fn in surv_alpha:
531
+ ... plt.step(fn.x, fn(fn.x), where="post",
532
+ ... label=f"alpha = {alpha:.3f}")
533
+ ...
534
+ >>> plt.ylim(0, 1)
535
+ >>> plt.legend()
536
+ >>> plt.show()
537
+ """
538
+ baseline_model = self._get_baseline_model(alpha)
539
+ return self._predict_survival_function(baseline_model, self.predict(X, alpha=alpha), return_array)
540
+
541
+ @property
542
+ def unique_times_(self):
543
+ return self._get_baseline_model(None).unique_times_