cpgtools-2.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. cpgmodule/BED.py +441 -0
  2. cpgmodule/MI.py +193 -0
  3. cpgmodule/__init__.py +0 -0
  4. cpgmodule/_version.py +1 -0
  5. cpgmodule/cgID.py +866897 -0
  6. cpgmodule/data/AltumAge_cpg.pkl +0 -0
  7. cpgmodule/data/AltumAge_multi_platform_cpgs.pkl +0 -0
  8. cpgmodule/data/AltumAge_scaler.pkl +0 -0
  9. cpgmodule/data/GA_Bohlin.pkl +0 -0
  10. cpgmodule/data/GA_Haftorn.pkl +0 -0
  11. cpgmodule/data/GA_Knight.pkl +0 -0
  12. cpgmodule/data/GA_Lee_CPC.pkl +0 -0
  13. cpgmodule/data/GA_Lee_RPC.pkl +0 -0
  14. cpgmodule/data/GA_Lee_refined_RPC.pkl +0 -0
  15. cpgmodule/data/GA_Mayne.pkl +0 -0
  16. cpgmodule/data/Hannum.pkl +0 -0
  17. cpgmodule/data/Horvath_2013.pkl +0 -0
  18. cpgmodule/data/Horvath_2018.pkl +0 -0
  19. cpgmodule/data/Levine.pkl +0 -0
  20. cpgmodule/data/Lu_DNAmTL.pkl +0 -0
  21. cpgmodule/data/Ped_McEwen.pkl +0 -0
  22. cpgmodule/data/Ped_Wu.pkl +0 -0
  23. cpgmodule/data/Zhang_BLUP.pkl +0 -0
  24. cpgmodule/data/Zhang_EN.pkl +0 -0
  25. cpgmodule/data/__init__.py +0 -0
  26. cpgmodule/extend_bed.py +147 -0
  27. cpgmodule/imotif.py +348 -0
  28. cpgmodule/ireader.py +28 -0
  29. cpgmodule/methylClock.py +53 -0
  30. cpgmodule/padjust.py +58 -0
  31. cpgmodule/region2gene.py +170 -0
  32. cpgmodule/utils.py +642 -0
  33. cpgtools-2.0.5.data/scripts/CpG_aggregation.py +238 -0
  34. cpgtools-2.0.5.data/scripts/CpG_anno_position.py +156 -0
  35. cpgtools-2.0.5.data/scripts/CpG_anno_probe.py +112 -0
  36. cpgtools-2.0.5.data/scripts/CpG_density_gene_centered.py +107 -0
  37. cpgtools-2.0.5.data/scripts/CpG_distrb_chrom.py +154 -0
  38. cpgtools-2.0.5.data/scripts/CpG_distrb_gene_centered.py +193 -0
  39. cpgtools-2.0.5.data/scripts/CpG_distrb_region.py +146 -0
  40. cpgtools-2.0.5.data/scripts/CpG_logo.py +134 -0
  41. cpgtools-2.0.5.data/scripts/CpG_to_gene.py +141 -0
  42. cpgtools-2.0.5.data/scripts/beta_PCA.py +188 -0
  43. cpgtools-2.0.5.data/scripts/beta_UMAP.py +181 -0
  44. cpgtools-2.0.5.data/scripts/beta_combat.py +174 -0
  45. cpgtools-2.0.5.data/scripts/beta_jitter_plot.py +107 -0
  46. cpgtools-2.0.5.data/scripts/beta_m_conversion.py +105 -0
  47. cpgtools-2.0.5.data/scripts/beta_profile_gene_centered.py +165 -0
  48. cpgtools-2.0.5.data/scripts/beta_profile_region.py +152 -0
  49. cpgtools-2.0.5.data/scripts/beta_selectNBest.py +116 -0
  50. cpgtools-2.0.5.data/scripts/beta_stacked_barplot.py +119 -0
  51. cpgtools-2.0.5.data/scripts/beta_stats.py +101 -0
  52. cpgtools-2.0.5.data/scripts/beta_tSNE.py +179 -0
  53. cpgtools-2.0.5.data/scripts/beta_topN.py +99 -0
  54. cpgtools-2.0.5.data/scripts/beta_trichotmize.py +190 -0
  55. cpgtools-2.0.5.data/scripts/dmc_Bayes.py +442 -0
  56. cpgtools-2.0.5.data/scripts/dmc_bb.py +221 -0
  57. cpgtools-2.0.5.data/scripts/dmc_fisher.py +161 -0
  58. cpgtools-2.0.5.data/scripts/dmc_glm.py +191 -0
  59. cpgtools-2.0.5.data/scripts/dmc_logit.py +226 -0
  60. cpgtools-2.0.5.data/scripts/dmc_nonparametric.py +176 -0
  61. cpgtools-2.0.5.data/scripts/dmc_ttest.py +222 -0
  62. cpgtools-2.0.5.data/scripts/predict_missing.py +673 -0
  63. cpgtools-2.0.5.data/scripts/predict_sex.py +126 -0
  64. cpgtools-2.0.5.dist-info/METADATA +59 -0
  65. cpgtools-2.0.5.dist-info/RECORD +104 -0
  66. cpgtools-2.0.5.dist-info/WHEEL +5 -0
  67. cpgtools-2.0.5.dist-info/licenses/LICENSE.txt +19 -0
  68. cpgtools-2.0.5.dist-info/top_level.txt +5 -0
  69. impyute/__init__.py +3 -0
  70. impyute/contrib/__init__.py +7 -0
  71. impyute/contrib/compare.py +69 -0
  72. impyute/contrib/count_missing.py +30 -0
  73. impyute/contrib/describe.py +63 -0
  74. impyute/cs/__init__.py +11 -0
  75. impyute/cs/buck_iterative.py +82 -0
  76. impyute/cs/central_tendency.py +84 -0
  77. impyute/cs/em.py +52 -0
  78. impyute/cs/fast_knn.py +130 -0
  79. impyute/cs/random.py +27 -0
  80. impyute/dataset/__init__.py +6 -0
  81. impyute/dataset/base.py +137 -0
  82. impyute/dataset/corrupt.py +55 -0
  83. impyute/deletion/__init__.py +5 -0
  84. impyute/deletion/complete_case.py +21 -0
  85. impyute/ops/__init__.py +12 -0
  86. impyute/ops/error.py +9 -0
  87. impyute/ops/inverse_distance_weighting.py +31 -0
  88. impyute/ops/matrix.py +47 -0
  89. impyute/ops/testing.py +20 -0
  90. impyute/ops/util.py +96 -0
  91. impyute/ops/wrapper.py +179 -0
  92. impyute/ts/__init__.py +6 -0
  93. impyute/ts/locf.py +57 -0
  94. impyute/ts/moving_window.py +128 -0
  95. impyutelib.py +890 -0
  96. missingpy/__init__.py +4 -0
  97. missingpy/knnimpute.py +328 -0
  98. missingpy/missforest.py +556 -0
  99. missingpy/pairwise_external.py +315 -0
  100. missingpy/tests/__init__.py +0 -0
  101. missingpy/tests/test_knnimpute.py +605 -0
  102. missingpy/tests/test_missforest.py +409 -0
  103. missingpy/utils.py +124 -0
  104. misspylib.py +565 -0
missingpy/missforest.py
@@ -0,0 +1,556 @@
+ """MissForest Imputer for Missing Data"""
+ # Author: Ashim Bhattarai
+ # License: GNU General Public License v3 (GPLv3)
+
+ import warnings
+
+ import numpy as np
+ from scipy.stats import mode
+
+ from sklearn.base import BaseEstimator, TransformerMixin
+ from sklearn.utils.validation import check_is_fitted, check_array
+ from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
+
+ from .pairwise_external import _get_mask
+
+ __all__ = [
+     'MissForest',
+ ]
+
+
+ class MissForest(BaseEstimator, TransformerMixin):
+     """Missing value imputation using Random Forests.
+
+     MissForest imputes missing values using Random Forests in an iterative
+     fashion. By default, the imputer begins imputing missing values of the
+     column (which is expected to be a variable) with the smallest number of
+     missing values -- let's call this the candidate column.
+     The first step involves filling any missing values of the remaining,
+     non-candidate, columns with an initial guess, which is the column mean for
+     columns representing numerical variables and the column mode for columns
+     representing categorical variables. After that, the imputer fits a random
+     forest model with the candidate column as the outcome variable and the
+     remaining columns as the predictors over all rows where the candidate
+     column values are not missing.
+     After the fit, the missing rows of the candidate column are
+     imputed using the prediction from the fitted Random Forest. The
+     rows of the non-candidate columns act as the input data for the fitted
+     model.
+     Following this, the imputer moves on to the next candidate column with the
+     second smallest number of missing values from among the non-candidate
+     columns in the first round. The process repeats itself for each column
+     with a missing value, possibly over multiple iterations or epochs for
+     each column, until the stopping criterion is met.
+     The stopping criterion is governed by the "difference" between the imputed
+     arrays over successive iterations. For numerical variables (num_vars_),
+     the difference is defined as follows:
+
+         sum((X_new[:, num_vars_] - X_old[:, num_vars_]) ** 2) /
+             sum((X_new[:, num_vars_]) ** 2)
+
+     For categorical variables (cat_vars_), the difference is defined as
+     follows:
+
+         sum(X_new[:, cat_vars_] != X_old[:, cat_vars_]) / n_cat_missing
+
+     where X_new is the newly imputed array, X_old is the array imputed in the
+     previous round, n_cat_missing is the total number of categorical
+     values that are missing, and the sum() is performed both across rows
+     and columns. Following [1], the stopping criterion is considered to have
+     been met when the difference between X_new and X_old increases for the
+     first time for both types of variables (if available).
+
+
62
+ Parameters
63
+ ----------
64
+ NOTE: Most parameter definitions below are taken verbatim from the
65
+ Scikit-Learn documentation at [2] and [3].
66
+
67
+ max_iter : int, optional (default = 10)
68
+ The maximum iterations of the imputation process. Each column with a
69
+ missing value is imputed exactly once in a given iteration.
70
+
71
+ decreasing : boolean, optional (default = False)
72
+ If set to True, columns are sorted according to decreasing number of
73
+ missing values. In other words, imputation will move from imputing
74
+ columns with the largest number of missing values to columns with
75
+ fewest number of missing values.
76
+
77
+ missing_values : np.nan, integer, optional (default = np.nan)
78
+ The placeholder for the missing values. All occurrences of
79
+ `missing_values` will be imputed.
80
+
81
+ copy : boolean, optional (default = True)
82
+ If True, a copy of X will be created. If False, imputation will
83
+ be done in-place whenever possible.
84
+
85
+ criterion : tuple, optional (default = ('squared_error', 'gini'))
86
+ The function to measure the quality of a split.The first element of
87
+ the tuple is for the Random Forest Regressor (for imputing numerical
88
+ variables) while the second element is for the Random Forest
89
+ Classifier (for imputing categorical variables).
90
+
91
+ n_estimators : integer, optional (default=100)
92
+ The number of trees in the forest.
93
+
94
+ max_depth : integer or None, optional (default=None)
95
+ The maximum depth of the tree. If None, then nodes are expanded until
96
+ all leaves are pure or until all leaves contain less than
97
+ min_samples_split samples.
98
+
99
+ min_samples_split : int, float, optional (default=2)
100
+ The minimum number of samples required to split an internal node:
101
+ - If int, then consider `min_samples_split` as the minimum number.
102
+ - If float, then `min_samples_split` is a fraction and
103
+ `ceil(min_samples_split * n_samples)` are the minimum
104
+ number of samples for each split.
105
+
106
+ min_samples_leaf : int, float, optional (default=1)
107
+ The minimum number of samples required to be at a leaf node.
108
+ A split point at any depth will only be considered if it leaves at
109
+ least ``min_samples_leaf`` training samples in each of the left and
110
+ right branches. This may have the effect of smoothing the model,
111
+ especially in regression.
112
+ - If int, then consider `min_samples_leaf` as the minimum number.
113
+ - If float, then `min_samples_leaf` is a fraction and
114
+ `ceil(min_samples_leaf * n_samples)` are the minimum
115
+ number of samples for each node.
116
+
117
+ min_weight_fraction_leaf : float, optional (default=0.)
118
+ The minimum weighted fraction of the sum total of weights (of all
119
+ the input samples) required to be at a leaf node. Samples have
120
+ equal weight when sample_weight is not provided.
121
+
122
+     max_features : int, float, string or None, optional (default="sqrt")
+         The number of features to consider when looking for the best split:
+         - If int, then consider `max_features` features at each split.
+         - If float, then `max_features` is a fraction and
+           `int(max_features * n_features)` features are considered at each
+           split.
+         - If "auto", then `max_features=sqrt(n_features)`.
+         - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
+         - If "log2", then `max_features=log2(n_features)`.
+         - If None, then `max_features=n_features`.
+         Note: the search for a split does not stop until at least one
+         valid partition of the node samples is found, even if it requires to
+         effectively inspect more than ``max_features`` features.
+
+     max_leaf_nodes : int or None, optional (default=None)
+         Grow trees with ``max_leaf_nodes`` in best-first fashion.
+         Best nodes are defined as relative reduction in impurity.
+         If None then unlimited number of leaf nodes.
+
+     min_impurity_decrease : float, optional (default=0.)
+         A node will be split if this split induces a decrease of the impurity
+         greater than or equal to this value.
+         The weighted impurity decrease equation is the following::
+             N_t / N * (impurity - N_t_R / N_t * right_impurity
+                        - N_t_L / N_t * left_impurity)
+         where ``N`` is the total number of samples, ``N_t`` is the number of
+         samples at the current node, ``N_t_L`` is the number of samples in the
+         left child, and ``N_t_R`` is the number of samples in the right child.
+         ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
+         if ``sample_weight`` is passed.
+
+     bootstrap : boolean, optional (default=True)
+         Whether bootstrap samples are used when building trees.
+
+     oob_score : bool (default=False)
+         Whether to use out-of-bag samples to estimate
+         the generalization accuracy.
+
+     n_jobs : int or None, optional (default=-1)
+         The number of jobs to run in parallel for both `fit` and `predict`.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     random_state : int, RandomState instance or None, optional (default=None)
+         If int, random_state is the seed used by the random number generator;
+         If RandomState instance, random_state is the random number generator;
+         If None, the random number generator is the RandomState instance used
+         by `np.random`.
+
+     verbose : int, optional (default=0)
+         Controls the verbosity when fitting and predicting.
+
+     warm_start : bool, optional (default=False)
+         When set to ``True``, reuse the solution of the previous call to fit
+         and add more estimators to the ensemble, otherwise, just fit a whole
+         new forest. See :term:`the Glossary <warm_start>`.
+
+     class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
+         None, optional (default=None)
+         Weights associated with classes in the form ``{class_label: weight}``.
+         If not given, all classes are supposed to have weight one. For
+         multi-output problems, a list of dicts can be provided in the same
+         order as the columns of y.
+         Note that for multioutput (including multilabel) weights should be
+         defined for each class of every column in its own dict. For example,
+         for four-class multilabel classification weights should be
+         [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
+         [{1:1}, {2:5}, {3:1}, {4:1}].
+         The "balanced" mode uses the values of y to automatically adjust
+         weights inversely proportional to class frequencies in the input data
+         as ``n_samples / (n_classes * np.bincount(y))``
+         The "balanced_subsample" mode is the same as "balanced" except that
+         weights are computed based on the bootstrap sample for every tree
+         grown.
+         For multi-output, the weights of each column of y will be multiplied.
+         Note that these weights will be multiplied with sample_weight (passed
+         through the fit method) if sample_weight is specified.
+         NOTE: This parameter is only applicable for Random Forest Classifier
+         objects (i.e., for categorical variables).
+
+     Attributes
+     ----------
+     statistics_ : Dictionary of length two
+         The first element is an array with the mean of each numerical feature
+         being imputed while the second element is an array of modes of
+         categorical features being imputed (if available, otherwise it
+         will be None).
+
+     References
+     ----------
+     * [1] Stekhoven, Daniel J., and Peter Bühlmann. "MissForest—non-parametric
+       missing value imputation for mixed-type data." Bioinformatics 28.1
+       (2011): 112-118.
+     * [2] https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.
+       RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor
+     * [3] https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.
+       RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier
+
+     Examples
+     --------
+     >>> from missingpy import MissForest
+     >>> nan = float("NaN")
+     >>> X = [[1, 2, nan], [3, 4, 3], [nan, 6, 5], [8, 8, 7]]
+     >>> imputer = MissForest(random_state=1337)
+     >>> imputer.fit_transform(X)
+     Iteration: 0
+     Iteration: 1
+     Iteration: 2
+     array([[1. , 2. , 3.92 ],
+            [3. , 4. , 3. ],
+            [2.71, 6. , 5. ],
+            [8. , 8. , 7. ]])
+     """
+
+     def __init__(self, max_iter=10, decreasing=False, missing_values=np.nan,
+                  copy=True, n_estimators=100, criterion=('squared_error', 'gini'),
+                  max_depth=None, min_samples_split=2, min_samples_leaf=1,
+                  min_weight_fraction_leaf=0.0, max_features='sqrt',
+                  max_leaf_nodes=None, min_impurity_decrease=0.0,
+                  bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,
+                  verbose=0, warm_start=False, class_weight=None):
+
+         self.max_iter = max_iter
+         self.decreasing = decreasing
+         self.missing_values = missing_values
+         self.copy = copy
+         self.n_estimators = n_estimators
+         self.criterion = criterion
+         self.max_depth = max_depth
+         self.min_samples_split = min_samples_split
+         self.min_samples_leaf = min_samples_leaf
+         self.min_weight_fraction_leaf = min_weight_fraction_leaf
+         self.max_features = max_features
+         self.max_leaf_nodes = max_leaf_nodes
+         self.min_impurity_decrease = min_impurity_decrease
+         self.bootstrap = bootstrap
+         self.oob_score = oob_score
+         self.n_jobs = n_jobs
+         self.random_state = random_state
+         self.verbose = verbose
+         self.warm_start = warm_start
+         self.class_weight = class_weight
+
+     def _miss_forest(self, Ximp, mask):
+         """The missForest algorithm"""
+
+         # Count missing per column
+         col_missing_count = mask.sum(axis=0)
+
+         # Get col and row indices for missing
+         missing_rows, missing_cols = np.where(mask)
+
+         if self.num_vars_ is not None:
+             # Only keep indices for numerical vars
+             keep_idx_num = np.in1d(missing_cols, self.num_vars_)
+             missing_num_rows = missing_rows[keep_idx_num]
+             missing_num_cols = missing_cols[keep_idx_num]
+
+             # Make initial guess for missing values
+             col_means = np.full(Ximp.shape[1], fill_value=np.nan)
+             col_means[self.num_vars_] = self.statistics_.get('col_means')
+             Ximp[missing_num_rows, missing_num_cols] = np.take(
+                 col_means, missing_num_cols)
+
+             # Reg criterion
+             reg_criterion = self.criterion if type(self.criterion) == str \
+                 else self.criterion[0]
+
+             # Instantiate regression model
+             rf_regressor = RandomForestRegressor(
+                 n_estimators=self.n_estimators,
+                 criterion=reg_criterion,
+                 max_depth=self.max_depth,
+                 min_samples_split=self.min_samples_split,
+                 min_samples_leaf=self.min_samples_leaf,
+                 min_weight_fraction_leaf=self.min_weight_fraction_leaf,
+                 max_features=self.max_features,
+                 max_leaf_nodes=self.max_leaf_nodes,
+                 min_impurity_decrease=self.min_impurity_decrease,
+                 bootstrap=self.bootstrap,
+                 oob_score=self.oob_score,
+                 n_jobs=self.n_jobs,
+                 random_state=self.random_state,
+                 verbose=self.verbose,
+                 warm_start=self.warm_start)
+
+         # If needed, repeat for categorical variables
+         if self.cat_vars_ is not None:
+             # Calculate total number of missing categorical values (used later)
+             n_catmissing = np.sum(mask[:, self.cat_vars_])
+
+             # Only keep indices for categorical vars
+             keep_idx_cat = np.in1d(missing_cols, self.cat_vars_)
+             missing_cat_rows = missing_rows[keep_idx_cat]
+             missing_cat_cols = missing_cols[keep_idx_cat]
+
+             # Make initial guess for missing values
+             col_modes = np.full(Ximp.shape[1], fill_value=np.nan)
+             col_modes[self.cat_vars_] = self.statistics_.get('col_modes')
+             Ximp[missing_cat_rows, missing_cat_cols] = np.take(col_modes, missing_cat_cols)
+
+             # Classification criterion
+             clf_criterion = self.criterion if type(self.criterion) == str \
+                 else self.criterion[1]
+
+             # Instantiate classification model
+             rf_classifier = RandomForestClassifier(
+                 n_estimators=self.n_estimators,
+                 criterion=clf_criterion,
+                 max_depth=self.max_depth,
+                 min_samples_split=self.min_samples_split,
+                 min_samples_leaf=self.min_samples_leaf,
+                 min_weight_fraction_leaf=self.min_weight_fraction_leaf,
+                 max_features=self.max_features,
+                 max_leaf_nodes=self.max_leaf_nodes,
+                 min_impurity_decrease=self.min_impurity_decrease,
+                 bootstrap=self.bootstrap,
+                 oob_score=self.oob_score,
+                 n_jobs=self.n_jobs,
+                 random_state=self.random_state,
+                 verbose=self.verbose,
+                 warm_start=self.warm_start,
+                 class_weight=self.class_weight)
+
+         # 2. misscount_idx: sorted indices of cols in X based on missing count
+         misscount_idx = np.argsort(col_missing_count)
+         # Reverse order if decreasing is set to True
+         if self.decreasing is True:
+             misscount_idx = misscount_idx[::-1]
+
+         # 3. While new_gammas < old_gammas & self.iter_count_ < max_iter loop:
+         self.iter_count_ = 0
+         gamma_new = 0
+         gamma_old = np.inf
+         gamma_newcat = 0
+         gamma_oldcat = np.inf
+         col_index = np.arange(Ximp.shape[1])
+
+         while (
+                 gamma_new < gamma_old or gamma_newcat < gamma_oldcat) and \
+                 self.iter_count_ < self.max_iter:
+
+             # 4. store previously imputed matrix
+             Ximp_old = np.copy(Ximp)
+             if self.iter_count_ != 0:
+                 gamma_old = gamma_new
+                 gamma_oldcat = gamma_newcat
+             # 5. loop
+             for s in misscount_idx:
+                 # Column indices other than the one being imputed
+                 s_prime = np.delete(col_index, s)
+
+                 # Get indices of rows where 's' is observed and missing
+                 obs_rows = np.where(~mask[:, s])[0]
+                 mis_rows = np.where(mask[:, s])[0]
+
+                 # If no missing, then skip
+                 if len(mis_rows) == 0:
+                     continue
+
+                 # Get observed values of 's'
+                 yobs = Ximp[obs_rows, s]
+
+                 # Get 'X' for both observed and missing 's' column
+                 xobs = Ximp[np.ix_(obs_rows, s_prime)]
+                 xmis = Ximp[np.ix_(mis_rows, s_prime)]
+
+                 # 6. Fit a random forest over observed and predict the missing
+                 if self.cat_vars_ is not None and s in self.cat_vars_:
+                     rf_classifier.fit(X=xobs, y=yobs)
+                     # 7. predict ymis(s) using xmis(x)
+                     ymis = rf_classifier.predict(xmis)
+                     # 8. update imputed matrix using predicted matrix ymis(s)
+                     Ximp[mis_rows, s] = ymis
+                 else:
+                     rf_regressor.fit(X=xobs, y=yobs)
+                     # 7. predict ymis(s) using xmis(x)
+                     ymis = rf_regressor.predict(xmis)
+                     # 8. update imputed matrix using predicted matrix ymis(s)
+                     Ximp[mis_rows, s] = ymis
+
+             # 9. Update gamma (stopping criterion)
+             if self.cat_vars_ is not None:
+                 gamma_newcat = np.sum(
+                     (Ximp[:, self.cat_vars_] != Ximp_old[:, self.cat_vars_])) / n_catmissing
+             if self.num_vars_ is not None:
+                 gamma_new = np.sum((Ximp[:, self.num_vars_] - Ximp_old[:, self.num_vars_]) ** 2) / np.sum((Ximp[:, self.num_vars_]) ** 2)
+
+             print("Iteration:", self.iter_count_)
+             self.iter_count_ += 1
+
+         # The loop exits after the difference first increases, so the matrix
+         # from the previous round is the one that is returned.
+         return Ximp_old
+
+     def fit(self, X, y=None, cat_vars=None):
+         """Fit the imputer on X.
+
+         Parameters
+         ----------
+         X : {array-like}, shape (n_samples, n_features)
+             Input data, where ``n_samples`` is the number of samples and
+             ``n_features`` is the number of features.
+
+         cat_vars : int or array of ints, optional (default = None)
+             An int or an array containing column indices of categorical
+             variable(s)/feature(s) present in the dataset X.
+             ``None`` if there are no categorical variables in the dataset.
+
+         Returns
+         -------
+         self : object
+             Returns self.
+         """
+
+         # Check data integrity and calling arguments
+         force_all_finite = False if self.missing_values in ["NaN",
+                                                             np.nan] else True
+
+         X = check_array(X, accept_sparse=False, dtype=np.float64,
+                         force_all_finite=force_all_finite, copy=self.copy)
+
+         # Check for +/- inf
+         if np.any(np.isinf(X)):
+             raise ValueError("+/- inf values are not supported.")
+
+         # Check if any column has all missing
+         mask = _get_mask(X, self.missing_values)
+         if np.any(mask.sum(axis=0) >= (X.shape[0])):
+             raise ValueError("One or more columns have all rows missing.")
+
+         # Check cat_vars type and convert if necessary
+         if cat_vars is not None:
+             if type(cat_vars) == int:
+                 cat_vars = [cat_vars]
+             elif type(cat_vars) == list or type(cat_vars) == np.ndarray:
+                 if np.array(cat_vars).dtype != int:
+                     raise ValueError(
+                         "cat_vars needs to be either an int or an array "
+                         "of ints.")
+             else:
+                 raise ValueError("cat_vars needs to be either an int or an array "
+                                  "of ints.")
+
+         # Identify numerical variables
+         num_vars = np.setdiff1d(np.arange(X.shape[1]), cat_vars)
+         num_vars = num_vars if len(num_vars) > 0 else None
+
+         # First replace missing values with NaN if it is something else
+         if self.missing_values not in ['NaN', np.nan]:
+             X[np.where(X == self.missing_values)] = np.nan
+
+         # Now, make initial guess for missing values
+         col_means = np.nanmean(X[:, num_vars], axis=0) if num_vars is not None else None
+         col_modes = mode(
+             X[:, cat_vars], axis=0, nan_policy='omit')[0] if cat_vars is not \
+             None else None
+
+         self.cat_vars_ = cat_vars
+         self.num_vars_ = num_vars
+         self.statistics_ = {"col_means": col_means, "col_modes": col_modes}
+
+         return self
+
+     def transform(self, X):
+         """Impute all missing values in X.
+
+         Parameters
+         ----------
+         X : {array-like}, shape = [n_samples, n_features]
+             The input data to complete.
+
+         Returns
+         -------
+         X : {array-like}, shape = [n_samples, n_features]
+             The imputed dataset.
+         """
+         # Confirm whether fit() has been called
+         check_is_fitted(self, ["cat_vars_", "num_vars_", "statistics_"])
+
+         # Check data integrity
+         force_all_finite = False if self.missing_values in ["NaN",
+                                                             np.nan] else True
+         X = check_array(X, accept_sparse=False, dtype=np.float64,
+                         force_all_finite=force_all_finite, copy=self.copy)
+
+         # Check for +/- inf
+         if np.any(np.isinf(X)):
+             raise ValueError("+/- inf values are not supported.")
+
+         # Check if any column has all missing
+         mask = _get_mask(X, self.missing_values)
+         if np.any(mask.sum(axis=0) >= (X.shape[0])):
+             raise ValueError("One or more columns have all rows missing.")
+
+         # Get fitted X col count and ensure correct dimension
+         n_cols_fit_X = (0 if self.num_vars_ is None else len(self.num_vars_)) \
+             + (0 if self.cat_vars_ is None else len(self.cat_vars_))
+         _, n_cols_X = X.shape
+
+         if n_cols_X != n_cols_fit_X:
+             raise ValueError("Incompatible dimension between the fitted "
+                              "dataset and the one to be transformed.")
+
+         # Check if anything is actually missing and if not return original X
+         mask = _get_mask(X, self.missing_values)
+         if not mask.sum() > 0:
+             warnings.warn("No missing value located; returning original "
+                           "dataset.")
+             return X
+
+         # row_total_missing = mask.sum(axis=1)
+         # if not np.any(row_total_missing):
+         #     return X
+
+         # Call missForest function to impute missing
+         X = self._miss_forest(X, mask)
+
+         # Return imputed dataset
+         return X
+
+     def fit_transform(self, X, y=None, **fit_params):
+         """Fit MissForest and impute all missing values in X.
+
+         Parameters
+         ----------
+         X : {array-like}, shape (n_samples, n_features)
+             Input data, where ``n_samples`` is the number of samples and
+             ``n_features`` is the number of features.
+
+         Returns
+         -------
+         X : {array-like}, shape (n_samples, n_features)
+             Returns imputed dataset.
+         """
+         return self.fit(X, **fit_params).transform(X)
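
For orientation, here is a minimal sketch of mixed-type imputation with the class added above. The array values and the choice of column 2 as the categorical column are illustrative only, not taken from the package:

    import numpy as np
    from missingpy import MissForest

    # Columns 0-1 are numerical; column 2 holds integer-coded categories.
    X = np.array([[1.0,    2.0,    0.0],
                  [3.0,    np.nan, 1.0],
                  [np.nan, 6.0,    0.0],
                  [8.0,    8.0,    np.nan]])

    imputer = MissForest(random_state=1337)
    # cat_vars is forwarded through fit_transform() to fit(); column 2 is
    # then imputed with the RandomForestClassifier, columns 0-1 with the
    # RandomForestRegressor.
    X_imputed = imputer.fit_transform(X, cat_vars=[2])

After fit(), the initial guesses are kept in imputer.statistics_, a dict with keys "col_means" (numerical columns) and "col_modes" (categorical columns).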
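
The stopping rule described in the class docstring can be checked by hand. A sketch of the two "difference" quantities compared between successive imputation rounds; the matrices and n_cat_missing below are made up for illustration:

    import numpy as np

    # Two successive imputations of an all-numerical block.
    X_old = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    X_new = np.array([[1.1, 2.0], [3.0, 4.2], [5.0, 6.0]])

    # Numerical criterion: squared change between rounds, scaled by the
    # magnitude of the new imputation.
    gamma_num = np.sum((X_new - X_old) ** 2) / np.sum(X_new ** 2)

    # Categorical criterion: fraction of the originally missing categorical
    # cells whose integer-coded label changed between rounds.
    C_old = np.array([0.0, 1.0, 1.0])
    C_new = np.array([0.0, 1.0, 0.0])
    n_cat_missing = 2  # assume two of these cells were missing originally
    gamma_cat = np.sum(C_new != C_old) / n_cat_missing

    # _miss_forest() iterates until both quantities increase for the first
    # time (or max_iter is reached), then returns the previous round's matrix.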
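
Because fit() and transform() are separate steps, statistics learned on one dataset can seed imputation of another with the same number of columns. A sketch with synthetic data, again assuming the wheel is installed:

    import numpy as np
    from missingpy import MissForest

    rng = np.random.RandomState(0)
    X_train = rng.rand(20, 3)
    X_train[rng.rand(20, 3) < 0.1] = np.nan   # knock out ~10% of cells
    X_test = rng.rand(5, 3)
    X_test[0, 1] = np.nan

    imputer = MissForest(random_state=0)
    imputer.fit(X_train)            # all columns treated as numerical
    X_test_imputed = imputer.transform(X_test)

Note that transform() re-runs the iterative forest fitting on the new matrix; only the column means/modes stored in statistics_ carry over from fit().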