cpgtools-2.0.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. cpgmodule/BED.py +441 -0
  2. cpgmodule/MI.py +193 -0
  3. cpgmodule/__init__.py +0 -0
  4. cpgmodule/_version.py +1 -0
  5. cpgmodule/cgID.py +866897 -0
  6. cpgmodule/data/AltumAge_cpg.pkl +0 -0
  7. cpgmodule/data/AltumAge_multi_platform_cpgs.pkl +0 -0
  8. cpgmodule/data/AltumAge_scaler.pkl +0 -0
  9. cpgmodule/data/GA_Bohlin.pkl +0 -0
  10. cpgmodule/data/GA_Haftorn.pkl +0 -0
  11. cpgmodule/data/GA_Knight.pkl +0 -0
  12. cpgmodule/data/GA_Lee_CPC.pkl +0 -0
  13. cpgmodule/data/GA_Lee_RPC.pkl +0 -0
  14. cpgmodule/data/GA_Lee_refined_RPC.pkl +0 -0
  15. cpgmodule/data/GA_Mayne.pkl +0 -0
  16. cpgmodule/data/Hannum.pkl +0 -0
  17. cpgmodule/data/Horvath_2013.pkl +0 -0
  18. cpgmodule/data/Horvath_2018.pkl +0 -0
  19. cpgmodule/data/Levine.pkl +0 -0
  20. cpgmodule/data/Lu_DNAmTL.pkl +0 -0
  21. cpgmodule/data/Ped_McEwen.pkl +0 -0
  22. cpgmodule/data/Ped_Wu.pkl +0 -0
  23. cpgmodule/data/Zhang_BLUP.pkl +0 -0
  24. cpgmodule/data/Zhang_EN.pkl +0 -0
  25. cpgmodule/data/__init__.py +0 -0
  26. cpgmodule/extend_bed.py +147 -0
  27. cpgmodule/imotif.py +348 -0
  28. cpgmodule/ireader.py +28 -0
  29. cpgmodule/methylClock.py +53 -0
  30. cpgmodule/padjust.py +58 -0
  31. cpgmodule/region2gene.py +170 -0
  32. cpgmodule/utils.py +642 -0
  33. cpgtools-2.0.5.data/scripts/CpG_aggregation.py +238 -0
  34. cpgtools-2.0.5.data/scripts/CpG_anno_position.py +156 -0
  35. cpgtools-2.0.5.data/scripts/CpG_anno_probe.py +112 -0
  36. cpgtools-2.0.5.data/scripts/CpG_density_gene_centered.py +107 -0
  37. cpgtools-2.0.5.data/scripts/CpG_distrb_chrom.py +154 -0
  38. cpgtools-2.0.5.data/scripts/CpG_distrb_gene_centered.py +193 -0
  39. cpgtools-2.0.5.data/scripts/CpG_distrb_region.py +146 -0
  40. cpgtools-2.0.5.data/scripts/CpG_logo.py +134 -0
  41. cpgtools-2.0.5.data/scripts/CpG_to_gene.py +141 -0
  42. cpgtools-2.0.5.data/scripts/beta_PCA.py +188 -0
  43. cpgtools-2.0.5.data/scripts/beta_UMAP.py +181 -0
  44. cpgtools-2.0.5.data/scripts/beta_combat.py +174 -0
  45. cpgtools-2.0.5.data/scripts/beta_jitter_plot.py +107 -0
  46. cpgtools-2.0.5.data/scripts/beta_m_conversion.py +105 -0
  47. cpgtools-2.0.5.data/scripts/beta_profile_gene_centered.py +165 -0
  48. cpgtools-2.0.5.data/scripts/beta_profile_region.py +152 -0
  49. cpgtools-2.0.5.data/scripts/beta_selectNBest.py +116 -0
  50. cpgtools-2.0.5.data/scripts/beta_stacked_barplot.py +119 -0
  51. cpgtools-2.0.5.data/scripts/beta_stats.py +101 -0
  52. cpgtools-2.0.5.data/scripts/beta_tSNE.py +179 -0
  53. cpgtools-2.0.5.data/scripts/beta_topN.py +99 -0
  54. cpgtools-2.0.5.data/scripts/beta_trichotmize.py +190 -0
  55. cpgtools-2.0.5.data/scripts/dmc_Bayes.py +442 -0
  56. cpgtools-2.0.5.data/scripts/dmc_bb.py +221 -0
  57. cpgtools-2.0.5.data/scripts/dmc_fisher.py +161 -0
  58. cpgtools-2.0.5.data/scripts/dmc_glm.py +191 -0
  59. cpgtools-2.0.5.data/scripts/dmc_logit.py +226 -0
  60. cpgtools-2.0.5.data/scripts/dmc_nonparametric.py +176 -0
  61. cpgtools-2.0.5.data/scripts/dmc_ttest.py +222 -0
  62. cpgtools-2.0.5.data/scripts/predict_missing.py +673 -0
  63. cpgtools-2.0.5.data/scripts/predict_sex.py +126 -0
  64. cpgtools-2.0.5.dist-info/METADATA +59 -0
  65. cpgtools-2.0.5.dist-info/RECORD +104 -0
  66. cpgtools-2.0.5.dist-info/WHEEL +5 -0
  67. cpgtools-2.0.5.dist-info/licenses/LICENSE.txt +19 -0
  68. cpgtools-2.0.5.dist-info/top_level.txt +5 -0
  69. impyute/__init__.py +3 -0
  70. impyute/contrib/__init__.py +7 -0
  71. impyute/contrib/compare.py +69 -0
  72. impyute/contrib/count_missing.py +30 -0
  73. impyute/contrib/describe.py +63 -0
  74. impyute/cs/__init__.py +11 -0
  75. impyute/cs/buck_iterative.py +82 -0
  76. impyute/cs/central_tendency.py +84 -0
  77. impyute/cs/em.py +52 -0
  78. impyute/cs/fast_knn.py +130 -0
  79. impyute/cs/random.py +27 -0
  80. impyute/dataset/__init__.py +6 -0
  81. impyute/dataset/base.py +137 -0
  82. impyute/dataset/corrupt.py +55 -0
  83. impyute/deletion/__init__.py +5 -0
  84. impyute/deletion/complete_case.py +21 -0
  85. impyute/ops/__init__.py +12 -0
  86. impyute/ops/error.py +9 -0
  87. impyute/ops/inverse_distance_weighting.py +31 -0
  88. impyute/ops/matrix.py +47 -0
  89. impyute/ops/testing.py +20 -0
  90. impyute/ops/util.py +96 -0
  91. impyute/ops/wrapper.py +179 -0
  92. impyute/ts/__init__.py +6 -0
  93. impyute/ts/locf.py +57 -0
  94. impyute/ts/moving_window.py +128 -0
  95. impyutelib.py +890 -0
  96. missingpy/__init__.py +4 -0
  97. missingpy/knnimpute.py +328 -0
  98. missingpy/missforest.py +556 -0
  99. missingpy/pairwise_external.py +315 -0
  100. missingpy/tests/__init__.py +0 -0
  101. missingpy/tests/test_knnimpute.py +605 -0
  102. missingpy/tests/test_missforest.py +409 -0
  103. missingpy/utils.py +124 -0
  104. misspylib.py +565 -0
misspylib.py ADDED
@@ -0,0 +1,565 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Thu Oct 10 20:09:31 2024
+
+ @author: m102324
+ """
+
+ import warnings
+ import numpy as np
+ from scipy.stats import mode
+
+ from sklearn.base import BaseEstimator, TransformerMixin
+ from sklearn.utils.validation import check_is_fitted, check_array
+ from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
+
+
+
+ __all__ = [
+     'MissForest',
+ ]
+
+ def _get_mask(X, value_to_mask):
+     """Compute the boolean mask X == missing_values."""
+     if value_to_mask == "NaN" or np.isnan(value_to_mask):
+         return np.isnan(X)
+     else:
+         return X == value_to_mask
+
+ class MissForest(BaseEstimator, TransformerMixin):
+     """Missing value imputation using Random Forests.
+
+     MissForest imputes missing values using Random Forests in an iterative
+     fashion. By default, the imputer begins imputing missing values of the
+     column (which is expected to be a variable) with the smallest number of
+     missing values -- let's call this the candidate column.
+     The first step involves filling any missing values of the remaining,
+     non-candidate, columns with an initial guess, which is the column mean for
+     columns representing numerical variables and the column mode for columns
+     representing categorical variables. After that, the imputer fits a random
+     forest model with the candidate column as the outcome variable and the
+     remaining columns as the predictors over all rows where the candidate
+     column values are not missing.
+     After the fit, the missing rows of the candidate column are
+     imputed using the prediction from the fitted Random Forest. The
+     rows of the non-candidate columns act as the input data for the fitted
+     model.
+     Following this, the imputer moves on to the next candidate column with the
+     second smallest number of missing values from among the non-candidate
+     columns in the first round. The process repeats itself for each column
+     with a missing value, possibly over multiple iterations or epochs for
+     each column, until the stopping criterion is met.
+     The stopping criterion is governed by the "difference" between the imputed
+     arrays over successive iterations. For numerical variables (num_vars_),
+     the difference is defined as follows:
+
+         sum((X_new[:, num_vars_] - X_old[:, num_vars_]) ** 2) /
+         sum((X_new[:, num_vars_]) ** 2)
+
+     For categorical variables(cat_vars_), the difference is defined as follows:
+
+         sum(X_new[:, cat_vars_] != X_old[:, cat_vars_])) / n_cat_missing
+
+     where X_new is the newly imputed array, X_old is the array imputed in the
+     previous round, n_cat_missing is the total number of categorical
+     values that are missing, and the sum() is performed both across rows
+     and columns. Following [1], the stopping criterion is considered to have
+     been met when difference between X_new and X_old increases for the first
+     time for both types of variables (if available).
+
+     Parameters
+     ----------
+     NOTE: Most parameter definitions below are taken verbatim from the
+     Scikit-Learn documentation at [2] and [3].
+
+     max_iter : int, optional (default = 10)
+         The maximum iterations of the imputation process. Each column with a
+         missing value is imputed exactly once in a given iteration.
+
+     decreasing : boolean, optional (default = False)
+         If set to True, columns are sorted according to decreasing number of
+         missing values. In other words, imputation will move from imputing
+         columns with the largest number of missing values to columns with
+         fewest number of missing values.
+
+     missing_values : np.nan, integer, optional (default = np.nan)
+         The placeholder for the missing values. All occurrences of
+         `missing_values` will be imputed.
+
+     copy : boolean, optional (default = True)
+         If True, a copy of X will be created. If False, imputation will
+         be done in-place whenever possible.
+
+     criterion : tuple, optional (default = ('squared_error', 'gini'))
+         The function to measure the quality of a split.The first element of
+         the tuple is for the Random Forest Regressor (for imputing numerical
+         variables) while the second element is for the Random Forest
+         Classifier (for imputing categorical variables).
+
+     n_estimators : integer, optional (default=100)
+         The number of trees in the forest.
+
+     max_depth : integer or None, optional (default=None)
+         The maximum depth of the tree. If None, then nodes are expanded until
+         all leaves are pure or until all leaves contain less than
+         min_samples_split samples.
+
+     min_samples_split : int, float, optional (default=2)
+         The minimum number of samples required to split an internal node:
+         - If int, then consider `min_samples_split` as the minimum number.
+         - If float, then `min_samples_split` is a fraction and
+           `ceil(min_samples_split * n_samples)` are the minimum
+           number of samples for each split.
+
+     min_samples_leaf : int, float, optional (default=1)
+         The minimum number of samples required to be at a leaf node.
+         A split point at any depth will only be considered if it leaves at
+         least ``min_samples_leaf`` training samples in each of the left and
+         right branches. This may have the effect of smoothing the model,
+         especially in regression.
+         - If int, then consider `min_samples_leaf` as the minimum number.
+         - If float, then `min_samples_leaf` is a fraction and
+           `ceil(min_samples_leaf * n_samples)` are the minimum
+           number of samples for each node.
+
+     min_weight_fraction_leaf : float, optional (default=0.)
+         The minimum weighted fraction of the sum total of weights (of all
+         the input samples) required to be at a leaf node. Samples have
+         equal weight when sample_weight is not provided.
+
+     max_features : int, float, string or None, optional (default="auto")
+         The number of features to consider when looking for the best split:
+         - If int, then consider `max_features` features at each split.
+         - If float, then `max_features` is a fraction and
+           `int(max_features * n_features)` features are considered at each
+           split.
+         - If "auto", then `max_features=sqrt(n_features)`.
+         - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
+         - If "log2", then `max_features=log2(n_features)`.
+         - If None, then `max_features=n_features`.
+         Note: the search for a split does not stop until at least one
+         valid partition of the node samples is found, even if it requires to
+         effectively inspect more than ``max_features`` features.
+
+     max_leaf_nodes : int or None, optional (default=None)
+         Grow trees with ``max_leaf_nodes`` in best-first fashion.
+         Best nodes are defined as relative reduction in impurity.
+         If None then unlimited number of leaf nodes.
+
+     min_impurity_decrease : float, optional (default=0.)
+         A node will be split if this split induces a decrease of the impurity
+         greater than or equal to this value.
+         The weighted impurity decrease equation is the following::
+             N_t / N * (impurity - N_t_R / N_t * right_impurity
+                         - N_t_L / N_t * left_impurity)
+         where ``N`` is the total number of samples, ``N_t`` is the number of
+         samples at the current node, ``N_t_L`` is the number of samples in the
+         left child, and ``N_t_R`` is the number of samples in the right child.
+         ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
+         if ``sample_weight`` is passed.
+
+     bootstrap : boolean, optional (default=True)
+         Whether bootstrap samples are used when building trees.
+
+     oob_score : bool (default=False)
+         Whether to use out-of-bag samples to estimate
+         the generalization accuracy.
+
+     n_jobs : int or None, optional (default=None)
+         The number of jobs to run in parallel for both `fit` and `predict`.
+         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+         for more details.
+
+     random_state : int, RandomState instance or None, optional (default=None)
+         If int, random_state is the seed used by the random number generator;
+         If RandomState instance, random_state is the random number generator;
+         If None, the random number generator is the RandomState instance used
+         by `np.random`.
+
+     verbose : int, optional (default=0)
+         Controls the verbosity when fitting and predicting.
+
+     warm_start : bool, optional (default=False)
+         When set to ``True``, reuse the solution of the previous call to fit
+         and add more estimators to the ensemble, otherwise, just fit a whole
+         new forest. See :term:`the Glossary <warm_start>`.
+
+     class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
+     None, optional (default=None)
+         Weights associated with classes in the form ``{class_label: weight}``.
+         If not given, all classes are supposed to have weight one. For
+         multi-output problems, a list of dicts can be provided in the same
+         order as the columns of y.
+         Note that for multioutput (including multilabel) weights should be
+         defined for each class of every column in its own dict. For example,
+         for four-class multilabel classification weights should be
+         [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
+         [{1:1}, {2:5}, {3:1}, {4:1}].
+         The "balanced" mode uses the values of y to automatically adjust
+         weights inversely proportional to class frequencies in the input data
+         as ``n_samples / (n_classes * np.bincount(y))``
+         The "balanced_subsample" mode is the same as "balanced" except that
+         weights are computed based on the bootstrap sample for every tree
+         grown.
+         For multi-output, the weights of each column of y will be multiplied.
+         Note that these weights will be multiplied with sample_weight (passed
+         through the fit method) if sample_weight is specified.
+         NOTE: This parameter is only applicable for Random Forest Classifier
+         objects (i.e., for categorical variables).
+
+     Attributes
+     ----------
+     statistics_ : Dictionary of length two
+         The first element is an array with the mean of each numerical feature
+         being imputed while the second element is an array of modes of
+         categorical features being imputed (if available, otherwise it
+         will be None).
+
+     References
+     ----------
+     * [1] Stekhoven, Daniel J., and Peter Bühlmann. "MissForest—non-parametric
+       missing value imputation for mixed-type data." Bioinformatics 28.1
+       (2011): 112-118.
+     * [2] https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.
+       RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor
+     * [3] https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.
+       RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier
+
+     Examples
+     --------
+     >>> from missingpy import MissForest
+     >>> nan = float("NaN")
+     >>> X = [[1, 2, nan], [3, 4, 3], [nan, 6, 5], [8, 8, 7]]
+     >>> imputer = MissForest(random_state=1337)
+     >>> imputer.fit_transform(X)
+     Iteration: 0
+     Iteration: 1
+     Iteration: 2
+     array([[1. , 2. , 3.92 ],
+            [3. , 4. , 3. ],
+            [2.71, 6. , 5. ],
+            [8. , 8. , 7. ]])
+     """
+
+     def __init__(self, max_iter=10, decreasing=False, missing_values=np.nan,
+                  copy=True, n_estimators=100, criterion=('squared_error', 'gini'),
+                  max_depth=None, min_samples_split=2, min_samples_leaf=1,
+                  min_weight_fraction_leaf=0.0, max_features='auto',
+                  max_leaf_nodes=None, min_impurity_decrease=0.0,
+                  bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,
+                  verbose=0, warm_start=False, class_weight=None):
+
+         self.max_iter = max_iter
+         self.decreasing = decreasing
+         self.missing_values = missing_values
+         self.copy = copy
+         self.n_estimators = n_estimators
+         self.criterion = criterion
+         self.max_depth = max_depth
+         self.min_samples_split = min_samples_split
+         self.min_samples_leaf = min_samples_leaf
+         self.min_weight_fraction_leaf = min_weight_fraction_leaf
+         self.max_features = max_features
+         self.max_leaf_nodes = max_leaf_nodes
+         self.min_impurity_decrease = min_impurity_decrease
+         self.bootstrap = bootstrap
+         self.oob_score = oob_score
+         self.n_jobs = n_jobs
+         self.random_state = random_state
+         self.verbose = verbose
+         self.warm_start = warm_start
+         self.class_weight = class_weight
+
+     def _miss_forest(self, Ximp, mask):
+         """The missForest algorithm"""
+
+         # Count missing per column
+         col_missing_count = mask.sum(axis=0)
+
+         # Get col and row indices for missing
+         missing_rows, missing_cols = np.where(mask)
+
+         if self.num_vars_ is not None:
+             # Only keep indices for numerical vars
+             keep_idx_num = np.in1d(missing_cols, self.num_vars_)
+             missing_num_rows = missing_rows[keep_idx_num]
+             missing_num_cols = missing_cols[keep_idx_num]
+
+             # Make initial guess for missing values
+             col_means = np.full(Ximp.shape[1], fill_value=np.nan)
+             col_means[self.num_vars_] = self.statistics_.get('col_means')
+             Ximp[missing_num_rows, missing_num_cols] = np.take(
+                 col_means, missing_num_cols)
+
+             # Reg criterion
+             reg_criterion = self.criterion if type(self.criterion) == str \
+                 else self.criterion[0]
+
+             # Instantiate regression model
+             rf_regressor = RandomForestRegressor(
+                 n_estimators=self.n_estimators,
+                 criterion=reg_criterion,
+                 max_depth=self.max_depth,
+                 min_samples_split=self.min_samples_split,
+                 min_samples_leaf=self.min_samples_leaf,
+                 min_weight_fraction_leaf=self.min_weight_fraction_leaf,
+                 max_features=self.max_features,
+                 max_leaf_nodes=self.max_leaf_nodes,
+                 min_impurity_decrease=self.min_impurity_decrease,
+                 bootstrap=self.bootstrap,
+                 oob_score=self.oob_score,
+                 n_jobs=self.n_jobs,
+                 random_state=self.random_state,
+                 verbose=self.verbose,
+                 warm_start=self.warm_start)
+
+         # If needed, repeat for categorical variables
+         if self.cat_vars_ is not None:
+             # Calculate total number of missing categorical values (used later)
+             n_catmissing = np.sum(mask[:, self.cat_vars_])
+
+             # Only keep indices for categorical vars
+             keep_idx_cat = np.in1d(missing_cols, self.cat_vars_)
+             missing_cat_rows = missing_rows[keep_idx_cat]
+             missing_cat_cols = missing_cols[keep_idx_cat]
+
+             # Make initial guess for missing values
+             col_modes = np.full(Ximp.shape[1], fill_value=np.nan)
+             col_modes[self.cat_vars_] = self.statistics_.get('col_modes')
+             Ximp[missing_cat_rows, missing_cat_cols] = np.take(col_modes, missing_cat_cols)
+
+             # Classfication criterion
+             clf_criterion = self.criterion if type(self.criterion) == str \
+                 else self.criterion[1]
+
+             # Instantiate classification model
+             rf_classifier = RandomForestClassifier(
+                 n_estimators=self.n_estimators,
+                 criterion=clf_criterion,
+                 max_depth=self.max_depth,
+                 min_samples_split=self.min_samples_split,
+                 min_samples_leaf=self.min_samples_leaf,
+                 min_weight_fraction_leaf=self.min_weight_fraction_leaf,
+                 max_features=self.max_features,
+                 max_leaf_nodes=self.max_leaf_nodes,
+                 min_impurity_decrease=self.min_impurity_decrease,
+                 bootstrap=self.bootstrap,
+                 oob_score=self.oob_score,
+                 n_jobs=self.n_jobs,
+                 random_state=self.random_state,
+                 verbose=self.verbose,
+                 warm_start=self.warm_start,
+                 class_weight=self.class_weight)
+
+         # 2. misscount_idx: sorted indices of cols in X based on missing count
+         misscount_idx = np.argsort(col_missing_count)
+         # Reverse order if decreasing is set to True
+         if self.decreasing is True:
+             misscount_idx = misscount_idx[::-1]
+
+         # 3. While new_gammas < old_gammas & self.iter_count_ < max_iter loop:
+         self.iter_count_ = 0
+         gamma_new = 0
+         gamma_old = np.inf
+         gamma_newcat = 0
+         gamma_oldcat = np.inf
+         col_index = np.arange(Ximp.shape[1])
+
+         while (
+                 gamma_new < gamma_old or gamma_newcat < gamma_oldcat) and \
+                 self.iter_count_ < self.max_iter:
+
+             # 4. store previously imputed matrix
+             Ximp_old = np.copy(Ximp)
+             if self.iter_count_ != 0:
+                 gamma_old = gamma_new
+                 gamma_oldcat = gamma_newcat
+             # 5. loop
+             for s in misscount_idx:
+                 # Column indices other than the one being imputed
+                 s_prime = np.delete(col_index, s)
+
+                 # Get indices of rows where 's' is observed and missing
+                 obs_rows = np.where(~mask[:, s])[0]
+                 mis_rows = np.where(mask[:, s])[0]
+
+                 # If no missing, then skip
+                 if len(mis_rows) == 0:
+                     continue
+
+                 # Get observed values of 's'
+                 yobs = Ximp[obs_rows, s]
+
+                 # Get 'X' for both observed and missing 's' column
+                 xobs = Ximp[np.ix_(obs_rows, s_prime)]
+                 xmis = Ximp[np.ix_(mis_rows, s_prime)]
+
+                 # 6. Fit a random forest over observed and predict the missing
+                 if self.cat_vars_ is not None and s in self.cat_vars_:
+                     rf_classifier.fit(X=xobs, y=yobs)
+                     # 7. predict ymis(s) using xmis(x)
+                     ymis = rf_classifier.predict(xmis)
+                     # 8. update imputed matrix using predicted matrix ymis(s)
+                     Ximp[mis_rows, s] = ymis
+                 else:
+                     rf_regressor.fit(X=xobs, y=yobs)
+                     # 7. predict ymis(s) using xmis(x)
+                     ymis = rf_regressor.predict(xmis)
+                     # 8. update imputed matrix using predicted matrix ymis(s)
+                     Ximp[mis_rows, s] = ymis
+
+             # 9. Update gamma (stopping criterion)
+             if self.cat_vars_ is not None:
+                 gamma_newcat = np.sum(
+                     (Ximp[:, self.cat_vars_] != Ximp_old[:, self.cat_vars_])) / n_catmissing
+             if self.num_vars_ is not None:
+                 gamma_new = np.sum((Ximp[:, self.num_vars_] - Ximp_old[:, self.num_vars_]) ** 2) / np.sum((Ximp[:, self.num_vars_]) ** 2)
+
+             print("Iteration:", self.iter_count_)
+             self.iter_count_ += 1
+
+         return Ximp_old
+
+     def fit(self, X, y=None, cat_vars=None):
+         """Fit the imputer on X.
+
+         Parameters
+         ----------
+         X : {array-like}, shape (n_samples, n_features)
+             Input data, where ``n_samples`` is the number of samples and
+             ``n_features`` is the number of features.
+
+         cat_vars : int or array of ints, optional (default = None)
+             An int or an array containing column indices of categorical
+             variable(s)/feature(s) present in the dataset X.
+             ``None`` if there are no categorical variables in the dataset.
+
+         Returns
+         -------
+         self : object
+             Returns self.
+         """
+
+         # Check data integrity and calling arguments
+         force_all_finite = False if self.missing_values in ["NaN",
+                                                             np.nan] else True
+
+         X = check_array(X, accept_sparse=False, dtype=np.float64,
+                         force_all_finite=force_all_finite, copy=self.copy)
+
+         # Check for +/- inf
+         if np.any(np.isinf(X)):
+             raise ValueError("+/- inf values are not supported.")
+
+         # Check if any column has all missing
+         mask = _get_mask(X, self.missing_values)
+         if np.any(mask.sum(axis=0) >= (X.shape[0])):
+             raise ValueError("One or more columns have all rows missing.")
+
+         # Check cat_vars type and convert if necessary
+         if cat_vars is not None:
+             if type(cat_vars) == int:
+                 cat_vars = [cat_vars]
+             elif type(cat_vars) == list or type(cat_vars) == np.ndarray:
+                 if np.array(cat_vars).dtype != int:
+                     raise ValueError(
+                         "cat_vars needs to be either an int or an array "
+                         "of ints.")
+             else:
+                 raise ValueError("cat_vars needs to be either an int or an array "
+                                  "of ints.")
+
+         # Identify numerical variables
+         num_vars = np.setdiff1d(np.arange(X.shape[1]), cat_vars)
+         num_vars = num_vars if len(num_vars) > 0 else None
+
+         # First replace missing values with NaN if it is something else
+         if self.missing_values not in ['NaN', np.nan]:
+             X[np.where(X == self.missing_values)] = np.nan
+
+         # Now, make initial guess for missing values
+         col_means = np.nanmean(X[:, num_vars], axis=0) if num_vars is not None else None
+         col_modes = mode(
+             X[:, cat_vars], axis=0, nan_policy='omit')[0] if cat_vars is not \
+             None else None
+
+         self.cat_vars_ = cat_vars
+         self.num_vars_ = num_vars
+         self.statistics_ = {"col_means": col_means, "col_modes": col_modes}
+
+         return self
+
+     def transform(self, X):
+         """Impute all missing values in X.
+
+         Parameters
+         ----------
+         X : {array-like}, shape = [n_samples, n_features]
+             The input data to complete.
+
+         Returns
+         -------
+         X : {array-like}, shape = [n_samples, n_features]
+             The imputed dataset.
+         """
+         # Confirm whether fit() has been called
+         check_is_fitted(self, ["cat_vars_", "num_vars_", "statistics_"])
+
+         # Check data integrity
+         force_all_finite = False if self.missing_values in ["NaN",
+                                                             np.nan] else True
+         X = check_array(X, accept_sparse=False, dtype=np.float64,
+                         force_all_finite=force_all_finite, copy=self.copy)
+
+         # Check for +/- inf
+         if np.any(np.isinf(X)):
+             raise ValueError("+/- inf values are not supported.")
+
+         # Check if any column has all missing
+         mask = _get_mask(X, self.missing_values)
+         if np.any(mask.sum(axis=0) >= (X.shape[0])):
+             raise ValueError("One or more columns have all rows missing.")
+
+         # Get fitted X col count and ensure correct dimension
+         n_cols_fit_X = (0 if self.num_vars_ is None else len(self.num_vars_)) \
+             + (0 if self.cat_vars_ is None else len(self.cat_vars_))
+         _, n_cols_X = X.shape
+
+         if n_cols_X != n_cols_fit_X:
+             raise ValueError("Incompatible dimension between the fitted "
+                              "dataset and the one to be transformed.")
+
+         # Check if anything is actually missing and if not return original X
+         mask = _get_mask(X, self.missing_values)
+         if not mask.sum() > 0:
+             warnings.warn("No missing value located; returning original "
+                           "dataset.")
+             return X
+
+         # row_total_missing = mask.sum(axis=1)
+         # if not np.any(row_total_missing):
+         #     return X
+
+         # Call missForest function to impute missing
+         X = self._miss_forest(X, mask)
+
+         # Return imputed dataset
+         return X
+
+     def fit_transform(self, X, y=None, **fit_params):
+         """Fit MissForest and impute all missing values in X.
+
+         Parameters
+         ----------
+         X : {array-like}, shape (n_samples, n_features)
+             Input data, where ``n_samples`` is the number of samples and
+             ``n_features`` is the number of features.
+
+         Returns
+         -------
+         X : {array-like}, shape (n_samples, n_features)
+             Returns imputed dataset.
+         """
+         return self.fit(X, **fit_params).transform(X)
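
For reference, a minimal usage sketch of the MissForest class added above, for a matrix that mixes numerical and categorical columns. This is an assumption-laden illustration, not part of the released diff: it presumes the installed wheel exposes the top-level misspylib module, the toy matrix is invented, and max_features='sqrt' is passed explicitly because the 'auto' default shown in the signature is no longer accepted by recent scikit-learn releases.

>>> import numpy as np
>>> from misspylib import MissForest
>>> # columns 0-1 numerical, column 2 categorical (coded 0/1); values are illustrative only
>>> X = np.array([[1.0, 2.0, 0], [3.0, np.nan, 1], [np.nan, 6.0, 0], [8.0, 8.0, np.nan]])
>>> imputer = MissForest(max_features='sqrt', random_state=1337)
>>> X_imputed = imputer.fit_transform(X, cat_vars=[2])  # cat_vars reaches fit() via **fit_params

cat_vars is the only fit-time argument; every other setting goes through the constructor, mirroring the Examples block in the class docstring.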