isotree 0.2.2 → 0.3.1

Files changed (152)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +12 -1
  3. data/LICENSE.txt +2 -2
  4. data/README.md +32 -14
  5. data/ext/isotree/ext.cpp +144 -31
  6. data/ext/isotree/extconf.rb +7 -7
  7. data/lib/isotree/isolation_forest.rb +110 -30
  8. data/lib/isotree/version.rb +1 -1
  9. data/vendor/isotree/LICENSE +1 -1
  10. data/vendor/isotree/README.md +165 -27
  11. data/vendor/isotree/include/isotree.hpp +2116 -0
  12. data/vendor/isotree/include/isotree_oop.hpp +394 -0
  13. data/vendor/isotree/inst/COPYRIGHTS +132 -0
  14. data/vendor/isotree/src/RcppExports.cpp +594 -57
  15. data/vendor/isotree/src/Rwrapper.cpp +2452 -304
  16. data/vendor/isotree/src/c_interface.cpp +958 -0
  17. data/vendor/isotree/src/crit.hpp +4236 -0
  18. data/vendor/isotree/src/digamma.hpp +184 -0
  19. data/vendor/isotree/src/dist.hpp +1886 -0
  20. data/vendor/isotree/src/exp_depth_table.hpp +134 -0
  21. data/vendor/isotree/src/extended.hpp +1444 -0
  22. data/vendor/isotree/src/external_facing_generic.hpp +399 -0
  23. data/vendor/isotree/src/fit_model.hpp +2401 -0
  24. data/vendor/isotree/src/{dealloc.cpp → headers_joined.hpp} +38 -22
  25. data/vendor/isotree/src/helpers_iforest.hpp +814 -0
  26. data/vendor/isotree/src/{impute.cpp → impute.hpp} +382 -123
  27. data/vendor/isotree/src/indexer.cpp +515 -0
  28. data/vendor/isotree/src/instantiate_template_headers.cpp +118 -0
  29. data/vendor/isotree/src/instantiate_template_headers.hpp +240 -0
  30. data/vendor/isotree/src/isoforest.hpp +1659 -0
  31. data/vendor/isotree/src/isotree.hpp +1815 -394
  32. data/vendor/isotree/src/isotree_exportable.hpp +99 -0
  33. data/vendor/isotree/src/merge_models.cpp +159 -16
  34. data/vendor/isotree/src/mult.hpp +1321 -0
  35. data/vendor/isotree/src/oop_interface.cpp +844 -0
  36. data/vendor/isotree/src/oop_interface.hpp +278 -0
  37. data/vendor/isotree/src/other_helpers.hpp +219 -0
  38. data/vendor/isotree/src/predict.hpp +1932 -0
  39. data/vendor/isotree/src/python_helpers.hpp +114 -0
  40. data/vendor/isotree/src/ref_indexer.hpp +154 -0
  41. data/vendor/isotree/src/robinmap/LICENSE +21 -0
  42. data/vendor/isotree/src/robinmap/README.md +483 -0
  43. data/vendor/isotree/src/robinmap/include/tsl/robin_growth_policy.h +406 -0
  44. data/vendor/isotree/src/robinmap/include/tsl/robin_hash.h +1639 -0
  45. data/vendor/isotree/src/robinmap/include/tsl/robin_map.h +807 -0
  46. data/vendor/isotree/src/robinmap/include/tsl/robin_set.h +660 -0
  47. data/vendor/isotree/src/serialize.cpp +4316 -139
  48. data/vendor/isotree/src/sql.cpp +143 -61
  49. data/vendor/isotree/src/subset_models.cpp +174 -0
  50. data/vendor/isotree/src/utils.hpp +3786 -0
  51. data/vendor/isotree/src/xoshiro.hpp +463 -0
  52. data/vendor/isotree/src/ziggurat.hpp +405 -0
  53. metadata +40 -105
  54. data/vendor/cereal/LICENSE +0 -24
  55. data/vendor/cereal/README.md +0 -85
  56. data/vendor/cereal/include/cereal/access.hpp +0 -351
  57. data/vendor/cereal/include/cereal/archives/adapters.hpp +0 -163
  58. data/vendor/cereal/include/cereal/archives/binary.hpp +0 -169
  59. data/vendor/cereal/include/cereal/archives/json.hpp +0 -1019
  60. data/vendor/cereal/include/cereal/archives/portable_binary.hpp +0 -334
  61. data/vendor/cereal/include/cereal/archives/xml.hpp +0 -956
  62. data/vendor/cereal/include/cereal/cereal.hpp +0 -1089
  63. data/vendor/cereal/include/cereal/details/helpers.hpp +0 -422
  64. data/vendor/cereal/include/cereal/details/polymorphic_impl.hpp +0 -796
  65. data/vendor/cereal/include/cereal/details/polymorphic_impl_fwd.hpp +0 -65
  66. data/vendor/cereal/include/cereal/details/static_object.hpp +0 -127
  67. data/vendor/cereal/include/cereal/details/traits.hpp +0 -1411
  68. data/vendor/cereal/include/cereal/details/util.hpp +0 -84
  69. data/vendor/cereal/include/cereal/external/base64.hpp +0 -134
  70. data/vendor/cereal/include/cereal/external/rapidjson/allocators.h +0 -284
  71. data/vendor/cereal/include/cereal/external/rapidjson/cursorstreamwrapper.h +0 -78
  72. data/vendor/cereal/include/cereal/external/rapidjson/document.h +0 -2652
  73. data/vendor/cereal/include/cereal/external/rapidjson/encodedstream.h +0 -299
  74. data/vendor/cereal/include/cereal/external/rapidjson/encodings.h +0 -716
  75. data/vendor/cereal/include/cereal/external/rapidjson/error/en.h +0 -74
  76. data/vendor/cereal/include/cereal/external/rapidjson/error/error.h +0 -161
  77. data/vendor/cereal/include/cereal/external/rapidjson/filereadstream.h +0 -99
  78. data/vendor/cereal/include/cereal/external/rapidjson/filewritestream.h +0 -104
  79. data/vendor/cereal/include/cereal/external/rapidjson/fwd.h +0 -151
  80. data/vendor/cereal/include/cereal/external/rapidjson/internal/biginteger.h +0 -290
  81. data/vendor/cereal/include/cereal/external/rapidjson/internal/diyfp.h +0 -271
  82. data/vendor/cereal/include/cereal/external/rapidjson/internal/dtoa.h +0 -245
  83. data/vendor/cereal/include/cereal/external/rapidjson/internal/ieee754.h +0 -78
  84. data/vendor/cereal/include/cereal/external/rapidjson/internal/itoa.h +0 -308
  85. data/vendor/cereal/include/cereal/external/rapidjson/internal/meta.h +0 -186
  86. data/vendor/cereal/include/cereal/external/rapidjson/internal/pow10.h +0 -55
  87. data/vendor/cereal/include/cereal/external/rapidjson/internal/regex.h +0 -740
  88. data/vendor/cereal/include/cereal/external/rapidjson/internal/stack.h +0 -232
  89. data/vendor/cereal/include/cereal/external/rapidjson/internal/strfunc.h +0 -69
  90. data/vendor/cereal/include/cereal/external/rapidjson/internal/strtod.h +0 -290
  91. data/vendor/cereal/include/cereal/external/rapidjson/internal/swap.h +0 -46
  92. data/vendor/cereal/include/cereal/external/rapidjson/istreamwrapper.h +0 -128
  93. data/vendor/cereal/include/cereal/external/rapidjson/memorybuffer.h +0 -70
  94. data/vendor/cereal/include/cereal/external/rapidjson/memorystream.h +0 -71
  95. data/vendor/cereal/include/cereal/external/rapidjson/msinttypes/inttypes.h +0 -316
  96. data/vendor/cereal/include/cereal/external/rapidjson/msinttypes/stdint.h +0 -300
  97. data/vendor/cereal/include/cereal/external/rapidjson/ostreamwrapper.h +0 -81
  98. data/vendor/cereal/include/cereal/external/rapidjson/pointer.h +0 -1414
  99. data/vendor/cereal/include/cereal/external/rapidjson/prettywriter.h +0 -277
  100. data/vendor/cereal/include/cereal/external/rapidjson/rapidjson.h +0 -656
  101. data/vendor/cereal/include/cereal/external/rapidjson/reader.h +0 -2230
  102. data/vendor/cereal/include/cereal/external/rapidjson/schema.h +0 -2497
  103. data/vendor/cereal/include/cereal/external/rapidjson/stream.h +0 -223
  104. data/vendor/cereal/include/cereal/external/rapidjson/stringbuffer.h +0 -121
  105. data/vendor/cereal/include/cereal/external/rapidjson/writer.h +0 -709
  106. data/vendor/cereal/include/cereal/external/rapidxml/license.txt +0 -52
  107. data/vendor/cereal/include/cereal/external/rapidxml/manual.html +0 -406
  108. data/vendor/cereal/include/cereal/external/rapidxml/rapidxml.hpp +0 -2624
  109. data/vendor/cereal/include/cereal/external/rapidxml/rapidxml_iterators.hpp +0 -175
  110. data/vendor/cereal/include/cereal/external/rapidxml/rapidxml_print.hpp +0 -428
  111. data/vendor/cereal/include/cereal/external/rapidxml/rapidxml_utils.hpp +0 -123
  112. data/vendor/cereal/include/cereal/macros.hpp +0 -154
  113. data/vendor/cereal/include/cereal/specialize.hpp +0 -139
  114. data/vendor/cereal/include/cereal/types/array.hpp +0 -79
  115. data/vendor/cereal/include/cereal/types/atomic.hpp +0 -55
  116. data/vendor/cereal/include/cereal/types/base_class.hpp +0 -203
  117. data/vendor/cereal/include/cereal/types/bitset.hpp +0 -176
  118. data/vendor/cereal/include/cereal/types/boost_variant.hpp +0 -164
  119. data/vendor/cereal/include/cereal/types/chrono.hpp +0 -72
  120. data/vendor/cereal/include/cereal/types/common.hpp +0 -129
  121. data/vendor/cereal/include/cereal/types/complex.hpp +0 -56
  122. data/vendor/cereal/include/cereal/types/concepts/pair_associative_container.hpp +0 -73
  123. data/vendor/cereal/include/cereal/types/deque.hpp +0 -62
  124. data/vendor/cereal/include/cereal/types/forward_list.hpp +0 -68
  125. data/vendor/cereal/include/cereal/types/functional.hpp +0 -43
  126. data/vendor/cereal/include/cereal/types/list.hpp +0 -62
  127. data/vendor/cereal/include/cereal/types/map.hpp +0 -36
  128. data/vendor/cereal/include/cereal/types/memory.hpp +0 -425
  129. data/vendor/cereal/include/cereal/types/optional.hpp +0 -66
  130. data/vendor/cereal/include/cereal/types/polymorphic.hpp +0 -483
  131. data/vendor/cereal/include/cereal/types/queue.hpp +0 -132
  132. data/vendor/cereal/include/cereal/types/set.hpp +0 -103
  133. data/vendor/cereal/include/cereal/types/stack.hpp +0 -76
  134. data/vendor/cereal/include/cereal/types/string.hpp +0 -61
  135. data/vendor/cereal/include/cereal/types/tuple.hpp +0 -123
  136. data/vendor/cereal/include/cereal/types/unordered_map.hpp +0 -36
  137. data/vendor/cereal/include/cereal/types/unordered_set.hpp +0 -99
  138. data/vendor/cereal/include/cereal/types/utility.hpp +0 -47
  139. data/vendor/cereal/include/cereal/types/valarray.hpp +0 -89
  140. data/vendor/cereal/include/cereal/types/variant.hpp +0 -109
  141. data/vendor/cereal/include/cereal/types/vector.hpp +0 -112
  142. data/vendor/cereal/include/cereal/version.hpp +0 -52
  143. data/vendor/isotree/src/Makevars +0 -4
  144. data/vendor/isotree/src/crit.cpp +0 -912
  145. data/vendor/isotree/src/dist.cpp +0 -749
  146. data/vendor/isotree/src/extended.cpp +0 -790
  147. data/vendor/isotree/src/fit_model.cpp +0 -1090
  148. data/vendor/isotree/src/helpers_iforest.cpp +0 -324
  149. data/vendor/isotree/src/isoforest.cpp +0 -771
  150. data/vendor/isotree/src/mult.cpp +0 -607
  151. data/vendor/isotree/src/predict.cpp +0 -853
  152. data/vendor/isotree/src/utils.cpp +0 -1566
data/vendor/isotree/include/isotree.hpp
@@ -0,0 +1,2116 @@
1
+ /* Isolation forests and variations thereof, with adjustments for incorporation
2
+ * of categorical variables and missing values.
3
+ * Written for the C++11 standard and aimed at being used in R and Python.
4
+ *
5
+ * This library is based on the following works:
6
+ * [1] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou.
7
+ * "Isolation forest."
8
+ * 2008 Eighth IEEE International Conference on Data Mining. IEEE, 2008.
9
+ * [2] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou.
10
+ * "Isolation-based anomaly detection."
11
+ * ACM Transactions on Knowledge Discovery from Data (TKDD) 6.1 (2012): 3.
12
+ * [3] Hariri, Sahand, Matias Carrasco Kind, and Robert J. Brunner.
13
+ * "Extended Isolation Forest."
14
+ * arXiv preprint arXiv:1811.02141 (2018).
15
+ * [4] Liu, Fei Tony, Kai Ming Ting, and Zhi-Hua Zhou.
16
+ * "On detecting clustered anomalies using SCiForest."
17
+ * Joint European Conference on Machine Learning and Knowledge Discovery in Databases. Springer, Berlin, Heidelberg, 2010.
18
+ * [5] https://sourceforge.net/projects/iforest/
19
+ * [6] https://math.stackexchange.com/questions/3388518/expected-number-of-paths-required-to-separate-elements-in-a-binary-tree
20
+ * [7] Quinlan, J. Ross. C4.5: programs for machine learning. Elsevier, 2014.
21
+ * [8] Cortes, David.
22
+ * "Distance approximation using Isolation Forests."
23
+ * arXiv preprint arXiv:1910.12362 (2019).
24
+ * [9] Cortes, David.
25
+ * "Imputing missing values with unsupervised random trees."
26
+ * arXiv preprint arXiv:1911.06646 (2019).
27
+ * [10] https://math.stackexchange.com/questions/3333220/expected-average-depth-in-random-binary-tree-constructed-top-to-bottom
28
+ * [11] Cortes, David.
29
+ * "Revisiting randomized choices in isolation forests."
30
+ * arXiv preprint arXiv:2110.13402 (2021).
31
+ * [12] Guha, Sudipto, et al.
32
+ * "Robust random cut forest based anomaly detection on streams."
33
+ * International conference on machine learning. PMLR, 2016.
34
+ * [13] Cortes, David.
35
+ * "Isolation forests: looking beyond tree depth."
36
+ * arXiv preprint arXiv:2111.11639 (2021).
37
+ * [14] Ting, Kai Ming, Yue Zhu, and Zhi-Hua Zhou.
38
+ * "Isolation kernel and its effect on SVM"
39
+ * Proceedings of the 24th ACM SIGKDD
40
+ * International Conference on Knowledge Discovery & Data Mining. 2018.
41
+ *
42
+ * BSD 2-Clause License
43
+ * Copyright (c) 2019-2021, David Cortes
44
+ * All rights reserved.
45
+ * Redistribution and use in source and binary forms, with or without
46
+ * modification, are permitted provided that the following conditions are met:
47
+ * * Redistributions of source code must retain the above copyright notice, this
48
+ * list of conditions and the following disclaimer.
49
+ * * Redistributions in binary form must reproduce the above copyright notice,
50
+ * this list of conditions and the following disclaimer in the documentation
51
+ * and/or other materials provided with the distribution.
52
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
53
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
55
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
56
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
58
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
59
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
60
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62
+ */
63
+
64
+ /* Standard headers */
65
+ #include <cmath>
66
+ #include <cstddef>
67
+ #include <cstdint>
68
+ #include <vector>
69
+ #include <cstdio>
70
+ #include <iostream>
71
+ using std::size_t;
72
+
73
+ /* The library has overloaded functions supporting different input types.
74
+ Note that, while the 'float' type is supported, fitting
75
+ models to it will be slower, as the models internally use
76
+ 'double' and 'long double', so it is not recommended.
77
+
78
+ In order to use the library with different types than the ones
79
+ suggested here, add something like this before including the
80
+ library header:
81
+ #define real_t float
82
+ #define sparse_ix int
83
+ #include "isotree.hpp"
84
+ The header may be included multiple times if required. */
85
+ #ifndef real_t
86
+ #define real_t double /* supported: float, double */
87
+ #endif
88
+ #ifndef sparse_ix
89
+ #define sparse_ix int /* supported: int, int64_t, size_t */
90
+ #endif
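As a concrete illustration of the type overrides described in the comment above (an editor's sketch, not part of the diff; the particular choices of 'float' and 'int64_t' are arbitrary):

    /* override the default types before including the header */
    #include <cstdint>
    #define real_t    float    /* supported: float, double */
    #define sparse_ix int64_t  /* supported: int, int64_t, size_t */
    #include "isotree.hpp"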
91
+
92
+ #ifndef ISOTREE_H
93
+ #define ISOTREE_H
94
+
95
+ #ifdef _WIN32
96
+ #define ISOTREE_EXPORTED __declspec(dllimport)
97
+ #else
98
+ #define ISOTREE_EXPORTED
99
+ #endif
100
+
101
+
102
+ /* Types used throughout the package - zero is the suggested value (when appropriate) */
103
+ typedef enum NewCategAction {Weighted=0, Smallest=11, Random=12} NewCategAction; /* Weighted means Impute in the extended model */
104
+ typedef enum MissingAction {Divide=21, Impute=22, Fail=0} MissingAction; /* Divide is only for non-extended model */
105
+ typedef enum ColType {Numeric=31, Categorical=32, NotUsed=0} ColType;
106
+ typedef enum CategSplit {SubSet=0, SingleCateg=41} CategSplit;
107
+ typedef enum CoefType {Uniform=61, Normal=0} CoefType; /* For extended model */
108
+ typedef enum UseDepthImp {Lower=71, Higher=0, Same=72} UseDepthImp; /* For NA imputation */
109
+ typedef enum WeighImpRows {Inverse=0, Prop=81, Flat=82} WeighImpRows; /* For NA imputation */
110
+ typedef enum ScoringMetric {Depth=0, Density=92, BoxedDensity=94, BoxedDensity2=96, BoxedRatio=95,
111
+ AdjDepth=91, AdjDensity=93} ScoringMetric;
112
+
113
+ /* Notes about new categorical action:
114
+ * - For single-variable case, if using 'Smallest', can then pass data at prediction time
115
+ * having categories that were never in the training data (as an integer higher than 'ncat'
116
+ * for that column), but if using 'Random' or 'Weighted', these must be passed as NA (int < 0)
117
+ * - For extended case, 'Weighted' becomes a weighted imputation instead, and if using either
118
+ * 'Weighted' or 'Smallest', can pass newer, unseen categories at prediction time too.
119
+ * - If using 'Random', cannot pass new categories at prediction time.
120
+ * - If using 'Weighted' for single-variable case, cannot predict similarity with a value
121
+ * for MissingAction other than 'Divide'. */
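A small sketch of the encodings this note describes (editor's illustration; values are made up):

    #include <vector>

    /* Single-variable model, one categorical column trained with ncat = 3 (codes 0..2).
       With 'Smallest', a category unseen during training may be passed at prediction
       time as an integer above the training 'ncat'; with 'Random' or 'Weighted' it
       must instead be passed as missing (a negative integer). */
    std::vector<int> categ_col_at_predict = {0, 2, 5 /* unseen category */, -1 /* NA */};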
122
+
123
+
124
+
125
+ /* Structs that are output (modified) from the main function */
126
+ typedef struct IsoTree {
127
+ ColType col_type = NotUsed;
128
+ size_t col_num;
129
+ double num_split;
130
+ std::vector<char> cat_split;
131
+ int chosen_cat;
132
+ size_t tree_left;
133
+ size_t tree_right;
134
+ double pct_tree_left;
135
+ double score; /* will not be integer when there are weights or early stop */
136
+ double range_low = -HUGE_VAL;
137
+ double range_high = HUGE_VAL;
138
+ double remainder; /* only used for distance/similarity */
139
+
140
+ IsoTree() = default;
141
+
142
+ } IsoTree;
143
+
144
+ typedef struct IsoHPlane {
145
+ std::vector<size_t> col_num;
146
+ std::vector<ColType> col_type;
147
+ std::vector<double> coef;
148
+ std::vector<double> mean;
149
+ std::vector<std::vector<double>> cat_coef;
150
+ std::vector<int> chosen_cat;
151
+ std::vector<double> fill_val;
152
+ std::vector<double> fill_new;
153
+
154
+ double split_point;
155
+ size_t hplane_left;
156
+ size_t hplane_right;
157
+ double score; /* will not be integer when there are weights or early stop */
158
+ double range_low = -HUGE_VAL;
159
+ double range_high = HUGE_VAL;
160
+ double remainder; /* only used for distance/similarity */
161
+
162
+ IsoHPlane() = default;
163
+ } IsoHPlane;
164
+
165
+ typedef struct IsoForest {
166
+ std::vector< std::vector<IsoTree> > trees;
167
+ NewCategAction new_cat_action;
168
+ CategSplit cat_split_type;
169
+ MissingAction missing_action;
170
+ ScoringMetric scoring_metric;
171
+ double exp_avg_depth;
172
+ double exp_avg_sep;
173
+ size_t orig_sample_size;
174
+ bool has_range_penalty;
175
+ IsoForest() = default;
176
+ } IsoForest;
177
+
178
+ typedef struct ExtIsoForest {
179
+ std::vector< std::vector<IsoHPlane> > hplanes;
180
+ NewCategAction new_cat_action;
181
+ CategSplit cat_split_type;
182
+ MissingAction missing_action;
183
+ ScoringMetric scoring_metric;
184
+ double exp_avg_depth;
185
+ double exp_avg_sep;
186
+ size_t orig_sample_size;
187
+ bool has_range_penalty;
188
+ ExtIsoForest() = default;
189
+ } ExtIsoForest;
190
+
191
+ typedef struct ImputeNode {
192
+ std::vector<double> num_sum;
193
+ std::vector<double> num_weight;
194
+ std::vector<std::vector<double>> cat_sum;
195
+ std::vector<double> cat_weight;
196
+ size_t parent;
197
+ ImputeNode() = default;
198
+ } ImputeNode; /* this is for each tree node */
199
+
200
+ typedef struct Imputer {
201
+ size_t ncols_numeric;
202
+ size_t ncols_categ;
203
+ std::vector<int> ncat;
204
+ std::vector<std::vector<ImputeNode>> imputer_tree;
205
+ std::vector<double> col_means;
206
+ std::vector<int> col_modes;
207
+ Imputer() = default;
208
+ } Imputer;
209
+
210
+ typedef struct SingleTreeIndex {
211
+ std::vector<size_t> terminal_node_mappings;
212
+ std::vector<double> node_distances;
213
+ std::vector<double> node_depths;
214
+ std::vector<size_t> reference_points;
215
+ std::vector<size_t> reference_indptr;
216
+ std::vector<size_t> reference_mapping;
217
+ size_t n_terminal;
218
+ } TreeNodeIndex;
219
+
220
+ typedef struct TreesIndexer {
221
+ std::vector<SingleTreeIndex> indices;
222
+ TreesIndexer() = default;
223
+ } TreesIndexer;
224
+
225
+ #endif /* ISOTREE_H */
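The structs above form a simple nested layout: a fitted forest holds a vector of trees, and each tree is a flat vector of nodes whose 'tree_left' / 'tree_right' fields appear to index back into that same vector. A minimal editor's sketch of walking that layout (assumes an already-fitted 'model'; not part of the library):

    #include <cstdio>
    #include <cstddef>

    /* print how many nodes each single-variable tree contains */
    void print_tree_sizes(const IsoForest &model)
    {
        for (std::size_t t = 0; t < model.trees.size(); t++)
            std::printf("tree %zu: %zu nodes\n", t, model.trees[t].size());
    }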
226
+
227
+ /* Fit Isolation Forest model, or variant of it such as SCiForest
228
+ *
229
+ * Parameters:
230
+ * ===========
231
+ * - model_outputs (out)
232
+ * Pointer to already allocated isolation forest model object for single-variable splits.
233
+ * If fitting the extended model, pass NULL (must pass 'model_outputs_ext'). Can later add
234
+ * additional trees through function 'add_tree'.
235
+ * - model_outputs_ext (out)
236
+ * Pointer to already allocated extended isolation forest model object (for multiple-variable splits).
237
+ * Note that if 'ndim' = 1, must use instead the single-variable model object.
238
+ * If fitting the single-variable model, pass NULL (must pass 'model_outputs'). Can later add
239
+ * additional trees through function 'add_tree'.
240
+ * - numeric_data[nrows * ncols_numeric]
241
+ * Pointer to numeric data to which to fit the model. Must be ordered by columns like Fortran,
242
+ * not ordered by rows like C (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.).
243
+ * Pass NULL if there are no dense numeric columns (must also pass 'ncols_numeric' = 0 if there's
244
+ * no sparse numeric data either).
245
+ * Can only pass one of 'numeric_data' or 'Xc' + 'Xc_ind' + 'Xc_indptr'.
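To make the column-major ("Fortran order") layout concrete, a brief editor's sketch of what 'numeric_data' would look like for a tiny dense input (values are arbitrary):

    #include <vector>

    /* 3 rows x 2 numeric columns, flattened column by column:
       column 0 = {0.1, 0.2, 0.3}, column 1 = {10.0, 20.0, 30.0} */
    std::vector<double> numeric_data = {0.1, 0.2, 0.3,     /* column 0 */
                                        10.0, 20.0, 30.0}; /* column 1 */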
246
+ * - ncols_numeric
247
+ * Number of numeric columns in the data (whether they come in a sparse matrix or dense array).
248
+ * - categ_data[nrows * ncols_categ]
249
+ * Pointer to categorical data to which to fit the model. Must be ordered by columns like Fortran,
250
+ * not ordered by rows like C (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.).
251
+ * Pass NULL if there are no categorical columns (must also pass 'ncols_categ' = 0).
252
+ * Each category should be represented as an integer, and these integers must start at zero and
253
+ * be in consecutive order - i.e. if category '3' is present, category '2' must also be present
254
+ * (note that they are not treated as being ordinal, this is just an encoding). Missing values
255
+ * should be encoded as negative numbers such as (-1).
256
+ * - ncols_categ
257
+ * Number of categorical columns in the data.
258
+ * - ncat[ncols_categ]
259
+ * Number of categories in each categorical column. E.g. if the highest code for a column is '4',
260
+ * the number of categories for that column is '5' (zero is one category).
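A brief editor's sketch of the categorical encoding described in the two items above (arbitrary values):

    #include <vector>

    /* 3 rows x 1 categorical column, column-major; categories are coded 0..2
       and a missing value is encoded as a negative number */
    std::vector<int> categ_data = {0, 2, -1};  /* -1 = missing */
    std::vector<int> ncat       = {3};         /* highest code is 2 -> 3 categories */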
261
+ * - Xc[nnz]
262
+ * Pointer to numeric data in sparse numeric matrix in CSC format (column-compressed).
263
+ * Pass NULL if there are no sparse numeric columns.
264
+ * Can only pass one of 'numeric_data' or 'Xc' + 'Xc_ind' + 'Xc_indptr'.
265
+ * - Xc_ind[nnz]
266
+ * Pointer to row indices to which each non-zero entry in 'Xc' corresponds.
267
+ * Must be in sorted order, otherwise results will be incorrect.
268
+ * The largest value here should be smaller than the largest possible value of 'size_t'.
269
+ * Pass NULL if there are no sparse numeric columns.
270
+ * - Xc_indptr[ncols_numeric + 1]
271
+ * Pointer to column index pointers that tell at entry [col] where column 'col'
272
+ * starts and at entry [col + 1] where it ends.
273
+ * Pass NULL if there are no sparse numeric columns.
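An editor's sketch of the CSC arrays described in the three items above, for a tiny example matrix (values are made up):

    #include <vector>

    /* dense equivalent, 3 rows x 2 columns:
       column 0 = {1.0, 0.0, 2.0}, column 1 = {0.0, 3.0, 0.0} */
    std::vector<double> Xc        = {1.0, 2.0, 3.0}; /* non-zero values, column by column      */
    std::vector<int>    Xc_ind    = {0, 2, 1};       /* row of each value (sorted per column)  */
    std::vector<int>    Xc_indptr = {0, 2, 3};       /* col j spans [Xc_indptr[j], Xc_indptr[j+1]) */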
274
+ * - ndim
275
+ * How many dimensions (columns) to use for making a split. Must pass 'ndim' = 1 for
276
+ * the single-variable model. Note that the model object pointer passed must also
277
+ * agree with the value passed to 'ndim'.
278
+ * - ntry
279
+ * When using any of 'prob_pick_by_gain_pl', 'prob_pick_by_gain_avg', 'prob_pick_by_full_gain', 'prob_pick_by_dens', how many variables (with 'ndim=1')
280
+ * or linear combinations (with 'ndim>1') to try for determining the best one according to gain.
281
+ * Recommended value in reference [4] is 10 (with 'prob_pick_by_gain_avg', for outlier detection), while the
282
+ * recommended value in reference [11] is 1 (with 'prob_pick_by_gain_pl', for outlier detection), and the
283
+ * recommended value in reference [9] is 10 to 20 (with 'prob_pick_by_gain_pl', for missing value imputations).
284
+ * - coef_type
285
+ * For the extended model, whether to sample random coefficients according to a normal distribution ~ N(0, 1)
286
+ * (as proposed in [4]) or according to a uniform distribution ~ Unif(-1, +1) as proposed in [3]. Ignored for the
287
+ * single-variable model.
288
+ * - sample_weights[nrows]
289
+ * Weights for the rows when building a tree, either as sampling importances when using
290
+ * sub-samples for each tree (i.e. passing weight '2' makes a row twice as likely to be included
291
+ * in a random sub-sample), or as density measurement (i.e. passing weight '2' is the same as if
292
+ * the row appeared twice, thus it's less of an outlier) - how this is taken is determined
293
+ * through parameter 'weight_as_sample'.
294
+ * Pass NULL if the rows all have uniform weights.
295
+ * - with_replacement
296
+ * Whether to sample rows with replacement or not (not recommended). Note that distance calculations,
297
+ * if desired, don't work well with duplicate rows.
298
+ * - weight_as_sample
299
+ * If passing sample (row) weights when fitting the model, whether to consider those weights as row
300
+ * sampling weights (i.e. the higher the weights, the more likely the observation will end up included
301
+ * in each tree sub-sample), or as distribution density weights (i.e. putting a weight of two is the same
302
+ * as if the row appeared twice, thus higher weight makes it less of an outlier, but does not give it a
303
+ * higher chance of being sampled if the data uses sub-sampling).
304
+ * - nrows
305
+ * Number of rows in 'numeric_data', 'Xc', 'categ_data'.
306
+ * - sample_size
307
+ * Sample size of the data sub-samples with which each binary tree will be built. When a terminal node has more than
308
+ * 1 observation, the remaining isolation depth for them is estimated assuming the data and splits are both uniformly
309
+ * random (separation depth follows a similar process with expected value calculated as in [6]). If passing zero,
310
+ * will set it to 'nrows'. Recommended value in [1], [2], [3] is 256, while the default value in the author's code
311
+ * in [5] is 'nrows' here.
312
+ * - ntrees
313
+ * Number of binary trees to build for the model. Recommended value in [1] is 100, while the default value in the
314
+ * author's code in [5] is 10.
315
+ * - max_depth
316
+ * Maximum depth of the binary trees to grow. Will get overwritten if passing 'limit_depth' = 'true'.
317
+ * Models that use 'prob_pick_by_gain_pl' or 'prob_pick_by_gain_avg' are likely to benefit from
318
+ * deeper trees (larger 'max_depth'), but deeper trees can result in much slower model fitting and
319
+ * predictions.
320
+ * Note that models that use 'prob_pick_by_gain_pl' or 'prob_pick_by_gain_avg' are likely to benefit from
321
+ * deeper trees (larger 'max_depth'), but deeper trees can result in much slower model fitting and
322
+ * predictions.
323
+ * If using pooled gain, one might want to substitute 'max_depth' with 'min_gain'.
324
+ * - ncols_per_tree
325
+ * Number of columns to use (have as potential candidates for splitting at each iteration) in each tree,
326
+ * similar to the 'mtry' parameter of random forests.
327
+ * In general, this is only relevant when using non-random splits and/or weighted column choices.
328
+ * If passing zero, will use the full number of available columns.
329
+ * Recommended value: 0.
330
+ * - limit_depth
331
+ * Whether to automatically set the maximum depth to the corresponding depth of a balanced binary tree with number of
332
+ * terminal nodes corresponding to the sub-sample size (the reason being that, if trying to detect outliers, an outlier
333
+ * will only be so if it turns out to be isolated with shorter average depth than usual, which corresponds to a balanced
334
+ * tree depth). Default setting for [1], [2], [3], [4] is 'true', but it's recommended to pass 'false' here
335
+ * and higher values for 'max_depth' if using the model for purposes other than outlier detection.
336
+ * Note that, if passing 'limit_depth=true', then 'max_depth' is ignored.
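As a rough illustration of the automatic limit described above (an editor's sketch based on [1]; the library's exact rounding may differ):

    #include <cmath>
    #include <cstddef>

    /* height of a balanced binary tree with 'sample_size' terminal nodes */
    std::size_t balanced_depth(std::size_t sample_size)
    {
        return (std::size_t) std::ceil(std::log2((double) sample_size));
    }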
337
+ * - penalize_range
338
+ * Whether to penalize (add -1 to the terminal depth) observations at prediction time that have a value
339
+ * of the chosen split variable (linear combination in extended model) that falls outside of a pre-determined
340
+ * reasonable range in the data being split (given by 2 * range in data and centered around the split point),
341
+ * as proposed in [4] and implemented in the authors' original code in [5]. Not used in single-variable model
342
+ * when splitting by categorical variables. Note that this can make a very large difference in the results
343
+ * when using 'prob_pick_by_gain_pl'.
344
+ * This option is not supported when using density-based outlier scoring metrics.
345
+ * - standardize_data
346
+ * Whether to standardize the features at each node before creating a linear combination of them as suggested
347
+ * in [4]. This is ignored when using 'ndim=1'.
348
+ * - scoring_metric
349
+ * Metric to use for determining outlier scores (see reference [13]).
350
+ * If passing 'Depth', will use isolation depth as proposed in reference [1]. This is typically the safest choice
351
+ * and plays well with all model types offered by this library.
352
+ * If passing 'Density', will set scores for each terminal node as the ratio between the fraction of points in the sub-sample
353
+ * that end up in that node and the fraction of the volume in the feature space which defines
354
+ * the node according to the splits that lead to it.
355
+ * If using 'ndim=1', for categorical variables, 'Density' is defined in terms
356
+ * of number of categories that go towards each side of the split divided by number of categories
357
+ * in the observations that reached that node.
358
+ * The standardized outlier score from 'Density' for a given observation is calculated as the
359
+ * negative of the logarithm of the geometric mean from the per-tree densities, which unlike
360
+ * the standardized score produced from 'Depth', is unbounded, but just like the standardized
361
+ * score from 'Depth', has a natural threshold for defining outlierness, which in this case
362
+ * is zero instead of 0.5. The non-standardized outlier score for 'Density' is calculated as the
363
+ * geometric mean, while the per-tree scores are calculated as the density values.
364
+ * 'Density' might lead to better predictions when using 'ndim=1', particularly in the presence
365
+ * of categorical variables. Note however that using 'Density' requires more trees for convergence
366
+ * of scores (i.e. good results) compared to isolation-based metrics.
367
+ * 'Density' is incompatible with 'penalize_range=true'.
368
+ * If passing 'AdjDepth', will use an adjusted isolation depth that takes into account the number of points that
369
+ * go to each side of a given split vs. the fraction of the range of that feature that each
370
+ * side of the split occupies, by a metric as follows: 'd = 2/ (1 + 1/(2*p))'
371
+ * where 'p' is defined as 'p = (n_s / n_t) / (r_s / r_t)'
372
+ * with 'n_t' being the number of points that reach a given node, 'n_s' the
373
+ * number of points that are sent to a given side of the split/branch at that node,
374
+ * 'r_t' being the range (maximum minus minimum) of the splitting feature or
375
+ * linear combination among the points that reached the node, and 'r_s' being the
376
+ * range of the same feature or linear combination among the points that are sent to this
377
+ * same side of the split/branch. This makes each split add a number between zero and two
378
+ * to the isolation depth, with this number's probabilistic distribution being centered
379
+ * around 1 and thus the expected isolation depth remaining the same as in the original
380
+ * 'Depth' metric, but having more variability around the extremes.
381
+ * Scores (standardized, non-standardized, per-tree) for 'AdjDepth' are aggregated in the same way
382
+ * as for 'Depth'.
383
+ * 'AdjDepth' might lead to better predictions when using 'ndim=1', particularly in the presence
384
+ * of categorical variables and for smaller datasets, for which it might make
385
+ * sense to combine it with 'penalize_range=true'.
386
+ * If passing 'AdjDensity', will use the same metric from 'AdjDepth', but applied multiplicatively instead
387
+ * of additively. The expected value for 'AdjDensity' is not strictly the same
388
+ * as for isolation, but using the expected isolation depth as standardizing criterion
389
+ * tends to produce similar standardized score distributions (centered around 0.5).
390
+ * Scores (standardized, non-standardized, per-tree) from 'AdjDensity' are aggregated in the same way
391
+ * as for 'Depth'.
392
+ * 'AdjDensity' is incompatible with 'penalize_range=true'.
393
+ * If passing 'BoxedRatio', will set the scores for each terminal node as the ratio between the volume of the boxed
394
+ * feature space for the node as defined by the smallest and largest values from the split
395
+ * conditions for each column (bounded by the variable ranges in the sample) and the
396
+ * variable ranges in the tree sample.
397
+ * If using 'ndim=1', for categorical variables 'BoxedRatio' is defined in terms of number of categories.
398
+ * If using 'ndim>1', 'BoxedRatio' is defined in terms of the maximum achievable value for the
399
+ * splitting linear combination determined from the minimum and maximum values for each
400
+ * variable among the points in the sample, and as such, it has a rather different meaning
401
+ * compared to the score obtained with 'ndim=1' - 'BoxedRatio' scores with 'ndim>1'
402
+ * typically provide very poor quality results and this metric is thus not recommended to
403
+ * use in the extended model. With 'ndim>1', 'BoxedRatio' also has a tendency of producing too small
404
+ * values which round to zero.
405
+ * The standardized outlier score from 'BoxedRatio' for a given observation is calculated
406
+ * simply as the the average from the per-tree boxed ratios. 'BoxedRatio' metric
407
+ * has a lower bound of zero and a theorical upper bound of one, but in practice the scores
408
+ * tend to be very small numbers close to zero, and its distribution across
409
+ * different datasets is rather unpredictable. In order to keep rankings comparable with
410
+ * the rest of the metrics, the non-standardized outlier scores for 'BoxedRatio' are calculated as the
411
+ * negative of the average instead. The per-tree 'BoxedRatio' scores are calculated as the ratios.
412
+ * 'BoxedRatio' can be calculated in a fast-but-not-so-precise way, and in a slow-but-precise
413
+ * way, which is controlled by parameter 'fast_bratio'. Usually, both should give the
414
+ * same results, but in some datasets, the fast way can lead to numerical inaccuracies
415
+ * due to roundoffs very close to zero.
416
+ * 'BoxedRatio' might lead to better predictions in datasets with many rows when using 'ndim=1'
417
+ * and a relatively small 'sample_size'. Note that more trees are required for convergence
418
+ * of scores when using 'BoxedRatio'. In some datasets, 'BoxedRatio' metric might result in very bad
419
+ * predictions, to the point that taking its inverse produces a much better ranking of outliers.
420
+ * 'BoxedRatio' option is incompatible with 'penalize_range'.
421
+ * If passing 'BoxedDensity2', will set the score as the ratio between the fraction of points within the sample that
422
+ * end up in a given terminal node and the 'BoxedRatio' metric.
423
+ * Aggregation of scores (standardized, non-standardized, per-tree) for 'BoxedDensity2' is done in the same
424
+ * way as for 'Density', and it also has a natural threshold at zero for determining
425
+ * outliers and inliers.
426
+ * 'BoxedDensity2' is typically usable with 'ndim>1', but tends to produce much bigger values
427
+ * compared to 'ndim=1'.
428
+ * Albeit unintuitively, in many datasets, one can usually get better results with metric
429
+ * 'BoxedDensity' instead.
430
+ * The calculation of 'BoxedDensity2' is also controlled by 'fast_bratio'.
431
+ * 'BoxedDensity2' is incompatible with 'penalize_range'.
432
+ * If passing 'BoxedDensity', will set the score as the ratio between the fraction of points within the sample that
433
+ * end up in a given terminal node and the ratio between the boxed volume of the feature
434
+ * space in the sample and the boxed volume of a node given by the split conditions (inverse
435
+ * as in 'BoxedDensity2'). This metric does not have any theoretical or intuitive
436
+ * justification behind its existence, and it is perhaps illogical to use it as a
437
+ * scoring metric, but tends to produce good results in some datasets.
438
+ * The standardized outlier scores for 'BoxedDensity' are defined as the negative of the geometric mean,
439
+ * while the non-standardized scores are the geometric mean, and the per-tree scores are simply the 'density' values.
440
+ * The calculation of 'BoxedDensity' is also controlled by 'fast_bratio'.
441
+ * 'BoxedDensity' option is incompatible with 'penalize_range'.
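As a small illustration of the 'AdjDepth' adjustment formula quoted earlier in this item (an editor's sketch; the function name is hypothetical):

    /* per-split depth increment: d = 2 / (1 + 1/(2*p)),
       with p = (n_s / n_t) / (r_s / r_t) as defined in the description above */
    double adj_depth_increment(double n_s, double n_t, double r_s, double r_t)
    {
        double p = (n_s / n_t) / (r_s / r_t);
        return 2.0 / (1.0 + 1.0 / (2.0 * p));  /* a number between zero and two */
    }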
442
+ * - fast_bratio
443
+ * When using "boxed" metrics for scoring, whether to calculate them in a fast way through
444
+ * cumulative sum of logarithms of ratios after each split, or in a slower way as sum of
445
+ * logarithms of a single ratio per column for each terminal node.
446
+ * Usually, both methods should give the same results, but in some datasets, particularly
447
+ * when variables have too small or too large ranges, the first method can be prone to
448
+ * numerical inaccuracies due to roundoff close to zero.
449
+ * Note that this does not affect calculations for models with 'ndim>1', since given the
450
+ * split types, the calculation for them is different.
451
+ * - standardize_dist
452
+ * If passing 'tmat' (see documentation for it), whether to standardize the resulting average separation
453
+ * depths in order to produce a distance metric or not, in the same way this is done for the outlier score.
454
+ * - tmat[nrows * (nrows - 1) / 2]
455
+ * Array in which to calculate average separation depths or standardized distance metric (see documentation
456
+ * for 'standardize_dist') as the model is being fit. Pass NULL to avoid doing these calculations alongside
457
+ * the regular model process. If passing this output argument, the sample size must be the same as the number
458
+ * of rows, and there cannot be sample weights. If not NULL, must already be initialized to zeros. As the
459
+ * output is a symmetric matrix, this function will only fill in the upper-triangular part, in which
460
+ * entry 0 <= i < j < n will be located at position
461
+ * p(i,j) = (i * (n - (i+1)/2) + j - i - 1).
462
+ * Can be converted to a dense square matrix through function 'tmat_to_dense'.
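A short editor's sketch of the upper-triangular indexing formula given above (helper name is hypothetical; only the formula comes from the documentation):

    #include <cstddef>

    /* position of pair (i, j) with 0 <= i < j < n inside 'tmat';
       algebraically the same as p(i,j) = i*(n - (i+1)/2) + j - i - 1 */
    std::size_t tmat_position(std::size_t i, std::size_t j, std::size_t n)
    {
        return i*n - (i*(i + 1))/2 + (j - i - 1);
    }

    /* e.g. with n = 4, pair (1, 3) maps to position 4 */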
463
+ * - output_depths[nrows]
464
+ * Array in which to calculate average path depths or standardized outlierness metric (see documentation
465
+ * for 'standardize_depth') as the model is being fit. Pass NULL to avoid doing these calculations alongside
466
+ * the regular model process. If passing this output argument, the sample size must be the same as the number
467
+ * of rows. If not NULL, must already be initialized to zeros.
468
+ * - standardize_depth
469
+ * If passing 'output_depths', whether to standardize the results as proposed in [1], in order to obtain
470
+ * a metric in which the more outlier is an observation, the closer this standardized metric will be to 1,
471
+ * with average observations obtaining 0.5. If passing 'false' here, the numbers in 'output_depths' will be
472
+ * the average depth of each row across all trees.
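For reference, the standardization proposed in [1] (which the item above refers to) maps an observation's average depth to 2^(-avg_depth / c(n)), where c(n) is the expected average depth for a sample of size n, so outliers approach 1 and average observations land near 0.5. A minimal editor's sketch:

    #include <cmath>

    double standardized_outlier_score(double avg_depth, double expected_avg_depth)
    {
        return std::pow(2.0, -avg_depth / expected_avg_depth);
    }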
473
+ * - col_weights[ncols_numeric + ncols_categ]
474
+ * Sampling weights for each column, assuming all the numeric columns come before the categorical columns.
475
+ * Ignored when picking columns by deterministic criterion.
476
+ * If passing NULL, each column will have a uniform weight. If used along with kurtosis weights, the
477
+ * effect is multiplicative.
478
+ * - weigh_by_kurt
479
+ * Whether to weigh each column according to the kurtosis obtained in the sub-sample that is selected
480
+ * for each tree as briefly proposed in [1]. Note that this is only done at the beginning of each tree
481
+ * sample. For categorical columns, will calculate expected kurtosis if the column were converted to
482
+ * numerical by assigning to each category a random number ~ Unif(0, 1).
483
+ * This is intended as a cheap feature selector, while the parameter 'prob_pick_col_by_kurt'
484
+ * provides the option to do this at each node in the tree for a different overall type of model.
485
+ * If passing column weights or weighted column choices ('prob_pick_col_by_range', 'prob_pick_col_by_var'),
486
+ * the effect will be multiplicative. This option is not compatible with 'prob_pick_col_by_kurt'.
487
+ * If passing 'missing_action=fail' and the data has infinite values, columns with rows
488
+ * having infinite values will get a weight of zero. If passing a different value for missing
489
+ * action, infinite values will be ignored in the kurtosis calculation.
490
+ * If using 'missing_action=Impute', the calculation of kurtosis will not use imputed values
491
+ * in order not to favor columns with missing values (which would increase kurtosis by all having
492
+ * the same central value).
493
+ * - prob_pick_by_gain_pl
494
+ * This parameter indicates the probability of choosing the threshold on which to split a variable
495
+ * (with 'ndim=1') or a linear combination of variables (when using 'ndim>1') as the threshold
496
+ * that maximizes a pooled standard deviation gain criterion (see references [9] and [11]) on the
497
+ * same variable or linear combination, similarly to regression trees such as CART.
498
+ * If using 'ntry>1', will try several variables or linear combinations thereof and choose the one
499
+ * in which the largest standardized gain can be achieved.
500
+ * For categorical variables with 'ndim=1', will use Shannon entropy instead (like in [7]).
501
+ * Compared to a simple averaged gain, this tends to result in more evenly-divided splits and more clustered
502
+ * groups when they are smaller. Recommended to pass higher values when used for imputation of missing values.
503
+ * When used for outlier detection, datasets with multimodal distributions usually see better performance
504
+ * under this type of splits.
505
+ * Note that, since this makes the trees more even and thus it takes more steps to produce isolated nodes,
506
+ * the resulting object will be heavier. When splits are not made according to any of 'prob_pick_by_gain_avg',
507
+ * 'prob_pick_by_gain_pl', 'prob_pick_by_full_gain', 'prob_pick_by_dens', both the column and the split point are decided at random.
508
+ * Note that, if passing value 1 (100%) with no sub-sampling and using the single-variable model,
509
+ * every single tree will have the exact same splits.
510
+ * Be aware that 'penalize_range' can also have a large impact when using 'prob_pick_by_gain_pl'.
511
513
+ * Under this option, models are likely to produce better results when increasing 'max_depth'.
514
+ * Alternatively, one can also control the depth through 'min_gain' (for which one might want to
515
+ * set 'max_depth=0').
516
+ * Important detail: if using any of 'prob_pick_by_gain_avg', 'prob_pick_by_gain_pl', 'prob_pick_by_full_gain',
517
+ * 'prob_pick_by_dens', the distribution of outlier scores is unlikely to be centered around 0.5.
518
+ * - prob_pick_by_gain_avg
519
+ * This parameter indicates the probability of choosing the threshold on which to split a variable
520
+ * (with 'ndim=1') or a linear combination of variables (when using 'ndim>1') as the threshold
521
+ * that maximizes an averaged standard deviation gain criterion (see references [4] and [11]) on the
522
+ * same variable or linear combination.
523
+ * If using 'ntry>1', will try several variables or linear combinations thereof and choose the one
524
+ * in which the largest standardized gain can be achieved.
525
+ * For categorical variables with 'ndim=1', will take the expected standard deviation that would be
526
+ * obtained if the column were converted to numerical by assigning to each category a random
527
+ * number ~ Unif(0, 1) and calculate gain with those assumed standard deviations.
528
+ * Compared to a pooled gain, this tends to result in more cases in which a single observation or very
529
+ * few of them are put into one branch. Typically, datasets with outliers defined by extreme values in
530
+ * some column more or less independently of the rest, usually see better performance under this type
531
+ * of split. Recommended to use sub-samples (parameter 'sample_size') when
532
+ * passing this parameter. Note that, since this will create isolated nodes faster, the resulting object
533
+ * will be lighter (use less memory).
534
+ * When splits are not made according to any of 'prob_pick_by_gain_avg', 'prob_pick_by_gain_pl',
535
+ * 'prob_pick_by_full_gain', 'prob_pick_by_dens', both the column and the split point are decided at random.
536
+ * Default setting for [1], [2], [3] is zero, and default for [4] is 1.
537
+ * This is the randomization parameter that can be passed to the author's original code in [5],
538
+ * but note that the code in [5] suffers from a mathematical error in the calculation of running standard deviations,
539
+ * so the results from it might not match with this library's.
540
+ * Be aware that, if passing a value of 1 (100%) with no sub-sampling and using the single-variable model,
541
+ * every single tree will have the exact same splits.
542
+ * Under this option, models are likely to produce better results when increasing 'max_depth'.
543
+ * Important detail: if using any of 'prob_pick_by_gain_avg', 'prob_pick_by_gain_pl',
544
+ * 'prob_pick_by_full_gain', 'prob_pick_by_dens', the distribution of outlier scores is unlikely to be centered around 0.5.
545
+ * - prob_pick_by_full_gain
546
+ * This parameter indicates the probability of choosing the threshold on which to split a variable
547
+ * (with 'ndim=1') or a linear combination of variables (when using 'ndim>1') as the threshold
548
+ * that minimizes the pooled sums of variances of all columns (or a subset of them if using
549
+ * 'ncols_per_tree').
550
+ * In general, 'prob_pick_by_full_gain' is much slower to evaluate than the other gain types, and does not tend to
551
+ * lead to better results. When using 'prob_pick_by_full_gain', one might want to use a different scoring
552
+ * metric (particularly 'Density', 'BoxedDensity2' or 'BoxedRatio'). Note that
553
+ * the variance calculations are all done through the (exact) sorted-indices approach, which is much
554
+ * slower than the (approximate) histogram approach used by other decision tree software.
555
+ * Be aware that the data is not standardized in any way for the range calculations, thus the scales
556
+ * of features will make a large difference under 'prob_pick_by_full_gain', which might not make it suitable for
557
+ * all types of data.
558
+ * 'prob_pick_by_full_gain' is not compatible with categorical data, and 'min_gain' does not apply to it.
559
+ * When splits are not made according to any of 'prob_pick_by_gain_avg', 'prob_pick_by_gain_pl',
560
+ * 'prob_pick_by_full_gain', 'prob_pick_by_dens', both the column and the split point are decided at random.
561
+ * Default setting for [1], [2], [3], [4] is zero.
562
+ * - prob_pick_by_dens
563
+ * This parameter indicates the probability of choosing the threshold on which to split a variable
564
+ * (with 'ndim=1') or a linear combination of variables (when using 'ndim>1') as the threshold
565
+ * that maximizes the pooled densities of the branch distributions.
566
+ * The 'min_gain' option does not apply to this type of splits.
567
+ * When splits are not made according to any of 'prob_pick_by_gain_avg', 'prob_pick_by_gain_pl',
568
+ * 'prob_pick_by_full_gain', 'prob_pick_by_dens', both the column and the split point are decided at random.
569
+ * Default setting for [1], [2], [3], [4] is zero.
570
+ * - prob_pick_col_by_range
571
+ * When using 'ndim=1', this denotes the probability of choosing the column to split with a probability
572
+ * proportional to the range spanned by each column within a node as proposed in reference [12].
573
+ * When using 'ndim>1', this denotes the probability of choosing columns to create a hyperplane with a
574
+ * probability proportional to the range spanned by each column within a node.
575
+ * This option is not compatible with categorical data. If passing column weights, the
576
+ * effect will be multiplicative.
577
+ * Be aware that the data is not standardized in any way for the range calculations, thus the scales
578
+ * of features will make a large difference under this option, which might not make it suitable for
579
+ * all types of data.
580
+ * Note that the proposed RRCF model from [12] uses a different scoring metric for producing anomaly
581
+ * scores, while this library uses isolation depth regardless of how columns are chosen, thus results
582
+ * are likely to be different from those of other software implementations. Nevertheless, as explored
583
+ * in [11], isolation depth as a scoring metric typically provides better results than the
584
+ * "co-displacement" metric from [12] under these split types.
585
+ * - prob_pick_col_by_var
586
+ * When using 'ndim=1', this denotes the probability of choosing the column to split with a probability
587
+ * proportional to the variance of each column within a node.
588
+ * When using 'ndim>1', this denotes the probability of choosing columns to create a hyperplane with a
589
+ * probability proportional to the variance of each column within a node.
590
+ * For categorical data, it will calculate the expected variance if the column were converted to
591
+ * numerical by assigning to each category a random number ~ Unif(0, 1), which depending on the number of
592
+ * categories and their distribution, produces numbers typically a bit smaller than standardized numerical
593
+ * variables.
594
+ * Note that when using sparse matrices, the calculation of variance will rely on a procedure that
595
+ * uses sums of squares, which has less numerical precision than the
596
+ * calculation used for dense inputs, and as such, the results might differ slightly.
597
+ * Be aware that this calculated variance is not standardized in any way, so the scales of
598
+ * features will make a large difference under this option.
599
+ * If there are infinite values, all columns having infinite values will be treated as having the
600
+ * same weight, and will be chosen before every other column with non-infinite values.
601
+ * If passing column weights, the effect will be multiplicative.
602
+ * If passing a 'missing_action' different than 'fail', infinite values will be ignored for the
603
+ * variance calculation. Otherwise, all columns with infinite values will have the same probability
604
+ * and will be chosen before columns with non-infinite values.
605
+ * - prob_pick_col_by_kurt
606
+ * When using 'ndim=1', this denotes the probability of choosing the column to split with a probability
607
+ * proportional to the kurtosis of each column **within a node** (unlike the option 'weigh_by_kurt'
608
+ * which calculates this metric only at the root).
609
+ * When using 'ndim>1', this denotes the probability of choosing columns to create a hyperplane with a
610
+ * probability proportional to the kurtosis of each column within a node.
611
+ * For categorical data, it will calculate the expected kurtosis if the column were converted to
612
+ * numerical by assigning to each category a random number ~ Unif(0, 1).
613
+ * Note that when using sparse matrices, the calculation of kurtosis will rely on a procedure that
614
+ * uses sums of squares and higher-power numbers, which has less numerical precision than the
615
+ * calculation used for dense inputs, and as such, the results might differ slightly.
616
+ * If passing column weights, the effect will be multiplicative. This option is not compatible
617
+ * with 'weigh_by_kurt'.
618
+ * If passing a 'missing_action' different than 'fail', infinite values will be ignored for the
619
+ * variance calculation. Otherwise, all columns with infinite values will have the same probability
620
+ * and will be chosen before columns with non-infinite values.
621
+ * If using 'missing_action=Impute', the calculation of kurtosis will not use imputed values
622
+ * in order not to favor columns with missing values (which would increase kurtosis by all having
623
+ * the same central value).
624
+ * Be aware that kurtosis can be a rather slow metric to calculate.
625
+ * - min_gain
626
+ * Minimum gain that a split threshold needs to produce in order to proceed with a split.
627
+ * Only used when the splits are decided by a variance gain criterion ('prob_pick_by_gain_pl' or
628
+ * 'prob_pick_by_gain_avg', but not 'prob_pick_by_full_gain' nor 'prob_pick_by_dens').
629
+ * If the highest possible gain in the evaluated splits at a node is below this threshold,
630
+ * that node becomes a terminal node.
631
+ * This can be used as a more sophisticated depth control when using pooled gain (note that 'max_depth'
632
+ * still applies on top of this heuristic).
633
+ * - missing_action
634
+ * How to handle missing data at both fitting and prediction time. Options are a) 'Divide' (for the single-variable
635
+ * model only, recommended), which will follow both branches and combine the result with the weight given by the fraction of
636
+ * the data that went to each branch when fitting the model, b) 'Impute', which will assign observations to the
637
+ * branch with the most observations in the single-variable model (but imputed values will also be used for
638
+ * gain calculations), or fill in missing values with the median of each column of the sample from which the
639
+ * split was made in the extended model (recommended) (but note that the calculation of medians does not take
640
+ * into account sample weights when using 'weights_as_sample_prob=false', and note that when using a gain
641
+ * criterion for splits with 'ndim=1', it will use the imputed values in the calculation), c) 'Fail' which will
642
+ * assume that there are no missing values and will trigger undefined behavior if it encounters any.
643
+ * In the extended model, infinite values will be treated as missing.
644
+ * Note that passing 'Fail' might crash the process if there turn out to be missing values, but will otherwise
645
+ * produce faster fitting and prediction times along with decreased model object sizes.
646
+ * Models from [1], [2], [3], [4] correspond to 'Fail' here.
647
+ * - cat_split_type
648
+ * Whether to split categorical features by assigning sub-sets of them to each branch, or by assigning
649
+ * a single category to a branch and the rest to the other branch. For the extended model, whether to
650
+ * give each category a coefficient, or only one while the rest get zero.
651
+ * - new_cat_action
652
+ * What to do after splitting a categorical feature when new data that reaches that split has categories that
653
+ * the sub-sample from which the split was done did not have. Options are a) "Weighted" (recommended), which
654
+ * in the single-variable model will follow both branches and combine the result with weight given by the fraction of the
655
+ * data that went to each branch when fitting the model, and in the extended model will assign
656
+ * them the median value for that column that was added to the linear combination of features (but note that
657
+ * this median calculation does not use sample weights when using 'weights_as_sample_prob=false'),
658
+ * b) "Smallest", which will assign all observations with unseen categories in the split to the branch that
659
+ * had fewer observations when fitting the model, c) "Random", which will assign a branch (coefficient in the
660
+ * extended model) at random for each category beforehand, even if no observations had that category when
661
+ * fitting the model. Ignored when passing 'cat_split_type' = 'SingleCateg'.
662
+ * - all_perm
663
+ * When doing categorical variable splits by pooled gain with 'ndim=1' (regular model),
664
+ * whether to consider all possible permutations of variables to assign to each branch or not. If 'false',
665
+ * will sort the categories by their frequency and make a grouping in this sorted order. Note that the
666
+ * number of combinations evaluated (if 'true') is the factorial of the number of present categories in
667
+ * a given column (minus 2). For averaged gain, the best split is always to put the second most-frequent
668
+ * category in a separate branch, so not evaluating all permutations (passing 'false') will make it
669
+ * possible to select other splits that respect the sorted frequency order.
670
+ * The total number of combinations must be a number that can fit into a 'size_t' variable - for x86-64
671
+ * systems, this means no column can have more than 20 different categories if using 'all_perm=true',
672
+ * but note that this is not checked within the function.
673
+ * Ignored when not using categorical variables or not doing splits by pooled gain or using 'ndim>1'.
674
+ * - coef_by_prop
675
+ * In the extended model, whether to sort the randomly-generated coefficients for categories
676
+ * according to their relative frequency in the tree node. This might provide better results when using
677
+ * categorical variables with too many categories, but is not recommended, and not reflective of
678
+ * real "categorical-ness". Ignored for the regular model ('ndim=1') and/or when not using categorical
679
+ * variables.
680
+ * - imputer (out)
681
+ * Pointer to already-allocated imputer object, which can be used to produce missing value imputations
682
+ * in new data. Pass NULL if no missing value imputations are required. Note that this is not related to
683
+ * 'missing_action' as missing values inside the model are treated differently and follow their own imputation
684
+ * or division strategy.
685
+ * - min_imp_obs
686
+ * Minimum number of observations with which an imputation value can be produced. Ignored if passing
687
+ * 'build_imputer' = 'false'.
688
+ * - depth_imp
689
+ * How to weight observations according to their depth when used for imputing missing values. Passing
690
+ * "Higher" will weigh observations higher the further down the tree (away from the root node) the
691
+ * terminal node is, while "Lower" will do the opposite, and "Same" will not modify the weights according
692
+ * to node depth in the tree. Implemented for testing purposes and not recommended to change
693
+ * from the default. Ignored when not passing 'impute_nodes'.
694
+ * - weigh_imp_rows
695
+ * How to weight node sizes when used for imputing missing values. Passing "Inverse" will weigh
696
+ * a node inversely proportional to the number of observations that end up there, while "Proportional"
697
+ * will weight them heavier the more observations there are, and "Flat" will weigh all nodes the same
698
+ * in this regard regardless of how many observations end up there. Implemented for testing purposes
699
+ * and not recommended to change from the default. Ignored when not passing 'impute_nodes'.
700
+ * - impute_at_fit
701
+ * Whether to impute missing values in the input data as the model is being built. If passing 'true',
702
+ * then 'sample_size' must be equal to 'nrows'. Values in the arrays passed to 'numeric_data',
703
+ * 'categ_data', and 'Xc', will get overwritten with the imputations produced.
704
+ * - random_seed
705
+ * Seed that will be used to generate random numbers used by the model.
706
+ * - use_long_double
707
+ * Whether to use 'long double' (extended precision) type for more precise calculations about
708
+ * standard deviations, means, ratios, weights, gain, and other potential aggregates. This makes
709
+ * such calculations accurate to a larger number of decimals (provided that the compiler used has
710
+ * wider long doubles than doubles) and it is highly recommended to use when the input data has
711
+ * a number of rows or columns exceeding 2^53 (an unlikely scenario), and also highly recommended
712
+ * to use when the input data has problematic scales (e.g. numbers that differ from each other by
713
+ * something like 10^-100 or columns that include values like 10^100 and 10^-100 and still need to
714
+ * be sensitive to a difference of 10^-100), but will make the calculations slower, the more so in
715
+ * platforms in which 'long double' is a software-emulated type (e.g. Power8 platforms).
716
+ * Note that some platforms (most notably Windows with the MSVC compiler) make no distinction
717
+ * between 'double' and 'long double'.
718
+ * - nthreads
719
+ * Number of parallel threads to use. Note that, the more threads, the more memory will be
720
+ * allocated, even if the thread does not end up being used.
721
+ * Be aware that most of the operations are bound by memory bandwidth, which means that
722
+ * adding more threads will not result in a linear speed-up. For some types of data
723
+ * (e.g. large sparse matrices with small sample sizes), adding more threads might result
724
+ * in only a very modest speed up (e.g. 1.5x faster with 4x more threads),
725
+ * even if all threads look fully utilized.
726
+ * Ignored when not building with OpenMP support.
727
+ *
728
+ * Returns
729
+ * =======
730
+ * Will return macro 'EXIT_SUCCESS' (typically =0) upon completion.
731
+ * If the process receives an interrupt signal, will return instead
732
+ * 'EXIT_FAILURE' (typically =1). If you do not have any way of determining
733
+ * what these values correspond to, you can use the functions
734
+ * 'return_EXIT_SUCCESS' and 'return_EXIT_FAILURE', which will return them
735
+ * as integers.
736
+ */
737
+ ISOTREE_EXPORTED
738
+ int fit_iforest(IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
739
+ real_t numeric_data[], size_t ncols_numeric,
740
+ int categ_data[], size_t ncols_categ, int ncat[],
741
+ real_t Xc[], sparse_ix Xc_ind[], sparse_ix Xc_indptr[],
742
+ size_t ndim, size_t ntry, CoefType coef_type, bool coef_by_prop,
743
+ real_t sample_weights[], bool with_replacement, bool weight_as_sample,
744
+ size_t nrows, size_t sample_size, size_t ntrees,
745
+ size_t max_depth, size_t ncols_per_tree,
746
+ bool limit_depth, bool penalize_range, bool standardize_data,
747
+ ScoringMetric scoring_metric, bool fast_bratio,
748
+ bool standardize_dist, double tmat[],
749
+ double output_depths[], bool standardize_depth,
750
+ real_t col_weights[], bool weigh_by_kurt,
751
+ double prob_pick_by_gain_pl, double prob_pick_by_gain_avg,
752
+ double prob_pick_by_full_gain, double prob_pick_by_dens,
753
+ double prob_pick_col_by_range, double prob_pick_col_by_var,
754
+ double prob_pick_col_by_kurt,
755
+ double min_gain, MissingAction missing_action,
756
+ CategSplit cat_split_type, NewCategAction new_cat_action,
757
+ bool all_perm, Imputer *imputer, size_t min_imp_obs,
758
+ UseDepthImp depth_imp, WeighImpRows weigh_imp_rows, bool impute_at_fit,
759
+ uint64_t random_seed, bool use_long_double, int nthreads);
760
+
761
+
762
+
763
+ /* Add additional trees to already-fitted isolation forest model
764
+ *
765
+ * Parameters
766
+ * ==========
767
+ * - model_outputs
768
+ * Pointer to fitted single-variable model object from function 'fit_iforest'. Pass NULL
769
+ * if the trees are to be added to an extended model. Can only pass one of
770
+ * 'model_outputs' and 'model_outputs_ext'. Note that this function is not thread-safe,
771
+ * so it cannot be run in parallel for the same model object.
772
+ * - model_outputs_ext
773
+ * Pointer to fitted extended model object from function 'fit_iforest'. Pass NULL
774
+ * if the trees are to be added to a single-variable model. Can only pass one of
775
+ * 'model_outputs' and 'model_outputs_ext'. Note that this function is not thread-safe,
776
+ * so it cannot be run in parallel for the same model object.
777
+ * - numeric_data[nrows * ncols_numeric]
778
+ * Pointer to numeric data to which to fit this additional tree. Must be ordered by columns like Fortran,
779
+ * not ordered by rows like C (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.).
780
+ * Pass NULL if there are no dense numeric columns.
781
+ * Can only pass one of 'numeric_data' or 'Xc' + 'Xc_ind' + 'Xc_indptr'.
782
+ * If the model from 'fit_iforest' was fit to numeric data, must pass numeric data with the same number
783
+ * of columns, either as dense or as sparse arrays.
784
+ * - ncols_numeric
785
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
786
+ * what was originally passed to 'fit_iforest'.
787
+ * - categ_data[nrows * ncols_categ]
788
+ * Pointer to categorical data to which to fit this additional tree. Must be ordered by columns like Fortran,
789
+ * not ordered by rows like C (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.).
790
+ * Pass NULL if there are no categorical columns. The encoding must be the same as was used
791
+ * in the data to which the model was fit.
792
+ * Each category should be represented as an integer, and these integers must start at zero and
793
+ * be in consecutive order - i.e. if category '3' is present, category '2' must have also been
794
+ * present when the model was fit (note that they are not treated as being ordinal, this is just
795
+ * an encoding). Missing values should be encoded as negative numbers such as (-1). The encoding
796
+ * must be the same as was used in the data to which the model was fit.
797
+ * If the model from 'fit_iforest' was fit to categorical data, must pass categorical data with the same number
798
+ * of columns and the same category encoding.
799
+ * - ncols_categ
800
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
801
+ * what was originally passed to 'fit_iforest'.
802
+ * - ncat[ncols_categ]
803
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). May contain new categories,
804
+ * but should keep the same encodings that were used for previous categories.
805
+ * - Xc[nnz]
806
+ * Pointer to numeric data in sparse numeric matrix in CSC format (column-compressed).
807
+ * Pass NULL if there are no sparse numeric columns.
808
+ * Can only pass one of 'numeric_data' or 'Xc' + 'Xc_ind' + 'Xc_indptr'.
809
+ * - Xc_ind[nnz]
810
+ * Pointer to row indices to which each non-zero entry in 'Xc' corresponds.
811
+ * Must be in sorted order, otherwise results will be incorrect.
812
+ * Pass NULL if there are no sparse numeric columns.
813
+ * - Xc_indptr[ncols_numeric + 1]
814
+ * Pointer to column index pointers that tell at entry [col] where does column 'col'
815
+ * start and at entry [col + 1] where does column 'col' end.
816
+ * Pass NULL if there are no sparse numeric columns.
817
+ * - ndim
818
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
819
+ * what was originally passed to 'fit_iforest'.
820
+ * - ntry
821
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
822
+ * what was originally passed to 'fit_iforest'.
823
+ * - coef_type
824
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
825
+ * what was originally passed to 'fit_iforest'.
826
+ * - sample_weights
827
+ * Weights for the rows when adding this tree, either as sampling importances when using
828
+ * sub-samples for each tree (i.e. passing weight '2' makes a row twice as likely to be included
829
+ * in a random sub-sample), or as density measurement (i.e. passing weight '2' is the same as if
830
+ * the row appeared twice, thus it's less of an outlier) - how this is taken is determined
831
+ * through parameter 'weight_as_sample' that was passed to 'fit_iforest'.
832
+ * Pass NULL if the rows all have uniform weights.
833
+ * - nrows
834
+ * Number of rows in 'numeric_data', 'Xc', 'categ_data'.
835
+ * - max_depth
836
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
837
+ * what was originally passed to 'fit_iforest'.
838
+ * - ncols_per_tree
839
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
840
+ * what was originally passed to 'fit_iforest'.
841
+ * - limit_depth
842
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
843
+ * what was originally passed to 'fit_iforest'.
844
+ * - penalize_range
845
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
846
+ * what was originally passed to 'fit_iforest'.
847
+ * - standardize_data
848
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
849
+ * what was originally passed to 'fit_iforest'.
850
+ * - fast_bratio
851
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
852
+ * what was originally passed to 'fit_iforest'.
853
+ * - col_weights
854
+ * Sampling weights for each column, assuming all the numeric columns come before the categorical columns.
855
+ * Ignored when picking columns by deterministic criterion.
856
+ * If passing NULL, each column will have a uniform weight. If used along with kurtosis weights, the
857
+ * effect is multiplicative.
858
+ * - weigh_by_kurt
859
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
860
+ * what was originally passed to 'fit_iforest'.
861
+ * - prob_pick_by_gain_pl
862
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
863
+ * what was originally passed to 'fit_iforest'.
864
+ * - prob_pick_by_gain_avg
865
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
866
+ * what was originally passed to 'fit_iforest'.
867
+ * - prob_pick_by_full_gain
868
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
869
+ * what was originally passed to 'fit_iforest'.
870
+ * - prob_pick_by_dens
871
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
872
+ * what was originally passed to 'fit_iforest'.
873
+ * - prob_pick_col_by_range
874
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
875
+ * what was originally passed to 'fit_iforest'.
876
+ * - prob_pick_col_by_var
877
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
878
+ * what was originally passed to 'fit_iforest'.
879
+ * - prob_pick_col_by_kurt
880
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
881
+ * what was originally passed to 'fit_iforest'.
882
+ * - min_gain
883
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
884
+ * what was originally passed to 'fit_iforest'.
885
+ * - missing_action
886
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
887
+ * what was originally passed to 'fit_iforest'.
888
+ * - cat_split_type
889
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
890
+ * what was originally passed to 'fit_iforest'.
891
+ * - new_cat_action
892
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
893
+ * what was originally passed to 'fit_iforest'.
894
+ * - depth_imp
895
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
896
+ * what was originally passed to 'fit_iforest'.
897
+ * - weigh_imp_rows
898
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Cannot be changed from
899
+ * what was originally passed to 'fit_iforest'.
900
+ * - all_perm
901
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
902
+ * what was originally passed to 'fit_iforest'.
903
+ * - coef_by_prop
904
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
905
+ * what was originally passed to 'fit_iforest'.
906
+ * - imputer
907
+ * Pointer to already-allocated imputer object, as it was output from function 'fit_model' while
908
+ * producing either 'model_outputs' or 'model_outputs_ext'.
909
+ * Pass NULL if the model was built without imputer.
910
+ * - min_imp_obs
911
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
912
+ * what was originally passed to 'fit_iforest'.
913
+ * - indexer
914
+ * Indexer object associated to the model object ('model_outputs' or 'model_outputs_ext'), which will
915
+ * be updated with the new tree to add.
916
+ * If 'indexer' has reference points, these must be passed again here in order to index them.
917
+ * Pass NULL if the model has no associated indexer.
918
+ * - ref_numeric_data[nref * ncols_numeric]
919
+ * Pointer to numeric data for reference points. May be ordered by rows
920
+ * (i.e. entries 1..n contain row 0, n+1..2n row 1, etc.) - a.k.a. row-major - or by
921
+ * columns (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.) - a.k.a. column-major
922
+ * (see parameter 'ref_is_col_major').
923
+ * Pass NULL if there are no dense numeric columns or no reference points.
924
+ * Can only pass one of 'ref_numeric_data' or 'ref_Xc' + 'ref_Xc_ind' + 'ref_Xc_indptr'.
925
+ * If 'indexer' is passed, it has reference points, and the data to which the model was fit had
926
+ * numeric columns, then numeric data for reference points must be passed (in either dense or sparse format).
927
+ * - ref_categ_data[nref * ncols_categ]
928
+ * Pointer to categorical data for reference points. May be ordered by rows
929
+ * (i.e. entries 1..n contain row 0, n+1..2n row 1, etc.) - a.k.a. row-major - or by
930
+ * columns (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.) - a.k.a. column-major
931
+ * (see parameter 'ref_is_col_major').
932
+ * Pass NULL if there are no categorical columns or no reference points.
933
+ * If 'indexer' is passed, it has reference points, and the data to which the model was fit had
934
+ * categorical columns, then 'ref_categ_data' must be passed.
935
+ * - ref_is_col_major
936
+ * Whether 'ref_numeric_data' and/or 'ref_categ_data' are in column-major order. If numeric data is
937
+ * passed in sparse format, categorical data must be passed in column-major format. If passing dense
938
+ * data, row-major format is preferred as it will be faster. If the data is passed in row-major format,
939
+ * must also pass 'ref_ld_numeric' and/or 'ref_ld_categ'.
940
+ * If both 'ref_numeric_data' and 'ref_categ_data' are passed, they must have the same orientation
941
+ * (row-major or column-major).
942
+ * - ref_ld_numeric
943
+ * Leading dimension of the array 'ref_numeric_data', if it is passed in row-major format.
944
+ * Typically, this corresponds to the number of columns, but may be larger (the array will
945
+ * be accessed assuming that row 'n' starts at 'ref_numeric_data + n*ref_ld_numeric'). If passing
946
+ * 'ref_numeric_data' in column-major order, this is ignored and will be assumed that the
947
+ * leading dimension corresponds to the number of rows. This is ignored when passing numeric
948
+ * data in sparse format.
949
+ * - ref_ld_categ
950
+ * Leading dimension of the array 'ref_categ_data', if it is passed in row-major format.
951
+ * Typically, this corresponds to the number of columns, but may be larger (the array will
952
+ * be accessed assuming that row 'n' starts at 'ref_categ_data + n*ref_ld_categ'). If passing
953
+ * 'ref_categ_data' in column-major order, this is ignored and will be assumed that the
954
+ * leading dimension corresponds to the number of rows.
955
+ * - ref_Xc[ref_nnz]
956
+ * Pointer to numeric data for reference points in sparse numeric matrix in CSC format (column-compressed).
957
+ * Pass NULL if there are no sparse numeric columns for reference points or no reference points.
958
+ * Can only pass one of 'ref_numeric_data' or 'ref_Xc' + 'ref_Xc_ind' + 'ref_Xc_indptr'.
959
+ * - ref_Xc_ind[ref_nnz]
960
+ * Pointer to row indices to which each non-zero entry in 'ref_Xc' corresponds.
961
+ * Must be in sorted order, otherwise results will be incorrect.
962
+ * Pass NULL if there are no sparse numeric columns in CSC format for reference points or no reference points.
963
+ * - ref_Xc_indptr[ncols_numeric + 1]
964
+ * Pointer to column index pointers that tell at entry [col] where does column 'col'
965
+ * start and at entry [col + 1] where does column 'col' end.
966
+ * Pass NULL if there are no sparse numeric columns in CSC format for reference points or no reference points.
967
+ * - random_seed
968
+ * Seed that will be used to generate random numbers used by the model.
969
+ * - use_long_double
970
+ * Same parameter as for 'fit_iforest' (see the documentation in there for details). Can be changed from
971
+ * what was originally passed to 'fit_iforest'.
972
+ */
973
+ ISOTREE_EXPORTED
974
+ int add_tree(IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
975
+ real_t numeric_data[], size_t ncols_numeric,
976
+ int categ_data[], size_t ncols_categ, int ncat[],
977
+ real_t Xc[], sparse_ix Xc_ind[], sparse_ix Xc_indptr[],
978
+ size_t ndim, size_t ntry, CoefType coef_type, bool coef_by_prop,
979
+ real_t sample_weights[], size_t nrows,
980
+ size_t max_depth, size_t ncols_per_tree,
981
+ bool limit_depth, bool penalize_range, bool standardize_data,
982
+ bool fast_bratio,
983
+ real_t col_weights[], bool weigh_by_kurt,
984
+ double prob_pick_by_gain_pl, double prob_pick_by_gain_avg,
985
+ double prob_pick_by_full_gain, double prob_pick_by_dens,
986
+ double prob_pick_col_by_range, double prob_pick_col_by_var,
987
+ double prob_pick_col_by_kurt,
988
+ double min_gain, MissingAction missing_action,
989
+ CategSplit cat_split_type, NewCategAction new_cat_action,
990
+ UseDepthImp depth_imp, WeighImpRows weigh_imp_rows,
991
+ bool all_perm, Imputer *imputer, size_t min_imp_obs,
992
+ TreesIndexer *indexer,
993
+ real_t ref_numeric_data[], int ref_categ_data[],
994
+ bool ref_is_col_major, size_t ref_ld_numeric, size_t ref_ld_categ,
995
+ real_t ref_Xc[], sparse_ix ref_Xc_ind[], sparse_ix ref_Xc_indptr[],
996
+ uint64_t random_seed, bool use_long_double);
997
+
998
+
999
+ /* Predict outlier score, average depth, or terminal node numbers
1000
+ *
1001
+ * Parameters
1002
+ * ==========
1003
+ * - numeric_data[nrows * ncols_numeric]
1004
+ * Pointer to numeric data for which to make predictions. May be ordered by rows
1005
+ * (i.e. entries 1..n contain row 0, n+1..2n row 1, etc.) - a.k.a. row-major - or by
1006
+ * columns (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.) - a.k.a. column-major
1007
+ * (see parameter 'is_col_major').
1008
+ * Pass NULL if there are no dense numeric columns.
1009
+ * Can only pass one of 'numeric_data', 'Xc' + 'Xc_ind' + 'Xc_indptr', 'Xr' + 'Xr_ind' + 'Xr_indptr'.
1010
+ * - categ_data[nrows * ncols_categ]
1011
+ * Pointer to categorical data for which to make predictions. May be ordered by rows
1012
+ * (i.e. entries 1..n contain row 0, n+1..2n row 1, etc.) - a.k.a. row-major - or by
1013
+ * columns (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.) - a.k.a. column-major
1014
+ * (see parameter 'is_col_major').
1015
+ * Pass NULL if there are no categorical columns.
1016
+ * Each category should be represented as an integer, and these integers must start at zero and
1017
+ * be in consecutive order - i.e. if category '3' is present, category '2' must have also been
1018
+ * present when the model was fit (note that they are not treated as being ordinal, this is just
1019
+ * an encoding). Missing values should be encoded as negative numbers such as (-1). The encoding
1020
+ * must be the same as was used in the data to which the model was fit.
1021
+ * - is_col_major
1022
+ * Whether 'numeric_data' and 'categ_data' come in column-major order, like the data to which the
1023
+ * model was fit. If passing 'false', will assume they are in row-major order. Note that most of
1024
+ * the functions in this library work only with column-major order, but here both are suitable
1025
+ * and row-major is preferred. Both arrays must have the same orientation (row/column major).
1026
+ * If there is numeric sparse data in combination with categorical dense data and there are many
1027
+ * rows, it is recommended to pass the categorical data in column major order, as it will take
1028
+ * a faster route.
1029
+ * If passing 'is_col_major=false', must also provide 'ld_numeric' and/or 'ld_categ'.
1030
+ * - ld_numeric
1031
+ * Leading dimension of the array 'numeric_data', if it is passed in row-major format.
1032
+ * Typically, this corresponds to the number of columns, but may be larger (the array will
1033
+ * be accessed assuming that row 'n' starts at 'numeric_data + n*ld_numeric'). If passing
1034
+ * 'numeric_data' in column-major order, this is ignored and will be assumed that the
1035
+ * leading dimension corresponds to the number of rows. This is ignored when passing numeric
1036
+ * data in sparse format.
1037
+ * - ld_categ
1038
+ * Leading dimension of the array 'categ_data', if it is passed in row-major format.
1039
+ * Typically, this corresponds to the number of columns, but may be larger (the array will
1040
+ * be accessed assuming that row 'n' starts at 'categ_data + n*ld_categ'). If passing
1041
+ * 'categ_data' in column-major order, this is ignored and will be assumed that the
1042
+ * leading dimension corresponds to the number of rows.
1043
+ * - Xc[nnz]
1044
+ * Pointer to numeric data in sparse numeric matrix in CSC format (column-compressed).
1045
+ * Pass NULL if there are no sparse numeric columns.
1046
+ * Can only pass one of 'numeric_data', 'Xc' + 'Xc_ind' + 'Xc_indptr', 'Xr' + 'Xr_ind' + 'Xr_indptr'.
1047
+ * - Xc_ind[nnz]
1048
+ * Pointer to row indices to which each non-zero entry in 'Xc' corresponds.
1049
+ * Must be in sorted order, otherwise results will be incorrect.
1050
+ * Pass NULL if there are no sparse numeric columns in CSC format.
1051
+ * - Xc_indptr[ncols_numeric + 1]
1052
+ * Pointer to column index pointers that tell at entry [col] where does column 'col'
1053
+ * start and at entry [col + 1] where does column 'col' end.
1054
+ * Pass NULL if there are no sparse numeric columns in CSC format.
1055
+ * - Xr[nnz]
1056
+ * Pointer to numeric data in sparse numeric matrix in CSR format (row-compressed).
1057
+ * Pass NULL if there are no sparse numeric columns.
1058
+ * Can only pass one of 'numeric_data', 'Xc' + 'Xc_ind' + 'Xc_indptr', 'Xr' + 'Xr_ind' + 'Xr_indptr'.
1059
+ * - Xr_ind[nnz]
1060
+ * Pointer to column indices to which each non-zero entry in 'Xr' corresponds.
1061
+ * Must be in sorted order, otherwise results will be incorrect.
1062
+ * Pass NULL if there are no sparse numeric columns in CSR format.
1063
+ * - Xr_indptr[nrows + 1]
1064
+ * Pointer to row index pointers that tell at entry [row] where does row 'row'
1065
+ * start and at entry [row + 1] where does row 'row' end.
1066
+ * Pass NULL if there are no sparse numeric columns in CSR format.
1067
+ * - nrows
1068
+ * Number of rows in 'numeric_data', 'Xc', 'Xr', 'categ_data'.
1069
+ * - nthreads
1070
+ * Number of parallel threads to use. Note that, the more threads, the more memory will be
1071
+ * allocated, even if the thread does not end up being used. Ignored when not building with
1072
+ * OpenMP support.
1073
+ * - standardize
1074
+ * Whether to standardize the average depths for each row according to their relative magnitude
1075
+ * compared to the expected average, in order to obtain an outlier score. If passing 'false',
1076
+ * will output the average depth instead.
1077
+ * Ignored when not passing 'output_depths'.
1078
+ * - model_outputs
1079
+ * Pointer to fitted single-variable model object from function 'fit_iforest'. Pass NULL
1080
+ * if the predictions are to be made from an extended model. Can only pass one of
1081
+ * 'model_outputs' and 'model_outputs_ext'.
1082
+ * - model_outputs_ext
1083
+ * Pointer to fitted extended model object from function 'fit_iforest'. Pass NULL
1084
+ * if the predictions are to be made from a single-variable model. Can only pass one of
1085
+ * 'model_outputs' and 'model_outputs_ext'.
1086
+ * - output_depths[nrows] (out)
1087
+ * Pointer to array where the output average depths or outlier scores will be written into
1088
+ * (the return type is controlled according to parameter 'standardize').
1089
+ * Should always be passed when calling this function (it is not optional).
1090
+ * - tree_num[nrows * ntrees] (out)
1091
+ * Pointer to array where the output terminal node numbers will be written into.
1092
+ * Note that the mapping between tree node and terminal tree node is not stored in
1093
+ * the model object for efficiency reasons, so this mapping will be determined on-the-fly
1094
+ * when passing this parameter, and as such, there will be some overhead regardless of
1095
+ * the actual number of rows. Output will be in column-major order ([nrows, ntrees]).
1096
+ * This will not be calculable when using 'ndim==1' together with either
1097
+ * 'missing_action==Divide' or 'new_categ_action=Weighted'.
1098
+ * Pass NULL if this type of output is not needed.
1099
+ * - per_tree_depths[nrows * ntrees] (out)
1100
+ * Pointer to array where to output per-tree depths or expected depths for each row.
1101
+ * Note that these will not include range penalties ('penalize_range=true').
1102
+ * Output will be in row-major order ([nrows, ntrees]).
1103
+ * This will not be calculable when using 'ndim==1' together with either
1104
+ * 'missing_action==Divide' or 'new_categ_action=Weighted'.
1105
+ * Pass NULL if this type of output is not needed.
1106
+ * - indexer
1107
+ * Pointer to associated tree indexer for the model being used, if it was constructed,
1108
+ * which can be used to speed up tree numbers/indices predictions.
1109
+ * This is ignored when not passing 'tree_num'.
1110
+ * Pass NULL if the indexer has not been constructed.
1111
+ */
1112
+ ISOTREE_EXPORTED
1113
+ void predict_iforest(real_t numeric_data[], int categ_data[],
1114
+ bool is_col_major, size_t ld_numeric, size_t ld_categ,
1115
+ real_t Xc[], sparse_ix Xc_ind[], sparse_ix Xc_indptr[],
1116
+ real_t Xr[], sparse_ix Xr_ind[], sparse_ix Xr_indptr[],
1117
+ size_t nrows, int nthreads, bool standardize,
1118
+ IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
1119
+ double output_depths[], sparse_ix tree_num[],
1120
+ double per_tree_depths[],
1121
+ TreesIndexer *indexer);
1122
+
1123
+
1124
+
1125
+ /* Get the number of nodes present in a given model, per tree
1126
+ *
1127
+ * Parameters
1128
+ * ==========
1129
+ * - model_outputs
1130
+ * Pointer to fitted single-variable model object from function 'fit_iforest'.
1131
+ * - model_outputs_ext
1132
+ * Pointer to fitted extended model object from function 'fit_iforest'.
1133
+ * - n_nodes[ntrees] (out)
1134
+ * Number of nodes in each tree of the model, including non-terminal nodes.
1135
+ * - n_terminal[ntrees] (out)
1136
+ * Number of terminal nodes in each tree of the model.
1137
+ * - nthreads
1138
+ * Number of parallel threads to use.
1139
+ */
1140
+ ISOTREE_EXPORTED void get_num_nodes(IsoForest &model_outputs, sparse_ix *n_nodes, sparse_ix *n_terminal, int nthreads) noexcept;
1141
+ ISOTREE_EXPORTED void get_num_nodes(ExtIsoForest &model_outputs, sparse_ix *n_nodes, sparse_ix *n_terminal, int nthreads) noexcept;
1142
+
1143
+
1144
+
1145
+ /* Calculate distance or similarity or kernel/proximity between data points
1146
+ *
1147
+ * Parameters
1148
+ * ==========
1149
+ * - numeric_data[nrows * ncols_numeric]
1150
+ * Pointer to numeric data for which to make calculations. If not using 'indexer', must be
1151
+ * ordered by columns like Fortran, not ordered by rows like C (i.e. entries 1..n contain
1152
+ * column 0, n+1..2n column 1, etc.), while if using 'indexer', may be passed in either
1153
+ * row-major or column-major format (with row-major being faster).
1154
+ * If categorical data is passed, must be in the same storage order (row-major / column-major)
1155
+ * as numerical data (whether dense or sparse).
1156
+ * The column order must be the same as in the data that was used to fit the model.
1157
+ * If making calculations between two sets of observations/rows (see documentation for 'rmat'),
1158
+ * the first group is assumed to be the earlier rows here.
1159
+ * Pass NULL if there are no dense numeric columns.
1160
+ * Can only pass one of 'numeric_data' or 'Xc' + 'Xc_ind' + 'Xc_indptr'.
1161
+ * - categ_data[nrows * ncols_categ]
1162
+ * Pointer to categorical data for which to make calculations. If not using 'indexer', must be
1163
+ * ordered by columns like Fortran, not ordered by rows like C (i.e. entries 1..n contain
1164
+ * column 0, n+1..2n column 1, etc.), while if using 'indexer', may be passed in either
1165
+ * row-major or column-major format (with row-major being faster).
1166
+ * If numerical data is passed, must be in the same storage order (row-major / column-major)
1167
+ * as categorical data (whether the numerical data is dense or sparse).
1168
+ * Each category should be represented as an integer, and these integers must start at zero and
1169
+ * be in consecutive order - i.e. if category '3' is present, category '2' must have also been
1170
+ * present when the model was fit (note that they are not treated as being ordinal, this is just
1171
+ * an encoding). Missing values should be encoded as negative numbers such as (-1). The encoding
1172
+ * must be the same as was used in the data to which the model was fit.
1173
+ * Pass NULL if there are no categorical columns.
1174
+ * If making calculations between two sets of observations/rows (see documentation for 'rmat'),
1175
+ * the first group is assumed to be the earlier rows here.
1176
+ * - Xc[nnz]
1177
+ * Pointer to numeric data in sparse numeric matrix in CSC format (column-compressed),
1178
+ * or optionally in CSR format (row-compressed) if using 'indexer' and passing 'is_col_major=false'
1179
+ * (not recommended as the calculations will be slower if sparse data is passed as CSR).
1180
+ * If categorical data is passed, must be in the same storage order (row-major or CSR / column-major or CSC)
1181
+ * as numerical data (whether dense or sparse).
1182
+ * Pass NULL if there are no sparse numeric columns.
1183
+ * Can only pass one of 'numeric_data' or 'Xc' + 'Xc_ind' + 'Xc_indptr'.
1184
+ * - Xc_ind[nnz]
1185
+ * Pointer to row indices to which each non-zero entry in 'Xc' corresponds
1186
+ * (column indices if 'Xc' is in CSR format).
1187
+ * Must be in sorted order, otherwise results will be incorrect.
1188
+ * Pass NULL if there are no sparse numeric columns in CSC or CSR format.
1189
+ * - Xc_indptr[ncols_numeric + 1]
1190
+ * Pointer to column index pointers that tell at entry [col] where does column 'col'
1191
+ * start and at entry [col + 1] where does column 'col' end
1192
+ * (row index pointers if 'Xc' is passed in CSR format).
1193
+ * Pass NULL if there are no sparse numeric columns in CSC or CSR format.
1194
+ * If making calculations between two sets of observations/rows (see documentation for 'rmat'),
1195
+ * the first group is assumed to be the earlier rows here.
1196
+ * - nrows
1197
+ * Number of rows in 'numeric_data', 'Xc', 'categ_data'.
1198
+ * - use_long_double
1199
+ * Whether to use 'long double' (extended precision) type for the calculations. This makes them
1200
+ * more accurate (provided that the compiler used has wider long doubles than doubles), but
1201
+ * slower - especially in platforms in which 'long double' is a software-emulated type (e.g.
1202
+ * Power8 platforms).
1203
+ * - nthreads
1204
+ * Number of parallel threads to use. Note that, the more threads, the more memory will be
1205
+ * allocated, even if the thread does not end up being used (with one exception being kernel calculations
1206
+ * with respect to reference points in an indexer). Ignored when not building with OpenMP support.
1207
+ * - assume_full_distr
1208
+ * Whether to assume that the fitted model represents a full population distribution (will use a
1209
+ * standardizing criterion assuming infinite sample, and the results of the similarity between two points
1210
+ * at prediction time will not depend on the presence of any third point that is similar to them, but will
1211
+ * differ more compared to the pairwise distances between points from which the model was fit). If passing
1212
+ * 'false', will calculate pairwise distances as if the new observations at prediction time were added to
1213
+ * the sample to which each tree was fit, which will make the distances between two points potentially vary
1214
+ * according to other newly introduced points.
1215
+ * This was added for experimentation purposes only and it's not recommended to pass 'false'.
1216
+ * Note that when calculating distances using 'indexer', there
1217
+ * might be slight discrepancies between the numbers produced with or without the indexer due to what
1218
+ * are considered "additional" observations in this calculation.
1219
+ * This is ignored when passing 'as_kernel=true'.
1220
+ * - standardize_dist
1221
+ * Whether to standardize the resulting average separation depths between rows according
1222
+ * to the expected average separation depth in a similar way as when predicting outlierness,
1223
+ * in order to obtain a standardized distance. If passing 'false', will output the average
1224
+ * separation depth instead.
1225
+ * If passing 'as_kernel=true', this indicates whether to output a fraction (if 'true') or
1226
+ * the raw number of matching trees (if 'false').
1227
+ * - as_kernel
1228
+ * Whether to calculate the "similarities" as isolation kernel or proximity matrix, which counts
1229
+ * the proportion of trees in which two observations end up in the same terminal node. This is
1230
+ * typically much faster than separation-based distance, but is typically not as good quality.
1231
+ * Note that, for kernel calculations, the indexer is only used if it has reference points stored on it.
1232
+ * - model_outputs
1233
+ * Pointer to fitted single-variable model object from function 'fit_iforest'. Pass NULL
1234
+ * if the calculations are to be made from an extended model. Can only pass one of
1235
+ * 'model_outputs' and 'model_outputs_ext'.
1236
+ * - model_outputs_ext
1237
+ * Pointer to fitted extended model object from function 'fit_iforest'. Pass NULL
1238
+ * if the calculations are to be made from a single-variable model. Can only pass one of
1239
+ * 'model_outputs' and 'model_outputs_ext'.
1240
+ * - tmat[nrows * (nrows - 1) / 2] (out)
1241
+ * Pointer to array where the resulting pairwise distances or average separation depths or kernels will
1242
+ * be written into. As the output is a symmetric matrix, this function will only fill in the
1243
+ * upper-triangular part, in which entry 0 <= i < j < n will be located at position
1244
+ * p(i,j) = (i * (n - (i+1)/2) + j - i - 1).
1245
+ * Can be converted to a dense square matrix through function 'tmat_to_dense'.
1246
+ * The array must already be initialized to zeros.
1247
+ * If calculating distance/separation from a group of points to another group of points,
1248
+ * pass NULL here and use 'rmat' instead.
1249
+ * - rmat[nrows1 * nrows2] (out)
1250
+ * Pointer to array where to write the distances or separation depths or kernels between each row in
1251
+ * one set of observations and each row in a different set of observations. If doing these
1252
+ * calculations for all pairs of observations/rows, pass 'tmat' instead.
1253
+ * Will take the first group of observations as the rows in this matrix, and the second
1254
+ * group as the columns. The groups are assumed to be in the same data arrays, with the
1255
+ * first group corresponding to the earlier rows there.
1256
+ * This matrix will be used in row-major order (i.e. entries 1..nrows2 contain the first row from nrows1).
1257
+ * Must be already initialized to zeros.
1258
+ * If passing 'use_indexed_references=true' plus an indexer object with reference points, this
1259
+ * array should have dimension [nrows, n_references].
1260
+ * Ignored when 'tmat' is passed.
1261
+ * - n_from
1262
+ * When calculating distances between two groups of points, this indicates the number of
1263
+ * observations/rows belonging to the first group (the rows in 'rmat'), which will be
1264
+ * assumed to be the first 'n_from' rows.
1265
+ * Ignored when 'tmat' is passed or when 'use_indexed_references=true' plus an indexer with
1266
+ * references are passed.
1267
+ * - use_indexed_references
1268
+ * Whether to calculate distances with respect to reference points stored in the indexer
1269
+ * object, if it has any. This is only supported with 'assume_full_distr=true' or with 'as_kernel=true'.
1270
+ * If passing 'use_indexed_references=true', then 'tmat' must be NULL, and 'rmat' must
1271
+ * be of dimension [nrows, n_references].
1272
+ * - indexer
1273
+ * Pointer to associated tree indexer for the model being used, if it was constructed,
1274
+ * which can be used to speed up distance calculations, assuming that it was built with
1275
+ * option 'with_distances=true'. If it does not contain node distances, it will not be used.
1276
+ * Pass NULL if the indexer has not been constructed or was constructed with 'with_distances=false'.
1277
+ * If it contains reference points and passing 'use_indexed_references=true', distances will be
1278
+ * calculated between the input data passed here and the reference points stored in this object.
1279
+ * If passing 'as_kernel=true', the indexer can only be used for calculating kernels with respect to
1280
+ * reference points in the indexer, otherwise it will not be used (which also means that the data must be
1281
+ * passed in column-major order for all kernel calculations that are not with respect to reference points
1282
+ * from an indexer).
1283
+ * - is_col_major
1284
+ * Whether the data comes in column-major order. If using 'indexer', predictions are also possible
1285
+ * (and are even faster for the case of dense-only data) if passing the data in row-major format.
1286
+ * Without 'indexer' (and with 'as_kernel=true' but without reference points in the indexer), data
1287
+ * may only be passed in column-major format.
1288
+ * If there is sparse numeric data, it is highly suggested to pass it in CSC/column-major format.
1289
+ * - ld_numeric
1290
+ * If passing 'is_col_major=false', this indicates the leading dimension of the array 'numeric_data'.
1291
+ * Typically, this corresponds to the number of columns, but may be larger (the array will
1292
+ * be accessed assuming that row 'n' starts at 'numeric_data + n*ld_numeric'). If passing
1293
+ * 'numeric_data' in column-major order, this is ignored and will be assumed that the
1294
+ * leading dimension corresponds to the number of rows. This is ignored when passing numeric
1295
+ * data in sparse format.
1296
+ * Note that data in row-major order is only accepted when using 'indexer'.
1297
+ * - ld_categ
1298
+ * If passing 'is_col_major=false', this indicates the leading dimension of the array 'categ_data'.
1299
+ * Typically, this corresponds to the number of columns, but may be larger (the array will
1300
+ * be accessed assuming that row 'n' starts at 'categ_data + n*ld_categ'). If passing
1301
+ * 'categ_data' in column-major order, this is ignored and will be assumed that the
1302
+ * leading dimension corresponds to the number of rows.
1303
+ * Note that data in row-major order is only accepted when using 'indexer'.
1304
+ */
1305
+ ISOTREE_EXPORTED
1306
+ void calc_similarity(real_t numeric_data[], int categ_data[],
1307
+ real_t Xc[], sparse_ix Xc_ind[], sparse_ix Xc_indptr[],
1308
+ size_t nrows, bool use_long_double, int nthreads,
1309
+ bool assume_full_distr, bool standardize_dist, bool as_kernel,
1310
+ IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
1311
+ double tmat[], double rmat[], size_t n_from, bool use_indexed_references,
1312
+ TreesIndexer *indexer, bool is_col_major, size_t ld_numeric, size_t ld_categ);
1313
+
1314
+ /* Impute missing values in new data
1315
+ *
1316
+ * Parameters
1317
+ * ==========
1318
+ * - numeric_data[nrows * ncols_numeric] (in, out)
1319
+ * Pointer to numeric data in which missing values will be imputed. May be ordered by rows
1320
+ * (i.e. entries 1..n contain row 0, n+1..2n row 1, etc.) - a.k.a. row-major - or by
1321
+ * columns (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.) - a.k.a. column-major
1322
+ * (see parameter 'is_col_major').
1323
+ * Pass NULL if there are no dense numeric columns.
1324
+ * Can only pass one of 'numeric_data', 'Xr' + 'Xr_ind' + 'Xr_indptr'.
1325
+ * Imputations will overwrite values in this same array.
1326
+ * - categ_data[nrows * ncols_categ]
1327
+ * Pointer to categorical data in which missing values will be imputed. May be ordered by rows
1328
+ * (i.e. entries 1..n contain row 0, n+1..2n row 1, etc.) - a.k.a. row-major - or by
1329
+ * columns (i.e. entries 1..n contain column 0, n+1..2n column 1, etc.) - a.k.a. column-major
1330
+ * (see parameter 'is_col_major').
1331
+ * Pass NULL if there are no categorical columns.
1332
+ * Each category should be represented as an integer, and these integers must start at zero and
1333
+ * be in consecutive order - i.e. if category '3' is present, category '2' must have also been
1334
+ * present when the model was fit (note that they are not treated as being ordinal, this is just
1335
+ * an encoding). Missing values should be encoded as negative numbers such as (-1). The encoding
1336
+ * must be the same as was used in the data to which the model was fit.
1337
+ * Imputations will overwrite values in this same array.
1338
+ * - is_col_major
1339
+ * Whether 'numeric_data' and 'categ_data' come in column-major order, like the data to which the
1340
+ * model was fit. If passing 'false', will assume they are in row-major order. Note that most of
1341
+ * the functions in this library work only with column-major order, but here both are suitable
1342
+ * and row-major is preferred. Both arrays must have the same orientation (row/column major).
1343
+ * - ncols_categ
1344
+ * Number of categorical columns in the data.
1345
+ * - ncat[ncols_categ]
1346
+ * Number of categories in each categorical column. E.g. if the highest code for a column is '4',
1347
+ * the number of categories for that column is '5' (zero is one category).
1348
+ * Must be the same as was passed to 'fit_iforest'.
1349
+ * - Xr[nnz] (in, out)
1350
+ * Pointer to numeric data in sparse numeric matrix in CSR format (row-compressed).
1351
+ * Pass NULL if there are no sparse numeric columns.
1352
+ * Can only pass one of 'numeric_data', 'Xr' + 'Xr_ind' + 'Xr_indptr'.
1353
+ * Imputations will overwrite values in this same array.
1354
+ * - Xr_ind[nnz]
1355
+ * Pointer to column indices to which each non-zero entry in 'Xr' corresponds.
1356
+ * Must be in sorted order, otherwise results will be incorrect.
1357
+ * Pass NULL if there are no sparse numeric columns in CSR format.
1358
+ * - Xr_indptr[nrows + 1]
1359
+ * Pointer to row index pointers that tell at entry [row] where does row 'row'
1360
+ * start and at entry [row + 1] where does row 'row' end.
1361
+ * Pass NULL if there are no sparse numeric columns in CSR format.
1362
+ * - nrows
1363
+ * Number of rows in 'numeric_data', 'Xr', 'categ_data'.
1364
+ * - use_long_double
1365
+ * Whether to use 'long double' (extended precision) type for the calculations. This makes them
1366
+ * more accurate (provided that the compiler used has wider long doubles than doubles), but
1367
+ * slower - especially in platforms in which 'long double' is a software-emulated type (e.g.
1368
+ * Power8 platforms).
1369
+ * - nthreads
1370
+ * Number of parallel threads to use. Note that, the more threads, the more memory will be
1371
+ * allocated, even if the thread does not end up being used. Ignored when not building with
1372
+ * OpenMP support.
1373
+ * - model_outputs
1374
+ * Pointer to fitted single-variable model object from function 'fit_iforest'. Pass NULL
1375
+ * if the predictions are to be made from an extended model. Can only pass one of
1376
+ * 'model_outputs' and 'model_outputs_ext'.
1377
+ * - model_outputs_ext
1378
+ * Pointer to fitted extended model object from function 'fit_iforest'. Pass NULL
1379
+ * if the predictions are to be made from a single-variable model. Can only pass one of
1380
+ * 'model_outputs' and 'model_outputs_ext'.
1381
+ * - impute_nodes
1382
+ * Pointer to fitted imputation node objects for the same trees as in 'model_outputs' or 'model_outputs_ext',
1383
+ * as produced from function 'fit_iforest'.
1384
+ */
1385
+ ISOTREE_EXPORTED
1386
+ void impute_missing_values(real_t numeric_data[], int categ_data[], bool is_col_major,
1387
+ real_t Xr[], sparse_ix Xr_ind[], sparse_ix Xr_indptr[],
1388
+ size_t nrows, bool use_long_double, int nthreads,
1389
+ IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
1390
+ Imputer &imputer);
1391
+
1392
+
1393
+ /* Append trees from one model into another
1394
+ *
1395
+ * Parameters
1396
+ * ==========
1397
+ * - model (in, out)
1398
+ * Pointer to isolation forest model which has already been fit through 'fit_iforest'.
1399
+ * The trees from 'other' will be merged into this (will be at the end of vector member 'trees').
1400
+ * Both 'model' and 'other' must have been fit with the same hyperparameters
1401
+ * in order for this merge to work correctly - at the very least, should have
1402
+ * the same 'missing_action', 'cat_split_type', 'new_cat_action'.
1403
+ * Should only pass one of 'model'+'other' or 'ext_model'+'ext_other'.
1404
+ * Pass NULL if this is not to be used.
1405
+ * - other
1406
+ * Pointer to isolation forest model which has already been fit through 'fit_iforest'.
1407
+ * The trees from this object will be added into 'model' (this object will not be modified).
1408
+ * Both 'model' and 'other' must have been fit with the same hyperparameters
1409
+ * in order for this merge to work correctly - at the very least, should have
1410
+ * the same 'missing_action', 'cat_split_type', 'new_cat_action'.
1411
+ * Should only pass one of 'model'+'other' or 'ext_model'+'ext_other'.
1412
+ * Pass NULL if this is not to be used.
1413
+ * - ext_model (in, out)
1414
+ * Pointer to extended isolation forest model which has already been fit through 'fit_iforest'.
1415
+ * The trees/hyperplanes from 'ext_other' will be merged into this (will be at the end of vector member 'hplanes').
1416
+ * Both 'ext_model' and 'ext_other' must have been fit with the same hyperparameters
1417
+ * in order for this merge to work correctly - at the very least, should have
1418
+ * the same 'missing_action', 'cat_split_type', 'new_cat_action'.
1419
+ * Should only pass one of 'model'+'other' or 'ext_model'+'ext_other'.
1420
+ * Pass NULL if this is not to be used.
1421
+ * - ext_other
1422
+ * Pointer to extended isolation forest model which has already been fit through 'fit_iforest'.
1423
+ * The trees/hyperplanes from this object will be added into 'ext_model' (this object will not be modified).
1424
+ * Both 'ext_model' and 'ext_other' must have been fit with the same hyperparameters
1425
+ * in order for this merge to work correctly - at the very least, should have
1426
+ * the same 'missing_action', 'cat_split_type', 'new_cat_action'.
1427
+ * Should only pass one of 'model'+'other' or 'ext_model'+'ext_other'.
1428
+ * Pass NULL if this is not to be used.
1429
+ * - imputer (in, out)
1430
+ * Pointer to imputation object which has already been fit through 'fit_iforest' along with
1431
+ * either 'model' or 'ext_model' in the same call to 'fit_iforest'.
1432
+ * The imputation nodes from 'iother' will be merged into this (will be at the end of vector member 'imputer_tree').
1433
+ * Hyperparameters related to imputation might differ between 'imputer' and 'iother' ('imputer' will preserve its
1434
+ * hyperparameters after the merge).
1435
+ * Pass NULL if this is not to be used.
1436
+ * - iother
1437
+ * Pointer to imputation object which has already been fit through 'fit_iforest' along with
1438
+ * either 'model' or 'ext_model' in the same call to 'fit_iforest'.
1439
+ * The imputation nodes from this object will be added into 'imputer' (this object will not be modified).
1440
+ * Hyperparameters related to imputation might differ between 'imputer' and 'iother' ('imputer' will preserve its
1441
+ * hyperparameters after the merge).
1442
+ * Pass NULL if this is not to be used.
1443
+ * - indexer (in, out)
1444
+ * Pointer to indexer object which has already been fit through 'fit_iforest' along with
1445
+ * either 'model' or 'ext_model' in the same call to 'fit_iforest' or through another specialized function.
1446
+ * The indexed nodes from 'ind_other' will be merged into this (will be at the end of vector member 'indices').
1447
+ * Reference points should not differ between 'indexer' and 'ind_other'.
1448
+ * Pass NULL if this is not to be used.
1449
+ * - ind_other
1450
+ * Pointer to indexer object which has already been fit through 'fit_iforest' along with
1451
+ * either 'model' or 'ext_model' in the same call to 'fit_iforest' or through another specialized function.
1452
+ * The indexed nodes from this object will be added into 'indexer' (this object will not be modified).
1453
+ * Reference points should not differ between 'indexer' and 'ind_other'.
1454
+ * Pass NULL if this is not to be used.
1455
+ */
1456
+ ISOTREE_EXPORTED
1457
+ void merge_models(IsoForest* model, IsoForest* other,
1458
+ ExtIsoForest* ext_model, ExtIsoForest* ext_other,
1459
+ Imputer* imputer, Imputer* iother,
1460
+ TreesIndexer* indexer, TreesIndexer* ind_other);
1461
+
1462
+ /* Create a model containing a sub-set of the trees from another model
1463
+ *
1464
+ * Parameters
1465
+ * ==========
1466
+ * - model (in)
1467
+ * Pointer to isolation forest model which has already been fit through 'fit_iforest',
1468
+ * from which the desired trees will be copied into a new model object.
1469
+ * Pass NULL if using the extended model.
1470
+ * - ext_model (in)
1471
+ * Pointer to extended isolation forest model which has already been fit through 'fit_iforest',
1472
+ * from which the desired trees will be copied into a new model object.
1473
+ * Pass NULL if using the single-variable model.
1474
+ * - imputer (in)
1475
+ * Pointer to imputation object which has already been fit through 'fit_iforest' along with
1476
+ * either 'model' or 'ext_model' in the same call to 'fit_iforest'.
1477
+ * Pass NULL if the model was built without an imputer.
1478
+ * - indexer (in)
1479
+ * Pointer to indexer object which has already been fit through 'fit_iforest' along with
1480
+ * either 'model' or 'ext_model' in the same call to 'fit_iforest' or through another specialized function.
1481
+ * Pass NULL if the model was built without an indexer.
1482
+ * - model_new (out)
1483
+ * Pointer to already-allocated isolation forest model, which will be reset and to
1484
+ * which the selected trees from 'model' will be copied.
1485
+ * Pass NULL if using the extended model.
1486
+ * - ext_model_new (out)
1487
+ * Pointer to already-allocated extended isolation forest model, which will be reset and to
1488
+ * which the selected hyperplanes from 'ext_model' will be copied.
1489
+ * Pass NULL if using the single-variable model.
1490
+ * - imputer_new (out)
1491
+ * Pointer to already-allocated imputation object, which will be reset and to
1492
+ * which the selected nodes from 'imputer' (matching to those of either 'model'
1493
+ * or 'ext_model') will be copied.
1494
+ * Pass NULL if the model was built without an imputer.
1495
+ * - indexer_new (out)
1496
+ * Pointer to already-allocated indexer object, which will be reset and to
1497
+ * which the selected nodes from 'indexer' (matching to those of either 'model'
1498
+ * or 'ext_model') will be copied.
1499
+ * Pass NULL if the model was built without an indexer.
1500
+ */
1501
+ ISOTREE_EXPORTED
1502
+ void subset_model(IsoForest* model, IsoForest* model_new,
1503
+ ExtIsoForest* ext_model, ExtIsoForest* ext_model_new,
1504
+ Imputer* imputer, Imputer* imputer_new,
1505
+ TreesIndexer* indexer, TreesIndexer* indexer_new,
1506
+ size_t *trees_take, size_t ntrees_take);
1507
+
1508
+ /* Build indexer for faster terminal node predictions and/or distance calculations
1509
+ *
1510
+ * Parameters
1511
+ * ==========
1512
+ * - indexer
1513
+ * Pointer or reference to an indexer object which will be associated with a fitted model and in
1514
+ * which indices for terminal nodes and potentially node distances will be stored.
1515
+ * - model / model_outputs / model_outputs_ext
1516
+ * Pointer or reference to a fitted model object for which an indexer will be built.
1517
+ * - nthreads
1518
+ * Number of parallel threads to use. This operation will only be multi-threaded when passing
1519
+ * 'with_distances=true'.
1520
+ * - with_distances
1521
+ * Whether to also pre-calculate node distances in order to speed up 'calc_similarity' (distances).
1522
+ * Note that this will consume a lot more memory and make the resulting object significantly
1523
+ * heavier.
1524
+ */
1525
+ ISOTREE_EXPORTED
1526
+ void build_tree_indices(TreesIndexer &indexer, const IsoForest &model, int nthreads, const bool with_distances);
1527
+ ISOTREE_EXPORTED
1528
+ void build_tree_indices(TreesIndexer &indexer, const ExtIsoForest &model, int nthreads, const bool with_distances);
1529
+ ISOTREE_EXPORTED
1530
+ void build_tree_indices
1531
+ (
1532
+ TreesIndexer *indexer,
1533
+ const IsoForest *model_outputs,
1534
+ const ExtIsoForest *model_outputs_ext,
1535
+ int nthreads,
1536
+ const bool with_distances
1537
+ );
1538
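A minimal usage sketch of the reference-based overload above (editor's illustration; assumes a model fitted through 'fit_iforest', and the thread count is arbitrary):

```cpp
IsoForest model;        /* assume fitted through 'fit_iforest' */
TreesIndexer indexer;   /* default-constructed */
/* Pre-computing node distances speeds up 'calc_similarity' but uses more memory. */
build_tree_indices(indexer, model, /*nthreads=*/4, /*with_distances=*/true);
```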
+ /* Gets the number of reference points stored in an indexer object */
1539
+ ISOTREE_EXPORTED
1540
+ size_t get_number_of_reference_points(const TreesIndexer &indexer) noexcept;
1541
+
1542
+
1543
+ /* Functions to inspect serialized objects
1544
+ *
1545
+ * Parameters
1546
+ * ==========
1547
+ * - serialized_bytes (in)
1548
+ * A model from this library, serialized through the functions available since
1549
+ * version 0.3.0, in any of the varieties offered by the library (as separate
1550
+ * objects or as combined objects with metadata).
1551
+ * - is_isotree_model (out)
1552
+ * Whether the input 'serialized_bytes' is a serialized model from this library.
1553
+ * - is_compatible (out)
1554
+ * Whether the serialized model is compatible (i.e. can be de-serialized) with the
1555
+ * current setup.
1556
+ * Serialized models are compatible between:
1557
+ * - Different operating systems.
1558
+ * - Different compilers.
1559
+ * - Systems with different 'size_t' width (e.g. 32-bit and 64-bit),
1560
+ * as long as the file was produced on a system that was either 32-bit or 64-bit,
1561
+ * and as long as each saved value fits within the range of the machine's 'size_t' type.
1562
+ * - Systems with different 'int' width,
1563
+ * as long as the file was produced on a system that was 16-bit, 32-bit, or 64-bit,
1564
+ * and as long as each saved value fits within the range of the machine's int type.
1565
+ * - Systems with different endianness (e.g. x86 and PPC64 in big-endian mode).
1566
+ * - Versions of this package from 0.3.0 onwards.
1567
+ * But are not compatible between:
1568
+ * - Systems with different floating point numeric representations
1569
+ * (e.g. standard IEEE754 vs. a base-10 system).
1570
+ * - Versions of this package earlier than 0.3.0.
1571
+ * This pretty much guarantees that a given file can be serialized and de-serialized
1572
+ * on the same machine on which it was built, regardless of how the library was compiled.
1573
+ * Reading a serialized model that was produced on a platform with different
1574
+ * characteristics (e.g. 32-bit vs. 64-bit) will, however, be much slower.
1575
+ * - has_combined_objects (out)
1576
+ * Whether the serialized model is in the format of combined objects (as produced by the
1577
+ * functions named 'serialize_combined') or in the format of separate objects (as produced
1578
+ * by the functions named 'serialize_<model>').
1579
+ * If it is in the format of combined objects, it must be de-serialized through the functions
1580
+ * named 'deserialize_combined'; otherwise, it must be de-serialized through the functions
1581
+ * named 'deserialize_<model>'.
1582
+ * Note that the Python and R interfaces of this library use the combined objects format
1583
+ * when serializing to files.
1584
+ * - has_IsoForest (out)
1585
+ * Whether the serialized bytes include an 'IsoForest' object. If 'has_combined_objects=true', it
1586
+ * might include additional objects.
1587
+ * - has_ExtIsoForest (out)
1588
+ * Whether the serialized bytes include an 'ExtIsoForest' object. If 'has_combined_objects=true', it
1589
+ * might include additional objects.
1590
+ * - has_Imputer (out)
1591
+ * Whether the serialized bytes include an 'Imputer' object. If 'has_combined_objects=true', it
1591
+ * might include additional objects.
+ * - has_Indexer (out)
+ * Whether the serialized bytes include a 'TreesIndexer' object. If 'has_combined_objects=true', it
+ * might include additional objects.
1593
+ * - has_metadata (out)
1594
+ * Whether the serialized bytes include additional metadata in the form of a 'char' array.
1595
+ * This can only be present when having 'has_combined_objects=true'.
1596
+ * - size_metadata (out)
1597
+ * When the serialized bytes contain metadata, this denotes the size of the metadata (number
1598
+ * of bytes that it contains).
1599
+ */
1600
+ ISOTREE_EXPORTED
1601
+ void inspect_serialized_object
1602
+ (
1603
+ const char *serialized_bytes,
1604
+ bool &is_isotree_model,
1605
+ bool &is_compatible,
1606
+ bool &has_combined_objects,
1607
+ bool &has_IsoForest,
1608
+ bool &has_ExtIsoForest,
1609
+ bool &has_Imputer,
1610
+ bool &has_Indexer,
1611
+ bool &has_metadata,
1612
+ size_t &size_metadata
1613
+ );
1614
+ ISOTREE_EXPORTED
1615
+ void inspect_serialized_object
1616
+ (
1617
+ FILE *serialized_bytes,
1618
+ bool &is_isotree_model,
1619
+ bool &is_compatible,
1620
+ bool &has_combined_objects,
1621
+ bool &has_IsoForest,
1622
+ bool &has_ExtIsoForest,
1623
+ bool &has_Imputer,
1624
+ bool &has_Indexer,
1625
+ bool &has_metadata,
1626
+ size_t &size_metadata
1627
+ );
1628
+ ISOTREE_EXPORTED
1629
+ void inspect_serialized_object
1630
+ (
1631
+ std::istream &serialized_bytes,
1632
+ bool &is_isotree_model,
1633
+ bool &is_compatible,
1634
+ bool &has_combined_objects,
1635
+ bool &has_IsoForest,
1636
+ bool &has_ExtIsoForest,
1637
+ bool &has_Imputer,
1638
+ bool &has_Indexer,
1639
+ bool &has_metadata,
1640
+ size_t &size_metadata
1641
+ );
1642
+ ISOTREE_EXPORTED
1643
+ void inspect_serialized_object
1644
+ (
1645
+ const std::string &serialized_bytes,
1646
+ bool &is_isotree_model,
1647
+ bool &is_compatible,
1648
+ bool &has_combined_objects,
1649
+ bool &has_IsoForest,
1650
+ bool &has_ExtIsoForest,
1651
+ bool &has_Imputer,
1652
+ bool &has_Indexer,
1653
+ bool &has_metadata,
1654
+ size_t &size_metadata
1655
+ );
1656
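For illustration, a sketch (editor's addition) that serializes a fitted model with 'serialize_combined' (declared further below) and then inspects the resulting bytes through the std::string overload above; all variable names are hypothetical and the code is assumed to sit inside a function body:

```cpp
IsoForest model;   /* assume fitted through 'fit_iforest' */
std::string bytes = serialize_combined(&model, nullptr, nullptr, nullptr, nullptr, 0);

bool is_isotree_model, is_compatible, has_combined_objects;
bool has_IsoForest, has_ExtIsoForest, has_Imputer, has_Indexer, has_metadata;
size_t size_metadata;
inspect_serialized_object(bytes,
                          is_isotree_model, is_compatible, has_combined_objects,
                          has_IsoForest, has_ExtIsoForest, has_Imputer, has_Indexer,
                          has_metadata, size_metadata);
/* 'has_combined_objects' should come out true here, so these bytes would be
   read back through 'deserialize_combined'. */
```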
+
1657
+ /* Serialization and de-serialization functions (individual objects)
1658
+ *
1659
+ * Parameters
1660
+ * ==========
1661
+ * - model (in or out depending on function)
1662
+ * A model object to serialize (when it has 'const' qualifier), after being fitted through
1663
+ * function 'fit_iforest'; or an already-allocated object (should be initialized through
1664
+ * the default constructor) into which a serialized object of the same class will be
1665
+ * de-serialized. In the latter case, the contents of this object will be overwritten.
1666
+ * Note that this will only be able to load models generated with isotree version 0.3.0
1667
+ * and later, and that these serialized models are forwards compatible but not backwards
1668
+ * compatible (that is, a model saved with 0.3.0 can be loaded with 0.3.6, but not the other
1669
+ * way around).
1670
+ * - output (out)
1671
+ * A writable object or stream in which to save/persist/serialize the
1672
+ * model or imputer object. In the functions that do not take this as a parameter,
1673
+ * it will be returned as a string containing the raw bytes.
1674
+ * Should be opened in binary mode.
1675
+ * Note: on Windows, if compiling this library with a compiler other than MSVC or MINGW,
1676
+ * there might be issues writing models to FILE pointers if the models are larger than 2GB.
1677
+ * - in (in)
1678
+ * A readable object or stream which contains the serialized/persisted model or
1679
+ * imputer object which will be de-serialized. Should be opened in binary mode.
1680
+ *
1681
+ * Returns
1682
+ * =======
1683
+ * (Only for functions 'determine_serialized_size')
1684
+ * Size that the model or imputer object will use when serialized, intended to be
1685
+ * used for allocating arrays beforehand when serializing to 'char'.
1686
+ */
1687
+ ISOTREE_EXPORTED
1688
+ size_t determine_serialized_size(const IsoForest &model) noexcept;
1689
+ ISOTREE_EXPORTED
1690
+ size_t determine_serialized_size(const ExtIsoForest &model) noexcept;
1691
+ ISOTREE_EXPORTED
1692
+ size_t determine_serialized_size(const Imputer &model) noexcept;
1693
+ ISOTREE_EXPORTED
1694
+ size_t determine_serialized_size(const TreesIndexer &model) noexcept;
1695
+ ISOTREE_EXPORTED
1696
+ void serialize_IsoForest(const IsoForest &model, char *out);
1697
+ ISOTREE_EXPORTED
1698
+ void serialize_IsoForest(const IsoForest &model, FILE *out);
1699
+ ISOTREE_EXPORTED
1700
+ void serialize_IsoForest(const IsoForest &model, std::ostream &out);
1701
+ ISOTREE_EXPORTED
1702
+ std::string serialize_IsoForest(const IsoForest &model);
1703
+ ISOTREE_EXPORTED
1704
+ void deserialize_IsoForest(IsoForest &model, const char *in);
1705
+ ISOTREE_EXPORTED
1706
+ void deserialize_IsoForest(IsoForest &model, FILE *in);
1707
+ ISOTREE_EXPORTED
1708
+ void deserialize_IsoForest(IsoForest &model, std::istream &in);
1709
+ ISOTREE_EXPORTED
1710
+ void deserialize_IsoForest(IsoForest &model, const std::string &in);
1711
+ ISOTREE_EXPORTED
1712
+ void serialize_ExtIsoForest(const ExtIsoForest &model, char *out);
1713
+ ISOTREE_EXPORTED
1714
+ void serialize_ExtIsoForest(const ExtIsoForest &model, FILE *out);
1715
+ ISOTREE_EXPORTED
1716
+ void serialize_ExtIsoForest(const ExtIsoForest &model, std::ostream &out);
1717
+ ISOTREE_EXPORTED
1718
+ std::string serialize_ExtIsoForest(const ExtIsoForest &model);
1719
+ ISOTREE_EXPORTED
1720
+ void deserialize_ExtIsoForest(ExtIsoForest &model, const char *in);
1721
+ ISOTREE_EXPORTED
1722
+ void deserialize_ExtIsoForest(ExtIsoForest &model, FILE *in);
1723
+ ISOTREE_EXPORTED
1724
+ void deserialize_ExtIsoForest(ExtIsoForest &model, std::istream &in);
1725
+ ISOTREE_EXPORTED
1726
+ void deserialize_ExtIsoForest(ExtIsoForest &model, const std::string &in);
1727
+ ISOTREE_EXPORTED
1728
+ void serialize_Imputer(const Imputer &model, char *out);
1729
+ ISOTREE_EXPORTED
1730
+ void serialize_Imputer(const Imputer &model, FILE *out);
1731
+ ISOTREE_EXPORTED
1732
+ void serialize_Imputer(const Imputer &model, std::ostream &out);
1733
+ ISOTREE_EXPORTED
1734
+ std::string serialize_Imputer(const Imputer &model);
1735
+ ISOTREE_EXPORTED
1736
+ void deserialize_Imputer(Imputer &model, const char *in);
1737
+ ISOTREE_EXPORTED
1738
+ void deserialize_Imputer(Imputer &model, FILE *in);
1739
+ ISOTREE_EXPORTED
1740
+ void deserialize_Imputer(Imputer &model, std::istream &in);
1741
+ ISOTREE_EXPORTED
1742
+ void deserialize_Imputer(Imputer &model, const std::string &in);
1743
+ ISOTREE_EXPORTED
1744
+ void serialize_Indexer(const TreesIndexer &model, char *out);
1745
+ ISOTREE_EXPORTED
1746
+ void serialize_Indexer(const TreesIndexer &model, FILE *out);
1747
+ ISOTREE_EXPORTED
1748
+ void serialize_Indexer(const TreesIndexer &model, std::ostream &out);
1749
+ ISOTREE_EXPORTED
1750
+ std::string serialize_Indexer(const TreesIndexer &model);
1751
+ ISOTREE_EXPORTED
1752
+ void deserialize_Indexer(TreesIndexer &model, const char *in);
1753
+ ISOTREE_EXPORTED
1754
+ void deserialize_Indexer(TreesIndexer &model, FILE *in);
1755
+ ISOTREE_EXPORTED
1756
+ void deserialize_Indexer(TreesIndexer &model, std::istream &in);
1757
+ ISOTREE_EXPORTED
1758
+ void deserialize_Indexer(TreesIndexer &model, const std::string &in);
1759
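A minimal round-trip sketch with the string-based variants above (editor's illustration; assumes a model fitted through 'fit_iforest'):

```cpp
IsoForest model;      /* assume fitted through 'fit_iforest' */
std::string bytes = serialize_IsoForest(model);

IsoForest restored;   /* default-constructed; contents will be overwritten */
deserialize_IsoForest(restored, bytes);
```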
+
1760
+
1761
+ /* Serialization and de-serialization functions (combined objects)
1762
+ *
1763
+ * Parameters
1764
+ * ==========
1765
+ * - model (in or out depending on function)
1766
+ * A single-variable model object to serialize or de-serialize.
1767
+ * If the serialized object contains this type of object, it must be
1768
+ * passed as an already-allocated object (initialized through the default
1769
+ * constructor function).
1770
+ * When de-serializing, whether it needs to be passed can be checked through function
1771
+ * 'inspect_serialized_object'.
1772
+ * If using the extended model, should pass NULL.
1773
+ * Must pass one of 'model' or 'model_ext'.
1774
+ * - model_ext (in or out depending on function)
1775
+ * An extended model object to serialize or de-serialize.
1776
+ * If using the single-variable model, should pass NULL.
1777
+ * Must pass one of 'model' or 'model_ext'.
1778
+ * - imputer (in or out depending on function)
1779
+ * An imputer object to serialize or de-serialize.
1780
+ * Like 'model' and 'model_ext', must also be passed when de-serializing
1781
+ * if the serialized bytes contain such an object.
1782
+ * - optional_metadata (in or out depending on function)
1783
+ * Optional metadata to write at the end of the file, which will be written
1784
+ * unformatted (it is assumed files are in binary mode).
1785
+ * Pass NULL if there is no metadata.
1786
+ * - size_optional_metadata (in or out depending on function)
1787
+ * Size of the optional metadata, if passed. Pass zero if there is no metadata.
1788
+ * - serialized_model (in)
1789
+ * A single-variable model which was serialized to raw bytes in the separate-objects
1790
+ * format, using function 'serialize_IsoForest'.
1791
+ * Pass NULL if using the extended model.
1792
+ * Must pass one of 'serialized_model' or 'serialized_model_ext'.
1793
+ * Note that if it was produced on a platform with different characteristics than
1794
+ * the one on which this function is being called (e.g. different 'size_t' width or
1795
+ * different endianness), it will be re-serialized during the function call, which
1796
+ * can be slow and use a lot of memory.
1797
+ * - serialized_model_ext (in)
1798
+ * An extended model which was serialized to raw bytes in the separate-objects
1799
+ * format, using function 'serialize_ExtIsoForest'.
1800
+ * Pass NULL if using the single-variable model.
1801
+ * Must pass one of 'serialized_model' or 'serialized_model_ext'.
1802
+ * - serialized_imputer (in)
1803
+ * An imputer object which was serialized to raw bytes in the separate-objects
1804
+ * format, using function 'serialize_Imputer'.
1805
+ * - output (out)
1806
+ * A writable object or stream in which to save/persist/serialize the
1807
+ * model objects. In the functions that do not take this as a parameter,
1808
+ * it will be returned as a string containing the raw bytes.
1809
+ * Should be opened in binary mode.
1810
+ * - in (in)
1811
+ * A readable object or stream which contains the serialized/persisted model
1812
+ * objects which will be de-serialized. Should be opened in binary mode.
1813
+ *
1814
+ * Returns
1815
+ * =======
1816
+ * (Only for functions 'determine_serialized_size')
1817
+ * Size that the objects will use when serialized, intended to be
1818
+ * used for allocating arrays beforehand when serializing to 'char'.
1819
+ */
1820
+ ISOTREE_EXPORTED
1821
+ size_t determine_serialized_size_combined
1822
+ (
1823
+ const IsoForest *model,
1824
+ const ExtIsoForest *model_ext,
1825
+ const Imputer *imputer,
1826
+ const TreesIndexer *indexer,
1827
+ const size_t size_optional_metadata
1828
+ ) noexcept;
1829
+ ISOTREE_EXPORTED
1830
+ size_t determine_serialized_size_combined
1831
+ (
1832
+ const char *serialized_model,
1833
+ const char *serialized_model_ext,
1834
+ const char *serialized_imputer,
1835
+ const char *serialized_indexer,
1836
+ const size_t size_optional_metadata
1837
+ ) noexcept;
1838
+ ISOTREE_EXPORTED
1839
+ void serialize_combined
1840
+ (
1841
+ const IsoForest *model,
1842
+ const ExtIsoForest *model_ext,
1843
+ const Imputer *imputer,
1844
+ const TreesIndexer *indexer,
1845
+ const char *optional_metadata,
1846
+ const size_t size_optional_metadata,
1847
+ char *out
1848
+ );
1849
+ ISOTREE_EXPORTED
1850
+ void serialize_combined
1851
+ (
1852
+ const IsoForest *model,
1853
+ const ExtIsoForest *model_ext,
1854
+ const Imputer *imputer,
1855
+ const TreesIndexer *indexer,
1856
+ const char *optional_metadata,
1857
+ const size_t size_optional_metadata,
1858
+ FILE *out
1859
+ );
1860
+ ISOTREE_EXPORTED
1861
+ void serialize_combined
1862
+ (
1863
+ const IsoForest *model,
1864
+ const ExtIsoForest *model_ext,
1865
+ const Imputer *imputer,
1866
+ const TreesIndexer *indexer,
1867
+ const char *optional_metadata,
1868
+ const size_t size_optional_metadata,
1869
+ std::ostream &out
1870
+ );
1871
+ ISOTREE_EXPORTED
1872
+ std::string serialize_combined
1873
+ (
1874
+ const IsoForest *model,
1875
+ const ExtIsoForest *model_ext,
1876
+ const Imputer *imputer,
1877
+ const TreesIndexer *indexer,
1878
+ const char *optional_metadata,
1879
+ const size_t size_optional_metadata
1880
+ );
1881
+ ISOTREE_EXPORTED
1882
+ void serialize_combined
1883
+ (
1884
+ const char *serialized_model,
1885
+ const char *serialized_model_ext,
1886
+ const char *serialized_imputer,
1887
+ const char *serialized_indexer,
1888
+ const char *optional_metadata,
1889
+ const size_t size_optional_metadata,
1890
+ FILE *out
1891
+ );
1892
+ ISOTREE_EXPORTED
1893
+ void serialize_combined
1894
+ (
1895
+ const char *serialized_model,
1896
+ const char *serialized_model_ext,
1897
+ const char *serialized_imputer,
1898
+ const char *serialized_indexer,
1899
+ const char *optional_metadata,
1900
+ const size_t size_optional_metadata,
1901
+ std::ostream &out
1902
+ );
1903
+ ISOTREE_EXPORTED
1904
+ std::string serialize_combined
1905
+ (
1906
+ const char *serialized_model,
1907
+ const char *serialized_model_ext,
1908
+ const char *serialized_imputer,
1909
+ const char *serialized_indexer,
1910
+ const char *optional_metadata,
1911
+ const size_t size_optional_metadata
1912
+ );
1913
+ ISOTREE_EXPORTED
1914
+ void deserialize_combined
1915
+ (
1916
+ const char* in,
1917
+ IsoForest *model,
1918
+ ExtIsoForest *model_ext,
1919
+ Imputer *imputer,
1920
+ TreesIndexer *indexer,
1921
+ char *optional_metadata
1922
+ );
1923
+ ISOTREE_EXPORTED
1924
+ void deserialize_combined
1925
+ (
1926
+ FILE* in,
1927
+ IsoForest *model,
1928
+ ExtIsoForest *model_ext,
1929
+ Imputer *imputer,
1930
+ TreesIndexer *indexer,
1931
+ char *optional_metadata
1932
+ );
1933
+ ISOTREE_EXPORTED
1934
+ void deserialize_combined
1935
+ (
1936
+ std::istream &in,
1937
+ IsoForest *model,
1938
+ ExtIsoForest *model_ext,
1939
+ Imputer *imputer,
1940
+ TreesIndexer *indexer,
1941
+ char *optional_metadata
1942
+ );
1943
+ ISOTREE_EXPORTED
1944
+ void deserialize_combined
1945
+ (
1946
+ const std::string &in,
1947
+ IsoForest *model,
1948
+ ExtIsoForest *model_ext,
1949
+ Imputer *imputer,
1950
+ TreesIndexer *indexer,
1951
+ char *optional_metadata
1952
+ );
1953
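For illustration, a sketch (editor's addition, hypothetical file name, error handling omitted) that persists a model together with its imputer in the combined-objects format and reads both back; files must be opened in binary mode:

```cpp
#include <cstdio>

IsoForest model;      /* assume fitted through 'fit_iforest' */
Imputer   imputer;    /* assume fitted in the same 'fit_iforest' call */

FILE *fout = std::fopen("model.bin", "wb");
serialize_combined(&model, nullptr, &imputer, nullptr,
                   nullptr, 0,        /* no optional metadata */
                   fout);
std::fclose(fout);

IsoForest model_in;   /* default-constructed targets */
Imputer   imputer_in;
FILE *fin = std::fopen("model.bin", "rb");
deserialize_combined(fin, &model_in, nullptr, &imputer_in, nullptr, nullptr);
std::fclose(fin);
```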
+
1954
+
1955
+ /* Serialize additional trees into previous serialized bytes
1956
+ *
1957
+ * Parameters
1958
+ * ==========
1959
+ * - model (in)
1960
+ * A model object to re-serialize, which had already been serialized into
1961
+ * 'serialized_bytes' with fewer trees than it currently has, and then
1962
+ * additional trees added through functions such as 'add_tree' or 'merge_models'.
1963
+ * - serialized_bytes (in) / old_bytes (out)
1964
+ * Serialized version of 'model', which had previously been produced with
1965
+ * fewer trees than it currently has and then additional trees added through
1966
+ * functions such as 'add_tree' or 'merge_models'.
1967
+ * Must have been produced in a setup with the same characteristics (e.g. width
1968
+ * of 'int' and 'size_t', endianness, etc.).
1969
+ * - old_ntrees
1970
+ * Number of trees which were serialized from 'model' into 'serialized_bytes'
1971
+ * before. Trees that come after this index are assumed to be the additional
1972
+ * trees to serialize.
1973
+ *
1974
+ * Returns
1975
+ * =======
1976
+ * - For functions 'check_can_undergo_incremental_serialization', whether the serialized
1977
+ * object can be incrementally serialized.
1978
+ * - For functions 'determine_serialized_size_additional_trees', additional size (in addition
1979
+ * to current size) that the new serialized objects will have if they undergo incremental
1980
+ * serialization.
1981
+ */
1982
+ ISOTREE_EXPORTED
1983
+ bool check_can_undergo_incremental_serialization(const IsoForest &model, const char *serialized_bytes);
1984
+ ISOTREE_EXPORTED
1985
+ bool check_can_undergo_incremental_serialization(const ExtIsoForest &model, const char *serialized_bytes);
1986
+ ISOTREE_EXPORTED
1987
+ size_t determine_serialized_size_additional_trees(const IsoForest &model, size_t old_ntrees);
1988
+ ISOTREE_EXPORTED
1989
+ size_t determine_serialized_size_additional_trees(const ExtIsoForest &model, size_t old_ntrees);
1990
+ ISOTREE_EXPORTED
1991
+ size_t determine_serialized_size_additional_trees(const Imputer &model, size_t old_ntrees);
1992
+ ISOTREE_EXPORTED
1993
+ size_t determine_serialized_size_additional_trees(const TreesIndexer &model, size_t old_ntrees);
1994
+ ISOTREE_EXPORTED
1995
+ void incremental_serialize_IsoForest(const IsoForest &model, char *old_bytes_reallocated);
1996
+ ISOTREE_EXPORTED
1997
+ void incremental_serialize_ExtIsoForest(const ExtIsoForest &model, char *old_bytes_reallocated);
1998
+ ISOTREE_EXPORTED
1999
+ void incremental_serialize_Imputer(const Imputer &model, char *old_bytes_reallocated);
2000
+ ISOTREE_EXPORTED
2001
+ void incremental_serialize_Indexer(const TreesIndexer &model, char *old_bytes_reallocated);
2002
+ ISOTREE_EXPORTED
2003
+ void incremental_serialize_IsoForest(const IsoForest &model, std::string &old_bytes);
2004
+ ISOTREE_EXPORTED
2005
+ void incremental_serialize_ExtIsoForest(const ExtIsoForest &model, std::string &old_bytes);
2006
+ ISOTREE_EXPORTED
2007
+ void incremental_serialize_Imputer(const Imputer &model, std::string &old_bytes);
2008
+ ISOTREE_EXPORTED
2009
+ void incremental_serialize_Indexer(const TreesIndexer &model, std::string &old_bytes);
2010
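A sketch of the intended flow with the std::string variants above (editor's illustration; assumes 'model' was fitted and serialized, then later extended with additional trees, e.g. through 'merge_models'):

```cpp
IsoForest model;   /* assume fitted through 'fit_iforest' */
std::string bytes = serialize_IsoForest(model);

/* ... later: additional trees are added to 'model' (e.g. via 'merge_models') ... */

if (check_can_undergo_incremental_serialization(model, bytes.data()))
    incremental_serialize_IsoForest(model, bytes);   /* appends only the new trees */
else
    bytes = serialize_IsoForest(model);              /* fall back to a full re-serialization */
```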
+
2011
+
2012
+ /* Translate isolation forest model into a single SQL select statement
2013
+ *
2014
+ * Parameters
2015
+ * ==========
2016
+ * - model_outputs
2017
+ * Pointer to fitted single-variable model object from function 'fit_iforest'. Pass NULL
2018
+ * if the predictions are to be made from an extended model. Can only pass one of
2019
+ * 'model_outputs' and 'model_outputs_ext'.
2020
+ * - model_outputs_ext
2021
+ * Pointer to fitted extended model object from function 'fit_iforest'. Pass NULL
2022
+ * if the predictions are to be made from a single-variable model. Can only pass one of
2023
+ * 'model_outputs' and 'model_outputs_ext'.
2024
+ * - table_from
2025
+ * Table name from where the columns used in the model will be selected.
2026
+ * - select_as
2027
+ * Alias to give to the outlier score in the select statement.
2028
+ * - numeric_colnames
2029
+ * Names to use for the numerical columns.
2030
+ * - categ_colnames
2031
+ * Names to use for the categorical columns.
2032
+ * - categ_levels
2033
+ * Names to use for the levels/categories of each categorical column. These will be enclosed
2034
+ * in single quotes.
2035
+ * - index1
2036
+ * Whether to make the node numbering start at 1 instead of 0 in the
2037
+ * resulting statement. If passing 'output_tree_num=false', this will only affect the
2038
+ * commented lines which act as delimiters. If passing 'output_tree_num=true', will also
2039
+ * affect the results (which will also start at 1).
2040
+ * - nthreads
2041
+ * Number of parallel threads to use. Note that the more threads are used, the more memory will be
2042
+ * allocated, even if a thread does not end up being used. Ignored when not building with
2043
+ * OpenMP support.
2044
+ *
2045
+ * Returns
2046
+ * =======
2047
+ * A string with the corresponding SQL statement that will calculate the outlier score
2048
+ * from the model.
2049
+ */
2050
+ ISOTREE_EXPORTED
2051
+ std::string generate_sql_with_select_from(IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
2052
+ std::string &table_from, std::string &select_as,
2053
+ std::vector<std::string> &numeric_colnames, std::vector<std::string> &categ_colnames,
2054
+ std::vector<std::vector<std::string>> &categ_levels,
2055
+ bool index1, int nthreads);
2056
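For illustration, a sketch with hypothetical table and column names (editor's addition; the model is assumed to have been fitted on two numeric columns and one categorical column with two levels, and the standard <string>/<vector> headers are assumed to be included):

```cpp
IsoForest model;   /* assume fitted through 'fit_iforest' */
std::string table_from = "sales";
std::string select_as  = "outlier_score";
std::vector<std::string> numeric_colnames = {"amount", "quantity"};
std::vector<std::string> categ_colnames   = {"region"};
std::vector<std::vector<std::string>> categ_levels = {{"north", "south"}};

std::string sql = generate_sql_with_select_from(&model, nullptr,
                                                table_from, select_as,
                                                numeric_colnames, categ_colnames,
                                                categ_levels,
                                                /*index1=*/true, /*nthreads=*/1);
```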
+
2057
+
2058
+ /* Translate model trees into SQL select statements
2059
+ *
2060
+ * Parameters
2061
+ * ==========
2062
+ * - model_outputs
2063
+ * Pointer to fitted single-variable model object from function 'fit_iforest'. Pass NULL
2064
+ * if the predictions are to be made from an extended model. Can only pass one of
2065
+ * 'model_outputs' and 'model_outputs_ext'.
2066
+ * - model_outputs_ext
2067
+ * Pointer to fitted extended model object from function 'fit_iforest'. Pass NULL
2068
+ * if the predictions are to be made from a single-variable model. Can only pass one of
2069
+ * 'model_outputs' and 'model_outputs_ext'.
2070
+ * - numeric_colnames
2071
+ * Names to use for the numerical columns.
2072
+ * - categ_colnames
2073
+ * Names to use for the categorical columns.
2074
+ * - categ_levels
2075
+ * Names to use for the levels/categories of each categorical column. These will be enclosed
2076
+ * in single quotes.
2077
+ * - output_tree_num
2078
+ * Whether to output the terminal node number instead of the separation depth at each node.
2079
+ * - index1
2080
+ * Whether to make the node numbering start at 1 instead of 0 in the
2081
+ * resulting statement. If passing 'output_tree_num=false', this will only affect the
2082
+ * commented lines which act as delimiters. If passing 'output_tree_num=true', will also
2083
+ * affect the results (which will also start at 1).
2084
+ * - single_tree
2085
+ * Whether to generate the select statement for a single tree of the model instead of for
2086
+ * all. The tree number to generate is to be passed under 'tree_num'.
2087
+ * - tree_num
2088
+ * Tree number for which to generate an SQL select statement, if passing 'single_tree=true'.
2089
+ * - nthreads
2090
+ * Number of parallel threads to use. Note that the more threads are used, the more memory will be
2091
+ * allocated, even if a thread does not end up being used. Ignored when not building with
2092
+ * OpenMP support.
2093
+ *
2094
+ * Returns
2095
+ * =======
2096
+ * A vector containing at each element the SQL statement for the corresponding tree in the model.
2097
+ * If passing 'single_tree=true', will contain only one element, corresponding to the tree given
2098
+ * in 'tree_num'. The statements will be node-by-node, with commented-out separators using '---'
2099
+ * as delimiters and including the node number as part of the comment.
2100
+ */
2101
+ ISOTREE_EXPORTED
2102
+ std::vector<std::string> generate_sql(IsoForest *model_outputs, ExtIsoForest *model_outputs_ext,
2103
+ std::vector<std::string> &numeric_colnames, std::vector<std::string> &categ_colnames,
2104
+ std::vector<std::vector<std::string>> &categ_levels,
2105
+ bool output_tree_num, bool index1, bool single_tree, size_t tree_num,
2106
+ int nthreads);
2107
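And a companion sketch for the per-tree variant above, generating the statement for a single tree (editor's addition; same hypothetical column names as in the previous example):

```cpp
IsoForest model;   /* assume fitted through 'fit_iforest' */
std::vector<std::string> numeric_colnames = {"amount", "quantity"};
std::vector<std::string> categ_colnames   = {"region"};
std::vector<std::vector<std::string>> categ_levels = {{"north", "south"}};

std::vector<std::string> tree_sql =
    generate_sql(&model, nullptr,
                 numeric_colnames, categ_colnames, categ_levels,
                 /*output_tree_num=*/false, /*index1=*/true,
                 /*single_tree=*/true, /*tree_num=*/0,
                 /*nthreads=*/1);
/* 'tree_sql' holds a single statement, corresponding to tree 0. */
```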
+
2108
+
2109
+ ISOTREE_EXPORTED
2110
+ void set_reference_points(IsoForest *model_outputs, ExtIsoForest *model_outputs_ext, TreesIndexer *indexer,
2111
+ const bool with_distances,
2112
+ real_t *numeric_data, int *categ_data,
2113
+ bool is_col_major, size_t ld_numeric, size_t ld_categ,
2114
+ real_t *Xc, sparse_ix *Xc_ind, sparse_ix *Xc_indptr,
2115
+ real_t *Xr, sparse_ix *Xr_ind, sparse_ix *Xr_indptr,
2116
+ size_t nrows, int nthreads);