mlpack 4.6.1__cp38-cp38-win_amd64.whl → 4.6.2__cp38-cp38-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103) hide show
  1. mlpack/__init__.py +3 -3
  2. mlpack/adaboost_classify.cp38-win_amd64.pyd +0 -0
  3. mlpack/adaboost_probabilities.cp38-win_amd64.pyd +0 -0
  4. mlpack/adaboost_train.cp38-win_amd64.pyd +0 -0
  5. mlpack/approx_kfn.cp38-win_amd64.pyd +0 -0
  6. mlpack/arma_numpy.cp38-win_amd64.pyd +0 -0
  7. mlpack/bayesian_linear_regression.cp38-win_amd64.pyd +0 -0
  8. mlpack/cf.cp38-win_amd64.pyd +0 -0
  9. mlpack/dbscan.cp38-win_amd64.pyd +0 -0
  10. mlpack/decision_tree.cp38-win_amd64.pyd +0 -0
  11. mlpack/det.cp38-win_amd64.pyd +0 -0
  12. mlpack/emst.cp38-win_amd64.pyd +0 -0
  13. mlpack/fastmks.cp38-win_amd64.pyd +0 -0
  14. mlpack/gmm_generate.cp38-win_amd64.pyd +0 -0
  15. mlpack/gmm_probability.cp38-win_amd64.pyd +0 -0
  16. mlpack/gmm_train.cp38-win_amd64.pyd +0 -0
  17. mlpack/hmm_generate.cp38-win_amd64.pyd +0 -0
  18. mlpack/hmm_loglik.cp38-win_amd64.pyd +0 -0
  19. mlpack/hmm_train.cp38-win_amd64.pyd +0 -0
  20. mlpack/hmm_viterbi.cp38-win_amd64.pyd +0 -0
  21. mlpack/hoeffding_tree.cp38-win_amd64.pyd +0 -0
  22. mlpack/image_converter.cp38-win_amd64.pyd +0 -0
  23. mlpack/include/mlpack/core/cv/k_fold_cv.hpp +21 -12
  24. mlpack/include/mlpack/core/cv/k_fold_cv_impl.hpp +49 -39
  25. mlpack/include/mlpack/core/data/detect_file_type_impl.hpp +9 -46
  26. mlpack/include/mlpack/core/data/save_impl.hpp +315 -315
  27. mlpack/include/mlpack/core/data/utilities.hpp +158 -158
  28. mlpack/include/mlpack/core/math/ccov.hpp +1 -0
  29. mlpack/include/mlpack/core/math/ccov_impl.hpp +4 -5
  30. mlpack/include/mlpack/core/math/make_alias.hpp +98 -3
  31. mlpack/include/mlpack/core/util/arma_traits.hpp +19 -2
  32. mlpack/include/mlpack/core/util/gitversion.hpp +1 -1
  33. mlpack/include/mlpack/core/util/sfinae_utility.hpp +24 -2
  34. mlpack/include/mlpack/core/util/version.hpp +1 -1
  35. mlpack/include/mlpack/methods/ann/dists/bernoulli_distribution_impl.hpp +1 -2
  36. mlpack/include/mlpack/methods/ann/init_rules/network_init.hpp +5 -5
  37. mlpack/include/mlpack/methods/ann/layer/batch_norm.hpp +3 -2
  38. mlpack/include/mlpack/methods/ann/layer/batch_norm_impl.hpp +19 -20
  39. mlpack/include/mlpack/methods/ann/layer/concat.hpp +1 -0
  40. mlpack/include/mlpack/methods/ann/layer/concat_impl.hpp +6 -7
  41. mlpack/include/mlpack/methods/ann/layer/convolution_impl.hpp +3 -3
  42. mlpack/include/mlpack/methods/ann/layer/grouped_convolution_impl.hpp +3 -3
  43. mlpack/include/mlpack/methods/ann/layer/linear3d.hpp +1 -0
  44. mlpack/include/mlpack/methods/ann/layer/linear3d_impl.hpp +11 -14
  45. mlpack/include/mlpack/methods/ann/layer/max_pooling.hpp +5 -4
  46. mlpack/include/mlpack/methods/ann/layer/max_pooling_impl.hpp +15 -14
  47. mlpack/include/mlpack/methods/ann/layer/mean_pooling.hpp +3 -2
  48. mlpack/include/mlpack/methods/ann/layer/mean_pooling_impl.hpp +14 -15
  49. mlpack/include/mlpack/methods/ann/layer/multihead_attention.hpp +6 -5
  50. mlpack/include/mlpack/methods/ann/layer/multihead_attention_impl.hpp +24 -25
  51. mlpack/include/mlpack/methods/ann/layer/nearest_interpolation.hpp +1 -0
  52. mlpack/include/mlpack/methods/ann/layer/nearest_interpolation_impl.hpp +4 -4
  53. mlpack/include/mlpack/methods/ann/layer/padding.hpp +1 -0
  54. mlpack/include/mlpack/methods/ann/layer/padding_impl.hpp +12 -13
  55. mlpack/include/mlpack/methods/ann/layer/recurrent_layer.hpp +3 -2
  56. mlpack/include/mlpack/methods/ann/loss_functions/cosine_embedding_loss_impl.hpp +5 -4
  57. mlpack/include/mlpack/methods/ann/rnn.hpp +19 -18
  58. mlpack/include/mlpack/methods/ann/rnn_impl.hpp +15 -15
  59. mlpack/include/mlpack/methods/bayesian_linear_regression/bayesian_linear_regression_impl.hpp +3 -8
  60. mlpack/include/mlpack/methods/decision_tree/fitness_functions/gini_gain.hpp +5 -8
  61. mlpack/include/mlpack/methods/decision_tree/fitness_functions/information_gain.hpp +5 -8
  62. mlpack/include/mlpack/methods/gmm/diagonal_gmm_impl.hpp +2 -1
  63. mlpack/include/mlpack/methods/gmm/eigenvalue_ratio_constraint.hpp +3 -3
  64. mlpack/include/mlpack/methods/gmm/gmm_impl.hpp +2 -1
  65. mlpack/include/mlpack/methods/hmm/hmm_impl.hpp +10 -5
  66. mlpack/include/mlpack/methods/random_forest/random_forest.hpp +57 -37
  67. mlpack/include/mlpack/methods/random_forest/random_forest_impl.hpp +69 -59
  68. mlpack/kde.cp38-win_amd64.pyd +0 -0
  69. mlpack/kernel_pca.cp38-win_amd64.pyd +0 -0
  70. mlpack/kfn.cp38-win_amd64.pyd +0 -0
  71. mlpack/kmeans.cp38-win_amd64.pyd +0 -0
  72. mlpack/knn.cp38-win_amd64.pyd +0 -0
  73. mlpack/krann.cp38-win_amd64.pyd +0 -0
  74. mlpack/lars.cp38-win_amd64.pyd +0 -0
  75. mlpack/linear_regression_predict.cp38-win_amd64.pyd +0 -0
  76. mlpack/linear_regression_train.cp38-win_amd64.pyd +0 -0
  77. mlpack/linear_svm.cp38-win_amd64.pyd +0 -0
  78. mlpack/lmnn.cp38-win_amd64.pyd +0 -0
  79. mlpack/local_coordinate_coding.cp38-win_amd64.pyd +0 -0
  80. mlpack/logistic_regression.cp38-win_amd64.pyd +0 -0
  81. mlpack/lsh.cp38-win_amd64.pyd +0 -0
  82. mlpack/mean_shift.cp38-win_amd64.pyd +0 -0
  83. mlpack/nbc.cp38-win_amd64.pyd +0 -0
  84. mlpack/nca.cp38-win_amd64.pyd +0 -0
  85. mlpack/nmf.cp38-win_amd64.pyd +0 -0
  86. mlpack/pca.cp38-win_amd64.pyd +0 -0
  87. mlpack/perceptron.cp38-win_amd64.pyd +0 -0
  88. mlpack/preprocess_binarize.cp38-win_amd64.pyd +0 -0
  89. mlpack/preprocess_describe.cp38-win_amd64.pyd +0 -0
  90. mlpack/preprocess_one_hot_encoding.cp38-win_amd64.pyd +0 -0
  91. mlpack/preprocess_scale.cp38-win_amd64.pyd +0 -0
  92. mlpack/preprocess_split.cp38-win_amd64.pyd +0 -0
  93. mlpack/radical.cp38-win_amd64.pyd +0 -0
  94. mlpack/random_forest.cp38-win_amd64.pyd +0 -0
  95. mlpack/softmax_regression.cp38-win_amd64.pyd +0 -0
  96. mlpack/sparse_coding.cp38-win_amd64.pyd +0 -0
  97. mlpack-4.6.2.dist-info/DELVEWHEEL +2 -0
  98. {mlpack-4.6.1.dist-info → mlpack-4.6.2.dist-info}/METADATA +2 -2
  99. {mlpack-4.6.1.dist-info → mlpack-4.6.2.dist-info}/RECORD +102 -102
  100. mlpack-4.6.1.dist-info/DELVEWHEEL +0 -2
  101. {mlpack-4.6.1.dist-info → mlpack-4.6.2.dist-info}/WHEEL +0 -0
  102. {mlpack-4.6.1.dist-info → mlpack-4.6.2.dist-info}/top_level.txt +0 -0
  103. mlpack.libs/{.load-order-mlpack-4.6.1 → .load-order-mlpack-4.6.2} +1 -1
@@ -37,6 +37,7 @@ template<
37
37
  class RNN
38
38
  {
39
39
  public:
40
+ using CubeType = typename GetCubeType<MatType>::type;
40
41
  /**
41
42
  * Create the RNN object.
42
43
  *
@@ -128,8 +129,8 @@ class RNN
128
129
  */
129
130
  template<typename OptimizerType, typename... CallbackTypes>
130
131
  typename MatType::elem_type Train(
131
- arma::Cube<typename MatType::elem_type> predictors,
132
- arma::Cube<typename MatType::elem_type> responses,
132
+ CubeType predictors,
133
+ CubeType responses,
133
134
  OptimizerType& optimizer,
134
135
  CallbackTypes&&... callbacks);
135
136
 
@@ -156,8 +157,8 @@ class RNN
156
157
  */
157
158
  template<typename OptimizerType = ens::RMSProp, typename... CallbackTypes>
158
159
  typename MatType::elem_type Train(
159
- arma::Cube<typename MatType::elem_type> predictors,
160
- arma::Cube<typename MatType::elem_type> responses,
160
+ CubeType predictors,
161
+ CubeType responses,
161
162
  CallbackTypes&&... callbacks);
162
163
 
163
164
  /**
@@ -186,8 +187,8 @@ class RNN
186
187
  */
187
188
  template<typename OptimizerType, typename... CallbackTypes>
188
189
  typename MatType::elem_type Train(
189
- arma::Cube<typename MatType::elem_type> predictors,
190
- arma::Cube<typename MatType::elem_type> responses,
190
+ CubeType predictors,
191
+ CubeType responses,
191
192
  arma::urowvec sequenceLengths,
192
193
  OptimizerType& optimizer,
193
194
  CallbackTypes&&... callbacks);
@@ -222,8 +223,8 @@ class RNN
222
223
  */
223
224
  template<typename OptimizerType = ens::RMSProp, typename... CallbackTypes>
224
225
  typename MatType::elem_type Train(
225
- arma::Cube<typename MatType::elem_type> predictors,
226
- arma::Cube<typename MatType::elem_type> responses,
226
+ CubeType predictors,
227
+ CubeType responses,
227
228
  arma::urowvec sequenceLengths,
228
229
  CallbackTypes&&... callbacks);
229
230
 
@@ -236,8 +237,8 @@ class RNN
236
237
  * @param results Matrix to put output predictions of responses into.
237
238
  * @param batchSize Batch size to use for prediction.
238
239
  */
239
- void Predict(const arma::Cube<typename MatType::elem_type>& predictors,
240
- arma::Cube<typename MatType::elem_type>& results,
240
+ void Predict(const CubeType& predictors,
241
+ CubeType& results,
241
242
  const size_t batchSize = 128);
242
243
 
243
244
  /**
@@ -254,8 +255,8 @@ class RNN
254
255
  * @param predictors Input predictors.
255
256
  * @param results Matrix to put output predictions of responses into.
256
257
  */
257
- void Predict(const arma::Cube<typename MatType::elem_type>& predictors,
258
- arma::Cube<typename MatType::elem_type>& results,
258
+ void Predict(const CubeType& predictors,
259
+ CubeType& results,
259
260
  const arma::urowvec& sequenceLengths);
260
261
 
261
262
  // Return the number of weights in the model.
@@ -318,8 +319,8 @@ class RNN
318
319
  * @param responses Target outputs for input variables.
319
320
  */
320
321
  typename MatType::elem_type Evaluate(
321
- const arma::Cube<typename MatType::elem_type>& predictors,
322
- const arma::Cube<typename MatType::elem_type>& responses);
322
+ const CubeType& predictors,
323
+ const CubeType& responses);
323
324
 
324
325
  //! Serialize the model.
325
326
  template<typename Archive>
@@ -425,8 +426,8 @@ class RNN
425
426
  * @param sequenceLengths (Optional) sequence length for each predictor
426
427
  * sequence.
427
428
  */
428
- void ResetData(arma::Cube<typename MatType::elem_type> predictors,
429
- arma::Cube<typename MatType::elem_type> responses,
429
+ void ResetData(CubeType predictors,
430
+ CubeType responses,
430
431
  arma::urowvec sequenceLengths = arma::urowvec());
431
432
 
432
433
  private:
@@ -456,11 +457,11 @@ class RNN
456
457
  // The matrix of data points (predictors). These members are empty, except
457
458
  // during training---we must store a local copy of the training data since
458
459
  // the ensmallen optimizer will not provide training data.
459
- arma::Cube<typename MatType::elem_type> predictors;
460
+ CubeType predictors;
460
461
 
461
462
  // The matrix of responses to the input data points. This member is empty,
462
463
  // except during training.
463
- arma::Cube<typename MatType::elem_type> responses;
464
+ CubeType responses;
464
465
 
465
466
  // The length of each input sequence. If this is empty, then every sequence
466
467
  // is assumed to have the same length (`predictors.n_slices`).
@@ -156,8 +156,8 @@ typename MatType::elem_type RNN<
156
156
  InitializationRuleType,
157
157
  MatType
158
158
  >::Train(
159
- arma::Cube<typename MatType::elem_type> predictors,
160
- arma::Cube<typename MatType::elem_type> responses,
159
+ CubeType predictors,
160
+ CubeType responses,
161
161
  OptimizerType& optimizer,
162
162
  CallbackTypes&&... callbacks)
163
163
  {
@@ -190,8 +190,8 @@ typename MatType::elem_type RNN<
190
190
  InitializationRuleType,
191
191
  MatType
192
192
  >::Train(
193
- arma::Cube<typename MatType::elem_type> predictors,
194
- arma::Cube<typename MatType::elem_type> responses,
193
+ CubeType predictors,
194
+ CubeType responses,
195
195
  CallbackTypes&&... callbacks)
196
196
  {
197
197
  OptimizerType optimizer;
@@ -210,8 +210,8 @@ typename MatType::elem_type RNN<
210
210
  InitializationRuleType,
211
211
  MatType
212
212
  >::Train(
213
- arma::Cube<typename MatType::elem_type> predictors,
214
- arma::Cube<typename MatType::elem_type> responses,
213
+ CubeType predictors,
214
+ CubeType responses,
215
215
  arma::urowvec sequenceLengths,
216
216
  OptimizerType& optimizer,
217
217
  CallbackTypes&&... callbacks)
@@ -246,8 +246,8 @@ typename MatType::elem_type RNN<
246
246
  InitializationRuleType,
247
247
  MatType
248
248
  >::Train(
249
- arma::Cube<typename MatType::elem_type> predictors,
250
- arma::Cube<typename MatType::elem_type> responses,
249
+ CubeType predictors,
250
+ CubeType responses,
251
251
  arma::urowvec sequenceLengths,
252
252
  CallbackTypes&&... callbacks)
253
253
  {
@@ -266,8 +266,8 @@ void RNN<
266
266
  InitializationRuleType,
267
267
  MatType
268
268
  >::Predict(
269
- const arma::Cube<typename MatType::elem_type>& predictors,
270
- arma::Cube<typename MatType::elem_type>& results,
269
+ const CubeType& predictors,
270
+ CubeType& results,
271
271
  const size_t batchSize)
272
272
  {
273
273
  // Ensure that the network is configured correctly.
@@ -313,8 +313,8 @@ void RNN<
313
313
  InitializationRuleType,
314
314
  MatType
315
315
  >::Predict(
316
- const arma::Cube<typename MatType::elem_type>& predictors,
317
- arma::Cube<typename MatType::elem_type>& results,
316
+ const CubeType& predictors,
317
+ CubeType& results,
318
318
  const arma::urowvec& sequenceLengths)
319
319
  {
320
320
  // Ensure that the network is configured correctly.
@@ -512,7 +512,7 @@ typename MatType::elem_type RNN<
512
512
  // This will store the outputs of the network at each time step. Note that we
513
513
  // only need to store `effectiveBPTTSteps` of output. We will treat `outputs`
514
514
  // as a circular buffer.
515
- arma::Cube<typename MatType::elem_type> outputs(
515
+ CubeType outputs(
516
516
  network.network.OutputSize(), batchSize, effectiveBPTTSteps);
517
517
 
518
518
  MatType stepData, outputData, responseData;
@@ -654,8 +654,8 @@ void RNN<
654
654
  InitializationRuleType,
655
655
  MatType
656
656
  >::ResetData(
657
- arma::Cube<typename MatType::elem_type> predictors,
658
- arma::Cube<typename MatType::elem_type> responses,
657
+ CubeType predictors,
658
+ CubeType responses,
659
659
  arma::urowvec sequenceLengths)
660
660
  {
661
661
  this->predictors = std::move(predictors);
@@ -273,11 +273,8 @@ inline double BayesianLinearRegression<ModelMatType>::CenterScaleData(
273
273
  {
274
274
  if (!centerData && !scaleData)
275
275
  {
276
- dataProc = MatType(const_cast<ElemType*>(data.memptr()), data.n_rows,
277
- data.n_cols, false, true);
278
- responsesProc = ResponsesType(const_cast<ElemType*>(responses.memptr()),
279
- responses.n_elem, false,
280
- true);
276
+ MakeAlias(dataProc, data, data.n_rows, data.n_cols, 0, true);
277
+ MakeAlias(responsesProc, responses, responses.n_elem, 0, true);
281
278
  }
282
279
  else if (centerData && !scaleData)
283
280
  {
@@ -290,9 +287,7 @@ inline double BayesianLinearRegression<ModelMatType>::CenterScaleData(
290
287
  {
291
288
  dataScale = stddev(data, 0, 1);
292
289
  dataProc = data.each_col() / dataScale;
293
- responsesProc = ResponsesType(const_cast<ElemType*>(responses.memptr()),
294
- responses.n_elem, false,
295
- true);
290
+ MakeAlias(responsesProc, responses, responses.n_elem, 0, true);
296
291
  }
297
292
  else
298
293
  {
@@ -68,14 +68,11 @@ class GiniGain
68
68
 
69
69
  // Count the number of elements in each class. Use four auxiliary vectors
70
70
  // to exploit SIMD instructions if possible.
71
- arma::vec countSpace(4 * numClasses);
72
- arma::vec counts(countSpace.memptr(), numClasses, false, true);
73
- arma::vec counts2(countSpace.memptr() + numClasses, numClasses, false,
74
- true);
75
- arma::vec counts3(countSpace.memptr() + 2 * numClasses, numClasses, false,
76
- true);
77
- arma::vec counts4(countSpace.memptr() + 3 * numClasses, numClasses, false,
78
- true);
71
+ arma::vec countSpace(4 * numClasses), counts, counts2, counts3, counts4;
72
+ MakeAlias(counts, countSpace, numClasses, 0);
73
+ MakeAlias(counts2, countSpace, numClasses, numClasses);
74
+ MakeAlias(counts3, countSpace, numClasses, 2 * numClasses);
75
+ MakeAlias(counts4, countSpace, numClasses, 3 * numClasses);
79
76
 
80
77
  // Calculate the Gini impurity of the un-split node.
81
78
  double impurity = 0.0;
@@ -69,14 +69,11 @@ class InformationGain
69
69
 
70
70
  // Count the number of elements in each class. Use four auxiliary vectors
71
71
  // to exploit SIMD instructions if possible.
72
- arma::vec countSpace(4 * numClasses);
73
- arma::vec counts(countSpace.memptr(), numClasses, false, true);
74
- arma::vec counts2(countSpace.memptr() + numClasses, numClasses, false,
75
- true);
76
- arma::vec counts3(countSpace.memptr() + 2 * numClasses, numClasses, false,
77
- true);
78
- arma::vec counts4(countSpace.memptr() + 3 * numClasses, numClasses, false,
79
- true);
72
+ arma::vec countSpace(4 * numClasses), counts, counts2, counts3, counts4;
73
+ MakeAlias(counts, countSpace, numClasses, 0);
74
+ MakeAlias(counts2, countSpace, numClasses, numClasses);
75
+ MakeAlias(counts3, countSpace, numClasses, 2 * numClasses);
76
+ MakeAlias(counts4, countSpace, numClasses, 3 * numClasses);
80
77
 
81
78
  if (UseWeights)
82
79
  {
@@ -172,7 +172,8 @@ inline void DiagonalGMM::LogProbability(const arma::mat& observation,
172
172
  // Assign value to the matrix.
173
173
  for (size_t i = 0; i < gaussians; i++)
174
174
  {
175
- arma::vec temp(logProb.colptr(i), observation.n_cols, false, true);
175
+ arma::vec temp;
176
+ MakeAlias(temp, logProb, observation.n_cols, logProb.n_rows * i);
176
177
  dists[i].LogProbability(observation, temp);
177
178
  }
178
179
 
@@ -32,10 +32,10 @@ class EigenvalueRatioConstraint
32
32
  * which is the largest eigenvalue, so the first element of the vector should
33
33
  * be 1. In addition, all other elements should be less than or equal to 1.
34
34
  */
35
- EigenvalueRatioConstraint(const arma::vec& ratios) :
36
- // Make an alias of the ratios vector. It will never be modified here.
37
- ratios(const_cast<double*>(ratios.memptr()), ratios.n_elem, false)
35
+ EigenvalueRatioConstraint(const arma::vec& ratios)
38
36
  {
37
+ // Make an alias of the ratios vector. It will never be modified here.
38
+ MakeAlias(const_cast<arma::vec&>(this->ratios), ratios, ratios.n_elem);
39
39
  // Check validity of ratios.
40
40
  if (std::abs(ratios[0] - 1.0) > 1e-20)
41
41
  Log::Fatal << "EigenvalueRatioConstraint::EigenvalueRatioConstraint(): "
@@ -89,7 +89,8 @@ inline void GMM::LogProbability(const arma::mat& observation,
89
89
  // Assign value to the matrix.
90
90
  for (size_t i = 0; i < gaussians; i++)
91
91
  {
92
- arma::vec temp(logProb.colptr(i), observation.n_cols, false, true);
92
+ arma::vec temp;
93
+ MakeAlias(temp, logProb, observation.n_cols, logProb.n_rows * i);
93
94
  dists[i].LogProbability(observation, temp);
94
95
  }
95
96
 
@@ -157,7 +157,8 @@ double HMM<Distribution>::Train(const std::vector<arma::mat>& dataSeq)
157
157
  for (size_t i = 0; i < logTransition.n_rows; i++)
158
158
  {
159
159
  // Define alias of desired column.
160
- arma::vec alias(logProbs.colptr(i), logProbs.n_rows, false, true);
160
+ arma::vec alias;
161
+ MakeAlias(alias, logProbs, logProbs.n_rows, logProbs.n_rows * i);
161
162
  // Use advanced constructor for using logProbs directly.
162
163
  emission[i].LogProbability(dataSeq[seq], alias);
163
164
  }
@@ -358,7 +359,8 @@ double HMM<Distribution>::LogEstimate(const arma::mat& dataSeq,
358
359
  for (size_t i = 0; i < logTransition.n_rows; i++)
359
360
  {
360
361
  // Define alias of desired column.
361
- arma::vec alias(logProbs.colptr(i), logProbs.n_rows, false, true);
362
+ arma::vec alias;
363
+ MakeAlias(alias, logProbs, logProbs.n_rows, logProbs.n_rows * i);
362
364
  // Use advanced constructor for using logProbs directly.
363
365
  emission[i].LogProbability(dataSeq, alias);
364
366
  }
@@ -515,7 +517,8 @@ double HMM<Distribution>::Predict(const arma::mat& dataSeq,
515
517
  for (size_t i = 0; i < logTransition.n_rows; i++)
516
518
  {
517
519
  // Define alias of desired column.
518
- arma::vec alias(logProbs.colptr(i), logProbs.n_rows, false, true);
520
+ arma::vec alias;
521
+ MakeAlias(alias, logProbs, logProbs.n_rows, logProbs.n_rows * i);
519
522
  // Use advanced constructor for using logProbs directly.
520
523
  emission[i].LogProbability(dataSeq, alias);
521
524
  }
@@ -562,7 +565,8 @@ double HMM<Distribution>::LogLikelihood(const arma::mat& dataSeq) const
562
565
  for (size_t i = 0; i < logTransition.n_rows; i++)
563
566
  {
564
567
  // Define alias of desired column.
565
- arma::vec alias(logProbs.colptr(i), logProbs.n_rows, false, true);
568
+ arma::vec alias;
569
+ MakeAlias(alias, logProbs, logProbs.n_rows, logProbs.n_rows * i);
566
570
  // Use advanced constructor for using logProbs directly.
567
571
  emission[i].LogProbability(dataSeq, alias);
568
572
  }
@@ -664,7 +668,8 @@ void HMM<Distribution>::Filter(const arma::mat& dataSeq,
664
668
  for (size_t i = 0; i < logTransition.n_rows; i++)
665
669
  {
666
670
  // Define alias of desired column.
667
- arma::vec alias(logProbs.colptr(i), logProbs.n_rows, false, true);
671
+ arma::vec alias;
672
+ MakeAlias(alias, logProbs, logProbs.n_rows, logProbs.n_rows * i);
668
673
  // Use advanced constructor for using logProbs directly.
669
674
  emission[i].LogProbability(dataSeq, alias);
670
675
  }
@@ -73,9 +73,9 @@ class RandomForest
73
73
  * @param dimensionSelector Instantiated dimension selection policy.
74
74
  * @param bootstrap Instantiated bootstrap policy.
75
75
  */
76
- template<typename MatType>
76
+ template<typename MatType, typename LabelsType>
77
77
  RandomForest(const MatType& dataset,
78
- const arma::Row<size_t>& labels,
78
+ const LabelsType& labels,
79
79
  const size_t numClasses,
80
80
  const size_t numTrees = 20,
81
81
  const size_t minimumLeafSize = 1,
@@ -104,10 +104,10 @@ class RandomForest
104
104
  * @param dimensionSelector Instantiated dimension selection policy.
105
105
  * @param bootstrap Instantiated bootstrap policy.
106
106
  */
107
- template<typename MatType>
107
+ template<typename MatType, typename LabelsType>
108
108
  RandomForest(const MatType& dataset,
109
109
  const data::DatasetInfo& datasetInfo,
110
- const arma::Row<size_t>& labels,
110
+ const LabelsType& labels,
111
111
  const size_t numClasses,
112
112
  const size_t numTrees = 20,
113
113
  const size_t minimumLeafSize = 1,
@@ -133,18 +133,22 @@ class RandomForest
133
133
  * @param dimensionSelector Instantiated dimension selection policy.
134
134
  * @param bootstrap Instantiated bootstrap policy.
135
135
  */
136
- template<typename MatType>
136
+ template<typename MatType,
137
+ typename LabelsType,
138
+ typename WeightsType>
137
139
  RandomForest(const MatType& dataset,
138
- const arma::Row<size_t>& labels,
140
+ const LabelsType& labels,
139
141
  const size_t numClasses,
140
- const arma::rowvec& weights,
142
+ const WeightsType& weights,
141
143
  const size_t numTrees = 20,
142
144
  const size_t minimumLeafSize = 1,
143
145
  const double minimumGainSplit = 1e-7,
144
146
  const size_t maximumDepth = 0,
145
147
  DimensionSelectionType dimensionSelector =
146
148
  DimensionSelectionType(),
147
- BootstrapType bootstrap = BootstrapType());
149
+ BootstrapType bootstrap = BootstrapType(),
150
+ const std::enable_if_t<arma::is_arma_type<
151
+ std::remove_reference_t<WeightsType>>::value>* = 0);
148
152
 
149
153
  /**
150
154
  * Create a random forest, training on the given weighted labeled training
@@ -166,19 +170,23 @@ class RandomForest
166
170
  * @param dimensionSelector Instantiated dimension selection policy.
167
171
  * @param bootstrap Instantiated bootstrap policy.
168
172
  */
169
- template<typename MatType>
173
+ template<typename MatType,
174
+ typename LabelsType,
175
+ typename WeightsType>
170
176
  RandomForest(const MatType& dataset,
171
177
  const data::DatasetInfo& datasetInfo,
172
- const arma::Row<size_t>& labels,
178
+ const LabelsType& labels,
173
179
  const size_t numClasses,
174
- const arma::rowvec& weights,
180
+ const WeightsType& weights,
175
181
  const size_t numTrees = 20,
176
182
  const size_t minimumLeafSize = 1,
177
183
  const double minimumGainSplit = 1e-7,
178
184
  const size_t maximumDepth = 0,
179
185
  DimensionSelectionType dimensionSelector =
180
186
  DimensionSelectionType(),
181
- BootstrapType bootstrap = BootstrapType());
187
+ BootstrapType bootstrap = BootstrapType(),
188
+ const std::enable_if_t<arma::is_arma_type<
189
+ std::remove_reference_t<WeightsType>>::value>* = 0);
182
190
 
183
191
  /**
184
192
  * Train the random forest on the given labeled training data with the given
@@ -200,9 +208,9 @@ class RandomForest
200
208
  * @param bootstrap Instantiated bootstrap policy.
201
209
  * @return The average entropy of all the decision trees trained under forest.
202
210
  */
203
- template<typename MatType>
211
+ template<typename MatType, typename LabelsType>
204
212
  double Train(const MatType& data,
205
- const arma::Row<size_t>& labels,
213
+ const LabelsType& labels,
206
214
  const size_t numClasses,
207
215
  const size_t numTrees = 20,
208
216
  const size_t minimumLeafSize = 1,
@@ -236,10 +244,10 @@ class RandomForest
236
244
  * @param bootstrap Instantiated bootstrap policy.
237
245
  * @return The average entropy of all the decision trees trained under forest.
238
246
  */
239
- template<typename MatType>
247
+ template<typename MatType, typename LabelsType>
240
248
  double Train(const MatType& data,
241
249
  const data::DatasetInfo& datasetInfo,
242
- const arma::Row<size_t>& labels,
250
+ const LabelsType& labels,
243
251
  const size_t numClasses,
244
252
  const size_t numTrees = 20,
245
253
  const size_t minimumLeafSize = 1,
@@ -271,11 +279,13 @@ class RandomForest
271
279
  * @param bootstrap Instantiated bootstrap policy.
272
280
  * @return The average entropy of all the decision trees trained under forest.
273
281
  */
274
- template<typename MatType>
282
+ template<typename MatType,
283
+ typename LabelsType,
284
+ typename WeightsType>
275
285
  double Train(const MatType& data,
276
- const arma::Row<size_t>& labels,
286
+ const LabelsType& labels,
277
287
  const size_t numClasses,
278
- const arma::rowvec& weights,
288
+ const WeightsType& weights,
279
289
  const size_t numTrees = 20,
280
290
  const size_t minimumLeafSize = 1,
281
291
  const double minimumGainSplit = 1e-7,
@@ -283,7 +293,9 @@ class RandomForest
283
293
  const bool warmStart = false,
284
294
  DimensionSelectionType dimensionSelector =
285
295
  DimensionSelectionType(),
286
- BootstrapType bootstrap = BootstrapType());
296
+ BootstrapType bootstrap = BootstrapType(),
297
+ const std::enable_if_t<arma::is_arma_type<
298
+ std::remove_reference_t<WeightsType>>::value>* = 0);
287
299
 
288
300
  /**
289
301
  * Train the random forest on the given weighted labeled training data with
@@ -308,12 +320,14 @@ class RandomForest
308
320
  * @param bootstrap Instantiated bootstrap policy.
309
321
  * @return The average entropy of all the decision trees trained under forest.
310
322
  */
311
- template<typename MatType>
323
+ template<typename MatType,
324
+ typename LabelsType,
325
+ typename WeightsType>
312
326
  double Train(const MatType& data,
313
327
  const data::DatasetInfo& datasetInfo,
314
- const arma::Row<size_t>& labels,
328
+ const LabelsType& labels,
315
329
  const size_t numClasses,
316
- const arma::rowvec& weights,
330
+ const WeightsType& weights,
317
331
  const size_t numTrees = 20,
318
332
  const size_t minimumLeafSize = 1,
319
333
  const double minimumGainSplit = 1e-7,
@@ -321,7 +335,9 @@ class RandomForest
321
335
  const bool warmStart = false,
322
336
  DimensionSelectionType dimensionSelector =
323
337
  DimensionSelectionType(),
324
- BootstrapType bootstrap = BootstrapType());
338
+ BootstrapType bootstrap = BootstrapType(),
339
+ const std::enable_if_t<arma::is_arma_type<
340
+ std::remove_reference_t<WeightsType>>::value>* = 0);
325
341
 
326
342
  /**
327
343
  * Predict the class of the given point. If the random forest has not been
@@ -414,19 +430,23 @@ class RandomForest
414
430
  * @tparam MatType The type of data matrix (i.e. arma::mat).
415
431
  * @return The average entropy of all the decision trees trained under forest.
416
432
  */
417
- template<bool UseWeights, bool UseDatasetInfo, typename MatType>
418
- double Train(const MatType& data,
419
- const data::DatasetInfo& datasetInfo,
420
- const arma::Row<size_t>& labels,
421
- const size_t numClasses,
422
- const arma::rowvec& weights,
423
- const size_t numTrees,
424
- const size_t minimumLeafSize,
425
- const double minimumGainSplit,
426
- const size_t maximumDepth,
427
- const bool warmStart,
428
- DimensionSelectionType& dimensionSelector,
429
- BootstrapType& bootstrap);
433
+ template<bool UseWeights,
434
+ bool UseDatasetInfo,
435
+ typename MatType,
436
+ typename LabelsType,
437
+ typename WeightsType>
438
+ double TrainInternal(const MatType& data,
439
+ const data::DatasetInfo& datasetInfo,
440
+ const LabelsType& labels,
441
+ const size_t numClasses,
442
+ const WeightsType& weights,
443
+ const size_t numTrees,
444
+ const size_t minimumLeafSize,
445
+ const double minimumGainSplit,
446
+ const size_t maximumDepth,
447
+ const bool warmStart,
448
+ DimensionSelectionType& dimensionSelector,
449
+ BootstrapType& bootstrap);
430
450
 
431
451
  //! The trees in the forest.
432
452
  std::vector<DecisionTreeType> trees;