teradataml 17.20.0.6__py3-none-any.whl → 20.0.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of teradataml might be problematic.

Files changed (432)
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/LICENSE.pdf +0 -0
  3. teradataml/README.md +238 -1
  4. teradataml/__init__.py +13 -3
  5. teradataml/_version.py +1 -1
  6. teradataml/analytics/Transformations.py +4 -4
  7. teradataml/analytics/__init__.py +0 -2
  8. teradataml/analytics/analytic_function_executor.py +3 -0
  9. teradataml/analytics/json_parser/utils.py +13 -12
  10. teradataml/analytics/sqle/DecisionTreePredict.py +15 -30
  11. teradataml/analytics/sqle/NaiveBayesPredict.py +11 -20
  12. teradataml/analytics/sqle/__init__.py +0 -13
  13. teradataml/analytics/utils.py +1 -0
  14. teradataml/analytics/valib.py +3 -0
  15. teradataml/automl/__init__.py +1628 -0
  16. teradataml/automl/custom_json_utils.py +1270 -0
  17. teradataml/automl/data_preparation.py +993 -0
  18. teradataml/automl/data_transformation.py +727 -0
  19. teradataml/automl/feature_engineering.py +1648 -0
  20. teradataml/automl/feature_exploration.py +547 -0
  21. teradataml/automl/model_evaluation.py +163 -0
  22. teradataml/automl/model_training.py +887 -0
  23. teradataml/catalog/__init__.py +0 -2
  24. teradataml/catalog/byom.py +49 -6
  25. teradataml/catalog/function_argument_mapper.py +0 -2
  26. teradataml/catalog/model_cataloging_utils.py +2 -1021
  27. teradataml/common/aed_utils.py +6 -2
  28. teradataml/common/constants.py +50 -58
  29. teradataml/common/deprecations.py +160 -0
  30. teradataml/common/garbagecollector.py +61 -104
  31. teradataml/common/messagecodes.py +27 -36
  32. teradataml/common/messages.py +11 -15
  33. teradataml/common/utils.py +205 -287
  34. teradataml/common/wrapper_utils.py +1 -110
  35. teradataml/context/context.py +150 -78
  36. teradataml/data/bank_churn.csv +10001 -0
  37. teradataml/data/bmi.csv +501 -0
  38. teradataml/data/docs/sqle/docs_17_10/BincodeFit.py +3 -3
  39. teradataml/data/docs/sqle/docs_17_10/BincodeTransform.py +6 -5
  40. teradataml/data/docs/sqle/docs_17_10/Fit.py +1 -1
  41. teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +1 -1
  42. teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +1 -1
  43. teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesTransform.py +2 -2
  44. teradataml/data/docs/sqle/docs_17_10/RowNormalizeTransform.py +2 -1
  45. teradataml/data/docs/sqle/docs_17_10/ScaleTransform.py +1 -0
  46. teradataml/data/docs/sqle/docs_17_10/SimpleImputeTransform.py +1 -1
  47. teradataml/data/docs/sqle/docs_17_10/Transform.py +2 -1
  48. teradataml/data/docs/sqle/docs_17_20/BincodeFit.py +3 -3
  49. teradataml/data/docs/sqle/docs_17_20/BincodeTransform.py +6 -5
  50. teradataml/data/docs/sqle/docs_17_20/Fit.py +1 -1
  51. teradataml/data/docs/sqle/docs_17_20/GLM.py +1 -1
  52. teradataml/data/docs/sqle/docs_17_20/GLMPredictPerSegment.py +9 -10
  53. teradataml/data/docs/sqle/docs_17_20/KMeansPredict.py +3 -2
  54. teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierPredict.py +16 -15
  55. teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierTrainer.py +2 -2
  56. teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +2 -2
  57. teradataml/data/docs/sqle/docs_17_20/NonLinearCombineTransform.py +8 -8
  58. teradataml/data/docs/sqle/docs_17_20/OneClassSVMPredict.py +21 -20
  59. teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +1 -1
  60. teradataml/data/docs/sqle/docs_17_20/OutlierFilterTransform.py +8 -3
  61. teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesTransform.py +6 -5
  62. teradataml/data/docs/sqle/docs_17_20/RandomProjectionTransform.py +6 -6
  63. teradataml/data/docs/sqle/docs_17_20/RowNormalizeTransform.py +2 -1
  64. teradataml/data/docs/sqle/docs_17_20/SVM.py +1 -1
  65. teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +16 -16
  66. teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +1 -0
  67. teradataml/data/docs/sqle/docs_17_20/SimpleImputeTransform.py +3 -2
  68. teradataml/data/docs/sqle/docs_17_20/TDDecisionForestPredict.py +4 -4
  69. teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +19 -19
  70. teradataml/data/docs/sqle/docs_17_20/TargetEncodingTransform.py +5 -4
  71. teradataml/data/docs/sqle/docs_17_20/Transform.py +2 -2
  72. teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +9 -9
  73. teradataml/data/fish.csv +160 -0
  74. teradataml/data/glass_types.csv +215 -0
  75. teradataml/data/insurance.csv +1 -1
  76. teradataml/data/iris_data.csv +151 -0
  77. teradataml/data/jsons/sqle/17.10/TD_FunctionTransform.json +1 -0
  78. teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingTransform.json +1 -0
  79. teradataml/data/jsons/sqle/17.10/TD_OutlierFilterTransform.json +1 -0
  80. teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesTransform.json +1 -0
  81. teradataml/data/jsons/sqle/17.10/TD_RowNormalizeTransform.json +1 -0
  82. teradataml/data/jsons/sqle/17.10/TD_ScaleTransform.json +1 -0
  83. teradataml/data/jsons/sqle/17.10/TD_SimpleImputeTransform.json +1 -0
  84. teradataml/data/load_example_data.py +3 -0
  85. teradataml/data/multi_model_classification.csv +401 -0
  86. teradataml/data/multi_model_regression.csv +401 -0
  87. teradataml/data/openml_example.json +63 -0
  88. teradataml/data/scripts/deploy_script.py +65 -0
  89. teradataml/data/scripts/mapper.R +20 -0
  90. teradataml/data/scripts/sklearn/__init__.py +0 -0
  91. teradataml/data/scripts/sklearn/sklearn_fit.py +175 -0
  92. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +135 -0
  93. teradataml/data/scripts/sklearn/sklearn_function.template +113 -0
  94. teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +158 -0
  95. teradataml/data/scripts/sklearn/sklearn_neighbors.py +152 -0
  96. teradataml/data/scripts/sklearn/sklearn_score.py +128 -0
  97. teradataml/data/scripts/sklearn/sklearn_transform.py +179 -0
  98. teradataml/data/templates/open_source_ml.json +9 -0
  99. teradataml/data/teradataml_example.json +73 -1
  100. teradataml/data/test_classification.csv +101 -0
  101. teradataml/data/test_prediction.csv +101 -0
  102. teradataml/data/test_regression.csv +101 -0
  103. teradataml/data/train_multiclass.csv +101 -0
  104. teradataml/data/train_regression.csv +101 -0
  105. teradataml/data/train_regression_multiple_labels.csv +101 -0
  106. teradataml/data/wine_data.csv +1600 -0
  107. teradataml/dataframe/copy_to.py +79 -13
  108. teradataml/dataframe/data_transfer.py +8 -0
  109. teradataml/dataframe/dataframe.py +910 -311
  110. teradataml/dataframe/dataframe_utils.py +102 -5
  111. teradataml/dataframe/fastload.py +11 -3
  112. teradataml/dataframe/setop.py +15 -2
  113. teradataml/dataframe/sql.py +3735 -77
  114. teradataml/dataframe/sql_function_parameters.py +56 -5
  115. teradataml/dataframe/vantage_function_types.py +45 -1
  116. teradataml/dataframe/window.py +30 -29
  117. teradataml/dbutils/dbutils.py +18 -1
  118. teradataml/geospatial/geodataframe.py +18 -7
  119. teradataml/geospatial/geodataframecolumn.py +5 -0
  120. teradataml/hyperparameter_tuner/optimizer.py +910 -120
  121. teradataml/hyperparameter_tuner/utils.py +131 -37
  122. teradataml/lib/aed_0_1.dll +0 -0
  123. teradataml/lib/libaed_0_1.dylib +0 -0
  124. teradataml/lib/libaed_0_1.so +0 -0
  125. teradataml/libaed_0_1.dylib +0 -0
  126. teradataml/libaed_0_1.so +0 -0
  127. teradataml/opensource/__init__.py +1 -0
  128. teradataml/opensource/sklearn/__init__.py +1 -0
  129. teradataml/opensource/sklearn/_class.py +255 -0
  130. teradataml/opensource/sklearn/_sklearn_wrapper.py +1668 -0
  131. teradataml/opensource/sklearn/_wrapper_utils.py +268 -0
  132. teradataml/opensource/sklearn/constants.py +54 -0
  133. teradataml/options/__init__.py +3 -6
  134. teradataml/options/configure.py +21 -20
  135. teradataml/scriptmgmt/UserEnv.py +61 -5
  136. teradataml/scriptmgmt/lls_utils.py +135 -53
  137. teradataml/table_operators/Apply.py +38 -6
  138. teradataml/table_operators/Script.py +45 -308
  139. teradataml/table_operators/TableOperator.py +182 -591
  140. teradataml/table_operators/__init__.py +0 -1
  141. teradataml/table_operators/table_operator_util.py +32 -40
  142. teradataml/utils/validators.py +127 -3
  143. {teradataml-17.20.0.6.dist-info → teradataml-20.0.0.0.dist-info}/METADATA +243 -3
  144. {teradataml-17.20.0.6.dist-info → teradataml-20.0.0.0.dist-info}/RECORD +147 -391
  145. teradataml/analytics/mle/AdaBoost.py +0 -651
  146. teradataml/analytics/mle/AdaBoostPredict.py +0 -564
  147. teradataml/analytics/mle/Antiselect.py +0 -342
  148. teradataml/analytics/mle/Arima.py +0 -641
  149. teradataml/analytics/mle/ArimaPredict.py +0 -477
  150. teradataml/analytics/mle/Attribution.py +0 -1070
  151. teradataml/analytics/mle/Betweenness.py +0 -658
  152. teradataml/analytics/mle/Burst.py +0 -711
  153. teradataml/analytics/mle/CCM.py +0 -600
  154. teradataml/analytics/mle/CCMPrepare.py +0 -324
  155. teradataml/analytics/mle/CFilter.py +0 -460
  156. teradataml/analytics/mle/ChangePointDetection.py +0 -572
  157. teradataml/analytics/mle/ChangePointDetectionRT.py +0 -477
  158. teradataml/analytics/mle/Closeness.py +0 -737
  159. teradataml/analytics/mle/ConfusionMatrix.py +0 -420
  160. teradataml/analytics/mle/Correlation.py +0 -477
  161. teradataml/analytics/mle/Correlation2.py +0 -573
  162. teradataml/analytics/mle/CoxHazardRatio.py +0 -679
  163. teradataml/analytics/mle/CoxPH.py +0 -556
  164. teradataml/analytics/mle/CoxSurvival.py +0 -478
  165. teradataml/analytics/mle/CumulativeMovAvg.py +0 -363
  166. teradataml/analytics/mle/DTW.py +0 -623
  167. teradataml/analytics/mle/DWT.py +0 -564
  168. teradataml/analytics/mle/DWT2D.py +0 -599
  169. teradataml/analytics/mle/DecisionForest.py +0 -716
  170. teradataml/analytics/mle/DecisionForestEvaluator.py +0 -363
  171. teradataml/analytics/mle/DecisionForestPredict.py +0 -561
  172. teradataml/analytics/mle/DecisionTree.py +0 -830
  173. teradataml/analytics/mle/DecisionTreePredict.py +0 -528
  174. teradataml/analytics/mle/ExponentialMovAvg.py +0 -418
  175. teradataml/analytics/mle/FMeasure.py +0 -402
  176. teradataml/analytics/mle/FPGrowth.py +0 -734
  177. teradataml/analytics/mle/FrequentPaths.py +0 -695
  178. teradataml/analytics/mle/GLM.py +0 -558
  179. teradataml/analytics/mle/GLML1L2.py +0 -547
  180. teradataml/analytics/mle/GLML1L2Predict.py +0 -519
  181. teradataml/analytics/mle/GLMPredict.py +0 -529
  182. teradataml/analytics/mle/HMMDecoder.py +0 -945
  183. teradataml/analytics/mle/HMMEvaluator.py +0 -901
  184. teradataml/analytics/mle/HMMSupervised.py +0 -521
  185. teradataml/analytics/mle/HMMUnsupervised.py +0 -572
  186. teradataml/analytics/mle/Histogram.py +0 -561
  187. teradataml/analytics/mle/IDWT.py +0 -476
  188. teradataml/analytics/mle/IDWT2D.py +0 -493
  189. teradataml/analytics/mle/IdentityMatch.py +0 -763
  190. teradataml/analytics/mle/Interpolator.py +0 -918
  191. teradataml/analytics/mle/KMeans.py +0 -485
  192. teradataml/analytics/mle/KNN.py +0 -627
  193. teradataml/analytics/mle/KNNRecommender.py +0 -488
  194. teradataml/analytics/mle/KNNRecommenderPredict.py +0 -581
  195. teradataml/analytics/mle/LAR.py +0 -439
  196. teradataml/analytics/mle/LARPredict.py +0 -478
  197. teradataml/analytics/mle/LDA.py +0 -548
  198. teradataml/analytics/mle/LDAInference.py +0 -492
  199. teradataml/analytics/mle/LDATopicSummary.py +0 -464
  200. teradataml/analytics/mle/LevenshteinDistance.py +0 -450
  201. teradataml/analytics/mle/LinReg.py +0 -433
  202. teradataml/analytics/mle/LinRegPredict.py +0 -438
  203. teradataml/analytics/mle/MinHash.py +0 -544
  204. teradataml/analytics/mle/Modularity.py +0 -587
  205. teradataml/analytics/mle/NEREvaluator.py +0 -410
  206. teradataml/analytics/mle/NERExtractor.py +0 -595
  207. teradataml/analytics/mle/NERTrainer.py +0 -458
  208. teradataml/analytics/mle/NGrams.py +0 -570
  209. teradataml/analytics/mle/NPath.py +0 -634
  210. teradataml/analytics/mle/NTree.py +0 -549
  211. teradataml/analytics/mle/NaiveBayes.py +0 -462
  212. teradataml/analytics/mle/NaiveBayesPredict.py +0 -513
  213. teradataml/analytics/mle/NaiveBayesTextClassifier.py +0 -607
  214. teradataml/analytics/mle/NaiveBayesTextClassifier2.py +0 -531
  215. teradataml/analytics/mle/NaiveBayesTextClassifierPredict.py +0 -799
  216. teradataml/analytics/mle/NamedEntityFinder.py +0 -529
  217. teradataml/analytics/mle/NamedEntityFinderEvaluator.py +0 -414
  218. teradataml/analytics/mle/NamedEntityFinderTrainer.py +0 -396
  219. teradataml/analytics/mle/POSTagger.py +0 -417
  220. teradataml/analytics/mle/Pack.py +0 -411
  221. teradataml/analytics/mle/PageRank.py +0 -535
  222. teradataml/analytics/mle/PathAnalyzer.py +0 -426
  223. teradataml/analytics/mle/PathGenerator.py +0 -367
  224. teradataml/analytics/mle/PathStart.py +0 -464
  225. teradataml/analytics/mle/PathSummarizer.py +0 -470
  226. teradataml/analytics/mle/Pivot.py +0 -471
  227. teradataml/analytics/mle/ROC.py +0 -425
  228. teradataml/analytics/mle/RandomSample.py +0 -637
  229. teradataml/analytics/mle/RandomWalkSample.py +0 -490
  230. teradataml/analytics/mle/SAX.py +0 -779
  231. teradataml/analytics/mle/SVMDense.py +0 -677
  232. teradataml/analytics/mle/SVMDensePredict.py +0 -536
  233. teradataml/analytics/mle/SVMDenseSummary.py +0 -437
  234. teradataml/analytics/mle/SVMSparse.py +0 -557
  235. teradataml/analytics/mle/SVMSparsePredict.py +0 -553
  236. teradataml/analytics/mle/SVMSparseSummary.py +0 -435
  237. teradataml/analytics/mle/Sampling.py +0 -549
  238. teradataml/analytics/mle/Scale.py +0 -565
  239. teradataml/analytics/mle/ScaleByPartition.py +0 -496
  240. teradataml/analytics/mle/ScaleMap.py +0 -378
  241. teradataml/analytics/mle/ScaleSummary.py +0 -320
  242. teradataml/analytics/mle/SentenceExtractor.py +0 -363
  243. teradataml/analytics/mle/SentimentEvaluator.py +0 -432
  244. teradataml/analytics/mle/SentimentExtractor.py +0 -578
  245. teradataml/analytics/mle/SentimentTrainer.py +0 -405
  246. teradataml/analytics/mle/SeriesSplitter.py +0 -641
  247. teradataml/analytics/mle/Sessionize.py +0 -475
  248. teradataml/analytics/mle/SimpleMovAvg.py +0 -397
  249. teradataml/analytics/mle/StringSimilarity.py +0 -425
  250. teradataml/analytics/mle/TF.py +0 -389
  251. teradataml/analytics/mle/TFIDF.py +0 -504
  252. teradataml/analytics/mle/TextChunker.py +0 -414
  253. teradataml/analytics/mle/TextClassifier.py +0 -399
  254. teradataml/analytics/mle/TextClassifierEvaluator.py +0 -413
  255. teradataml/analytics/mle/TextClassifierTrainer.py +0 -565
  256. teradataml/analytics/mle/TextMorph.py +0 -494
  257. teradataml/analytics/mle/TextParser.py +0 -623
  258. teradataml/analytics/mle/TextTagger.py +0 -530
  259. teradataml/analytics/mle/TextTokenizer.py +0 -502
  260. teradataml/analytics/mle/UnivariateStatistics.py +0 -488
  261. teradataml/analytics/mle/Unpack.py +0 -526
  262. teradataml/analytics/mle/Unpivot.py +0 -438
  263. teradataml/analytics/mle/VarMax.py +0 -776
  264. teradataml/analytics/mle/VectorDistance.py +0 -762
  265. teradataml/analytics/mle/WeightedMovAvg.py +0 -400
  266. teradataml/analytics/mle/XGBoost.py +0 -842
  267. teradataml/analytics/mle/XGBoostPredict.py +0 -627
  268. teradataml/analytics/mle/__init__.py +0 -123
  269. teradataml/analytics/mle/json/adaboost_mle.json +0 -135
  270. teradataml/analytics/mle/json/adaboostpredict_mle.json +0 -85
  271. teradataml/analytics/mle/json/antiselect_mle.json +0 -34
  272. teradataml/analytics/mle/json/antiselect_mle_mle.json +0 -34
  273. teradataml/analytics/mle/json/arima_mle.json +0 -172
  274. teradataml/analytics/mle/json/arimapredict_mle.json +0 -52
  275. teradataml/analytics/mle/json/attribution_mle_mle.json +0 -143
  276. teradataml/analytics/mle/json/betweenness_mle.json +0 -97
  277. teradataml/analytics/mle/json/burst_mle.json +0 -140
  278. teradataml/analytics/mle/json/ccm_mle.json +0 -124
  279. teradataml/analytics/mle/json/ccmprepare_mle.json +0 -14
  280. teradataml/analytics/mle/json/cfilter_mle.json +0 -93
  281. teradataml/analytics/mle/json/changepointdetection_mle.json +0 -92
  282. teradataml/analytics/mle/json/changepointdetectionrt_mle.json +0 -78
  283. teradataml/analytics/mle/json/closeness_mle.json +0 -104
  284. teradataml/analytics/mle/json/confusionmatrix_mle.json +0 -79
  285. teradataml/analytics/mle/json/correlation_mle.json +0 -86
  286. teradataml/analytics/mle/json/correlationreduce_mle.json +0 -49
  287. teradataml/analytics/mle/json/coxhazardratio_mle.json +0 -89
  288. teradataml/analytics/mle/json/coxph_mle.json +0 -98
  289. teradataml/analytics/mle/json/coxsurvival_mle.json +0 -79
  290. teradataml/analytics/mle/json/cumulativemovavg_mle.json +0 -34
  291. teradataml/analytics/mle/json/decisionforest_mle.json +0 -167
  292. teradataml/analytics/mle/json/decisionforestevaluator_mle.json +0 -33
  293. teradataml/analytics/mle/json/decisionforestpredict_mle_mle.json +0 -74
  294. teradataml/analytics/mle/json/decisiontree_mle.json +0 -194
  295. teradataml/analytics/mle/json/decisiontreepredict_mle_mle.json +0 -86
  296. teradataml/analytics/mle/json/dtw_mle.json +0 -97
  297. teradataml/analytics/mle/json/dwt2d_mle.json +0 -116
  298. teradataml/analytics/mle/json/dwt_mle.json +0 -101
  299. teradataml/analytics/mle/json/exponentialmovavg_mle.json +0 -55
  300. teradataml/analytics/mle/json/fmeasure_mle.json +0 -58
  301. teradataml/analytics/mle/json/fpgrowth_mle.json +0 -159
  302. teradataml/analytics/mle/json/frequentpaths_mle.json +0 -129
  303. teradataml/analytics/mle/json/glm_mle.json +0 -111
  304. teradataml/analytics/mle/json/glml1l2_mle.json +0 -106
  305. teradataml/analytics/mle/json/glml1l2predict_mle.json +0 -57
  306. teradataml/analytics/mle/json/glmpredict_mle_mle.json +0 -74
  307. teradataml/analytics/mle/json/histogram_mle.json +0 -100
  308. teradataml/analytics/mle/json/hmmdecoder_mle.json +0 -192
  309. teradataml/analytics/mle/json/hmmevaluator_mle.json +0 -206
  310. teradataml/analytics/mle/json/hmmsupervised_mle.json +0 -91
  311. teradataml/analytics/mle/json/hmmunsupervised_mle.json +0 -114
  312. teradataml/analytics/mle/json/identitymatch_mle.json +0 -88
  313. teradataml/analytics/mle/json/idwt2d_mle.json +0 -73
  314. teradataml/analytics/mle/json/idwt_mle.json +0 -66
  315. teradataml/analytics/mle/json/interpolator_mle.json +0 -151
  316. teradataml/analytics/mle/json/kmeans_mle.json +0 -97
  317. teradataml/analytics/mle/json/knn_mle.json +0 -141
  318. teradataml/analytics/mle/json/knnrecommender_mle.json +0 -111
  319. teradataml/analytics/mle/json/knnrecommenderpredict_mle.json +0 -75
  320. teradataml/analytics/mle/json/lar_mle.json +0 -78
  321. teradataml/analytics/mle/json/larpredict_mle.json +0 -69
  322. teradataml/analytics/mle/json/lda_mle.json +0 -130
  323. teradataml/analytics/mle/json/ldainference_mle.json +0 -78
  324. teradataml/analytics/mle/json/ldatopicsummary_mle.json +0 -64
  325. teradataml/analytics/mle/json/levenshteindistance_mle.json +0 -92
  326. teradataml/analytics/mle/json/linreg_mle.json +0 -42
  327. teradataml/analytics/mle/json/linregpredict_mle.json +0 -56
  328. teradataml/analytics/mle/json/minhash_mle.json +0 -113
  329. teradataml/analytics/mle/json/modularity_mle.json +0 -91
  330. teradataml/analytics/mle/json/naivebayespredict_mle_mle.json +0 -85
  331. teradataml/analytics/mle/json/naivebayesreduce_mle.json +0 -52
  332. teradataml/analytics/mle/json/naivebayestextclassifierpredict_mle_mle.json +0 -147
  333. teradataml/analytics/mle/json/naivebayestextclassifiertrainer2_mle.json +0 -108
  334. teradataml/analytics/mle/json/naivebayestextclassifiertrainer_mle.json +0 -102
  335. teradataml/analytics/mle/json/namedentityfinder_mle.json +0 -84
  336. teradataml/analytics/mle/json/namedentityfinderevaluatorreduce_mle.json +0 -43
  337. teradataml/analytics/mle/json/namedentityfindertrainer_mle.json +0 -64
  338. teradataml/analytics/mle/json/nerevaluator_mle.json +0 -54
  339. teradataml/analytics/mle/json/nerextractor_mle.json +0 -87
  340. teradataml/analytics/mle/json/nertrainer_mle.json +0 -89
  341. teradataml/analytics/mle/json/ngrams_mle.json +0 -137
  342. teradataml/analytics/mle/json/ngramsplitter_mle_mle.json +0 -137
  343. teradataml/analytics/mle/json/npath@coprocessor_mle.json +0 -73
  344. teradataml/analytics/mle/json/ntree@coprocessor_mle.json +0 -123
  345. teradataml/analytics/mle/json/pack_mle.json +0 -58
  346. teradataml/analytics/mle/json/pack_mle_mle.json +0 -58
  347. teradataml/analytics/mle/json/pagerank_mle.json +0 -81
  348. teradataml/analytics/mle/json/pathanalyzer_mle.json +0 -63
  349. teradataml/analytics/mle/json/pathgenerator_mle.json +0 -40
  350. teradataml/analytics/mle/json/pathstart_mle.json +0 -62
  351. teradataml/analytics/mle/json/pathsummarizer_mle.json +0 -72
  352. teradataml/analytics/mle/json/pivoting_mle.json +0 -71
  353. teradataml/analytics/mle/json/postagger_mle.json +0 -51
  354. teradataml/analytics/mle/json/randomsample_mle.json +0 -131
  355. teradataml/analytics/mle/json/randomwalksample_mle.json +0 -85
  356. teradataml/analytics/mle/json/roc_mle.json +0 -73
  357. teradataml/analytics/mle/json/sampling_mle.json +0 -75
  358. teradataml/analytics/mle/json/sax_mle.json +0 -154
  359. teradataml/analytics/mle/json/scale_mle.json +0 -93
  360. teradataml/analytics/mle/json/scalebypartition_mle.json +0 -89
  361. teradataml/analytics/mle/json/scalemap_mle.json +0 -44
  362. teradataml/analytics/mle/json/scalesummary_mle.json +0 -14
  363. teradataml/analytics/mle/json/sentenceextractor_mle.json +0 -41
  364. teradataml/analytics/mle/json/sentimentevaluator_mle.json +0 -43
  365. teradataml/analytics/mle/json/sentimentextractor_mle.json +0 -100
  366. teradataml/analytics/mle/json/sentimenttrainer_mle.json +0 -68
  367. teradataml/analytics/mle/json/seriessplitter_mle.json +0 -133
  368. teradataml/analytics/mle/json/sessionize_mle_mle.json +0 -62
  369. teradataml/analytics/mle/json/simplemovavg_mle.json +0 -48
  370. teradataml/analytics/mle/json/stringsimilarity_mle.json +0 -50
  371. teradataml/analytics/mle/json/stringsimilarity_mle_mle.json +0 -50
  372. teradataml/analytics/mle/json/svmdense_mle.json +0 -165
  373. teradataml/analytics/mle/json/svmdensepredict_mle.json +0 -95
  374. teradataml/analytics/mle/json/svmdensesummary_mle.json +0 -58
  375. teradataml/analytics/mle/json/svmsparse_mle.json +0 -148
  376. teradataml/analytics/mle/json/svmsparsepredict_mle_mle.json +0 -103
  377. teradataml/analytics/mle/json/svmsparsesummary_mle.json +0 -57
  378. teradataml/analytics/mle/json/textchunker_mle.json +0 -40
  379. teradataml/analytics/mle/json/textclassifier_mle.json +0 -51
  380. teradataml/analytics/mle/json/textclassifierevaluator_mle.json +0 -43
  381. teradataml/analytics/mle/json/textclassifiertrainer_mle.json +0 -103
  382. teradataml/analytics/mle/json/textmorph_mle.json +0 -63
  383. teradataml/analytics/mle/json/textparser_mle.json +0 -166
  384. teradataml/analytics/mle/json/texttagger_mle.json +0 -81
  385. teradataml/analytics/mle/json/texttokenizer_mle.json +0 -91
  386. teradataml/analytics/mle/json/tf_mle.json +0 -33
  387. teradataml/analytics/mle/json/tfidf_mle.json +0 -34
  388. teradataml/analytics/mle/json/univariatestatistics_mle.json +0 -81
  389. teradataml/analytics/mle/json/unpack_mle.json +0 -91
  390. teradataml/analytics/mle/json/unpack_mle_mle.json +0 -91
  391. teradataml/analytics/mle/json/unpivoting_mle.json +0 -63
  392. teradataml/analytics/mle/json/varmax_mle.json +0 -176
  393. teradataml/analytics/mle/json/vectordistance_mle.json +0 -179
  394. teradataml/analytics/mle/json/weightedmovavg_mle.json +0 -48
  395. teradataml/analytics/mle/json/xgboost_mle.json +0 -178
  396. teradataml/analytics/mle/json/xgboostpredict_mle.json +0 -104
  397. teradataml/analytics/sqle/Antiselect.py +0 -321
  398. teradataml/analytics/sqle/Attribution.py +0 -603
  399. teradataml/analytics/sqle/DecisionForestPredict.py +0 -408
  400. teradataml/analytics/sqle/GLMPredict.py +0 -430
  401. teradataml/analytics/sqle/MovingAverage.py +0 -543
  402. teradataml/analytics/sqle/NGramSplitter.py +0 -548
  403. teradataml/analytics/sqle/NPath.py +0 -632
  404. teradataml/analytics/sqle/NaiveBayesTextClassifierPredict.py +0 -515
  405. teradataml/analytics/sqle/Pack.py +0 -388
  406. teradataml/analytics/sqle/SVMSparsePredict.py +0 -464
  407. teradataml/analytics/sqle/Sessionize.py +0 -390
  408. teradataml/analytics/sqle/StringSimilarity.py +0 -400
  409. teradataml/analytics/sqle/Unpack.py +0 -503
  410. teradataml/analytics/sqle/json/antiselect_sqle.json +0 -21
  411. teradataml/analytics/sqle/json/attribution_sqle.json +0 -92
  412. teradataml/analytics/sqle/json/decisionforestpredict_sqle.json +0 -48
  413. teradataml/analytics/sqle/json/glmpredict_sqle.json +0 -48
  414. teradataml/analytics/sqle/json/h2opredict_sqle.json +0 -63
  415. teradataml/analytics/sqle/json/movingaverage_sqle.json +0 -58
  416. teradataml/analytics/sqle/json/naivebayestextclassifierpredict_sqle.json +0 -76
  417. teradataml/analytics/sqle/json/ngramsplitter_sqle.json +0 -126
  418. teradataml/analytics/sqle/json/npath_sqle.json +0 -67
  419. teradataml/analytics/sqle/json/pack_sqle.json +0 -47
  420. teradataml/analytics/sqle/json/pmmlpredict_sqle.json +0 -55
  421. teradataml/analytics/sqle/json/sessionize_sqle.json +0 -43
  422. teradataml/analytics/sqle/json/stringsimilarity_sqle.json +0 -39
  423. teradataml/analytics/sqle/json/svmsparsepredict_sqle.json +0 -74
  424. teradataml/analytics/sqle/json/unpack_sqle.json +0 -80
  425. teradataml/catalog/model_cataloging.py +0 -980
  426. teradataml/config/mlengine_alias_definitions_v1.0 +0 -118
  427. teradataml/config/mlengine_alias_definitions_v1.1 +0 -127
  428. teradataml/config/mlengine_alias_definitions_v1.3 +0 -129
  429. teradataml/table_operators/sandbox_container_util.py +0 -643
  430. {teradataml-17.20.0.6.dist-info → teradataml-20.0.0.0.dist-info}/WHEEL +0 -0
  431. {teradataml-17.20.0.6.dist-info → teradataml-20.0.0.0.dist-info}/top_level.txt +0 -0
  432. {teradataml-17.20.0.6.dist-info → teradataml-20.0.0.0.dist-info}/zip-safe +0 -0
teradataml/automl/feature_engineering.py (new file)
@@ -0,0 +1,1648 @@
+ # ##################################################################
+ #
+ # Copyright 2024 Teradata. All rights reserved.
+ # TERADATA CONFIDENTIAL AND TRADE SECRET
+ #
+ # Primary Owner: Sweta Shaw
+ # Email Id: Sweta.Shaw@Teradata.com
+ #
+ # Secondary Owner: Akhil Bisht
+ # Email Id: AKHIL.BISHT@Teradata.com
+ #
+ # Version: 1.1
+ # Function Version: 1.0
+ # ##################################################################
+
+ # Python libraries
+ import pandas as pd
+ import time
+ import json
+ import re
+
+ # Teradata libraries
+ from teradataml.dataframe.dataframe import DataFrame
+ from teradataml.dataframe.copy_to import copy_to_sql
+ from teradataml import Antiselect
+ from teradataml import BincodeFit, BincodeTransform
+ from teradataml import ColumnSummary, CategoricalSummary, GetFutileColumns, FillRowId
+ from teradataml import Fit, Transform
+ from teradataml import NonLinearCombineFit, NonLinearCombineTransform
+ from teradataml import NumApply
+ from teradataml import OneHotEncodingFit, OneHotEncodingTransform
+ from teradataml import OrdinalEncodingFit, OrdinalEncodingTransform
+ from teradataml import SimpleImputeFit, SimpleImputeTransform
+ from teradataml import StrApply
+ from teradataml import TargetEncodingFit, TargetEncodingTransform
+ from sqlalchemy import literal_column
+ from teradatasqlalchemy import INTEGER
+ from teradataml import display
+ from teradataml.hyperparameter_tuner.utils import _ProgressBar
+ from teradataml.utils.validators import _Validators
+
+
+ class _FeatureEngineering:
+
+     def __init__(self,
+                  data,
+                  target_column,
+                  model_list,
+                  verbose=0,
+                  task_type="Regression",
+                  custom_data=None):
+         """
+         DESCRIPTION:
+             Function initializes the data, target column and column datatypes
+             for feature engineering.
+
+         PARAMETERS:
+             data:
+                 Required Argument.
+                 Specifies the input teradataml DataFrame for feature engineering.
+                 Types: teradataml DataFrame
+
+             target_column:
+                 Required Argument.
+                 Specifies the name of the target column in "data".
+                 Types: str
+
+             model_list:
+                 Required Argument.
+                 Specifies the list of models to be used for model training.
+                 Types: list
+
+             verbose:
+                 Optional Argument.
+                 Specifies the detailed execution steps based on verbose level.
+                 Default Value: 0
+                 Permitted Values:
+                     * 0: prints the progress bar and leaderboard.
+                     * 1: prints the execution steps of AutoML.
+                     * 2: prints the intermediate data between the execution of each step of AutoML.
+                 Types: int
+
+             task_type:
+                 Required Argument.
+                 Specifies the task type for AutoML, i.e., whether to apply regression OR classification
+                 on the provided dataset.
+                 Default Value: "Regression"
+                 Permitted Values: "Regression", "Classification"
+                 Types: str
+
+             custom_data:
+                 Optional Argument.
+                 Specifies the json object containing user-customized input.
+                 Types: json object
+         """
+         # Instance variables
+         self.data = data
+         self.target_column = target_column
+         self.model_list = model_list
+         self.verbose = verbose
+         self.task_type = task_type
+         self.custom_data = custom_data
+         self.excluded_cols = []
+         self.data_types = {key: value for key, value in self.data._column_names_and_types}
+         self.target_label = None
+         self.data_transform_dict = {}
+         self.one_hot_obj_count = 0
+         self.is_classification_type = lambda: self.task_type.upper() == 'CLASSIFICATION'
+
+     # Method for doing feature engineering on data -> adding id, removing futile columns, imputation, encoding (one hot)
+     def feature_engineering(self,
+                             auto=True):
+         """
+         DESCRIPTION:
+             Function performs the following operations:
+                 1. Removes futile columns/features from the dataset.
+                 2. Detects the columns with missing values.
+                 3. Performs imputation on the columns with missing values.
+                 4. Detects categorical columns and performs encoding on those columns.
+
+         PARAMETERS:
+             auto:
+                 Optional Argument.
+                 Specifies whether to run AutoML in custom mode or auto mode.
+                 When set to False, runs in custom mode. Otherwise, by default runs in auto mode.
+                 Default Value: True
+                 Types: bool
+
+         RETURNS:
+             tuple containing the transformed teradataml DataFrame, the list of columns that do not
+             participate in outlier transformation, the target label, and the data transform dictionary.
+         """
+         # Assigning number of base jobs for progress bar.
+         base_jobs = 14 if auto else 18
+
+         # Updating model list based on distinct values of target column for classification type
+         if self.is_classification_type():
+             if self.data.drop_duplicate(self.target_column).size > 2:
+                 unsupported_models = ['svm', 'glm']
+                 self.model_list = [model for model in self.model_list if model not in unsupported_models]
+
+         # Updating number of jobs for progress bar based on number of models.
+         jobs = base_jobs + len(self.model_list)
+         self.progress_bar = _ProgressBar(jobs=jobs, verbose=2, prefix='Automl Running:')
+
+         self._display_heading(phase=1,
+                               progress_bar=self.progress_bar)
+         self._display_msg(msg='Feature Engineering started ...',
+                           progress_bar=self.progress_bar)
+
+         # Storing target column to data transform dictionary
+         self.data_transform_dict['data_target_column'] = self.target_column
+         # Storing target column encoding indicator to data transform dictionary
+         self.data_transform_dict['target_col_encode_ind'] = False
+         # Storing task type to data transform dictionary
+         self.data_transform_dict['classification_type'] = self.is_classification_type()
+         # Storing params for performing one hot encoding
+         self.data_transform_dict['one_hot_encoding_fit_obj'] = {}
+         self.data_transform_dict['one_hot_encoding_drop_list'] = []
+
+         if auto:
+             self._remove_duplicate_rows()
+             self.progress_bar.update()
+
+             self._remove_futile_columns()
+             self.progress_bar.update()
+
+             self._handle_date_columns()
+             self.progress_bar.update()
+
+             self._handling_missing_value()
+             self.progress_bar.update()
+
+             self._impute_missing_value()
+             self.progress_bar.update()
+
+             self._encoding_categorical_columns()
+             self.progress_bar.update()
+
+         else:
+             self._remove_duplicate_rows()
+             self.progress_bar.update()
+
+             self._remove_futile_columns()
+             self.progress_bar.update()
+
+             self._handle_date_columns()
+             self.progress_bar.update()
+
+             self._custom_handling_missing_value()
+             self.progress_bar.update()
+
+             self._bin_code_transformation()
+             self.progress_bar.update()
+
+             self._string_manipulation()
+             self.progress_bar.update()
+
+             self._custom_categorical_encoding()
+             self.progress_bar.update()
+
+             self._mathematical_transformation()
+             self.progress_bar.update()
+
+             self._non_linear_transformation()
+             self.progress_bar.update()
+
+             self._anti_select_columns()
+             self.progress_bar.update()
+
+         return self.data, self.excluded_cols, self.target_label, self.data_transform_dict
+
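
For orientation, a minimal sketch of how this private helper is driven end to end; the table name, target column and model list below are assumptions for illustration, and a connected Vantage session is required:

    from teradataml import DataFrame

    train = DataFrame("housing_train")                      # hypothetical training table
    fe = _FeatureEngineering(data=train,
                             target_column="price",
                             model_list=["glm", "xgboost"],
                             verbose=1,
                             task_type="Regression")

    # Auto mode: duplicates -> futile columns -> dates -> missing values -> imputation -> encoding.
    data, excluded_cols, target_label, transform_dict = fe.feature_engineering(auto=True)
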
+     def _extract_list(self,
+                       list1,
+                       list2):
+         """
+         DESCRIPTION:
+             Function to extract the elements of "list1" which are not present in "list2".
+
+         PARAMETERS:
+             list1:
+                 Required Argument.
+                 Specifies the list to extract elements from.
+                 Types: list
+
+             list2:
+                 Required Argument.
+                 Specifies the list whose elements are excluded from "list1" during extraction.
+                 Types: list
+
+         RETURNS:
+             list, the extracted elements.
+
+         """
+         new_lst = list(set(list1) - set(list2))
+         return new_lst
+
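
A small illustration of the helper above (values invented); because it goes through set(), the result is de-duplicated and its order is not guaranteed:

    cols = ["id", "price", "rooms"]
    print(list(set(cols) - set(["price"])))   # e.g. ['rooms', 'id'] -- order not preserved
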
+     def _remove_duplicate_rows(self):
+         """
+         DESCRIPTION:
+             Function to handle duplicate rows present in the dataset.
+
+         """
+         self._display_msg(msg="\nHandling duplicate records present in dataset ...",
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+         start_time = time.time()
+         rows = self.data.shape[0]
+         self.data = self.data.drop_duplicate()
+         if rows != self.data.shape[0]:
+             self._display_msg(msg=f'Updated dataset sample after removing {rows-self.data.shape[0]} duplicate records:',
+                               data=self.data,
+                               progress_bar=self.progress_bar)
+             self._display_msg(inline_msg=f"Remaining Rows in the data: {self.data.shape[0]}\n"
+                                          f"Remaining Columns in the data: {self.data.shape[1]}",
+                               progress_bar=self.progress_bar)
+         else:
+             self._display_msg(inline_msg="Analysis complete. No action taken.",
+                               progress_bar=self.progress_bar)
+
+         end_time = time.time()
+         self._display_msg(msg="Total time to handle duplicate records: {:.2f} sec ".format(end_time - start_time),
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+
+     def _get_distinct_count(self):
+         """
+         DESCRIPTION:
+             Function to get the distinct count for all features and store it in a dictionary for further use.
+         """
+         # Count of distinct values in each column
+         counts = self.data.select(self.data.columns).count(distinct=True)
+
+         # Dict containing the distinct value count of each column
+         self.counts_dict = next(counts.itertuples())._asdict()
+
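
The count()/itertuples()/_asdict() idiom above flattens the one-row count result into a plain dict keyed by count_<column>. Roughly (values invented):

    counts_dict = {"count_id": 1000, "count_price": 412, "count_rooms": 9}

    col = "rooms"
    print(counts_dict[f"count_{col}"])   # 9 distinct values in 'rooms'
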
+     def _preprocess_data(self):
+         """
+         DESCRIPTION:
+             Function replaces the existing id column or adds a new id column, and
+             removes columns holding a single/constant value from the dataset.
+         """
+         # Get distinct value count of each column
+         self._get_distinct_count()
+
+         # Columns to be removed if
+         # an id column is detected or the count of distinct values = 1
+         columns_to_be_removed = [col for col in self.data.columns if col.lower() == 'id' or self.counts_dict[f'count_{col}'] == 1]
+
+         # Removing id column, if it exists
+         if len(columns_to_be_removed) != 0:
+             self.data = self.data.drop(columns_to_be_removed, axis=1)
+             # Storing irrelevant column list in data transform dictionary
+             self.data_transform_dict['drop_irrelevent_columns'] = columns_to_be_removed
+
+         # Adding id column
+         obj = FillRowId(data=self.data, row_id_column='id')
+
+         self.data = obj.result
+
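
FillRowId is the same SQLE function teradataml exposes publicly; a standalone sketch of the call used above (the table name is assumed, and a connected Vantage context is required):

    from teradataml import DataFrame, FillRowId

    df = DataFrame("sales")                                       # hypothetical source table
    df_with_id = FillRowId(data=df, row_id_column="id").result    # adds a unique integer 'id' column
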
+     def _remove_futile_columns(self):
+         """
+         DESCRIPTION:
+             Function removes the futile columns from the dataset.
+         """
+         self._display_msg(msg="\nHandling less significant features from data ...",
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+         start_time = time.time()
+
+         self._preprocess_data()
+
+         # Handling string type target column in classification
+         # by performing Ordinal Encoding
+         if self.data_types[self.target_column] in ['str']:
+             self._ordinal_encoding([self.target_column])
+
+         # Detecting categorical columns
+         categorical_columns = [col for col, d_type in self.data._column_names_and_types if d_type == 'str']
+
+         # Detecting and removing futile columns, if categorical columns exist
+         if len(categorical_columns) != 0:
+
+             obj = CategoricalSummary(data=self.data,
+                                      target_columns=categorical_columns)
+
+             gfc_out = GetFutileColumns(data=self.data,
+                                        object=obj,
+                                        category_summary_column="ColumnName",
+                                        threshold_value=0.7)
+
+             # Extracting futile columns
+             f_cols = [row[0] for row in gfc_out.result.itertuples()]
+
+             if len(f_cols) == 0:
+                 self._display_msg(inline_msg="All categorical columns seem to be significant.",
+                                   progress_bar=self.progress_bar)
+             else:
+
+                 self.data = self.data.drop(f_cols, axis=1)
+                 # Storing futile column list in data transform dictionary
+                 self.data_transform_dict['futile_columns'] = f_cols
+                 self._display_msg(msg='Removing Futile columns:',
+                                   col_lst=f_cols,
+                                   progress_bar=self.progress_bar)
+                 self._display_msg(msg='Sample of Data after removing Futile columns:',
+                                   data=self.data,
+                                   progress_bar=self.progress_bar)
+         end_time = time.time()
+         self._display_msg(msg="Total time to handle less significant features: {:.2f} sec ".format(end_time - start_time),
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+
+     def _handle_date_component(self,
+                                date_component_columns,
+                                date_component):
+
+         """
+         DESCRIPTION:
+             Function to handle newly generated date components, i.e., day, month and year difference.
+             Based on their distinct values, binning is done with a predefined prefix.
+             The binned components are used further as categorical features.
+
+         PARAMETERS:
+             date_component_columns:
+                 Required Argument.
+                 Specifies the list of newly generated date component features.
+                 Types: list
+
+             date_component:
+                 Required Argument.
+                 Specifies the identifier for the date component, i.e., D - day, M - month and Y - year difference.
+                 Types: str
+
+         """
+         # Check for day
+         if date_component == "D":
+             prefix_value = "Day_"
+         # Check for month
+         elif date_component == "M":
+             prefix_value = "Month_"
+         # Check for year diff
+         elif date_component == "Y":
+             prefix_value = "Year_diff_"
+
+         # Deciding bins based on distinct values of date component features.
+         for col in date_component_columns:
+             data_size = self.data.drop_duplicate(col).size
+             if data_size < 4:
+                 num_bins = data_size
+             else:
+                 num_bins = 4
+             # Performing bincode for converting date component to specific labels
+             fit_params = {
+                 "data": self.data,
+                 "target_columns": col,
+                 "method_type": "Equal-Width",
+                 "nbins": num_bins,
+                 "label_prefix": prefix_value
+             }
+             bin_code_fit = BincodeFit(**fit_params)
+
+             fit_params_map = {"D": "day_component_fit_object",
+                               "M": "month_component_fit_object",
+                               "Y": "year_diff_component_fit_object"}
+
+             # Storing fit object for each date component in data transform dictionary
+             self.data_transform_dict[fit_params_map[date_component]][col] = bin_code_fit.output
+
+             accumulate_columns = self._extract_list(self.data.columns, [col])
+             transform_params = {
+                 "data": self.data,
+                 "object": bin_code_fit.output,
+                 "accumulate": accumulate_columns,
+                 "persist": True
+             }
+             self.data = BincodeTransform(**transform_params).result
+
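
The nbins logic above caps equal-width binning of each date component at four buckets; when a component has fewer than four distinct values, each value gets its own bin. A tiny restatement of the rule (pure Python; the printed labels are only indicative of what label_prefix produces):

    def pick_num_bins(distinct_count, cap=4):
        # One bin per distinct value, capped at `cap` equal-width bins.
        return distinct_count if distinct_count < cap else cap

    print(pick_num_bins(2))    # 2 bins, e.g. Month_0, Month_1
    print(pick_num_bins(31))   # 4 bins, e.g. Day_0 .. Day_3
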
+     def _fetch_date_component(self,
+                               process,
+                               regex_str,
+                               columns,
+                               date_component):
+
+         """
+         DESCRIPTION:
+             Function to fetch the newly generated date component features and
+             pass them on for binning.
+
+         PARAMETERS:
+             process:
+                 Required Argument.
+                 Specifies the date component of the date feature which is to be fetched and handled.
+                 Types: str
+
+             regex_str:
+                 Required Argument.
+                 Specifies the regular expression for identifying newly generated date component features.
+                 Types: str
+
+             columns:
+                 Required Argument.
+                 Specifies the list of newly generated date component features.
+                 Types: list
+
+             date_component:
+                 Required Argument.
+                 Specifies the identifier for the date component, i.e., D - day, M - month and Y - year difference.
+                 Types: str
+
+         """
+         date_component_columns = [col for col in columns if re.search(regex_str + "$", col)]
+         if len(date_component_columns) != 0:
+             self._handle_date_component(date_component_columns, date_component)
+             self._display_msg(msg="Useful {} features:".format(process),
+                               col_lst=date_component_columns,
+                               progress_bar=self.progress_bar)
+             self._display_msg(msg="Updated dataset sample:",
+                               data=self.data,
+                               progress_bar=self.progress_bar)
+
+         else:
+             self._display_msg("\nNo useful feature found for {} component:".format(process),
+                               progress_bar=self.progress_bar)
+
+         return date_component_columns
+
+     def _handle_date_columns_helper(self):
+
+         """
+         DESCRIPTION:
+             Function for dropping irrelevant date features,
+             extracting the day, month and year components from relevant date features, and
+             passing the extracted components on for binning.
+         """
+
+         # Dropping missing values for all date columns
+         self._display_msg(msg="\nDropping missing values for:",
+                           col_lst=self.date_column_list,
+                           progress_bar=self.progress_bar)
+
+         self.data = self.data.dropna(subset=self.date_column_list)
+
+         # Date columns eligible for dropping from the dataset
+         drop_date_cols = []
+
+         # Checking for date columns where every value is unique
+         for col in self.date_column_list:
+             if self.data.drop_duplicate(col).size == self.data.shape[0]:
+                 drop_date_cols.append(col)
+
+         if len(drop_date_cols) != 0:
+             self.data = self.data.drop(drop_date_cols, axis=1)
+             # Storing unique date column list in data transform dictionary
+             self.data_transform_dict['drop_unique_date_columns'] = drop_date_cols
+             self._display_msg(msg='Dropping date features with all unique values:',
+                               col_lst=drop_date_cols,
+                               progress_bar=self.progress_bar)
+
+         # Updated date columns list
+         self.date_column_list = [item for item in self.date_column_list if item not in drop_date_cols]
+
+         # List for storing newly generated date component features
+         new_columns = []
+
+         # Extracting day, month and year difference from date columns
+         if len(self.date_column_list) != 0:
+
+             component_param = {}
+             for col in self.date_column_list:
+
+                 day_column = str(col) + "_day_comp"
+                 month_column = str(col) + "_month_comp"
+                 year_diff_column = str(col) + "_year_diff_comp"
+                 new_columns.extend([day_column, month_column, year_diff_column])
+                 day_query = ("EXTRACT(DAY FROM {0})".format(col))
+                 month_query = ("EXTRACT(MONTH FROM {0})".format(col))
+                 year_query = ("EXTRACT(YEAR FROM CURRENT_DATE) - EXTRACT(YEAR FROM {0})".format(col))
+                 component_param[day_column] = literal_column(day_query, INTEGER())
+                 component_param[month_column] = literal_column(month_query, INTEGER())
+                 component_param[year_diff_column] = literal_column(year_query, INTEGER())
+
+             self.data = self.data.assign(**component_param)
+             # Storing newly generated date component list along with parameters in data transform dictionary
+             self.data_transform_dict['extract_date_comp_col'] = self.date_column_list
+             self.data_transform_dict['extract_date_comp_param'] = component_param
+
+             # Dropping date columns as day, month and year have already been extracted into new columns
+             self.data = self.data.drop(self.date_column_list, axis=1)
+             self._display_msg(msg='List of newly generated features from existing date features:',
+                               col_lst=new_columns,
+                               progress_bar=self.progress_bar)
+             self._display_msg(msg='Updated dataset sample with newly generated date features:',
+                               data=self.data,
+                               progress_bar=self.progress_bar)
+
+             drop_cols = []
+
+             for col in new_columns:
+                 distinct_rows = self.data.drop_duplicate(col).size
+                 if distinct_rows == self.data.shape[0]:
+                     drop_cols.append(col)
+                     self._display_msg(msg='Dropping features with all unique values:',
+                                       col_lst=col,
+                                       progress_bar=self.progress_bar)
+
+                 elif distinct_rows == 1:
+                     drop_cols.append(col)
+                     self._display_msg(msg='Dropping features with single value:',
+                                       col_lst=col,
+                                       progress_bar=self.progress_bar)
+
+             # Dropping columns from drop_cols list
+             if len(drop_cols) != 0:
+                 self.data = self.data.drop(drop_cols, axis=1)
+                 # Storing extracted date component list for drop in data transform dictionary
+                 self.data_transform_dict['drop_extract_date_columns'] = drop_cols
+
+             # Extracting all newly generated columns
+             new_columns = [item for item in new_columns if item not in drop_cols]
+
+             # Storing each date component transformation fit object in data transform dictionary
+             self.data_transform_dict = {**self.data_transform_dict,
+                                         'day_component_fit_object': {},
+                                         'month_component_fit_object': {},
+                                         'year_diff_component_fit_object': {}}
+             # Grouping date components based on type, i.e., day, month, and year_diff, for performing binning
+             if len(new_columns) != 0:
+                 self.day_columns = self._fetch_date_component("day", "_day_comp", new_columns, "D")
+                 self.month_columns = self._fetch_date_component("month", "_month_comp", new_columns, "M")
+                 self.year_diff_columns = self._fetch_date_component("year_diff", "_year_diff_comp", new_columns, "Y")
+                 self._display_msg(inline_msg="No useful date component found",
+                                   progress_bar=self.progress_bar)
+
+             self._display_msg(msg='Updated dataset sample after handling date features:',
+                               data=self.data,
+                               progress_bar=self.progress_bar)
+         else:
+             self._display_msg(inline_msg="No useful date feature found",
+                               progress_bar=self.progress_bar)
+
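
For a single hypothetical date column, the loop above builds server-side expressions like the following; note that the year component is stored as an age in years relative to the current date rather than the raw year (column name assumed):

    from sqlalchemy import literal_column
    from teradatasqlalchemy import INTEGER

    col = "order_date"   # hypothetical date column
    component_param = {
        f"{col}_day_comp": literal_column(f"EXTRACT(DAY FROM {col})", INTEGER()),
        f"{col}_month_comp": literal_column(f"EXTRACT(MONTH FROM {col})", INTEGER()),
        f"{col}_year_diff_comp": literal_column(
            f"EXTRACT(YEAR FROM CURRENT_DATE) - EXTRACT(YEAR FROM {col})", INTEGER()),
    }
    # df.assign(**component_param) then materializes the three INTEGER columns in Vantage.
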
+     def _handle_date_columns(self):
+
+         """
+         DESCRIPTION:
+             Function to handle date columns in the dataset, if any, and perform the relevant
+             transformation by extracting the different components, i.e., day, month and year.
+         """
+         self._display_msg(msg="\nHandling Date Features ...",
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+         start_time = time.time()
+
+         self.date_column_list = [col for col, d_type in self.data._column_names_and_types
+                                  if d_type in ["datetime.date", "datetime.datetime"]]
+
+         if len(self.date_column_list) == 0:
+             self._display_msg(inline_msg="Dataset does not contain any feature related to dates.",
+                               progress_bar=self.progress_bar)
+         else:
+             # Storing date column list in data transform dictionary
+             self.data_transform_dict['date_columns'] = self.date_column_list
+             self._handle_date_columns_helper()
+
+         end_time = time.time()
+         self._display_msg(msg="Total time to handle date features: {:.2f} sec\n".format(end_time - start_time),
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+
+     def _missing_count_per_column(self):
+         """
+         DESCRIPTION:
+             Function finds and returns a dictionary containing the columns
+             with missing values.
+
+         RETURNS:
+             dict, keys represent column names and
+             values represent the missing value count for the corresponding column.
+         """
+
+         # Removing rows with missing target column value
+         self.data = self.data.dropna(subset=[self.target_column])
+
+         obj = ColumnSummary(data=self.data,
+                             target_columns=self.data.columns,
+                             volatile=True)
+
+         cols_miss_val = {}
+         # Iterating over each row in the column summary result
+         for row in obj.result.itertuples():
+             # Checking if the third element of the row (missing values count) is greater than 0
+             if row[3] > 0:
+                 # If so, add an entry to the 'cols_miss_val' dictionary
+                 # Key: column name (first element of the row)
+                 # Value: count of missing values in the column (third element of the row)
+                 cols_miss_val[row[0]] = row[3]
+
+         return cols_miss_val
+
+     def _handling_missing_value(self):
+         """
+         DESCRIPTION:
+             Function detects the missing values in each feature of the dataset,
+             then performs one of these operations based on condition:
+                 1. deleting rows of the column/feature
+                 2. dropping the column from the dataset
+         """
+         self._display_msg(msg="\nChecking Missing values in dataset ...",
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+         start_time = time.time()
+
+         # Flag for missing values
+         msg_val_found = 0
+
+         # Number of rows
+         d_size = self.data.shape[0]
+
+         delete_rows = []
+         drop_cols = []
+         self.imputation_cols = {}
+
+         cols_miss_val = self._missing_count_per_column()
+
+         if len(cols_miss_val) != 0:
+             self._display_msg(msg="Columns with their missing values:",
+                               col_lst=cols_miss_val,
+                               progress_bar=self.progress_bar)
+
+             # Get distinct value count of each column
+             self._get_distinct_count()
+
+             # Iterating over columns with missing values
+             for col, val in cols_miss_val.items():
+
+                 # Drop col, if count of missing values > 60%
+                 if val > .6*d_size:
+                     drop_cols.append(col)
+                     continue
+
+                 if self.data_types[col] in ['float', 'int']:
+                     corr_df = self.data[col].corr(self.data[self.target_column])
+                     corr_val = self.data.assign(True, corr_=corr_df)
+                     related = next(corr_val.itertuples())[0]
+
+                     # Delete rows, if count of missing values < 2% and
+                     # correlation b/w target column and numeric column <= .25
+                     if val < .02*d_size and related <= .25:
+                         delete_rows.append(col)
+                         continue
+
+                 elif self.data_types[col] in ['str']:
+                     # Delete rows, if count of missing values < 4%
+                     if val < .04*d_size:
+                         delete_rows.append(col)
+                         continue
+                     # Drop col, if unique count of column > 75%
+                     elif self.counts_dict[f'count_{col}'] > .75*(d_size-val):
+                         drop_cols.append(col)
+                         continue
+
+                 # Remaining column for imputation
+                 self.imputation_cols[col] = val
+                 # Storing columns with missing values for imputation in data transform dictionary
+                 self.data_transform_dict['imputation_columns'] = self.imputation_cols
+
+         if len(delete_rows) != 0:
+             self.data = self.data.dropna(subset=delete_rows)
+             msg_val_found = 1
+             self._display_msg(msg='Deleting rows of these columns for handling missing values:',
+                               col_lst=delete_rows,
+                               progress_bar=self.progress_bar)
+
+         if len(drop_cols) != 0:
+             self.data = self.data.drop(drop_cols, axis=1)
+             msg_val_found = 1
+             # Storing columns with missing values for drop in data transform dictionary
+             self.data_transform_dict['drop_missing_columns'] = drop_cols
+             self._display_msg(msg='Dropping these columns for handling missing values:',
+                               col_lst=drop_cols,
+                               progress_bar=self.progress_bar)
+
+         if len(self.imputation_cols) == 0 and msg_val_found == 0:
+             self._display_msg(inline_msg="No Missing Values Detected.",
+                               progress_bar=self.progress_bar)
+
+         end_time = time.time()
+         self._display_msg(msg="Total time to find missing values in data: {:.2f} sec ".format(end_time - start_time),
+                           progress_bar=self.progress_bar,
+                           show_data=True)
+
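
A compact restatement of the decision rules above, as plain Python over hypothetical counts (the thresholds are taken from the code; the helper itself is illustrative, not part of teradataml):

    def missing_value_action(missing, n_rows, dtype, distinct_non_missing=None, target_corr=None):
        # >60% of values missing: the column carries too little signal, drop it.
        if missing > 0.6 * n_rows:
            return "drop column"
        if dtype in ("float", "int"):
            # Few missing rows and weak correlation with the target: just drop the rows.
            if missing < 0.02 * n_rows and target_corr is not None and target_corr <= 0.25:
                return "drop rows"
        elif dtype == "str":
            if missing < 0.04 * n_rows:
                return "drop rows"
            # Almost every remaining value is unique: near-ID column, drop it.
            if distinct_non_missing is not None and distinct_non_missing > 0.75 * (n_rows - missing):
                return "drop column"
        return "impute"

    print(missing_value_action(missing=700, n_rows=1000, dtype="float"))                  # drop column
    print(missing_value_action(missing=30, n_rows=1000, dtype="str"))                     # drop rows
    print(missing_value_action(missing=100, n_rows=1000, dtype="int", target_corr=0.6))   # impute
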
+     def _impute_helper(self):
+         """
+         DESCRIPTION:
+             Function decides the imputation method [mean/median/mode] for columns with missing values
+             on the basis of the skewness of each column in the dataset.
+
+         RETURNS:
+             A tuple containing,
+                 col_stat (names of columns with missing values)
+                 stat (imputation method for the respective columns)
+         """
+         col_stat = []
+         stat = []
+
+         # Converting o/p of skew() into a dictionary with key as column name and value as skewness value
+         df = self.data.skew()
+         skew_data = next(df.itertuples())._asdict()
+
+         # Iterating over columns with missing values
+         for key, val in self.imputation_cols.items():
+
+             col_stat.append(key)
+             if self.data_types[key] in ['float', 'int']:
+                 val = skew_data[f'skew_{key}']
+                 # Median imputation method, if abs(skewness value) > 1
+                 if abs(val) > 1:
+                     stat.append('median')
+                 # Mean imputation method, if abs(skewness value) <= 1
+                 else:
+                     stat.append('mean')
+             # Mode imputation method, if categorical column
+             else:
+                 stat.append('mode')
+
+         self._display_msg(msg="Columns with their imputation method:",
+                           col_lst=dict(zip(col_stat, stat)),
+                           progress_bar=self.progress_bar)
+
+         return col_stat, stat
+
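
The mean/median/mode choice above follows a standard rule of thumb: heavily skewed numeric columns get the median (robust to the long tail), roughly symmetric ones get the mean, and categorical columns get the mode. A small pandas illustration of the skewness cutoff (data invented):

    import pandas as pd

    s_symmetric = pd.Series([10, 11, 9, 10, 12, 8])
    s_skewed = pd.Series([1, 1, 2, 2, 3, 250])        # long right tail

    for s in (s_symmetric, s_skewed):
        method = "median" if abs(s.skew()) > 1 else "mean"
        print(round(s.skew(), 2), "->", method)       # e.g. 0.0 -> mean, ~2.4 -> median
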
+ def _impute_missing_value(self):
773
+ """
774
+ DESCRIPTION:
775
+ Function performs the imputation on columns/features with missing values in the dataset.
776
+ """
777
+
778
+ start_time = time.time()
779
+ self._display_msg(msg="\nImputing Missing Values ...",
780
+ progress_bar=self.progress_bar,
781
+ show_data=True)
782
+
783
+ if len(self.imputation_cols) != 0:
784
+
785
+ # List of columns and imputation Method
786
+ col_stat, stat = self._impute_helper()
787
+
788
+ fit_obj = SimpleImputeFit(data=self.data,
789
+ stats_columns=col_stat,
790
+ stats=stat,
791
+ volatile=True)
792
+
793
+ # Storing fit object for imputation in data transform dictionary
794
+ self.data_transform_dict['imputation_fit_object'] = fit_obj.output
795
+ sm = SimpleImputeTransform(data=self.data,
796
+ object=fit_obj,
797
+ volatile=True)
798
+
799
+ self.data = sm.result
800
+ self._display_msg(msg="Sample of Data after Imputation:",
801
+ data=self.data,
802
+ progress_bar=self.progress_bar)
803
+ else:
804
+ self._display_msg(inline_msg="No imputation is Required.",
805
+ progress_bar=self.progress_bar)
806
+
807
+ end_time = time.time()
808
+ self._display_msg(msg="Time taken to perform imputation: {:.2f} sec ".format(end_time - start_time),
809
+ progress_bar=self.progress_bar,
810
+ show_data=True)
811
+
812
+
813
+ def _custom_handling_missing_value(self):
814
+ """
815
+ DESCRIPTION:
816
+ Function to perform customized missing value handling for features based on user input.
817
+
818
+ """
819
+ # Fetching user input for performing missing value handling
820
+ missing_handling_input = self.custom_data.get("MissingValueHandlingIndicator", False)
821
+
822
+ if missing_handling_input:
823
+ # Fetching parameters required for performing
824
+ missing_handling_param = self.custom_data.get("MissingValueHandlingParam", None)
825
+ if missing_handling_param:
826
+ # Fetching user input for different methods missing value handling
827
+ drop_col_ind = missing_handling_param.get("DroppingColumnIndicator", False)
828
+ drop_row_ind = missing_handling_param.get("DroppingRowIndicator", False)
829
+ impute_ind = missing_handling_param.get("ImputeMissingIndicator", False)
830
+ # Checking whether all method indicators are false
831
+ if not any([drop_col_ind, drop_row_ind, impute_ind]):
832
+ self._display_msg(inline_msg="No method information provided for performing customized missing value handling. \
833
+ AutoML will proceed with default missing value handling method.",
834
+ progress_bar=self.progress_bar)
835
+
836
+ else:
837
+ # Checking user input for dropping missing value columns
838
+ if drop_col_ind:
839
+ drop_col_list = missing_handling_param.get("DroppingColumnList", [])
840
+ # Storing custom columns with missing values to drop in data transform dictionary
841
+ self.data_transform_dict["custom_drop_missing_columns"] = drop_col_list
842
+ if len(drop_col_list):
843
+ # Checking whether the columns are present in the dataset
844
+ _Validators._validate_dataframe_has_argument_columns(drop_col_list, "DroppingColumnList", self.data, "df")
845
+
846
+ self._display_msg(msg="\nDropping these columns for handling customized missing value:",
847
+ col_lst=drop_col_list,
848
+ progress_bar=self.progress_bar)
849
+ self.data = self.data.drop(drop_col_list, axis=1)
850
+ else:
851
+ self._display_msg(inline_msg="No information provided for dropping missing value containing columns.",
852
+ progress_bar=self.progress_bar)
853
+
854
+ # Checking user input for dropping missing value rows
855
+ if drop_row_ind:
856
+ drop_row_list = missing_handling_param.get("DroppingRowList", [])
857
+ if len(drop_row_list):
858
+ # Checking whether the columns are present in the dataset
859
+ _Validators._validate_dataframe_has_argument_columns(drop_row_list, "DroppingRowList", self.data, "df")
860
+
861
+ self._display_msg(msg="Dropping missing rows in these columns for handling customized missing value:",
862
+ col_lst=drop_row_list,
863
+ progress_bar=self.progress_bar)
864
+ self.data = self.data.dropna(subset = drop_row_list)
865
+ else:
866
+ self._display_msg(inline_msg="No information provided for dropping missing value containing rows.",
867
+ progress_bar=self.progress_bar)
868
+ # Checking user input for missing value imputation
869
+ if impute_ind:
870
+ stat_list = missing_handling_param.get("StatImputeList", None)
871
+ stat_method = missing_handling_param.get("StatImputeMethod", None)
872
+ literal_list = missing_handling_param.get("LiteralImputeList", None)
873
+ literal_value = missing_handling_param.get("LiteralImputeValue", None)
874
+
875
+ # Checking whether the columns are present in the dataset
876
+ _Validators._validate_dataframe_has_argument_columns(stat_list, "StatImputeList", self.data, "df")
877
+
878
+ _Validators._validate_dataframe_has_argument_columns(literal_list, "LiteralImputeList", self.data, "df")
879
+
880
+ # Creating fit params
881
+ fit_param = {
882
+ "data" : self.data,
883
+ "stats_columns" : stat_list,
884
+ "stats" : stat_method,
885
+ "literals_columns" : literal_list,
886
+ "literals" : literal_value
887
+ }
888
+ # Fitting on dataset
889
+ fit_obj = SimpleImputeFit(**fit_param)
890
+ # Storing custom fit object for imputation in data transform dictionary
891
+ self.data_transform_dict["custom_imputation_ind"] = True
892
+ self.data_transform_dict["custom_imputation_fit_object"] = fit_obj.output
893
+ # Creating transform params
894
+ transform_param = {
895
+ "data" : self.data,
896
+ "object" : fit_obj.output,
897
+ "persist" : True
898
+ }
899
+ # Updating dataset with transform result
900
+ self.data = SimpleImputeTransform(**transform_param).result
901
+ self._display_msg(msg="Updated dataset sample after performing customized missing value imputation:",
902
+ data=self.data,
903
+ progress_bar=self.progress_bar)
904
+ else:
905
+ self._display_msg(inline_msg="No information provided for performing customized missing value handling. \
906
+ AutoML will proceed with default missing value handling method.",
907
+ progress_bar=self.progress_bar)
908
+ else:
909
+ self._display_msg(inline_msg="Proceeding with default option for missing value imputation.",
910
+ progress_bar=self.progress_bar)
911
+
912
+ # Proceeding with default method for handling remaining missing values
913
+ self._display_msg(inline_msg="Proceeding with default option for handling remaining missing values.",
914
+ progress_bar=self.progress_bar)
915
+ self._handling_missing_value()
916
+ self._impute_missing_value()
917
+
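The keys read above imply the shape of the custom JSON that drives this step. A hypothetical example (only the key names come from the code; column names and values are placeholders):

custom_data = {
    "MissingValueHandlingIndicator": True,
    "MissingValueHandlingParam": {
        "DroppingColumnIndicator": True,
        "DroppingColumnList": ["notes"],            # columns dropped entirely
        "DroppingRowIndicator": True,
        "DroppingRowList": ["customer_id"],         # drop rows with NULLs in these columns
        "ImputeMissingIndicator": True,
        "StatImputeList": ["age", "city"],          # statistic-based imputation
        "StatImputeMethod": ["median", "mode"],
        "LiteralImputeList": ["segment"],           # constant-value imputation
        "LiteralImputeValue": ["unknown"]
    }
}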
918
+ def _bin_code_transformation(self):
919
+ """
920
+ DESCRIPTION:
921
+ Function to perform customized binning on features based on user input.
922
+
923
+ """
924
+ # Fetching user input for performing bin code transformation.
925
+ bin_code_input = self.custom_data.get("BincodeIndicator", False)
926
+
927
+ if bin_code_input:
928
+ # Storing custom bin code transformation indicator in data transform dictionary
929
+ self.data_transform_dict['custom_bincode_ind'] = True
930
+ # Fetching parameters required for performing the transformation.
931
+ extracted_col = self.custom_data.get("BincodeParam", None)
932
+ if not extracted_col:
933
+ self._display_msg(inline_msg="BincodeParam is empty. Skipping customized bincode transformation.",
934
+ progress_bar=self.progress_bar)
935
+ else:
936
+ # Creating lists for storing column and binning information for performing transformation
937
+ equal_width_bin_list = []
938
+ equal_width_bin_columns = []
939
+ var_width_bin_list = []
940
+ var_width_bin_columns = []
941
+
942
+ # Checking whether the columns are present in the dataset
943
+ _Validators._validate_dataframe_has_argument_columns(list(extracted_col.keys()), "BincodeParam", self.data, "df")
944
+
945
+ for col,transform_val in extracted_col.items():
946
+ # Fetching type of binning to be performed
947
+ bin_trans_type = transform_val["Type"]
948
+ # Fetching number of bins to be created
949
+ num_bin = transform_val["NumOfBins"]
950
+ # Checking for bin types and adding details into lists for binning
951
+ if bin_trans_type == "Equal-Width":
952
+ bins = num_bin
953
+ equal_width_bin_list.append(bins)
954
+ equal_width_bin_columns.append(col)
955
+ elif bin_trans_type == "Variable-Width":
956
+ var_width_bin_columns.append(col)
957
+ bins = num_bin
958
+ for i in range(1, bins+1):
959
+ # Forming binning name as per expected input
960
+ temp="Bin_"+str(i)
961
+ # Fetching required details for variable type binning
962
+ minval = transform_val[temp]["min_value"]
963
+ maxval = transform_val[temp]["max_value"]
964
+ label = transform_val[temp]["label"]
965
+ # Appending information of each bin
966
+ var_width_bin_list.append({ "ColumnName":col, "MinValue":minval, "MaxValue":maxval, "Label":label})
967
+ # Checking column list for performing binning with Equal-Width.
968
+ if len(equal_width_bin_columns) != 0:
969
+ # Adding fit parameter for performing binning with Equal-Width.
970
+ fit_params={
971
+ "data" : self.data,
972
+ "target_columns": equal_width_bin_columns,
973
+ "method_type" : "Equal-Width",
974
+ "nbins" : bins
975
+ }
976
+ eql_bin_code_fit = BincodeFit(**fit_params)
977
+ # Storing fit object and column list for Equal-Width binning in data transform dictionary
978
+ self.data_transform_dict['custom_eql_bincode_col'] = equal_width_bin_columns
979
+ self.data_transform_dict['custom_eql_bincode_fit_object'] = eql_bin_code_fit.output
980
+ # Extracting accumulate columns
981
+ accumulate_columns = self._extract_list(self.data.columns, equal_width_bin_columns)
982
+ # Adding transform parameters for performing binning with Equal-Width.
983
+ eql_transform_params={
984
+ "data" : self.data,
985
+ "object" : eql_bin_code_fit.output,
986
+ "accumulate" : accumulate_columns,
987
+ "persist" : True,
988
+ }
989
+ self.data = BincodeTransform(**eql_transform_params).result
990
+ self._display_msg(msg="\nUpdated dataset sample after performing Equal-Width binning :-",
991
+ data=self.data,
992
+ progress_bar=self.progress_bar)
993
+ else:
994
+ self._display_msg(inline_msg="No information provided for Equal-Width Transformation.",
995
+ progress_bar=self.progress_bar)
996
+
997
+ if len(var_width_bin_columns) != 0:
998
+ # Creating a pandas DataFrame and then a teradataml DataFrame for storing binning information
999
+ var_bin_table = pd.DataFrame(var_width_bin_list, columns=["ColumnName", "MinValue", "MaxValue", "Label"])
1000
+ self._display_msg(msg="Variable-Width binning information:-",
1001
+ data=var_bin_table,
1002
+ progress_bar=self.progress_bar)
1003
+ copy_to_sql(df=var_bin_table, table_name="automl_bincode_var_fit", temporary=True)
1004
+ var_fit_input = DataFrame.from_table("automl_bincode_var_fit")
1005
+ fit_params = {
1006
+ "data" : self.data,
1007
+ "fit_data": var_fit_input,
1008
+ "fit_data_order_column" : ["MinValue", "MaxValue"],
1009
+ "target_columns": var_width_bin_columns,
1010
+ "minvalue_column" : "MinValue",
1011
+ "maxvalue_column" : "MaxValue",
1012
+ "label_column" : "Label",
1013
+ "method_type" : "Variable-Width",
1014
+ "label_prefix" : "label_prefix"
1015
+ }
1016
+ var_bin_code_fit = BincodeFit(**fit_params)
1017
+ # Storing fit object and column list for Variable-Width binning in data transform dictionary
1018
+ self.data_transform_dict['custom_var_bincode_col'] = var_width_bin_columns
1019
+ self.data_transform_dict['custom_var_bincode_fit_object'] = var_bin_code_fit.output
1020
+ accumulate_columns = self._extract_list(self.data.columns, var_width_bin_columns)
1021
+ var_transform_params = {
1022
+ "data" : self.data,
1023
+ "object" : var_bin_code_fit.output,
1024
+ "object_order_column" : "TD_MinValue_BINFIT",
1025
+ "accumulate" : accumulate_columns,
1026
+ "persist" : True
1027
+ }
1028
+ self.data = BincodeTransform(**var_transform_params).result
1029
+ self._display_msg(msg="Updated dataset sample after performing Variable-Width binning:",
1030
+ data=self.data,
1031
+ progress_bar=self.progress_bar)
1032
+ else:
1033
+ self._display_msg(inline_msg="No information provided for Variable-Width Transformation.",
1034
+ progress_bar=self.progress_bar)
1035
+ else:
1036
+ self._display_msg(inline_msg="No information provided for Variable-Width Transformation.",
1037
+ progress_bar=self.progress_bar)
1038
+
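A hypothetical "BincodeParam" entry matching the keys parsed above; one column uses Equal-Width binning and one uses Variable-Width binning (column names, ranges, and labels are placeholders):

custom_data = {
    "BincodeIndicator": True,
    "BincodeParam": {
        "age": {"Type": "Equal-Width", "NumOfBins": 5},
        "income": {
            "Type": "Variable-Width",
            "NumOfBins": 2,
            "Bin_1": {"min_value": 0,     "max_value": 50000,  "label": "low"},
            "Bin_2": {"min_value": 50001, "max_value": 200000, "label": "high"}
        }
    }
}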
1039
+ def _string_manipulation(self):
1040
+ """
1041
+ DESCRIPTION:
1042
+ Function to perform customized string manipulations on categorical features based on user input.
1043
+
1044
+ """
1045
+ # Fetching user input for performing string manipulation.
1046
+ str_mnpl_input = self.custom_data.get("StringManipulationIndicator", False)
1047
+ # Checking user input for string manipulation on categorical features.
1048
+ if str_mnpl_input:
1049
+ # Storing custom string manipulation indicator in data transform dictionary
1050
+ self.data_transform_dict['custom_string_manipulation_ind'] = True
1051
+ # Fetching list required for performing operation.
1052
+ extracted_col = self.custom_data.get("StringManipulationParam", None)
1053
+ if not extracted_col:
1054
+ self._display_msg(inline_msg="No information provided for performing string manipulation.",
1055
+ progress_bar=self.progress_bar)
1056
+ else:
1057
+ # Checking whether the columns are present in the dataset
1058
+ _Validators._validate_dataframe_has_argument_columns(list(extracted_col.keys()), "StringManipulationParam", self.data, "df")
1059
+
1060
+ for target_col,transform_val in extracted_col.items():
1061
+ self.data = self._str_method_mapping(target_col, transform_val)
1062
+ # Storing custom string manipulation parameters in data transform dictionary
1063
+ self.data_transform_dict['custom_string_manipulation_param'] = extracted_col
1064
+
1065
+ self._display_msg(msg="Updated dataset sample after performing string manipulation:",
1066
+ data=self.data,
1067
+ progress_bar=self.progress_bar)
1068
+ else:
1069
+ self._display_msg(inline_msg="Skipping customized string manipulation.")
1070
+
1071
+ def _str_method_mapping(self,
1072
+ target_col,
1073
+ transform_val):
1074
+ """
1075
+ DESCRIPTION:
1076
+ Function to map customized parameters according to passed method and
1077
+ performs string manipulation on categorical features.
1078
+
1079
+ PARAMETERS:
1080
+ target_col:
1081
+ Required Argument.
1082
+ Specifies feature for applying string manipulation.
1083
+ Types: str
1084
+
1085
+ transform_val:
1086
+ Required Argument.
1087
+ Specifies the different parameters required for applying string manipulation.
1088
+ Types: dict
1089
+
1090
+ RETURNS:
1091
+ Dataframe containing transformed data after applying string manipulation.
1092
+
1093
+ """
1094
+ # Creating list of features for accumulating while performing string manipulation on certain features
1095
+ accumulate_columns = self._extract_list(self.data.columns, [target_col])
1096
+
1097
+ # Fetching required parameters from json object
1098
+ string_operation = transform_val["StringOperation"]
1099
+
1100
+ # Storing general parameters for performing string transformation
1101
+ fit_params = {
1102
+ "data" : self.data,
1103
+ "target_columns" : target_col,
1104
+ "string_operation" : string_operation,
1105
+ "accumulate" : accumulate_columns,
1106
+ "inplace" : True,
1107
+ "persist" : True
1108
+ }
1109
+ # Adding additional parameters based on string operation type
1110
+ if string_operation in ["StringCon", "StringTrim"]:
1111
+ string_argument = transform_val["String"]
1112
+ fit_params = {**fit_params,
1113
+ "string" : string_argument}
1114
+ elif string_operation == "StringPad":
1115
+ string_argument = transform_val["String"]
1116
+ string_length = transform_val["StringLength"]
1117
+ fit_params = {**fit_params,
1118
+ "string" : string_argument,
1119
+ "string_length" : string_length}
1120
+ elif string_operation == "Substring":
1121
+ string_index = transform_val["StartIndex"]
1122
+ string_length = transform_val["StringLength"]
1123
+ fit_params = {**fit_params,
1124
+ "start_index" : string_index,
1125
+ "string_length" : string_length}
1126
+
1127
+ # returning dataset after performing string manipulation
1128
+ return StrApply(**fit_params).result
1129
+
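Based on the branches above, a hypothetical "StringManipulationParam" covering the operation types the mapper recognizes (column names and argument values are placeholders):

custom_data = {
    "StringManipulationIndicator": True,
    "StringManipulationParam": {
        "city":         {"StringOperation": "StringTrim", "String": " "},
        "state_code":   {"StringOperation": "StringPad", "String": "0", "StringLength": 5},
        "product_code": {"StringOperation": "Substring", "StartIndex": 1, "StringLength": 3}
    }
}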
1130
+ def _one_hot_encoding(self,
1131
+ one_hot_columns,
1132
+ unique_counts):
1133
+ """
1134
+ DESCRIPTION:
1135
+ Function performs the one hot encoding on categorical columns/features in the dataset.
1136
+
1137
+ PARAMETERS:
1138
+ one_hot_columns:
1139
+ Required Argument.
1140
+ Specifies the categorical columns for which one hot encoding will be performed.
1141
+ Types: str or list of strings (str)
1142
+
1143
+ unique_counts:
1144
+ Required Argument.
1145
+ Specifies the unique counts in the categorical columns.
1146
+ Types: int or list of integer (int)
1147
+
1148
+ """
1149
+ # TD function will add an extra "<column>_other" column during one hot encoding, so
+ # initializing this list to remove those extra columns
1151
+ drop_lst = [ele + "_other" for ele in one_hot_columns]
1152
+ # Adding fit parameters for performing encoding
1153
+ fit_params = {
1154
+ "data" : self.data,
1155
+ "approach" : "auto",
1156
+ "is_input_dense" : True,
1157
+ "target_column" : one_hot_columns,
1158
+ "category_counts" : unique_counts,
1159
+ "other_column" : "other"
1160
+ }
1161
+ # Performing one hot encoding fit on target columns
1162
+ fit_obj = OneHotEncodingFit(**fit_params)
1163
+ # Storing indicator, fit object and column drop list for one hot encoding in data transform dictionary
1164
+ self.data_transform_dict['one_hot_encoding_ind'] = True
1165
+ self.data_transform_dict['one_hot_encoding_fit_obj'].update({self.one_hot_obj_count : fit_obj.result})
1166
+ self.data_transform_dict['one_hot_encoding_drop_list'].extend(drop_lst)
1167
+ self.one_hot_obj_count = self.one_hot_obj_count + 1
1168
+ # Adding transform parameters for performing encoding
1169
+ transform_params = {
1170
+ "data" : self.data,
1171
+ "object" : fit_obj.result,
1172
+ "is_input_dense" : True,
1173
+ "persist" : True
1174
+ }
1175
+ # Performing one hot encoding transformation
1176
+ transform_obj = OneHotEncodingTransform(**transform_params)
1177
+ self.data = transform_obj.result.drop(drop_lst, axis=1)
1178
+
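The same fit/transform pattern in isolation; a sketch assuming a connected session, a hypothetical table, and made-up categorical columns and counts (arguments mirror the calls above):

from teradataml import DataFrame, OneHotEncodingFit, OneHotEncodingTransform

df = DataFrame("customer_data")                 # hypothetical table
cat_cols = ["gender", "city"]                   # assumed categorical columns
counts = [2, 40]                                # distinct values per column

fit_obj = OneHotEncodingFit(data=df,
                            approach="auto",
                            is_input_dense=True,
                            target_column=cat_cols,
                            category_counts=counts,
                            other_column="other")
encoded = OneHotEncodingTransform(data=df,
                                  object=fit_obj.result,
                                  is_input_dense=True).result
# Drop the helper "<column>_other" columns added by the function.
encoded = encoded.drop([c + "_other" for c in cat_cols], axis=1)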
1179
+ def _ordinal_encoding(self,
1180
+ ordinal_columns):
1181
+ """
1182
+ DESCRIPTION:
1183
+ Function performs the ordinal encoding on categorical columns or features in the dataset.
1184
+
1185
+ PARAMETERS:
1186
+ ordinal_columns:
1187
+ Required Argument.
1188
+ Specifies the categorical columns for which ordinal encoding will be performed.
1189
+ Types: str or list of strings (str)
1190
+ """
1191
+ # Adding fit parameters for performing encoding
1192
+ fit_params = {
1193
+ "data" : self.data,
1194
+ "target_column" : ordinal_columns,
1195
+ "volatile" : True
1196
+ }
1197
+ # Performing ordinal encoding fit on target columns
1198
+ ord_fit_obj = OrdinalEncodingFit(**fit_params)
1199
+ # Storing fit object and column list for ordinal encoding in data transform dictionary
1200
+ if ordinal_columns[0] != self.target_column:
1201
+ self.data_transform_dict["custom_ord_encoding_fit_obj"] = ord_fit_obj.result
1202
+ self.data_transform_dict['custom_ord_encoding_col'] = ordinal_columns
1203
+ else:
1204
+ self.data_transform_dict['target_col_encode_ind'] = True
1205
+ self.data_transform_dict['target_col_ord_encoding_fit_obj'] = ord_fit_obj.result
1206
+ # Extracting accumulate columns
1207
+ accumulate_columns = self._extract_list(self.data.columns, ordinal_columns)
1208
+ # Adding transform parameters for performing encoding
1209
+ transform_params = {
1210
+ "data" : self.data,
1211
+ "object" : ord_fit_obj.result,
1212
+ "accumulate" : accumulate_columns,
1213
+ "persist" : True
1214
+ }
1215
+ # Performing ordinal encoding transformation
1216
+ self.data = OrdinalEncodingTransform(**transform_params).result
1217
+
1218
+ if len(ordinal_columns) == 1 and ordinal_columns[0] == self.target_column:
1219
+ self.target_label = ord_fit_obj
1220
+
1221
+
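A corresponding standalone sketch of ordinal encoding, assuming a connected session and a hypothetical table and column (arguments mirror the calls above):

from teradataml import DataFrame, OrdinalEncodingFit, OrdinalEncodingTransform

df = DataFrame("customer_data")                       # hypothetical table
fit_obj = OrdinalEncodingFit(data=df,
                             target_column="education_level",
                             volatile=True)
encoded = OrdinalEncodingTransform(
    data=df,
    object=fit_obj.result,
    accumulate=[c for c in df.columns if c != "education_level"]).result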
1222
+ def _target_encoding(self,
1223
+ target_encoding_list):
1224
+ """
1225
+ DESCRIPTION:
1226
+ Function performs the target encoding on categorical columns/features in the dataset.
1227
+
1228
+ PARAMETERS:
1229
+ target_encoding_list:
1230
+ Required Argument.
1231
+ Specifies the categorical columns, along with their encoding parameters, for which target encoding will be performed.
+ Types: dict
1233
+ """
1234
+ # Fetching all columns on which target encoding will be performed.
1235
+ target_columns= list(target_encoding_list.keys())
1236
+ # Checking whether the columns are present in the dataset
1237
+ _Validators._validate_dataframe_has_argument_columns(target_columns, "TargetEncodingList", self.data, "df")
1238
+ # Finding distinct values and counts for columns.
1239
+ cat_sum = CategoricalSummary(data = self.data,
1240
+ target_columns = target_columns)
1241
+ category_data=cat_sum.result.groupby("ColumnName").count()
1242
+ category_data = category_data.assign(drop_columns = True,
1243
+ ColumnName = category_data.ColumnName,
1244
+ CategoryCount = category_data.count_DistinctValue)
1245
+ # Storing indicator and fit object for target encoding in data transform dictionary
1246
+ self.data_transform_dict["custom_target_encoding_ind"] = True
1247
+ self.data_transform_dict["custom_target_encoding_fit_obj"] = {}
1248
+ # Fetching required argument for performing target encoding
1249
+ for col,transform_val in target_encoding_list.items():
1250
+ encoder_method = transform_val["encoder_method"]
1251
+ response_column = transform_val["response_column"]
1252
+ # Adding fit parameters for performing encoding
1253
+ fit_params = {
1254
+ "data" : self.data,
1255
+ "category_data" : category_data,
1256
+ "encoder_method" : encoder_method,
1257
+ "target_columns" : col,
1258
+ "response_column" : response_column
1259
+ }
1260
+ if encoder_method == "CBM_DIRICHLET":
1261
+ num_distinct_responses=transform_val["num_distinct_responses"]
1262
+ fit_params = {**fit_params,
1263
+ "num_distinct_responses" : num_distinct_responses}
1264
+ # Performing target encoding fit on target columns
1265
+ tar_fit_obj = TargetEncodingFit(**fit_params)
1266
+ # Storing each column fit object for target encoding in data transform dictionary
1267
+ self.data_transform_dict["custom_target_encoding_fit_obj"].update({col : tar_fit_obj})
1268
+ # Extracting accumulate columns
1269
+ accumulate_columns = self._extract_list(self.data.columns, [col])
1270
+ # Adding transform parameters for performing encoding
1271
+ transform_params = {
1272
+ "data" : self.data,
1273
+ "object" : tar_fit_obj,
1274
+ "accumulate" : accumulate_columns,
1275
+ "persist" : True
1276
+ }
1277
+ # Performing target encoding transformation
1278
+ self.data = TargetEncodingTransform(**transform_params).result
1279
+
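A hypothetical "TargetEncodingList" entry in the shape this method consumes; only the key names and the CBM_DIRICHLET method come from the code, the column names and counts are placeholders:

target_encoding_list = {
    "city": {
        "encoder_method": "CBM_DIRICHLET",
        "response_column": "churn",          # assumed response column
        "num_distinct_responses": 2          # required only for CBM_DIRICHLET
    }
}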
1280
+ def _encoding_categorical_columns(self):
1281
+ """
1282
+ DESCRIPTION:
1283
+ Function detects the categorical columns and performs encoding on categorical columns in the dataset.
1284
+ """
1285
+ self._display_msg(msg="\nPerforming encoding for categorical columns ...",
1286
+ progress_bar=self.progress_bar,
1287
+ show_data=True)
1288
+ start_time = time.time()
1289
+
1290
+ ohe_col = []
1291
+ unique_count = []
1292
+
1293
+ # List of columns before one hot
1294
+ col_bf_ohe = self.data.columns
1295
+
1296
+ # Get distinct value in each column
1297
+ self._get_distinct_count()
1298
+
1299
+ # Detecting categorical columns with their unique counts
1300
+ for col, d_type in self.data._column_names_and_types:
1301
+ if d_type in ['str']:
1302
+ ohe_col.append(col)
1303
+ unique_count.append(self.counts_dict[f'count_{col}'])
1304
+
1305
+ if len(ohe_col) != 0:
1306
+ self._one_hot_encoding(ohe_col, unique_count)
1307
+
1308
+ self._display_msg(msg="ONE HOT Encoding these Columns:",
1309
+ col_lst=ohe_col,
1310
+ progress_bar=self.progress_bar)
1311
+ else:
1312
+ self._display_msg(inline_msg="Encoding not required.",
1313
+ progress_bar=self.progress_bar)
1314
+
1315
+ # List of columns after one hot
1316
+ col_af_ohe = self.data.columns
1317
+
1318
+ # List of excluded columns from outlier processing and scaling
1319
+ self.excluded_cols= self._extract_list(col_af_ohe, col_bf_ohe)
1320
+
1321
+ end_time = time.time()
1322
+ self._display_msg(msg="Time taken to encode the columns: {:.2f} sec".format( end_time - start_time),
1323
+ progress_bar=self.progress_bar,
1324
+ show_data=True)
1325
+
1326
+ def _custom_categorical_encoding(self):
1327
+ """
1328
+ DESCRIPTION:
1329
+ Function to perform specific encoding on the categorical columns based on user input.
1330
+ If validation fails, default encoding is performed on all remaining categorical columns.
1331
+ """
1332
+ self._display_msg(msg="\nStarting Customized Categorical Feature Encoding ...",
1333
+ progress_bar=self.progress_bar)
1334
+ cat_end_input = self.custom_data.get("CategoricalEncodingIndicator", False)
1335
+ # Checking user input for categorical encoding
1336
+ if cat_end_input:
1337
+ # Storing custom categorical encoding indicator in data transform dictionary
1338
+ self.data_transform_dict["custom_categorical_encoding_ind"] = True
1339
+ # Fetching user input list for performing categorical encoding
1340
+ encoding_list = self.custom_data.get("CategoricalEncodingParam", None)
1341
+ if encoding_list:
1342
+ onehot_encode_ind = encoding_list.get("OneHotEncodingIndicator", False)
1343
+ ordinal_encode_ind = encoding_list.get("OrdinalEncodingIndicator", False)
1344
+ target_encode_ind = encoding_list.get("TargetEncodingIndicator", False)
1345
+ # Checking whether any of the categorical encoding technique indicators is set
1346
+ if not any([onehot_encode_ind, ordinal_encode_ind, target_encode_ind]):
1347
+ self._display_msg(inline_msg="No information provided for any type of customized categorical encoding techniques. AutoML will proceed with default encoding technique.",
1348
+ progress_bar=self.progress_bar)
1349
+ else:
1350
+ if onehot_encode_ind:
1351
+ unique_count = []
1352
+ ohe_list = encoding_list.get("OneHotEncodingList", None)
1353
+ # Checking for empty list
1354
+ if not ohe_list:
1355
+ self._display_msg(inline_msg="No information provided for customized one hot encoding technique.",
1356
+ progress_bar=self.progress_bar)
1357
+ else:
1358
+ # Checking whether the columns are present in the dataset
1359
+ _Validators._validate_dataframe_has_argument_columns(ohe_list, "OneHotEncodingList", self.data, "df")
1360
+
1361
+ # Keeping track of existing columns before applying one hot encoding
1362
+ col_bf_ohe = self.data.columns
1363
+ # Detecting categorical columns with their unique counts
1364
+ for col in ohe_list:
1365
+ unique_count.append(self.data.drop_duplicate(col).size)
1366
+ # Performing one hot encoding
1367
+ self._one_hot_encoding(ohe_list, unique_count)
1368
+ # Keeping track of new columns after applying one hot encoding
1369
+ col_af_ohe = self.data.columns
1370
+ # Fetching list of columns on which outlier processing should not be applied
1371
+ self.excluded_cols.extend(self._extract_list(col_af_ohe, col_bf_ohe))
1372
+
1373
+ self._display_msg(msg="Updated dataset sample after performing one hot encoding:",
1374
+ data=self.data,
1375
+ progress_bar=self.progress_bar)
1376
+
1377
+ if ordinal_encode_ind:
1378
+ ord_list = encoding_list.get("OrdinalEncodingList", None)
1379
+ # Checking for empty list
1380
+ if not ord_list:
1381
+ self._display_msg(inline_msg="No information provided for customized ordinal encoding technique.",
1382
+ progress_bar=self.progress_bar)
1383
+ else:
1384
+ # Checking whether the columns are present in the dataset
1385
+ _Validators._validate_dataframe_has_argument_columns(ord_list, "OrdinalEncodingList", self.data, "df")
1386
+
1387
+ # Performing ordinal encoding
1388
+ self._ordinal_encoding(ord_list)
1389
+ self._display_msg(msg="Updated dataset sample after performing ordinal encoding:",
1390
+ data=self.data,
1391
+ progress_bar=self.progress_bar)
1392
+
1393
+ if target_encode_ind:
1394
+ tar_list = encoding_list.get("TargetEncodingList", None)
1395
+ if not tar_list:
1396
+ self._display_msg(inline_msg="No information provided for customized target encoding technique.",
1397
+ progress_bar=self.progress_bar)
1398
+ else:
1399
+ # Performing target encoding
1400
+ self._target_encoding(tar_list)
1401
+ self._display_msg(msg="Updated dataset sample after performing target encoding:",
1402
+ data=self.data,
1403
+ progress_bar=self.progress_bar)
1404
+ else:
1405
+ self._display_msg(inline_msg="No input provided for performing customized categorical encoding. AutoML will proceed with default encoding technique.",
1406
+ progress_bar=self.progress_bar)
1407
+ else:
1408
+ self._display_msg(inline_msg="AutoML will proceed with default encoding technique.",
1409
+ progress_bar=self.progress_bar)
1410
+
1411
+ # Performing default encoding on remaining categorical columns
1412
+ self._encoding_categorical_columns()
1413
+
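Putting the indicators together, a hypothetical "CategoricalEncodingParam" block (key names come from the code; column names are placeholders):

custom_data = {
    "CategoricalEncodingIndicator": True,
    "CategoricalEncodingParam": {
        "OneHotEncodingIndicator": True,
        "OneHotEncodingList": ["gender", "city"],
        "OrdinalEncodingIndicator": True,
        "OrdinalEncodingList": ["education_level"],
        "TargetEncodingIndicator": False
    }
}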
1414
+ def _numapply_transformation(self, target_col, transform_val):
1415
+ """
1416
+ DESCRIPTION:
1417
+ Function to perform different numerical transformations using NumApply on numerical features based on user input.
1418
+
1419
+ """
1420
+ # Fetching columns for accumulation
1421
+ accumulate_columns = self._extract_list(self.data.columns, [target_col])
1422
+ apply_method = transform_val["apply_method"]
1423
+ # Adding fit parameters for performing transformation
1424
+ fit_params={
1425
+ "data": self.data,
1426
+ "target_columns" : target_col,
1427
+ "apply_method" : apply_method,
1428
+ "inplace" : True,
1429
+ "persist" :True,
1430
+ "accumulate" : accumulate_columns
1431
+ }
1432
+ # Adding additional details to fit parameters in case of sigmoid transformation
1433
+ if apply_method == "sigmoid":
1434
+ sigmoid_style=transform_val["sigmoid_style"]
1435
+ fit_params = {**fit_params, "sigmoid_style" : sigmoid_style}
1436
+ # Performing transformation on target columns
1437
+ return NumApply(**fit_params).result
1438
+
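A standalone sketch of the NumApply call built above, assuming a connected session and a hypothetical table and column; the "sigmoid_style" value is an assumption, not taken from the code:

from teradataml import DataFrame, NumApply

df = DataFrame("customer_data")                  # hypothetical table
transformed = NumApply(data=df,
                       target_columns="balance",
                       apply_method="sigmoid",
                       sigmoid_style="logit",    # assumed style value
                       inplace=True,
                       persist=True,
                       accumulate=[c for c in df.columns if c != "balance"]).result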
1439
+ def _numerical_transformation(self, target_columns, num_transform_data):
1440
+ """
1441
+ DESCRIPTION:
1442
+ Function to perform different numerical transformations using Fit and Transform on numerical features based on user input.
1443
+
1444
+ """
1445
+ # Adding fit parameters for transformation
1446
+ fit_params={
1447
+ "data" : self.data,
1448
+ "object" : num_transform_data,
1449
+ "object_order_column" : "TargetColumn"
1450
+ }
1451
+ # Performing fit with all arguments.
1452
+ num_fit_obj = Fit(**fit_params)
1453
+ # Fetching all numerical columns
1454
+ numerical_columns = [col for col, d_type in self.data._column_names_and_types if d_type in ["int","float"]]
1455
+ # Extracting id columns, i.e., numerical columns that the transformation should not affect
1456
+ id_columns = self._extract_list(numerical_columns,target_columns)
1457
+ # Storing fit object and id column list for numerical transformation in data transform dictionary
1458
+ self.data_transform_dict['custom_numerical_transformation_fit_object'] = num_fit_obj.result
1459
+ self.data_transform_dict['custom_numerical_transformation_id_columns'] = id_columns
1460
+ # Adding transform parameters for transformation
1461
+ transform_params={
1462
+ "data" : self.data,
1463
+ "object" : num_fit_obj.result,
1464
+ "id_columns" : id_columns,
1465
+ "persist" :True
1466
+ }
1467
+ # Performing transformation on target columns
1468
+ self.data = Transform(**transform_params).result
1469
+ self._display_msg(msg="Updated dataset sample after applying numerical transformation:",
1470
+ data=self.data,
1471
+ progress_bar=self.progress_bar)
1472
+
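The Fit/Transform pair above is driven by a small specification table. A sketch that builds one for a single hypothetical column and applies it (table and column names are placeholders; argument names mirror the calls above):

import json
import pandas as pd
from teradataml import DataFrame, Fit, Transform, copy_to_sql

# Hypothetical spec: apply a base-10 log transformation to "income".
spec = pd.DataFrame([{"TargetColumn": "income",
                      "DefaultValue": 1,
                      "Transformation": "log",
                      "Parameters": json.dumps({"base": 10})}],
                    columns=["TargetColumn", "DefaultValue", "Transformation", "Parameters"])
copy_to_sql(df=spec, table_name="num_transform_spec", temporary=True)

df = DataFrame("customer_data")                          # hypothetical input table
fit_obj = Fit(data=df,
              object=DataFrame.from_table("num_transform_spec"),
              object_order_column="TargetColumn")
transformed = Transform(data=df,
                        object=fit_obj.result,
                        id_columns=["customer_id"],       # columns carried through unchanged (assumed)
                        persist=True).result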
1473
+ def _mathematical_transformation(self):
1474
+ """
1475
+ DESCRIPTION:
1476
+ Function to perform different mathematical transformations (i.e., log, pow,
1477
+ exp, sininv, sigmoid) on numerical features based on user input.
1478
+ """
1479
+ self._display_msg(msg="\nStarting customized mathematical transformation ...",
1480
+ progress_bar=self.progress_bar,
1481
+ show_data=True)
1482
+
1483
+ mat_transform_input = self.custom_data.get("MathameticalTransformationIndicator", False)
1484
+ # Checking user input for mathematical transformations
1485
+ if mat_transform_input:
1486
+ # Extracting list required for mathematical transformations
1487
+ mat_transform_list = self.custom_data.get("MathameticalTransformationParam", None)
1488
+ if mat_transform_list:
1489
+ # Checking whether the columns are present in the dataset
1490
+ _Validators._validate_dataframe_has_argument_columns(list(mat_transform_list.keys()),
1491
+ "MathameticalTransformationParam", self.data, "df")
1492
+
1493
+ # Lists for storing target columns and mathematical transformation information
1494
+ transform_data=[]
1495
+ target_columns=[]
1496
+ # Storing custom mathematical transformation indicator in data transform dictionary
1497
+ self.data_transform_dict['custom_mathematical_transformation_ind'] = True
1498
+ # Storing custom numapply transformation parameters in data transform dictionary
1499
+ self.data_transform_dict['custom_numapply_transformation_param'] = {}
1500
+
1501
+ for col, transform_val in mat_transform_list.items():
1502
+ apply_method=transform_val["apply_method"]
1503
+ if apply_method in (["sininv","sigmoid"]):
1504
+ # Applying numapply transformation
1505
+ self.data = self._numapply_transformation(col,transform_val)
1506
+ self._display_msg(msg="Updated dataset sample after applying numapply transformation:",
1507
+ data=self.data,
1508
+ progress_bar=self.progress_bar)
1509
+ # Updating parameter details for each column
1510
+ self.data_transform_dict['custom_numapply_transformation_param'].update({col:transform_val})
1511
+ else:
1512
+ # Handling specific scenarios for log and pow transformation
1513
+ parameters=""
1514
+ if apply_method == "log":
1515
+ base = transform_val["base"]
1516
+ parameters = json.dumps({"base":base})
1517
+ elif apply_method == "pow":
1518
+ exponent = transform_val["exponent"]
1519
+ parameters = json.dumps({"exponent":exponent})
1520
+ target_columns.append(col)
1521
+ transform_data.append({"TargetColumn":col, "DefaultValue":1, "Transformation":apply_method, "Parameters":parameters})
1522
+ # Checking for transformation data
1523
+ if len(transform_data):
1524
+ # Converting into a pandas DataFrame and then a teradataml DataFrame for performing further operations
1525
+ transform_data = pd.DataFrame(transform_data, columns=["TargetColumn", "DefaultValue", "Transformation", "Parameters"])
1526
+ self._display_msg(msg="Numerical transformation information :-",
1527
+ data=transform_data,
1528
+ progress_bar=self.progress_bar)
1529
+ copy_to_sql(df=transform_data, table_name="automl_num_transform_data", temporary=True)
1530
+ num_transform_data = DataFrame.from_table("automl_num_transform_data")
1531
+ # Applying transformation using Fit/Transform functions
1532
+ self._numerical_transformation(target_columns, num_transform_data)
1533
+ # Storing custom numerical transformation parameters and column list in data transform dictionary
1534
+ self.data_transform_dict['custom_numerical_transformation_col'] = target_columns
1535
+ self.data_transform_dict['custom_numerical_transformation_params'] = num_transform_data
1536
+ else:
1537
+ self._display_msg(inline_msg="No input provided for performing customized mathematical transformation.",
1538
+ progress_bar=self.progress_bar)
1539
+ else:
1540
+ self._display_msg(inline_msg="Skipping customized mathematical transformation.",
1541
+ progress_bar=self.progress_bar)
1542
+
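A hypothetical "MathameticalTransformationParam" block covering the apply methods handled above (the key spelling follows the code; column names, bases, and exponents are placeholders, and the "sigmoid_style" value is an assumption):

custom_data = {
    "MathameticalTransformationIndicator": True,
    "MathameticalTransformationParam": {
        "income":  {"apply_method": "log", "base": 10},
        "tenure":  {"apply_method": "pow", "exponent": 2},
        "balance": {"apply_method": "sigmoid", "sigmoid_style": "logit"},
        "rate":    {"apply_method": "sininv"}
    }
}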
1543
+ def _non_linear_transformation(self):
1544
+ """
1545
+ DESCRIPTION:
1546
+ Function to perform customized non-linear transformation on numerical features based on user input.
1547
+
1548
+ """
1549
+ self._display_msg(msg="\nStarting customized non-linear transformation ...",
1550
+ progress_bar=self.progress_bar,
1551
+ show_data=True)
1552
+ nl_transform_input = self.custom_data.get("NonLinearTransformationIndicator", False)
1553
+ # Checking user input for non-linear transformation
1554
+ if nl_transform_input:
1555
+ nl_transform_list = self.custom_data.get("NonLinearTransformationParam", None)
1556
+ # Extracting list required for non-linear transformation
1557
+ if nl_transform_list:
1558
+ total_combination = len(nl_transform_list)
1559
+ # Generating all possible combination names
1560
+ possible_combination = ["Combination_"+str(counter) for counter in range(1,total_combination+1)]
1561
+ self._display_msg(msg="Possible combination :",
1562
+ col_lst=possible_combination,
1563
+ progress_bar=self.progress_bar)
1564
+ # Storing custom non-linear transformation indicator in data transform dictionary
1565
+ self.data_transform_dict['custom_non_linear_transformation_ind'] = True
1566
+ # Storing custom non-linear transformation fit object in data transform dictionary
1567
+ self.data_transform_dict['custom_non_linear_transformation_fit_object'] = {}
1568
+ # print("Possible combination :",possible_combination)
1569
+ # Performing transformation for each combination
1570
+ for comb, transform_val in nl_transform_list.items():
1571
+ if comb in possible_combination:
1572
+ target_columns = transform_val["target_columns"]
1573
+ # Checking whether the columns are present in the dataset
1574
+ _Validators._validate_dataframe_has_argument_columns(target_columns,
1575
+ "target_columns", self.data, "df")
1576
+
1577
+ formula = transform_val["formula"]
1578
+ result_column = transform_val["result_column"]
1579
+ # Adding fit params for transformation
1580
+ fit_param = {
1581
+ "data" : self.data,
1582
+ "target_columns" : target_columns,
1583
+ "formula" : formula,
1584
+ "result_column" : result_column
1585
+ }
1586
+ # Performing fit on dataset
1587
+ fit_obj = NonLinearCombineFit(**fit_param)
1588
+ # Updating it for each non-linear combination
1589
+ self.data_transform_dict['custom_non_linear_transformation_fit_object'].update({comb:fit_obj})
1590
+ # Adding transform params for transformation
1591
+ transform_params = {
1592
+ "data" : self.data,
1593
+ "object" : fit_obj,
1594
+ "accumulate" : self.data.columns,
1595
+ "persist" : True
1596
+ }
1597
+ self.data = NonLinearCombineTransform(**transform_params).result
1598
+ else:
1599
+ self._display_msg(inline_msg="Combinations are not as per expectation.",
1600
+ progress_bar=self.progress_bar)
1601
+ self._display_msg(msg="Updated dataset sample after performing non-liner transformation:",
1602
+ data=self.data,
1603
+ progress_bar=self.progress_bar)
1604
+ else:
1605
+ self._display_msg(inline_msg="No information provided for performing customized non-linear transformation.",
1606
+ progress_bar=self.progress_bar)
1607
+ else:
1608
+ self._display_msg(inline_msg="Skipping customized non-linear transformation.",
1609
+ progress_bar=self.progress_bar)
1610
+
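A hypothetical "NonLinearTransformationParam" block in the shape parsed above; the combination key, column names, and result column are placeholders, and the formula string assumes the X0/X1 positional syntax of NonLinearCombineFit:

custom_data = {
    "NonLinearTransformationIndicator": True,
    "NonLinearTransformationParam": {
        "Combination_1": {
            "target_columns": ["balance", "salary"],
            "formula": "Y = X0/X1",                 # assumed formula syntax
            "result_column": "balance_to_salary"
        }
    }
}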
1611
+ def _anti_select_columns(self):
1612
+ """
1613
+ DESCRIPTION:
1614
+ Function to remove specific features from dataset based on user input.
1615
+
1616
+ """
1617
+ self._display_msg(msg="\nStarting customized anti-select columns ...",
1618
+ progress_bar=self.progress_bar,
1619
+ show_data=True)
1620
+ anti_select_input = self.custom_data.get("AntiselectIndicator", False)
1621
+ # Checking user input for anti-select columns
1622
+ if anti_select_input:
1623
+ # Extracting list required for anti-select columns
1624
+ anti_select_list = self.custom_data.get("AntiselectParam", None)
1625
+ if anti_select_list:
1626
+ if all(item in self.data.columns for item in anti_select_list):
1627
+ # Storing custom anti-select columns indicator and column list in data transform dictionary
1628
+ self.data_transform_dict['custom_anti_select_columns_ind'] = True
1629
+ self.data_transform_dict['custom_anti_select_columns'] = anti_select_list
1630
+ fit_params = {
1631
+ "data" : self.data,
1632
+ "exclude" : anti_select_list
1633
+ }
1634
+ # Performing transformation for given user input
1635
+ self.data = Antiselect(**fit_params).result
1636
+ self._display_msg(msg="Updated dataset sample after performing anti-select columns:",
1637
+ data=self.data,
1638
+ progress_bar=self.progress_bar)
1639
+ else:
1640
+ self._display_msg(msg="Columns provided in list are not present in dataset:",
1641
+ col_lst=anti_select_list,
1642
+ progress_bar=self.progress_bar)
1643
+ else:
1644
+ self._display_msg(inline_msg="No information provided for performing anti-select columns operation.",
1645
+ progress_bar=self.progress_bar)
1646
+ else:
1647
+ self._display_msg(inline_msg="Skipping customized anti-select columns.",
1648
+ progress_bar=self.progress_bar)
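For completeness, the Antiselect call used above can be run on its own; a sketch assuming a connected session, a hypothetical table, and placeholder column names:

from teradataml import DataFrame, Antiselect

df = DataFrame("customer_data")                          # hypothetical table
reduced = Antiselect(data=df,
                     exclude=["row_id", "notes"]).result  # columns to remove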