teradataml 17.20.0.7__py3-none-any.whl → 20.0.0.0__py3-none-any.whl
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +1864 -1640
- teradataml/__init__.py +70 -60
- teradataml/_version.py +11 -11
- teradataml/analytics/Transformations.py +2995 -2995
- teradataml/analytics/__init__.py +81 -83
- teradataml/analytics/analytic_function_executor.py +2013 -2010
- teradataml/analytics/analytic_query_generator.py +958 -958
- teradataml/analytics/byom/H2OPredict.py +514 -514
- teradataml/analytics/byom/PMMLPredict.py +437 -437
- teradataml/analytics/byom/__init__.py +14 -14
- teradataml/analytics/json_parser/__init__.py +130 -130
- teradataml/analytics/json_parser/analytic_functions_argument.py +1707 -1707
- teradataml/analytics/json_parser/json_store.py +191 -191
- teradataml/analytics/json_parser/metadata.py +1637 -1637
- teradataml/analytics/json_parser/utils.py +804 -803
- teradataml/analytics/meta_class.py +196 -196
- teradataml/analytics/sqle/DecisionTreePredict.py +455 -470
- teradataml/analytics/sqle/NaiveBayesPredict.py +419 -428
- teradataml/analytics/sqle/__init__.py +97 -110
- teradataml/analytics/sqle/json/decisiontreepredict_sqle.json +78 -78
- teradataml/analytics/sqle/json/naivebayespredict_sqle.json +62 -62
- teradataml/analytics/table_operator/__init__.py +10 -10
- teradataml/analytics/uaf/__init__.py +63 -63
- teradataml/analytics/utils.py +693 -692
- teradataml/analytics/valib.py +1603 -1600
- teradataml/automl/__init__.py +1628 -0
- teradataml/automl/custom_json_utils.py +1270 -0
- teradataml/automl/data_preparation.py +993 -0
- teradataml/automl/data_transformation.py +727 -0
- teradataml/automl/feature_engineering.py +1648 -0
- teradataml/automl/feature_exploration.py +547 -0
- teradataml/automl/model_evaluation.py +163 -0
- teradataml/automl/model_training.py +887 -0
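
The `teradataml/automl/` package is entirely new in 20.0.0.0 (every file above is added with `+N -0`). Judging only from the submodule names (feature exploration, feature engineering, data preparation, model training, model evaluation), the package appears to implement an end-to-end AutoML pipeline. The sketch below is purely illustrative: the `AutoML` class name, its `task_type` argument, and the `fit`/`leaderboard`/`predict` methods are assumptions inferred from the file layout, not confirmed by this diff.

```python
# Hypothetical sketch of the new automl package. All names marked "assumed"
# are inferred from the submodule list above, not from package documentation.
from teradataml import create_context, DataFrame

# AutoML runs in-database, so a Vantage connection is required first.
create_context(host="<host>", username="<user>", password="<password>")

train = DataFrame("bank_churn")  # bank_churn.csv is a new example dataset in this release

from teradataml import AutoML                      # assumed export
aml = AutoML(task_type="Classification")           # assumed constructor argument
aml.fit(train, train.churn)                        # assumed fit(data, target) signature
ranked = aml.leaderboard()                         # assumed: ranked candidate models
preds = aml.predict(train)                         # assumed scoring call
```
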
- teradataml/catalog/__init__.py +1 -3
- teradataml/catalog/byom.py +1759 -1716
- teradataml/catalog/function_argument_mapper.py +859 -861
- teradataml/catalog/model_cataloging_utils.py +491 -1510
- teradataml/clients/pkce_client.py +481 -481
- teradataml/common/aed_utils.py +6 -2
- teradataml/common/bulk_exposed_utils.py +111 -111
- teradataml/common/constants.py +1433 -1441
- teradataml/common/deprecations.py +160 -0
- teradataml/common/exceptions.py +73 -73
- teradataml/common/formula.py +742 -742
- teradataml/common/garbagecollector.py +592 -635
- teradataml/common/messagecodes.py +422 -431
- teradataml/common/messages.py +227 -231
- teradataml/common/sqlbundle.py +693 -693
- teradataml/common/td_coltype_code_to_tdtype.py +48 -48
- teradataml/common/utils.py +2418 -2500
- teradataml/common/warnings.py +25 -25
- teradataml/common/wrapper_utils.py +1 -110
- teradataml/config/dummy_file1.cfg +4 -4
- teradataml/config/dummy_file2.cfg +2 -2
- teradataml/config/sqlengine_alias_definitions_v1.0 +13 -13
- teradataml/config/sqlengine_alias_definitions_v1.1 +19 -19
- teradataml/config/sqlengine_alias_definitions_v1.3 +18 -18
- teradataml/context/aed_context.py +217 -217
- teradataml/context/context.py +1071 -999
- teradataml/data/A_loan.csv +19 -19
- teradataml/data/BINARY_REALS_LEFT.csv +11 -11
- teradataml/data/BINARY_REALS_RIGHT.csv +11 -11
- teradataml/data/B_loan.csv +49 -49
- teradataml/data/BuoyData2.csv +17 -17
- teradataml/data/CONVOLVE2_COMPLEX_LEFT.csv +5 -5
- teradataml/data/CONVOLVE2_COMPLEX_RIGHT.csv +5 -5
- teradataml/data/Convolve2RealsLeft.csv +5 -5
- teradataml/data/Convolve2RealsRight.csv +5 -5
- teradataml/data/Convolve2ValidLeft.csv +11 -11
- teradataml/data/Convolve2ValidRight.csv +11 -11
- teradataml/data/DFFTConv_Real_8_8.csv +65 -65
- teradataml/data/Orders1_12mf.csv +24 -24
- teradataml/data/Pi_loan.csv +7 -7
- teradataml/data/SMOOTHED_DATA.csv +7 -7
- teradataml/data/TestDFFT8.csv +9 -9
- teradataml/data/TestRiver.csv +109 -109
- teradataml/data/Traindata.csv +28 -28
- teradataml/data/acf.csv +17 -17
- teradataml/data/adaboost_example.json +34 -34
- teradataml/data/adaboostpredict_example.json +24 -24
- teradataml/data/additional_table.csv +10 -10
- teradataml/data/admissions_test.csv +21 -21
- teradataml/data/admissions_train.csv +41 -41
- teradataml/data/admissions_train_nulls.csv +41 -41
- teradataml/data/ageandheight.csv +13 -13
- teradataml/data/ageandpressure.csv +31 -31
- teradataml/data/antiselect_example.json +36 -36
- teradataml/data/antiselect_input.csv +8 -8
- teradataml/data/antiselect_input_mixed_case.csv +8 -8
- teradataml/data/applicant_external.csv +6 -6
- teradataml/data/applicant_reference.csv +6 -6
- teradataml/data/arima_example.json +9 -9
- teradataml/data/assortedtext_input.csv +8 -8
- teradataml/data/attribution_example.json +33 -33
- teradataml/data/attribution_sample_table.csv +27 -27
- teradataml/data/attribution_sample_table1.csv +6 -6
- teradataml/data/attribution_sample_table2.csv +11 -11
- teradataml/data/bank_churn.csv +10001 -0
- teradataml/data/bank_web_clicks1.csv +42 -42
- teradataml/data/bank_web_clicks2.csv +91 -91
- teradataml/data/bank_web_url.csv +85 -85
- teradataml/data/barrier.csv +2 -2
- teradataml/data/barrier_new.csv +3 -3
- teradataml/data/betweenness_example.json +13 -13
- teradataml/data/bin_breaks.csv +8 -8
- teradataml/data/bin_fit_ip.csv +3 -3
- teradataml/data/binary_complex_left.csv +11 -11
- teradataml/data/binary_complex_right.csv +11 -11
- teradataml/data/binary_matrix_complex_left.csv +21 -21
- teradataml/data/binary_matrix_complex_right.csv +21 -21
- teradataml/data/binary_matrix_real_left.csv +21 -21
- teradataml/data/binary_matrix_real_right.csv +21 -21
- teradataml/data/blood2ageandweight.csv +26 -26
- teradataml/data/bmi.csv +501 -0
- teradataml/data/boston.csv +507 -507
- teradataml/data/buoydata_mix.csv +11 -11
- teradataml/data/burst_data.csv +5 -5
- teradataml/data/burst_example.json +20 -20
- teradataml/data/byom_example.json +17 -17
- teradataml/data/bytes_table.csv +3 -3
- teradataml/data/cal_housing_ex_raw.csv +70 -70
- teradataml/data/callers.csv +7 -7
- teradataml/data/calls.csv +10 -10
- teradataml/data/cars_hist.csv +33 -33
- teradataml/data/cat_table.csv +24 -24
- teradataml/data/ccm_example.json +31 -31
- teradataml/data/ccm_input.csv +91 -91
- teradataml/data/ccm_input2.csv +13 -13
- teradataml/data/ccmexample.csv +101 -101
- teradataml/data/ccmprepare_example.json +8 -8
- teradataml/data/ccmprepare_input.csv +91 -91
- teradataml/data/cfilter_example.json +12 -12
- teradataml/data/changepointdetection_example.json +18 -18
- teradataml/data/changepointdetectionrt_example.json +8 -8
- teradataml/data/chi_sq.csv +2 -2
- teradataml/data/churn_data.csv +14 -14
- teradataml/data/churn_emission.csv +35 -35
- teradataml/data/churn_initial.csv +3 -3
- teradataml/data/churn_state_transition.csv +5 -5
- teradataml/data/citedges_2.csv +745 -745
- teradataml/data/citvertices_2.csv +1210 -1210
- teradataml/data/clicks2.csv +16 -16
- teradataml/data/clickstream.csv +12 -12
- teradataml/data/clickstream1.csv +11 -11
- teradataml/data/closeness_example.json +15 -15
- teradataml/data/complaints.csv +21 -21
- teradataml/data/complaints_mini.csv +3 -3
- teradataml/data/complaints_testtoken.csv +224 -224
- teradataml/data/complaints_tokens_test.csv +353 -353
- teradataml/data/complaints_traintoken.csv +472 -472
- teradataml/data/computers_category.csv +1001 -1001
- teradataml/data/computers_test1.csv +1252 -1252
- teradataml/data/computers_train1.csv +5009 -5009
- teradataml/data/computers_train1_clustered.csv +5009 -5009
- teradataml/data/confusionmatrix_example.json +9 -9
- teradataml/data/conversion_event_table.csv +3 -3
- teradataml/data/corr_input.csv +17 -17
- teradataml/data/correlation_example.json +11 -11
- teradataml/data/coxhazardratio_example.json +39 -39
- teradataml/data/coxph_example.json +15 -15
- teradataml/data/coxsurvival_example.json +28 -28
- teradataml/data/cpt.csv +41 -41
- teradataml/data/credit_ex_merged.csv +45 -45
- teradataml/data/customer_loyalty.csv +301 -301
- teradataml/data/customer_loyalty_newseq.csv +31 -31
- teradataml/data/dataframe_example.json +146 -146
- teradataml/data/decisionforest_example.json +37 -37
- teradataml/data/decisionforestpredict_example.json +38 -38
- teradataml/data/decisiontree_example.json +21 -21
- teradataml/data/decisiontreepredict_example.json +45 -45
- teradataml/data/dfft2_size4_real.csv +17 -17
- teradataml/data/dfft2_test_matrix16.csv +17 -17
- teradataml/data/dfft2conv_real_4_4.csv +65 -65
- teradataml/data/diabetes.csv +443 -443
- teradataml/data/diabetes_test.csv +89 -89
- teradataml/data/dict_table.csv +5 -5
- teradataml/data/docperterm_table.csv +4 -4
- teradataml/data/docs/__init__.py +1 -1
- teradataml/data/docs/byom/docs/DataRobotPredict.py +180 -180
- teradataml/data/docs/byom/docs/DataikuPredict.py +177 -177
- teradataml/data/docs/byom/docs/H2OPredict.py +324 -324
- teradataml/data/docs/byom/docs/ONNXPredict.py +283 -283
- teradataml/data/docs/byom/docs/PMMLPredict.py +277 -277
- teradataml/data/docs/sqle/docs_17_10/Antiselect.py +82 -82
- teradataml/data/docs/sqle/docs_17_10/Attribution.py +199 -199
- teradataml/data/docs/sqle/docs_17_10/BincodeFit.py +171 -171
- teradataml/data/docs/sqle/docs_17_10/BincodeTransform.py +131 -130
- teradataml/data/docs/sqle/docs_17_10/CategoricalSummary.py +86 -86
- teradataml/data/docs/sqle/docs_17_10/ChiSq.py +90 -90
- teradataml/data/docs/sqle/docs_17_10/ColumnSummary.py +85 -85
- teradataml/data/docs/sqle/docs_17_10/ConvertTo.py +95 -95
- teradataml/data/docs/sqle/docs_17_10/DecisionForestPredict.py +139 -139
- teradataml/data/docs/sqle/docs_17_10/DecisionTreePredict.py +151 -151
- teradataml/data/docs/sqle/docs_17_10/FTest.py +160 -160
- teradataml/data/docs/sqle/docs_17_10/FillRowId.py +82 -82
- teradataml/data/docs/sqle/docs_17_10/Fit.py +87 -87
- teradataml/data/docs/sqle/docs_17_10/GLMPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_10/GetRowsWithMissingValues.py +84 -84
- teradataml/data/docs/sqle/docs_17_10/GetRowsWithoutMissingValues.py +81 -81
- teradataml/data/docs/sqle/docs_17_10/Histogram.py +164 -164
- teradataml/data/docs/sqle/docs_17_10/MovingAverage.py +134 -134
- teradataml/data/docs/sqle/docs_17_10/NGramSplitter.py +208 -208
- teradataml/data/docs/sqle/docs_17_10/NPath.py +265 -265
- teradataml/data/docs/sqle/docs_17_10/NaiveBayesPredict.py +116 -116
- teradataml/data/docs/sqle/docs_17_10/NaiveBayesTextClassifierPredict.py +176 -176
- teradataml/data/docs/sqle/docs_17_10/NumApply.py +147 -147
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +132 -132
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +103 -103
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterFit.py +165 -165
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +101 -101
- teradataml/data/docs/sqle/docs_17_10/Pack.py +128 -128
- teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesFit.py +111 -111
- teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesTransform.py +102 -102
- teradataml/data/docs/sqle/docs_17_10/QQNorm.py +104 -104
- teradataml/data/docs/sqle/docs_17_10/RoundColumns.py +109 -109
- teradataml/data/docs/sqle/docs_17_10/RowNormalizeFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_10/RowNormalizeTransform.py +99 -98
- teradataml/data/docs/sqle/docs_17_10/SVMSparsePredict.py +152 -152
- teradataml/data/docs/sqle/docs_17_10/ScaleFit.py +197 -197
- teradataml/data/docs/sqle/docs_17_10/ScaleTransform.py +99 -98
- teradataml/data/docs/sqle/docs_17_10/Sessionize.py +113 -113
- teradataml/data/docs/sqle/docs_17_10/SimpleImputeFit.py +116 -116
- teradataml/data/docs/sqle/docs_17_10/SimpleImputeTransform.py +98 -98
- teradataml/data/docs/sqle/docs_17_10/StrApply.py +187 -187
- teradataml/data/docs/sqle/docs_17_10/StringSimilarity.py +145 -145
- teradataml/data/docs/sqle/docs_17_10/Transform.py +105 -104
- teradataml/data/docs/sqle/docs_17_10/UnivariateStatistics.py +141 -141
- teradataml/data/docs/sqle/docs_17_10/Unpack.py +214 -214
- teradataml/data/docs/sqle/docs_17_10/WhichMax.py +83 -83
- teradataml/data/docs/sqle/docs_17_10/WhichMin.py +83 -83
- teradataml/data/docs/sqle/docs_17_10/ZTest.py +155 -155
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +126 -126
- teradataml/data/docs/sqle/docs_17_20/Antiselect.py +82 -82
- teradataml/data/docs/sqle/docs_17_20/Attribution.py +200 -200
- teradataml/data/docs/sqle/docs_17_20/BincodeFit.py +171 -171
- teradataml/data/docs/sqle/docs_17_20/BincodeTransform.py +139 -138
- teradataml/data/docs/sqle/docs_17_20/CategoricalSummary.py +86 -86
- teradataml/data/docs/sqle/docs_17_20/ChiSq.py +90 -90
- teradataml/data/docs/sqle/docs_17_20/ClassificationEvaluator.py +166 -166
- teradataml/data/docs/sqle/docs_17_20/ColumnSummary.py +85 -85
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +243 -243
- teradataml/data/docs/sqle/docs_17_20/ConvertTo.py +113 -113
- teradataml/data/docs/sqle/docs_17_20/DecisionForest.py +279 -279
- teradataml/data/docs/sqle/docs_17_20/DecisionForestPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_20/DecisionTreePredict.py +135 -135
- teradataml/data/docs/sqle/docs_17_20/FTest.py +160 -160
- teradataml/data/docs/sqle/docs_17_20/FillRowId.py +82 -82
- teradataml/data/docs/sqle/docs_17_20/Fit.py +87 -87
- teradataml/data/docs/sqle/docs_17_20/GLM.py +380 -380
- teradataml/data/docs/sqle/docs_17_20/GLMPerSegment.py +414 -414
- teradataml/data/docs/sqle/docs_17_20/GLMPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_20/GLMPredictPerSegment.py +233 -234
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +123 -123
- teradataml/data/docs/sqle/docs_17_20/GetRowsWithMissingValues.py +108 -108
- teradataml/data/docs/sqle/docs_17_20/GetRowsWithoutMissingValues.py +105 -105
- teradataml/data/docs/sqle/docs_17_20/Histogram.py +223 -223
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +204 -204
- teradataml/data/docs/sqle/docs_17_20/KMeansPredict.py +144 -143
- teradataml/data/docs/sqle/docs_17_20/KNN.py +214 -214
- teradataml/data/docs/sqle/docs_17_20/MovingAverage.py +134 -134
- teradataml/data/docs/sqle/docs_17_20/NGramSplitter.py +208 -208
- teradataml/data/docs/sqle/docs_17_20/NPath.py +265 -265
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesPredict.py +116 -116
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierPredict.py +177 -176
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierTrainer.py +126 -126
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineTransform.py +112 -112
- teradataml/data/docs/sqle/docs_17_20/NumApply.py +147 -147
- teradataml/data/docs/sqle/docs_17_20/OneClassSVM.py +307 -307
- teradataml/data/docs/sqle/docs_17_20/OneClassSVMPredict.py +185 -184
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +225 -225
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +115 -115
- teradataml/data/docs/sqle/docs_17_20/OrdinalEncodingFit.py +219 -219
- teradataml/data/docs/sqle/docs_17_20/OrdinalEncodingTransform.py +127 -127
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +189 -189
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterTransform.py +117 -112
- teradataml/data/docs/sqle/docs_17_20/Pack.py +128 -128
- teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesFit.py +111 -111
- teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesTransform.py +112 -111
- teradataml/data/docs/sqle/docs_17_20/QQNorm.py +104 -104
- teradataml/data/docs/sqle/docs_17_20/ROC.py +163 -163
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionFit.py +154 -154
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionMinComponents.py +106 -106
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionTransform.py +120 -120
- teradataml/data/docs/sqle/docs_17_20/RegressionEvaluator.py +211 -211
- teradataml/data/docs/sqle/docs_17_20/RoundColumns.py +108 -108
- teradataml/data/docs/sqle/docs_17_20/RowNormalizeFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_20/RowNormalizeTransform.py +111 -110
- teradataml/data/docs/sqle/docs_17_20/SVM.py +413 -413
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +202 -202
- teradataml/data/docs/sqle/docs_17_20/SVMSparsePredict.py +152 -152
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +197 -197
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +110 -109
- teradataml/data/docs/sqle/docs_17_20/SentimentExtractor.py +206 -206
- teradataml/data/docs/sqle/docs_17_20/Sessionize.py +113 -113
- teradataml/data/docs/sqle/docs_17_20/Silhouette.py +152 -152
- teradataml/data/docs/sqle/docs_17_20/SimpleImputeFit.py +116 -116
- teradataml/data/docs/sqle/docs_17_20/SimpleImputeTransform.py +109 -108
- teradataml/data/docs/sqle/docs_17_20/StrApply.py +187 -187
- teradataml/data/docs/sqle/docs_17_20/StringSimilarity.py +145 -145
- teradataml/data/docs/sqle/docs_17_20/TDDecisionForestPredict.py +207 -207
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +171 -171
- teradataml/data/docs/sqle/docs_17_20/TargetEncodingFit.py +266 -266
- teradataml/data/docs/sqle/docs_17_20/TargetEncodingTransform.py +141 -140
- teradataml/data/docs/sqle/docs_17_20/TextParser.py +172 -172
- teradataml/data/docs/sqle/docs_17_20/TrainTestSplit.py +159 -159
- teradataml/data/docs/sqle/docs_17_20/Transform.py +123 -123
- teradataml/data/docs/sqle/docs_17_20/UnivariateStatistics.py +141 -141
- teradataml/data/docs/sqle/docs_17_20/Unpack.py +214 -214
- teradataml/data/docs/sqle/docs_17_20/VectorDistance.py +168 -168
- teradataml/data/docs/sqle/docs_17_20/WhichMax.py +83 -83
- teradataml/data/docs/sqle/docs_17_20/WhichMin.py +83 -83
- teradataml/data/docs/sqle/docs_17_20/WordEmbeddings.py +236 -236
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +353 -353
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +275 -275
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +155 -155
- teradataml/data/docs/tableoperator/docs_17_00/ReadNOS.py +429 -429
- teradataml/data/docs/tableoperator/docs_17_05/ReadNOS.py +429 -429
- teradataml/data/docs/tableoperator/docs_17_05/WriteNOS.py +347 -347
- teradataml/data/docs/tableoperator/docs_17_10/ReadNOS.py +428 -428
- teradataml/data/docs/tableoperator/docs_17_10/WriteNOS.py +347 -347
- teradataml/data/docs/tableoperator/docs_17_20/ReadNOS.py +439 -439
- teradataml/data/docs/tableoperator/docs_17_20/WriteNOS.py +386 -386
- teradataml/data/docs/uaf/docs_17_20/ACF.py +195 -195
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +369 -369
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +142 -142
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +159 -159
- teradataml/data/docs/uaf/docs_17_20/BinaryMatrixOp.py +247 -247
- teradataml/data/docs/uaf/docs_17_20/BinarySeriesOp.py +252 -252
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +177 -177
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +174 -174
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +226 -226
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +214 -214
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +183 -183
- teradataml/data/docs/uaf/docs_17_20/DFFT.py +203 -203
- teradataml/data/docs/uaf/docs_17_20/DFFT2.py +216 -216
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +215 -215
- teradataml/data/docs/uaf/docs_17_20/DFFTConv.py +191 -191
- teradataml/data/docs/uaf/docs_17_20/DTW.py +179 -179
- teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +144 -144
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +183 -183
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +184 -184
- teradataml/data/docs/uaf/docs_17_20/FitMetrics.py +172 -172
- teradataml/data/docs/uaf/docs_17_20/GenseriesFormula.py +205 -205
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +142 -142
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +258 -258
- teradataml/data/docs/uaf/docs_17_20/IDFFT.py +164 -164
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +198 -198
- teradataml/data/docs/uaf/docs_17_20/InputValidator.py +120 -120
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +155 -155
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +214 -214
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +173 -173
- teradataml/data/docs/uaf/docs_17_20/MInfo.py +133 -133
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +135 -135
- teradataml/data/docs/uaf/docs_17_20/MultivarRegr.py +190 -190
- teradataml/data/docs/uaf/docs_17_20/PACF.py +158 -158
- teradataml/data/docs/uaf/docs_17_20/Portman.py +216 -216
- teradataml/data/docs/uaf/docs_17_20/PowerTransform.py +154 -154
- teradataml/data/docs/uaf/docs_17_20/Resample.py +228 -228
- teradataml/data/docs/uaf/docs_17_20/SInfo.py +122 -122
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +165 -165
- teradataml/data/docs/uaf/docs_17_20/SelectionCriteria.py +173 -173
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +170 -170
- teradataml/data/docs/uaf/docs_17_20/SignifResidmean.py +163 -163
- teradataml/data/docs/uaf/docs_17_20/SimpleExp.py +179 -179
- teradataml/data/docs/uaf/docs_17_20/Smoothma.py +207 -207
- teradataml/data/docs/uaf/docs_17_20/TrackingOp.py +150 -150
- teradataml/data/docs/uaf/docs_17_20/UNDIFF.py +171 -171
- teradataml/data/docs/uaf/docs_17_20/Unnormalize.py +201 -201
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +169 -169
- teradataml/data/dtw_example.json +17 -17
- teradataml/data/dtw_t1.csv +11 -11
- teradataml/data/dtw_t2.csv +4 -4
- teradataml/data/dwt2d_example.json +15 -15
- teradataml/data/dwt_example.json +14 -14
- teradataml/data/dwt_filter_dim.csv +5 -5
- teradataml/data/emission.csv +9 -9
- teradataml/data/emp_table_by_dept.csv +19 -19
- teradataml/data/employee_info.csv +4 -4
- teradataml/data/employee_table.csv +6 -6
- teradataml/data/excluding_event_table.csv +2 -2
- teradataml/data/finance_data.csv +6 -6
- teradataml/data/finance_data2.csv +61 -61
- teradataml/data/finance_data3.csv +93 -93
- teradataml/data/fish.csv +160 -0
- teradataml/data/fm_blood2ageandweight.csv +26 -26
- teradataml/data/fmeasure_example.json +11 -11
- teradataml/data/followers_leaders.csv +10 -10
- teradataml/data/fpgrowth_example.json +12 -12
- teradataml/data/frequentpaths_example.json +29 -29
- teradataml/data/friends.csv +9 -9
- teradataml/data/fs_input.csv +33 -33
- teradataml/data/fs_input1.csv +33 -33
- teradataml/data/genData.csv +513 -513
- teradataml/data/geodataframe_example.json +39 -39
- teradataml/data/glass_types.csv +215 -0
- teradataml/data/glm_admissions_model.csv +12 -12
- teradataml/data/glm_example.json +29 -29
- teradataml/data/glml1l2_example.json +28 -28
- teradataml/data/glml1l2predict_example.json +54 -54
- teradataml/data/glmpredict_example.json +54 -54
- teradataml/data/gq_t1.csv +21 -21
- teradataml/data/hconvolve_complex_right.csv +5 -5
- teradataml/data/hconvolve_complex_rightmulti.csv +5 -5
- teradataml/data/histogram_example.json +11 -11
- teradataml/data/hmmdecoder_example.json +78 -78
- teradataml/data/hmmevaluator_example.json +24 -24
- teradataml/data/hmmsupervised_example.json +10 -10
- teradataml/data/hmmunsupervised_example.json +7 -7
- teradataml/data/house_values.csv +12 -12
- teradataml/data/house_values2.csv +13 -13
- teradataml/data/housing_cat.csv +7 -7
- teradataml/data/housing_data.csv +9 -9
- teradataml/data/housing_test.csv +47 -47
- teradataml/data/housing_test_binary.csv +47 -47
- teradataml/data/housing_train.csv +493 -493
- teradataml/data/housing_train_attribute.csv +4 -4
- teradataml/data/housing_train_binary.csv +437 -437
- teradataml/data/housing_train_parameter.csv +2 -2
- teradataml/data/housing_train_response.csv +493 -493
- teradataml/data/ibm_stock.csv +370 -370
- teradataml/data/ibm_stock1.csv +370 -370
- teradataml/data/identitymatch_example.json +21 -21
- teradataml/data/idf_table.csv +4 -4
- teradataml/data/impressions.csv +101 -101
- teradataml/data/inflation.csv +21 -21
- teradataml/data/initial.csv +3 -3
- teradataml/data/insect_sprays.csv +12 -12
- teradataml/data/insurance.csv +1339 -1339
- teradataml/data/interpolator_example.json +12 -12
- teradataml/data/iris_altinput.csv +481 -481
- teradataml/data/iris_attribute_output.csv +8 -8
- teradataml/data/iris_attribute_test.csv +121 -121
- teradataml/data/iris_attribute_train.csv +481 -481
- teradataml/data/iris_category_expect_predict.csv +31 -31
- teradataml/data/iris_data.csv +151 -0
- teradataml/data/iris_input.csv +151 -151
- teradataml/data/iris_response_train.csv +121 -121
- teradataml/data/iris_test.csv +31 -31
- teradataml/data/iris_train.csv +121 -121
- teradataml/data/join_table1.csv +4 -4
- teradataml/data/join_table2.csv +4 -4
- teradataml/data/jsons/anly_function_name.json +6 -6
- teradataml/data/jsons/byom/dataikupredict.json +147 -147
- teradataml/data/jsons/byom/datarobotpredict.json +146 -146
- teradataml/data/jsons/byom/h2opredict.json +194 -194
- teradataml/data/jsons/byom/onnxpredict.json +186 -186
- teradataml/data/jsons/byom/pmmlpredict.json +146 -146
- teradataml/data/jsons/paired_functions.json +435 -435
- teradataml/data/jsons/sqle/16.20/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/16.20/Attribution.json +249 -249
- teradataml/data/jsons/sqle/16.20/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/16.20/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/16.20/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/16.20/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/16.20/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/16.20/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/16.20/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/16.20/Pack.json +98 -98
- teradataml/data/jsons/sqle/16.20/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/16.20/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/16.20/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/16.20/Unpack.json +166 -166
- teradataml/data/jsons/sqle/16.20/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.00/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.00/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.00/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/17.00/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/17.00/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/17.00/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.00/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.00/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/17.00/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/17.00/Pack.json +98 -98
- teradataml/data/jsons/sqle/17.00/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/17.00/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.00/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.00/Unpack.json +166 -166
- teradataml/data/jsons/sqle/17.00/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.05/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.05/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.05/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/17.05/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/17.05/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/17.05/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.05/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.05/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/17.05/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/17.05/Pack.json +98 -98
- teradataml/data/jsons/sqle/17.05/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/17.05/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.05/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.05/Unpack.json +166 -166
- teradataml/data/jsons/sqle/17.05/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.10/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.10/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.10/DecisionForestPredict.json +185 -185
- teradataml/data/jsons/sqle/17.10/DecisionTreePredict.json +171 -171
- teradataml/data/jsons/sqle/17.10/GLMPredict.json +151 -151
- teradataml/data/jsons/sqle/17.10/MovingAverage.json +368 -368
- teradataml/data/jsons/sqle/17.10/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.10/NaiveBayesPredict.json +149 -149
- teradataml/data/jsons/sqle/17.10/NaiveBayesTextClassifierPredict.json +288 -288
- teradataml/data/jsons/sqle/17.10/Pack.json +133 -133
- teradataml/data/jsons/sqle/17.10/SVMSparsePredict.json +193 -193
- teradataml/data/jsons/sqle/17.10/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.10/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.10/TD_BinCodeFit.json +239 -239
- teradataml/data/jsons/sqle/17.10/TD_BinCodeTransform.json +70 -70
- teradataml/data/jsons/sqle/17.10/TD_CategoricalSummary.json +53 -53
- teradataml/data/jsons/sqle/17.10/TD_Chisq.json +67 -67
- teradataml/data/jsons/sqle/17.10/TD_ColumnSummary.json +53 -53
- teradataml/data/jsons/sqle/17.10/TD_ConvertTo.json +68 -68
- teradataml/data/jsons/sqle/17.10/TD_FTest.json +187 -187
- teradataml/data/jsons/sqle/17.10/TD_FillRowID.json +51 -51
- teradataml/data/jsons/sqle/17.10/TD_FunctionFit.json +46 -46
- teradataml/data/jsons/sqle/17.10/TD_FunctionTransform.json +72 -71
- teradataml/data/jsons/sqle/17.10/TD_GetRowsWithMissingValues.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_GetRowsWithoutMissingValues.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_Histogram.json +132 -132
- teradataml/data/jsons/sqle/17.10/TD_NumApply.json +147 -147
- teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingFit.json +182 -182
- teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingTransform.json +65 -64
- teradataml/data/jsons/sqle/17.10/TD_OutlierFilterFit.json +196 -196
- teradataml/data/jsons/sqle/17.10/TD_OutlierFilterTransform.json +48 -47
- teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesFit.json +114 -114
- teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesTransform.json +72 -71
- teradataml/data/jsons/sqle/17.10/TD_QQNorm.json +111 -111
- teradataml/data/jsons/sqle/17.10/TD_RoundColumns.json +93 -93
- teradataml/data/jsons/sqle/17.10/TD_RowNormalizeFit.json +127 -127
- teradataml/data/jsons/sqle/17.10/TD_RowNormalizeTransform.json +70 -69
- teradataml/data/jsons/sqle/17.10/TD_ScaleFit.json +156 -156
- teradataml/data/jsons/sqle/17.10/TD_ScaleTransform.json +70 -69
- teradataml/data/jsons/sqle/17.10/TD_SimpleImputeFit.json +147 -147
- teradataml/data/jsons/sqle/17.10/TD_SimpleImputeTransform.json +48 -47
- teradataml/data/jsons/sqle/17.10/TD_StrApply.json +240 -240
- teradataml/data/jsons/sqle/17.10/TD_UnivariateStatistics.json +118 -118
- teradataml/data/jsons/sqle/17.10/TD_WhichMax.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_WhichMin.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_ZTest.json +171 -171
- teradataml/data/jsons/sqle/17.10/Unpack.json +188 -188
- teradataml/data/jsons/sqle/17.10/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.20/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.20/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.20/DecisionForestPredict.json +185 -185
- teradataml/data/jsons/sqle/17.20/DecisionTreePredict.json +172 -172
- teradataml/data/jsons/sqle/17.20/GLMPredict.json +151 -151
- teradataml/data/jsons/sqle/17.20/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.20/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.20/NaiveBayesPredict.json +149 -149
- teradataml/data/jsons/sqle/17.20/NaiveBayesTextClassifierPredict.json +287 -287
- teradataml/data/jsons/sqle/17.20/Pack.json +133 -133
- teradataml/data/jsons/sqle/17.20/SVMSparsePredict.json +192 -192
- teradataml/data/jsons/sqle/17.20/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.20/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_BinCodeFit.json +239 -239
- teradataml/data/jsons/sqle/17.20/TD_BinCodeTransform.json +71 -71
- teradataml/data/jsons/sqle/17.20/TD_CategoricalSummary.json +53 -53
- teradataml/data/jsons/sqle/17.20/TD_Chisq.json +67 -67
- teradataml/data/jsons/sqle/17.20/TD_ClassificationEvaluator.json +145 -145
- teradataml/data/jsons/sqle/17.20/TD_ColumnSummary.json +53 -53
- teradataml/data/jsons/sqle/17.20/TD_ColumnTransformer.json +218 -218
- teradataml/data/jsons/sqle/17.20/TD_ConvertTo.json +92 -92
- teradataml/data/jsons/sqle/17.20/TD_DecisionForest.json +259 -259
- teradataml/data/jsons/sqle/17.20/TD_DecisionForestPredict.json +139 -139
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +186 -186
- teradataml/data/jsons/sqle/17.20/TD_FillRowID.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_FunctionFit.json +46 -46
- teradataml/data/jsons/sqle/17.20/TD_FunctionTransform.json +72 -72
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +431 -431
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +125 -125
- teradataml/data/jsons/sqle/17.20/TD_GLMPerSegment.json +411 -411
- teradataml/data/jsons/sqle/17.20/TD_GLMPredictPerSegment.json +146 -146
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +91 -91
- teradataml/data/jsons/sqle/17.20/TD_GetRowsWithMissingValues.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_GetRowsWithoutMissingValues.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_Histogram.json +152 -152
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +211 -211
- teradataml/data/jsons/sqle/17.20/TD_KMeansPredict.json +86 -86
- teradataml/data/jsons/sqle/17.20/TD_KNN.json +262 -262
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesTextClassifierTrainer.json +137 -137
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +101 -101
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineTransform.json +71 -71
- teradataml/data/jsons/sqle/17.20/TD_NumApply.json +147 -147
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +315 -315
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVMPredict.json +123 -123
- teradataml/data/jsons/sqle/17.20/TD_OneHotEncodingFit.json +271 -271
- teradataml/data/jsons/sqle/17.20/TD_OneHotEncodingTransform.json +65 -65
- teradataml/data/jsons/sqle/17.20/TD_OrdinalEncodingFit.json +229 -229
- teradataml/data/jsons/sqle/17.20/TD_OrdinalEncodingTransform.json +75 -75
- teradataml/data/jsons/sqle/17.20/TD_OutlierFilterFit.json +217 -217
- teradataml/data/jsons/sqle/17.20/TD_OutlierFilterTransform.json +48 -48
- teradataml/data/jsons/sqle/17.20/TD_PolynomialFeaturesFit.json +114 -114
- teradataml/data/jsons/sqle/17.20/TD_PolynomialFeaturesTransform.json +72 -72
- teradataml/data/jsons/sqle/17.20/TD_QQNorm.json +111 -111
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +177 -177
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionFit.json +178 -178
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionMinComponents.json +73 -73
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionTransform.json +74 -74
- teradataml/data/jsons/sqle/17.20/TD_RegressionEvaluator.json +137 -137
- teradataml/data/jsons/sqle/17.20/TD_RoundColumns.json +93 -93
- teradataml/data/jsons/sqle/17.20/TD_RowNormalizeFit.json +127 -127
- teradataml/data/jsons/sqle/17.20/TD_RowNormalizeTransform.json +70 -70
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +389 -389
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +124 -124
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +156 -156
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +70 -70
- teradataml/data/jsons/sqle/17.20/TD_SentimentExtractor.json +193 -193
- teradataml/data/jsons/sqle/17.20/TD_Silhouette.json +142 -142
- teradataml/data/jsons/sqle/17.20/TD_SimpleImputeFit.json +147 -147
- teradataml/data/jsons/sqle/17.20/TD_SimpleImputeTransform.json +48 -48
- teradataml/data/jsons/sqle/17.20/TD_StrApply.json +240 -240
- teradataml/data/jsons/sqle/17.20/TD_TargetEncodingFit.json +248 -248
- teradataml/data/jsons/sqle/17.20/TD_TargetEncodingTransform.json +75 -75
- teradataml/data/jsons/sqle/17.20/TD_TextParser.json +192 -192
- teradataml/data/jsons/sqle/17.20/TD_TrainTestSplit.json +142 -142
- teradataml/data/jsons/sqle/17.20/TD_UnivariateStatistics.json +117 -117
- teradataml/data/jsons/sqle/17.20/TD_VectorDistance.json +182 -182
- teradataml/data/jsons/sqle/17.20/TD_WhichMax.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_WhichMin.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_WordEmbeddings.json +241 -241
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +312 -312
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +182 -182
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +170 -170
- teradataml/data/jsons/sqle/17.20/Unpack.json +188 -188
- teradataml/data/jsons/sqle/17.20/nPath.json +269 -269
- teradataml/data/jsons/tableoperator/17.00/read_nos.json +197 -197
- teradataml/data/jsons/tableoperator/17.05/read_nos.json +197 -197
- teradataml/data/jsons/tableoperator/17.05/write_nos.json +194 -194
- teradataml/data/jsons/tableoperator/17.10/read_nos.json +183 -183
- teradataml/data/jsons/tableoperator/17.10/write_nos.json +194 -194
- teradataml/data/jsons/tableoperator/17.20/read_nos.json +182 -182
- teradataml/data/jsons/tableoperator/17.20/write_nos.json +223 -223
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +149 -149
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +409 -409
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +79 -79
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +151 -151
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +109 -109
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +107 -107
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +87 -87
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +106 -106
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +80 -80
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +91 -91
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +136 -136
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +148 -148
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +108 -108
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +109 -109
- teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +86 -86
- teradataml/data/jsons/uaf/17.20/TD_DIFF.json +91 -91
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +116 -116
- teradataml/data/jsons/uaf/17.20/TD_DURBIN_WATSON.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_EXTRACT_RESULTS.json +38 -38
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_GENSERIES4FORMULA.json +84 -84
- teradataml/data/jsons/uaf/17.20/TD_GENSERIES4SINUSOIDS.json +70 -70
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +152 -152
- teradataml/data/jsons/uaf/17.20/TD_HOLT_WINTERS_FORECAST.json +313 -313
- teradataml/data/jsons/uaf/17.20/TD_IDFFT.json +57 -57
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +94 -94
- teradataml/data/jsons/uaf/17.20/TD_INPUTVALIDATOR.json +63 -63
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +181 -181
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +102 -102
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +182 -182
- teradataml/data/jsons/uaf/17.20/TD_MATRIXMULTIPLY.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_MINFO.json +66 -66
- teradataml/data/jsons/uaf/17.20/TD_MULTIVAR_REGR.json +178 -178
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +114 -114
- teradataml/data/jsons/uaf/17.20/TD_PORTMAN.json +118 -118
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +175 -175
- teradataml/data/jsons/uaf/17.20/TD_POWERTRANSFORM.json +97 -97
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +173 -173
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +136 -136
- teradataml/data/jsons/uaf/17.20/TD_SELECTION_CRITERIA.json +89 -89
- teradataml/data/jsons/uaf/17.20/TD_SIGNIF_PERIODICITIES.json +79 -79
- teradataml/data/jsons/uaf/17.20/TD_SIGNIF_RESIDMEAN.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +184 -184
- teradataml/data/jsons/uaf/17.20/TD_SINFO.json +57 -57
- teradataml/data/jsons/uaf/17.20/TD_SMOOTHMA.json +162 -162
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +111 -111
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +95 -95
- teradataml/data/jsons/uaf/17.20/TD_WHITES_GENERAL.json +77 -77
- teradataml/data/kmeans_example.json +17 -17
- teradataml/data/kmeans_us_arrests_data.csv +0 -0
- teradataml/data/knn_example.json +18 -18
- teradataml/data/knnrecommender_example.json +6 -6
- teradataml/data/knnrecommenderpredict_example.json +12 -12
- teradataml/data/lar_example.json +17 -17
- teradataml/data/larpredict_example.json +30 -30
- teradataml/data/lc_new_predictors.csv +5 -5
- teradataml/data/lc_new_reference.csv +9 -9
- teradataml/data/lda_example.json +8 -8
- teradataml/data/ldainference_example.json +14 -14
- teradataml/data/ldatopicsummary_example.json +8 -8
- teradataml/data/levendist_input.csv +13 -13
- teradataml/data/levenshteindistance_example.json +10 -10
- teradataml/data/linreg_example.json +9 -9
- teradataml/data/load_example_data.py +326 -323
- teradataml/data/loan_prediction.csv +295 -295
- teradataml/data/lungcancer.csv +138 -138
- teradataml/data/mappingdata.csv +12 -12
- teradataml/data/milk_timeseries.csv +157 -157
- teradataml/data/min_max_titanic.csv +4 -4
- teradataml/data/minhash_example.json +6 -6
- teradataml/data/ml_ratings.csv +7547 -7547
- teradataml/data/ml_ratings_10.csv +2445 -2445
- teradataml/data/model1_table.csv +5 -5
- teradataml/data/model2_table.csv +5 -5
- teradataml/data/models/iris_db_glm_model.pmml +56 -56
- teradataml/data/models/iris_db_xgb_model.pmml +4471 -4471
- teradataml/data/modularity_example.json +12 -12
- teradataml/data/movavg_example.json +7 -7
- teradataml/data/mtx1.csv +7 -7
- teradataml/data/mtx2.csv +13 -13
- teradataml/data/multi_model_classification.csv +401 -0
- teradataml/data/multi_model_regression.csv +401 -0
- teradataml/data/mvdfft8.csv +9 -9
- teradataml/data/naivebayes_example.json +9 -9
- teradataml/data/naivebayespredict_example.json +19 -19
- teradataml/data/naivebayestextclassifier2_example.json +6 -6
- teradataml/data/naivebayestextclassifier_example.json +8 -8
- teradataml/data/naivebayestextclassifierpredict_example.json +20 -20
- teradataml/data/name_Find_configure.csv +10 -10
- teradataml/data/namedentityfinder_example.json +14 -14
- teradataml/data/namedentityfinderevaluator_example.json +10 -10
- teradataml/data/namedentityfindertrainer_example.json +6 -6
- teradataml/data/nb_iris_input_test.csv +31 -31
- teradataml/data/nb_iris_input_train.csv +121 -121
- teradataml/data/nbp_iris_model.csv +13 -13
- teradataml/data/ner_extractor_text.csv +2 -2
- teradataml/data/ner_sports_test2.csv +29 -29
- teradataml/data/ner_sports_train.csv +501 -501
- teradataml/data/nerevaluator_example.json +5 -5
- teradataml/data/nerextractor_example.json +18 -18
- teradataml/data/nermem_sports_test.csv +17 -17
- teradataml/data/nermem_sports_train.csv +50 -50
- teradataml/data/nertrainer_example.json +6 -6
- teradataml/data/ngrams_example.json +6 -6
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Aggregate Functions using SQLAlchemy.ipynb +1455 -1455
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Arithmetic Functions Using SQLAlchemy.ipynb +1993 -1993
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Bit-Byte Manipulation Functions using SQLAlchemy.ipynb +1492 -1492
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Built-in functions using SQLAlchemy.ipynb +536 -536
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Regular Expressions Using SQLAlchemy.ipynb +570 -570
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage String Functions Using SQLAlchemy.ipynb +2559 -2559
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Window Aggregate Functions using SQLAlchemy.ipynb +2911 -2911
- teradataml/data/notebooks/sqlalchemy/Using Generic SQLAlchemy ClauseElements teradataml DataFrame assign method.ipynb +698 -698
- teradataml/data/notebooks/sqlalchemy/teradataml filtering using SQLAlchemy ClauseElements.ipynb +784 -784
- teradataml/data/npath_example.json +23 -23
- teradataml/data/ntree_example.json +14 -14
- teradataml/data/numeric_strings.csv +4 -4
- teradataml/data/numerics.csv +4 -4
- teradataml/data/ocean_buoy.csv +17 -17
- teradataml/data/ocean_buoy2.csv +17 -17
- teradataml/data/ocean_buoys.csv +27 -27
- teradataml/data/ocean_buoys2.csv +10 -10
- teradataml/data/ocean_buoys_nonpti.csv +28 -28
- teradataml/data/ocean_buoys_seq.csv +29 -29
- teradataml/data/openml_example.json +63 -0
- teradataml/data/optional_event_table.csv +4 -4
- teradataml/data/orders1.csv +11 -11
- teradataml/data/orders1_12.csv +12 -12
- teradataml/data/orders_ex.csv +4 -4
- teradataml/data/pack_example.json +8 -8
- teradataml/data/package_tracking.csv +19 -19
- teradataml/data/package_tracking_pti.csv +18 -18
- teradataml/data/pagerank_example.json +13 -13
- teradataml/data/paragraphs_input.csv +6 -6
- teradataml/data/pathanalyzer_example.json +7 -7
- teradataml/data/pathgenerator_example.json +7 -7
- teradataml/data/phrases.csv +7 -7
- teradataml/data/pivot_example.json +8 -8
- teradataml/data/pivot_input.csv +22 -22
- teradataml/data/playerRating.csv +31 -31
- teradataml/data/postagger_example.json +6 -6
- teradataml/data/posttagger_output.csv +44 -44
- teradataml/data/production_data.csv +16 -16
- teradataml/data/production_data2.csv +7 -7
- teradataml/data/randomsample_example.json +31 -31
- teradataml/data/randomwalksample_example.json +8 -8
- teradataml/data/rank_table.csv +6 -6
- teradataml/data/ref_mobile_data.csv +4 -4
- teradataml/data/ref_mobile_data_dense.csv +2 -2
- teradataml/data/ref_url.csv +17 -17
- teradataml/data/restaurant_reviews.csv +7 -7
- teradataml/data/river_data.csv +145 -145
- teradataml/data/roc_example.json +7 -7
- teradataml/data/roc_input.csv +101 -101
- teradataml/data/rule_inputs.csv +6 -6
- teradataml/data/rule_table.csv +2 -2
- teradataml/data/sales.csv +7 -7
- teradataml/data/sales_transaction.csv +501 -501
- teradataml/data/salesdata.csv +342 -342
- teradataml/data/sample_cities.csv +2 -2
- teradataml/data/sample_shapes.csv +10 -10
- teradataml/data/sample_streets.csv +2 -2
- teradataml/data/sampling_example.json +15 -15
- teradataml/data/sax_example.json +8 -8
- teradataml/data/scale_example.json +23 -23
- teradataml/data/scale_housing.csv +11 -11
- teradataml/data/scale_housing_test.csv +6 -6
- teradataml/data/scale_stat.csv +11 -11
- teradataml/data/scalebypartition_example.json +13 -13
- teradataml/data/scalemap_example.json +13 -13
- teradataml/data/scalesummary_example.json +12 -12
- teradataml/data/score_category.csv +101 -101
- teradataml/data/score_summary.csv +4 -4
- teradataml/data/script_example.json +9 -9
- teradataml/data/scripts/deploy_script.py +65 -0
- teradataml/data/scripts/mapper.R +20 -0
- teradataml/data/scripts/mapper.py +15 -15
- teradataml/data/scripts/mapper_replace.py +15 -15
- teradataml/data/scripts/sklearn/__init__.py +0 -0
- teradataml/data/scripts/sklearn/sklearn_fit.py +175 -0
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +135 -0
- teradataml/data/scripts/sklearn/sklearn_function.template +113 -0
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +158 -0
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +152 -0
- teradataml/data/scripts/sklearn/sklearn_score.py +128 -0
- teradataml/data/scripts/sklearn/sklearn_transform.py +179 -0
- teradataml/data/seeds.csv +10 -10
- teradataml/data/sentenceextractor_example.json +6 -6
- teradataml/data/sentiment_extract_input.csv +11 -11
- teradataml/data/sentiment_train.csv +16 -16
- teradataml/data/sentiment_word.csv +20 -20
- teradataml/data/sentiment_word_input.csv +19 -19
- teradataml/data/sentimentextractor_example.json +24 -24
- teradataml/data/sentimenttrainer_example.json +8 -8
- teradataml/data/sequence_table.csv +10 -10
- teradataml/data/seriessplitter_example.json +7 -7
- teradataml/data/sessionize_example.json +17 -17
- teradataml/data/sessionize_table.csv +116 -116
- teradataml/data/setop_test1.csv +24 -24
- teradataml/data/setop_test2.csv +22 -22
- teradataml/data/soc_nw_edges.csv +10 -10
- teradataml/data/soc_nw_vertices.csv +7 -7
- teradataml/data/souvenir_timeseries.csv +167 -167
- teradataml/data/sparse_iris_attribute.csv +5 -5
- teradataml/data/sparse_iris_test.csv +121 -121
- teradataml/data/sparse_iris_train.csv +601 -601
- teradataml/data/star1.csv +6 -6
- teradataml/data/state_transition.csv +5 -5
- teradataml/data/stock_data.csv +53 -53
- teradataml/data/stock_movement.csv +11 -11
- teradataml/data/stock_vol.csv +76 -76
- teradataml/data/stop_words.csv +8 -8
- teradataml/data/store_sales.csv +37 -37
- teradataml/data/stringsimilarity_example.json +7 -7
- teradataml/data/strsimilarity_input.csv +13 -13
- teradataml/data/students.csv +101 -101
- teradataml/data/svm_iris_input_test.csv +121 -121
- teradataml/data/svm_iris_input_train.csv +481 -481
- teradataml/data/svm_iris_model.csv +7 -7
- teradataml/data/svmdense_example.json +9 -9
- teradataml/data/svmdensepredict_example.json +18 -18
- teradataml/data/svmsparse_example.json +7 -7
- teradataml/data/svmsparsepredict_example.json +13 -13
- teradataml/data/svmsparsesummary_example.json +7 -7
- teradataml/data/target_mobile_data.csv +13 -13
- teradataml/data/target_mobile_data_dense.csv +5 -5
- teradataml/data/templatedata.csv +1201 -1201
- teradataml/data/templates/open_source_ml.json +9 -0
- teradataml/data/teradataml_example.json +73 -1
- teradataml/data/test_classification.csv +101 -0
- teradataml/data/test_loan_prediction.csv +53 -53
- teradataml/data/test_pacf_12.csv +37 -37
- teradataml/data/test_prediction.csv +101 -0
- teradataml/data/test_regression.csv +101 -0
- teradataml/data/test_river2.csv +109 -109
- teradataml/data/text_inputs.csv +6 -6
- teradataml/data/textchunker_example.json +7 -7
- teradataml/data/textclassifier_example.json +6 -6
- teradataml/data/textclassifier_input.csv +7 -7
- teradataml/data/textclassifiertrainer_example.json +6 -6
- teradataml/data/textmorph_example.json +5 -5
- teradataml/data/textparser_example.json +15 -15
- teradataml/data/texttagger_example.json +11 -11
- teradataml/data/texttokenizer_example.json +6 -6
- teradataml/data/texttrainer_input.csv +11 -11
- teradataml/data/tf_example.json +6 -6
- teradataml/data/tfidf_example.json +13 -13
- teradataml/data/tfidf_input1.csv +201 -201
- teradataml/data/tfidf_train.csv +6 -6
- teradataml/data/time_table1.csv +535 -535
- teradataml/data/time_table2.csv +14 -14
- teradataml/data/timeseriesdata.csv +1601 -1601
- teradataml/data/timeseriesdatasetsd4.csv +105 -105
- teradataml/data/titanic.csv +892 -892
- teradataml/data/token_table.csv +696 -696
- teradataml/data/train_multiclass.csv +101 -0
- teradataml/data/train_regression.csv +101 -0
- teradataml/data/train_regression_multiple_labels.csv +101 -0
- teradataml/data/train_tracking.csv +27 -27
- teradataml/data/transformation_table.csv +5 -5
- teradataml/data/transformation_table_new.csv +1 -1
- teradataml/data/tv_spots.csv +16 -16
- teradataml/data/twod_climate_data.csv +117 -117
- teradataml/data/uaf_example.json +475 -475
- teradataml/data/univariatestatistics_example.json +8 -8
- teradataml/data/unpack_example.json +9 -9
- teradataml/data/unpivot_example.json +9 -9
- teradataml/data/unpivot_input.csv +8 -8
- teradataml/data/us_air_pass.csv +36 -36
- teradataml/data/us_population.csv +624 -624
- teradataml/data/us_states_shapes.csv +52 -52
- teradataml/data/varmax_example.json +17 -17
- teradataml/data/vectordistance_example.json +25 -25
- teradataml/data/ville_climatedata.csv +121 -121
- teradataml/data/ville_tempdata.csv +12 -12
- teradataml/data/ville_tempdata1.csv +12 -12
- teradataml/data/ville_temperature.csv +11 -11
- teradataml/data/waveletTable.csv +1605 -1605
- teradataml/data/waveletTable2.csv +1605 -1605
- teradataml/data/weightedmovavg_example.json +8 -8
- teradataml/data/wft_testing.csv +5 -5
- teradataml/data/wine_data.csv +1600 -0
- teradataml/data/word_embed_input_table1.csv +5 -5
- teradataml/data/word_embed_input_table2.csv +4 -4
- teradataml/data/word_embed_model.csv +22 -22
- teradataml/data/words_input.csv +13 -13
- teradataml/data/xconvolve_complex_left.csv +6 -6
- teradataml/data/xconvolve_complex_leftmulti.csv +6 -6
- teradataml/data/xgboost_example.json +35 -35
- teradataml/data/xgboostpredict_example.json +31 -31
- teradataml/dataframe/copy_to.py +1764 -1698
- teradataml/dataframe/data_transfer.py +2753 -2745
- teradataml/dataframe/dataframe.py +17545 -16946
- teradataml/dataframe/dataframe_utils.py +1837 -1740
- teradataml/dataframe/fastload.py +611 -603
- teradataml/dataframe/indexer.py +424 -424
- teradataml/dataframe/setop.py +1179 -1166
- teradataml/dataframe/sql.py +10090 -6432
- teradataml/dataframe/sql_function_parameters.py +439 -388
- teradataml/dataframe/sql_functions.py +652 -652
- teradataml/dataframe/sql_interfaces.py +220 -220
- teradataml/dataframe/vantage_function_types.py +674 -630
- teradataml/dataframe/window.py +693 -692
- teradataml/dbutils/__init__.py +3 -3
- teradataml/dbutils/dbutils.py +1167 -1150
- teradataml/dbutils/filemgr.py +267 -267
- teradataml/gen_ai/__init__.py +2 -2
- teradataml/gen_ai/convAI.py +472 -472
- teradataml/geospatial/__init__.py +3 -3
- teradataml/geospatial/geodataframe.py +1105 -1094
- teradataml/geospatial/geodataframecolumn.py +392 -387
- teradataml/geospatial/geometry_types.py +925 -925
- teradataml/hyperparameter_tuner/__init__.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +3783 -2993
- teradataml/hyperparameter_tuner/utils.py +281 -187
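
`hyperparameter_tuner/optimizer.py` grows by roughly 800 lines in this release, suggesting a substantially expanded tuning API. A plausible shape for that API is sketched below; the `GridSearch` name, its `func`/`params` arguments, and the `best_params_` attribute are assumptions for illustration, not verified against the 20.0.0.0 documentation.

```python
# Hedged sketch of hyperparameter search over an in-database analytic function.
# GridSearch, its argument names, and best_params_ are assumed for illustration.
from teradataml import DataFrame
from teradataml import GridSearch, KMeans          # assumed exports

params = {
    "id_column": "id",
    "target_columns": ["x1", "x2"],
    "num_clusters": (2, 3, 4),                     # candidate values to sweep
}
gs = GridSearch(func=KMeans, params=params)        # assumed constructor
gs.fit(data=DataFrame("kmeans_us_arrests_data"))   # assumed fit signature
print(gs.best_params_)                             # assumed best-result attribute
```
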
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/lib/libaed_0_1.dylib +0 -0
- teradataml/lib/libaed_0_1.so +0 -0
- teradataml/libaed_0_1.dylib +0 -0
- teradataml/libaed_0_1.so +0 -0
- teradataml/opensource/__init__.py +1 -0
- teradataml/opensource/sklearn/__init__.py +1 -0
- teradataml/opensource/sklearn/_class.py +255 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +1668 -0
- teradataml/opensource/sklearn/_wrapper_utils.py +268 -0
- teradataml/opensource/sklearn/constants.py +54 -0
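
`teradataml/opensource/sklearn/` is another all-new package, paired with the new server-side scripts under `teradataml/data/scripts/sklearn/` and the `open_source_ml.json` template listed earlier. The wrapper presumably proxies scikit-learn estimators so they can be driven from teradataml DataFrames and executed in-database. The `td_sklearn` accessor and the call signatures below are assumptions based on the module layout, not on this diff:

```python
# Hedged sketch of the new scikit-learn wrapper; the td_sklearn accessor and
# exact signatures are assumptions inferred from the module layout above.
from teradataml import create_context, DataFrame

create_context(host="<host>", username="<user>", password="<password>")

df = DataFrame("multi_model_classification")       # new example dataset above
X = df.select(["col1", "col2"])                    # assumed feature columns
y = df.select(["label"])                           # assumed label column

from teradataml import td_sklearn as osml          # assumed accessor export
clf = osml.DecisionTreeClassifier(max_depth=3)     # mirrors sklearn's constructor
clf.fit(X, y)                                      # assumed to accept teradataml DataFrames
print(clf.predict(X))                              # assumed to score and return a DataFrame
```
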
- teradataml/options/__init__.py +121 -124
- teradataml/options/configure.py +337 -336
- teradataml/options/display.py +176 -176
- teradataml/plot/__init__.py +2 -2
- teradataml/plot/axis.py +1388 -1388
- teradataml/plot/constants.py +15 -15
- teradataml/plot/figure.py +398 -398
- teradataml/plot/plot.py +760 -760
- teradataml/plot/query_generator.py +83 -83
- teradataml/plot/subplot.py +216 -216
- teradataml/scriptmgmt/UserEnv.py +3788 -3761
- teradataml/scriptmgmt/__init__.py +3 -3
- teradataml/scriptmgmt/lls_utils.py +1616 -1604
- teradataml/series/series.py +532 -532
- teradataml/series/series_utils.py +71 -71
- teradataml/table_operators/Apply.py +949 -917
- teradataml/table_operators/Script.py +1719 -1982
- teradataml/table_operators/TableOperator.py +1207 -1616
- teradataml/table_operators/__init__.py +2 -3
- teradataml/table_operators/apply_query_generator.py +262 -262
- teradataml/table_operators/query_generator.py +507 -507
- teradataml/table_operators/table_operator_query_generator.py +460 -460
- teradataml/table_operators/table_operator_util.py +631 -639
- teradataml/table_operators/templates/dataframe_apply.template +184 -184
- teradataml/table_operators/templates/dataframe_map.template +176 -176
- teradataml/table_operators/templates/script_executor.template +170 -170
- teradataml/utils/dtypes.py +684 -684
- teradataml/utils/internal_buffer.py +84 -84
- teradataml/utils/print_versions.py +205 -205
- teradataml/utils/utils.py +410 -410
- teradataml/utils/validators.py +2239 -2115
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.0.dist-info}/METADATA +270 -41
- teradataml-20.0.0.0.dist-info/RECORD +1038 -0
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.0.dist-info}/WHEEL +1 -1
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.0.dist-info}/zip-safe +1 -1
- teradataml/analytics/mle/AdaBoost.py +0 -651
- teradataml/analytics/mle/AdaBoostPredict.py +0 -564
- teradataml/analytics/mle/Antiselect.py +0 -342
- teradataml/analytics/mle/Arima.py +0 -641
- teradataml/analytics/mle/ArimaPredict.py +0 -477
- teradataml/analytics/mle/Attribution.py +0 -1070
- teradataml/analytics/mle/Betweenness.py +0 -658
- teradataml/analytics/mle/Burst.py +0 -711
- teradataml/analytics/mle/CCM.py +0 -600
- teradataml/analytics/mle/CCMPrepare.py +0 -324
- teradataml/analytics/mle/CFilter.py +0 -460
- teradataml/analytics/mle/ChangePointDetection.py +0 -572
- teradataml/analytics/mle/ChangePointDetectionRT.py +0 -477
- teradataml/analytics/mle/Closeness.py +0 -737
- teradataml/analytics/mle/ConfusionMatrix.py +0 -420
- teradataml/analytics/mle/Correlation.py +0 -477
- teradataml/analytics/mle/Correlation2.py +0 -573
- teradataml/analytics/mle/CoxHazardRatio.py +0 -679
- teradataml/analytics/mle/CoxPH.py +0 -556
- teradataml/analytics/mle/CoxSurvival.py +0 -478
- teradataml/analytics/mle/CumulativeMovAvg.py +0 -363
- teradataml/analytics/mle/DTW.py +0 -623
- teradataml/analytics/mle/DWT.py +0 -564
- teradataml/analytics/mle/DWT2D.py +0 -599
- teradataml/analytics/mle/DecisionForest.py +0 -716
- teradataml/analytics/mle/DecisionForestEvaluator.py +0 -363
- teradataml/analytics/mle/DecisionForestPredict.py +0 -561
- teradataml/analytics/mle/DecisionTree.py +0 -830
- teradataml/analytics/mle/DecisionTreePredict.py +0 -528
- teradataml/analytics/mle/ExponentialMovAvg.py +0 -418
- teradataml/analytics/mle/FMeasure.py +0 -402
- teradataml/analytics/mle/FPGrowth.py +0 -734
- teradataml/analytics/mle/FrequentPaths.py +0 -695
- teradataml/analytics/mle/GLM.py +0 -558
- teradataml/analytics/mle/GLML1L2.py +0 -547
- teradataml/analytics/mle/GLML1L2Predict.py +0 -519
- teradataml/analytics/mle/GLMPredict.py +0 -529
- teradataml/analytics/mle/HMMDecoder.py +0 -945
- teradataml/analytics/mle/HMMEvaluator.py +0 -901
- teradataml/analytics/mle/HMMSupervised.py +0 -521
- teradataml/analytics/mle/HMMUnsupervised.py +0 -572
- teradataml/analytics/mle/Histogram.py +0 -561
- teradataml/analytics/mle/IDWT.py +0 -476
- teradataml/analytics/mle/IDWT2D.py +0 -493
- teradataml/analytics/mle/IdentityMatch.py +0 -763
- teradataml/analytics/mle/Interpolator.py +0 -918
- teradataml/analytics/mle/KMeans.py +0 -485
- teradataml/analytics/mle/KNN.py +0 -627
- teradataml/analytics/mle/KNNRecommender.py +0 -488
- teradataml/analytics/mle/KNNRecommenderPredict.py +0 -581
- teradataml/analytics/mle/LAR.py +0 -439
- teradataml/analytics/mle/LARPredict.py +0 -478
- teradataml/analytics/mle/LDA.py +0 -548
- teradataml/analytics/mle/LDAInference.py +0 -492
- teradataml/analytics/mle/LDATopicSummary.py +0 -464
- teradataml/analytics/mle/LevenshteinDistance.py +0 -450
- teradataml/analytics/mle/LinReg.py +0 -433
- teradataml/analytics/mle/LinRegPredict.py +0 -438
- teradataml/analytics/mle/MinHash.py +0 -544
- teradataml/analytics/mle/Modularity.py +0 -587
- teradataml/analytics/mle/NEREvaluator.py +0 -410
- teradataml/analytics/mle/NERExtractor.py +0 -595
- teradataml/analytics/mle/NERTrainer.py +0 -458
- teradataml/analytics/mle/NGrams.py +0 -570
- teradataml/analytics/mle/NPath.py +0 -634
- teradataml/analytics/mle/NTree.py +0 -549
- teradataml/analytics/mle/NaiveBayes.py +0 -462
- teradataml/analytics/mle/NaiveBayesPredict.py +0 -513
- teradataml/analytics/mle/NaiveBayesTextClassifier.py +0 -607
- teradataml/analytics/mle/NaiveBayesTextClassifier2.py +0 -531
- teradataml/analytics/mle/NaiveBayesTextClassifierPredict.py +0 -799
- teradataml/analytics/mle/NamedEntityFinder.py +0 -529
- teradataml/analytics/mle/NamedEntityFinderEvaluator.py +0 -414
- teradataml/analytics/mle/NamedEntityFinderTrainer.py +0 -396
- teradataml/analytics/mle/POSTagger.py +0 -417
- teradataml/analytics/mle/Pack.py +0 -411
- teradataml/analytics/mle/PageRank.py +0 -535
- teradataml/analytics/mle/PathAnalyzer.py +0 -426
- teradataml/analytics/mle/PathGenerator.py +0 -367
- teradataml/analytics/mle/PathStart.py +0 -464
- teradataml/analytics/mle/PathSummarizer.py +0 -470
- teradataml/analytics/mle/Pivot.py +0 -471
- teradataml/analytics/mle/ROC.py +0 -425
- teradataml/analytics/mle/RandomSample.py +0 -637
- teradataml/analytics/mle/RandomWalkSample.py +0 -490
- teradataml/analytics/mle/SAX.py +0 -779
- teradataml/analytics/mle/SVMDense.py +0 -677
- teradataml/analytics/mle/SVMDensePredict.py +0 -536
- teradataml/analytics/mle/SVMDenseSummary.py +0 -437
- teradataml/analytics/mle/SVMSparse.py +0 -557
- teradataml/analytics/mle/SVMSparsePredict.py +0 -553
- teradataml/analytics/mle/SVMSparseSummary.py +0 -435
- teradataml/analytics/mle/Sampling.py +0 -549
- teradataml/analytics/mle/Scale.py +0 -565
- teradataml/analytics/mle/ScaleByPartition.py +0 -496
- teradataml/analytics/mle/ScaleMap.py +0 -378
- teradataml/analytics/mle/ScaleSummary.py +0 -320
- teradataml/analytics/mle/SentenceExtractor.py +0 -363
- teradataml/analytics/mle/SentimentEvaluator.py +0 -432
- teradataml/analytics/mle/SentimentExtractor.py +0 -578
- teradataml/analytics/mle/SentimentTrainer.py +0 -405
- teradataml/analytics/mle/SeriesSplitter.py +0 -641
- teradataml/analytics/mle/Sessionize.py +0 -475
- teradataml/analytics/mle/SimpleMovAvg.py +0 -397
- teradataml/analytics/mle/StringSimilarity.py +0 -425
- teradataml/analytics/mle/TF.py +0 -389
- teradataml/analytics/mle/TFIDF.py +0 -504
- teradataml/analytics/mle/TextChunker.py +0 -414
- teradataml/analytics/mle/TextClassifier.py +0 -399
- teradataml/analytics/mle/TextClassifierEvaluator.py +0 -413
- teradataml/analytics/mle/TextClassifierTrainer.py +0 -565
- teradataml/analytics/mle/TextMorph.py +0 -494
- teradataml/analytics/mle/TextParser.py +0 -623
- teradataml/analytics/mle/TextTagger.py +0 -530
- teradataml/analytics/mle/TextTokenizer.py +0 -502
- teradataml/analytics/mle/UnivariateStatistics.py +0 -488
- teradataml/analytics/mle/Unpack.py +0 -526
- teradataml/analytics/mle/Unpivot.py +0 -438
- teradataml/analytics/mle/VarMax.py +0 -776
- teradataml/analytics/mle/VectorDistance.py +0 -762
- teradataml/analytics/mle/WeightedMovAvg.py +0 -400
- teradataml/analytics/mle/XGBoost.py +0 -842
- teradataml/analytics/mle/XGBoostPredict.py +0 -627
- teradataml/analytics/mle/__init__.py +0 -123
- teradataml/analytics/mle/json/adaboost_mle.json +0 -135
- teradataml/analytics/mle/json/adaboostpredict_mle.json +0 -85
- teradataml/analytics/mle/json/antiselect_mle.json +0 -34
- teradataml/analytics/mle/json/antiselect_mle_mle.json +0 -34
- teradataml/analytics/mle/json/arima_mle.json +0 -172
- teradataml/analytics/mle/json/arimapredict_mle.json +0 -52
- teradataml/analytics/mle/json/attribution_mle_mle.json +0 -143
- teradataml/analytics/mle/json/betweenness_mle.json +0 -97
- teradataml/analytics/mle/json/burst_mle.json +0 -140
- teradataml/analytics/mle/json/ccm_mle.json +0 -124
- teradataml/analytics/mle/json/ccmprepare_mle.json +0 -14
- teradataml/analytics/mle/json/cfilter_mle.json +0 -93
- teradataml/analytics/mle/json/changepointdetection_mle.json +0 -92
- teradataml/analytics/mle/json/changepointdetectionrt_mle.json +0 -78
- teradataml/analytics/mle/json/closeness_mle.json +0 -104
- teradataml/analytics/mle/json/confusionmatrix_mle.json +0 -79
- teradataml/analytics/mle/json/correlation_mle.json +0 -86
- teradataml/analytics/mle/json/correlationreduce_mle.json +0 -49
- teradataml/analytics/mle/json/coxhazardratio_mle.json +0 -89
- teradataml/analytics/mle/json/coxph_mle.json +0 -98
- teradataml/analytics/mle/json/coxsurvival_mle.json +0 -79
- teradataml/analytics/mle/json/cumulativemovavg_mle.json +0 -34
- teradataml/analytics/mle/json/decisionforest_mle.json +0 -167
- teradataml/analytics/mle/json/decisionforestevaluator_mle.json +0 -33
- teradataml/analytics/mle/json/decisionforestpredict_mle_mle.json +0 -74
- teradataml/analytics/mle/json/decisiontree_mle.json +0 -194
- teradataml/analytics/mle/json/decisiontreepredict_mle_mle.json +0 -86
- teradataml/analytics/mle/json/dtw_mle.json +0 -97
- teradataml/analytics/mle/json/dwt2d_mle.json +0 -116
- teradataml/analytics/mle/json/dwt_mle.json +0 -101
- teradataml/analytics/mle/json/exponentialmovavg_mle.json +0 -55
- teradataml/analytics/mle/json/fmeasure_mle.json +0 -58
- teradataml/analytics/mle/json/fpgrowth_mle.json +0 -159
- teradataml/analytics/mle/json/frequentpaths_mle.json +0 -129
- teradataml/analytics/mle/json/glm_mle.json +0 -111
- teradataml/analytics/mle/json/glml1l2_mle.json +0 -106
- teradataml/analytics/mle/json/glml1l2predict_mle.json +0 -57
- teradataml/analytics/mle/json/glmpredict_mle_mle.json +0 -74
- teradataml/analytics/mle/json/histogram_mle.json +0 -100
- teradataml/analytics/mle/json/hmmdecoder_mle.json +0 -192
- teradataml/analytics/mle/json/hmmevaluator_mle.json +0 -206
- teradataml/analytics/mle/json/hmmsupervised_mle.json +0 -91
- teradataml/analytics/mle/json/hmmunsupervised_mle.json +0 -114
- teradataml/analytics/mle/json/identitymatch_mle.json +0 -88
- teradataml/analytics/mle/json/idwt2d_mle.json +0 -73
- teradataml/analytics/mle/json/idwt_mle.json +0 -66
- teradataml/analytics/mle/json/interpolator_mle.json +0 -151
- teradataml/analytics/mle/json/kmeans_mle.json +0 -97
- teradataml/analytics/mle/json/knn_mle.json +0 -141
- teradataml/analytics/mle/json/knnrecommender_mle.json +0 -111
- teradataml/analytics/mle/json/knnrecommenderpredict_mle.json +0 -75
- teradataml/analytics/mle/json/lar_mle.json +0 -78
- teradataml/analytics/mle/json/larpredict_mle.json +0 -69
- teradataml/analytics/mle/json/lda_mle.json +0 -130
- teradataml/analytics/mle/json/ldainference_mle.json +0 -78
- teradataml/analytics/mle/json/ldatopicsummary_mle.json +0 -64
- teradataml/analytics/mle/json/levenshteindistance_mle.json +0 -92
- teradataml/analytics/mle/json/linreg_mle.json +0 -42
- teradataml/analytics/mle/json/linregpredict_mle.json +0 -56
- teradataml/analytics/mle/json/minhash_mle.json +0 -113
- teradataml/analytics/mle/json/modularity_mle.json +0 -91
- teradataml/analytics/mle/json/naivebayespredict_mle_mle.json +0 -85
- teradataml/analytics/mle/json/naivebayesreduce_mle.json +0 -52
- teradataml/analytics/mle/json/naivebayestextclassifierpredict_mle_mle.json +0 -147
- teradataml/analytics/mle/json/naivebayestextclassifiertrainer2_mle.json +0 -108
- teradataml/analytics/mle/json/naivebayestextclassifiertrainer_mle.json +0 -102
- teradataml/analytics/mle/json/namedentityfinder_mle.json +0 -84
- teradataml/analytics/mle/json/namedentityfinderevaluatorreduce_mle.json +0 -43
- teradataml/analytics/mle/json/namedentityfindertrainer_mle.json +0 -64
- teradataml/analytics/mle/json/nerevaluator_mle.json +0 -54
- teradataml/analytics/mle/json/nerextractor_mle.json +0 -87
- teradataml/analytics/mle/json/nertrainer_mle.json +0 -89
- teradataml/analytics/mle/json/ngrams_mle.json +0 -137
- teradataml/analytics/mle/json/ngramsplitter_mle_mle.json +0 -137
- teradataml/analytics/mle/json/npath@coprocessor_mle.json +0 -73
- teradataml/analytics/mle/json/ntree@coprocessor_mle.json +0 -123
- teradataml/analytics/mle/json/pack_mle.json +0 -58
- teradataml/analytics/mle/json/pack_mle_mle.json +0 -58
- teradataml/analytics/mle/json/pagerank_mle.json +0 -81
- teradataml/analytics/mle/json/pathanalyzer_mle.json +0 -63
- teradataml/analytics/mle/json/pathgenerator_mle.json +0 -40
- teradataml/analytics/mle/json/pathstart_mle.json +0 -62
- teradataml/analytics/mle/json/pathsummarizer_mle.json +0 -72
- teradataml/analytics/mle/json/pivoting_mle.json +0 -71
- teradataml/analytics/mle/json/postagger_mle.json +0 -51
- teradataml/analytics/mle/json/randomsample_mle.json +0 -131
- teradataml/analytics/mle/json/randomwalksample_mle.json +0 -85
- teradataml/analytics/mle/json/roc_mle.json +0 -73
- teradataml/analytics/mle/json/sampling_mle.json +0 -75
- teradataml/analytics/mle/json/sax_mle.json +0 -154
- teradataml/analytics/mle/json/scale_mle.json +0 -93
- teradataml/analytics/mle/json/scalebypartition_mle.json +0 -89
- teradataml/analytics/mle/json/scalemap_mle.json +0 -44
- teradataml/analytics/mle/json/scalesummary_mle.json +0 -14
- teradataml/analytics/mle/json/sentenceextractor_mle.json +0 -41
- teradataml/analytics/mle/json/sentimentevaluator_mle.json +0 -43
- teradataml/analytics/mle/json/sentimentextractor_mle.json +0 -100
- teradataml/analytics/mle/json/sentimenttrainer_mle.json +0 -68
- teradataml/analytics/mle/json/seriessplitter_mle.json +0 -133
- teradataml/analytics/mle/json/sessionize_mle_mle.json +0 -62
- teradataml/analytics/mle/json/simplemovavg_mle.json +0 -48
- teradataml/analytics/mle/json/stringsimilarity_mle.json +0 -50
- teradataml/analytics/mle/json/stringsimilarity_mle_mle.json +0 -50
- teradataml/analytics/mle/json/svmdense_mle.json +0 -165
- teradataml/analytics/mle/json/svmdensepredict_mle.json +0 -95
- teradataml/analytics/mle/json/svmdensesummary_mle.json +0 -58
- teradataml/analytics/mle/json/svmsparse_mle.json +0 -148
- teradataml/analytics/mle/json/svmsparsepredict_mle_mle.json +0 -103
- teradataml/analytics/mle/json/svmsparsesummary_mle.json +0 -57
- teradataml/analytics/mle/json/textchunker_mle.json +0 -40
- teradataml/analytics/mle/json/textclassifier_mle.json +0 -51
- teradataml/analytics/mle/json/textclassifierevaluator_mle.json +0 -43
- teradataml/analytics/mle/json/textclassifiertrainer_mle.json +0 -103
- teradataml/analytics/mle/json/textmorph_mle.json +0 -63
- teradataml/analytics/mle/json/textparser_mle.json +0 -166
- teradataml/analytics/mle/json/texttagger_mle.json +0 -81
- teradataml/analytics/mle/json/texttokenizer_mle.json +0 -91
- teradataml/analytics/mle/json/tf_mle.json +0 -33
- teradataml/analytics/mle/json/tfidf_mle.json +0 -34
- teradataml/analytics/mle/json/univariatestatistics_mle.json +0 -81
- teradataml/analytics/mle/json/unpack_mle.json +0 -91
- teradataml/analytics/mle/json/unpack_mle_mle.json +0 -91
- teradataml/analytics/mle/json/unpivoting_mle.json +0 -63
- teradataml/analytics/mle/json/varmax_mle.json +0 -176
- teradataml/analytics/mle/json/vectordistance_mle.json +0 -179
- teradataml/analytics/mle/json/weightedmovavg_mle.json +0 -48
- teradataml/analytics/mle/json/xgboost_mle.json +0 -178
- teradataml/analytics/mle/json/xgboostpredict_mle.json +0 -104
- teradataml/analytics/sqle/Antiselect.py +0 -321
- teradataml/analytics/sqle/Attribution.py +0 -603
- teradataml/analytics/sqle/DecisionForestPredict.py +0 -408
- teradataml/analytics/sqle/GLMPredict.py +0 -430
- teradataml/analytics/sqle/MovingAverage.py +0 -543
- teradataml/analytics/sqle/NGramSplitter.py +0 -548
- teradataml/analytics/sqle/NPath.py +0 -632
- teradataml/analytics/sqle/NaiveBayesTextClassifierPredict.py +0 -515
- teradataml/analytics/sqle/Pack.py +0 -388
- teradataml/analytics/sqle/SVMSparsePredict.py +0 -464
- teradataml/analytics/sqle/Sessionize.py +0 -390
- teradataml/analytics/sqle/StringSimilarity.py +0 -400
- teradataml/analytics/sqle/Unpack.py +0 -503
- teradataml/analytics/sqle/json/antiselect_sqle.json +0 -21
- teradataml/analytics/sqle/json/attribution_sqle.json +0 -92
- teradataml/analytics/sqle/json/decisionforestpredict_sqle.json +0 -48
- teradataml/analytics/sqle/json/glmpredict_sqle.json +0 -48
- teradataml/analytics/sqle/json/h2opredict_sqle.json +0 -63
- teradataml/analytics/sqle/json/movingaverage_sqle.json +0 -58
- teradataml/analytics/sqle/json/naivebayestextclassifierpredict_sqle.json +0 -76
- teradataml/analytics/sqle/json/ngramsplitter_sqle.json +0 -126
- teradataml/analytics/sqle/json/npath_sqle.json +0 -67
- teradataml/analytics/sqle/json/pack_sqle.json +0 -47
- teradataml/analytics/sqle/json/pmmlpredict_sqle.json +0 -55
- teradataml/analytics/sqle/json/sessionize_sqle.json +0 -43
- teradataml/analytics/sqle/json/stringsimilarity_sqle.json +0 -39
- teradataml/analytics/sqle/json/svmsparsepredict_sqle.json +0 -74
- teradataml/analytics/sqle/json/unpack_sqle.json +0 -80
- teradataml/catalog/model_cataloging.py +0 -980
- teradataml/config/mlengine_alias_definitions_v1.0 +0 -118
- teradataml/config/mlengine_alias_definitions_v1.1 +0 -127
- teradataml/config/mlengine_alias_definitions_v1.3 +0 -129
- teradataml/table_operators/sandbox_container_util.py +0 -643
- teradataml-17.20.0.7.dist-info/RECORD +0 -1280
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.0.dist-info}/top_level.txt +0 -0
teradataml/automl/feature_engineering.py
@@ -0,0 +1,1648 @@
# ##################################################################
#
# Copyright 2024 Teradata. All rights reserved.
# TERADATA CONFIDENTIAL AND TRADE SECRET
#
# Primary Owner: Sweta Shaw
# Email Id: Sweta.Shaw@Teradata.com
#
# Secondary Owner: Akhil Bisht
# Email Id: AKHIL.BISHT@Teradata.com
#
# Version: 1.1
# Function Version: 1.0
# ##################################################################

# Python libraries
import pandas as pd
import time
import json
import re

# Teradata libraries
from teradataml.dataframe.dataframe import DataFrame
from teradataml.dataframe.copy_to import copy_to_sql
from teradataml import Antiselect
from teradataml import BincodeFit, BincodeTransform
from teradataml import ColumnSummary, CategoricalSummary, GetFutileColumns, FillRowId
from teradataml import Fit, Transform
from teradataml import NonLinearCombineFit, NonLinearCombineTransform
from teradataml import NumApply
from teradataml import OneHotEncodingFit, OneHotEncodingTransform
from teradataml import OrdinalEncodingFit, OrdinalEncodingTransform
from teradataml import SimpleImputeFit, SimpleImputeTransform
from teradataml import StrApply
from teradataml import TargetEncodingFit, TargetEncodingTransform
from sqlalchemy import literal_column
from teradatasqlalchemy import INTEGER
from teradataml import display
from teradataml.hyperparameter_tuner.utils import _ProgressBar
from teradataml.utils.validators import _Validators

class _FeatureEngineering:

    def __init__(self,
                 data,
                 target_column,
                 model_list,
                 verbose=0,
                 task_type="Regression",
                 custom_data=None):
        """
        DESCRIPTION:
            Function initializes the data, target column and column datatypes
            for feature engineering.

        PARAMETERS:
            data:
                Required Argument.
                Specifies the input teradataml DataFrame for feature engineering.
                Types: teradataml DataFrame

            target_column:
                Required Argument.
                Specifies the name of the target column in "data".
                Types: str

            model_list:
                Required Argument.
                Specifies the list of models to be used for model training.
                Types: list

            verbose:
                Optional Argument.
                Specifies the detailed execution steps based on verbose level.
                Default Value: 0
                Permitted Values:
                    * 0: prints the progress bar and leaderboard.
                    * 1: prints the execution steps of AutoML.
                    * 2: prints the intermediate data between the execution of each step of AutoML.
                Types: int

            task_type:
                Required Argument.
                Specifies the task type for AutoML, whether to apply regression OR classification
                on the provided dataset.
                Default Value: "Regression"
                Permitted Values: "Regression", "Classification"
                Types: str

            custom_data:
                Optional Argument.
                Specifies the json object containing user customized input.
                Types: json object
        """
        # Instance variables
        self.data = data
        self.target_column = target_column
        self.model_list = model_list
        self.verbose = verbose
        self.task_type = task_type
        self.custom_data = custom_data
        self.excluded_cols = []
        self.data_types = {key: value for key, value in self.data._column_names_and_types}
        self.target_label = None
        self.data_transform_dict = {}
        self.one_hot_obj_count = 0
        self.is_classification_type = lambda: self.task_type.upper() == 'CLASSIFICATION'

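    # Illustrative usage sketch (hypothetical values; this class is internal
    # and is normally driven by AutoML itself rather than called directly):
    #   fe = _FeatureEngineering(data=df, target_column="price",
    #                            model_list=["xgboost", "glm"],
    #                            task_type="Regression")
    #   data, excluded, label, transform_dict = fe.feature_engineering(auto=True)
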
    # Method for doing feature engineering on data -> adding id, removing futile columns, imputation, encoding (one hot)
    def feature_engineering(self,
                            auto=True):
        """
        DESCRIPTION:
            Function performs the following operations:
                1. Removes futile columns/features from the dataset.
                2. Detects the columns with missing values.
                3. Performs imputation on the columns with missing values.
                4. Detects categorical columns and performs encoding on those columns.

        PARAMETERS:
            auto:
                Optional Argument.
                Specifies whether to run AutoML in custom mode or auto mode.
                When set to False, runs in custom mode. Otherwise, by default runs in auto mode.
                Default Value: True
                Types: bool

        RETURNS:
            tuple, where the first element represents a teradataml DataFrame and the
            second element represents the list of columns which do not participate in outlier transformation.
        """
        # Assigning number of base jobs for progress bar.
        base_jobs = 14 if auto else 18

        # Updating model list based on distinct values of the target column for classification type
        if self.is_classification_type():
            if self.data.drop_duplicate(self.target_column).size > 2:
                unsupported_models = ['svm', 'glm']
                self.model_list = [model for model in self.model_list if model not in unsupported_models]

        # Updating number of jobs for progress bar based on number of models.
        jobs = base_jobs + len(self.model_list)
        self.progress_bar = _ProgressBar(jobs=jobs, verbose=2, prefix='Automl Running:')

        self._display_heading(phase=1,
                              progress_bar=self.progress_bar)
        self._display_msg(msg='Feature Engineering started ...',
                          progress_bar=self.progress_bar)

        # Storing target column in data transform dictionary
        self.data_transform_dict['data_target_column'] = self.target_column
        # Storing target column encoding indicator in data transform dictionary
        self.data_transform_dict['target_col_encode_ind'] = False
        # Storing task type in data transform dictionary
        self.data_transform_dict['classification_type'] = self.is_classification_type()
        # Storing params for performing one hot encoding
        self.data_transform_dict['one_hot_encoding_fit_obj'] = {}
        self.data_transform_dict['one_hot_encoding_drop_list'] = []

        if auto:
            self._remove_duplicate_rows()
            self.progress_bar.update()

            self._remove_futile_columns()
            self.progress_bar.update()

            self._handle_date_columns()
            self.progress_bar.update()

            self._handling_missing_value()
            self.progress_bar.update()

            self._impute_missing_value()
            self.progress_bar.update()

            self._encoding_categorical_columns()
            self.progress_bar.update()

        else:
            self._remove_duplicate_rows()
            self.progress_bar.update()

            self._remove_futile_columns()
            self.progress_bar.update()

            self._handle_date_columns()
            self.progress_bar.update()

            self._custom_handling_missing_value()
            self.progress_bar.update()

            self._bin_code_transformation()
            self.progress_bar.update()

            self._string_manipulation()
            self.progress_bar.update()

            self._custom_categorical_encoding()
            self.progress_bar.update()

            self._mathematical_transformation()
            self.progress_bar.update()

            self._non_linear_transformation()
            self.progress_bar.update()

            self._anti_select_columns()
            self.progress_bar.update()

        return self.data, self.excluded_cols, self.target_label, self.data_transform_dict

    def _extract_list(self,
                      list1,
                      list2):
        """
        DESCRIPTION:
            Function to extract the elements of list1 which are not present in list2.

        PARAMETERS:
            list1:
                Required Argument.
                Specifies the first list, from which elements are extracted.
                Types: list

            list2:
                Required Argument.
                Specifies the second list, whose elements are skipped while extracting from the first list.
                Types: list

        RETURNS:
            Returns the extracted elements in the form of a list.

        """
        new_lst = list(set(list1) - set(list2))
        return new_lst

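    # Example (hypothetical values): _extract_list(['a', 'b', 'c'], ['b'])
    # returns ['a', 'c'], though not necessarily in that order, since the
    # set difference does not preserve the ordering of list1.
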
    def _remove_duplicate_rows(self):
        """
        DESCRIPTION:
            Function to handle duplicate rows present in the dataset.

        """
        self._display_msg(msg="\nHandling duplicate records present in dataset ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        start_time = time.time()
        rows = self.data.shape[0]
        self.data = self.data.drop_duplicate()
        if rows != self.data.shape[0]:
            self._display_msg(msg=f'Updated dataset sample after removing {rows-self.data.shape[0]} duplicate records:',
                              data=self.data,
                              progress_bar=self.progress_bar)
            self._display_msg(inline_msg=f"Remaining Rows in the data: {self.data.shape[0]}\n"\
                                         f"Remaining Columns in the data: {self.data.shape[1]}",
                              progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Analysis complete. No action taken.",
                              progress_bar=self.progress_bar)

        end_time = time.time()
        self._display_msg(msg="Total time to handle duplicate records: {:.2f} sec ".format(end_time - start_time),
                          progress_bar=self.progress_bar,
                          show_data=True)

    def _get_distinct_count(self):
        """
        DESCRIPTION:
            Function to get the distinct count for all features and store it in a dictionary for further use.
        """
        # Count of distinct values in each column
        counts = self.data.select(self.data.columns).count(distinct=True)

        # Dict containing the distinct value count of each column
        self.counts_dict = next(counts.itertuples())._asdict()

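    # Illustrative shape of the resulting dictionary (hypothetical columns):
    # {'count_age': 57, 'count_city': 4}; later steps look values up with the
    # f'count_{col}' key pattern.
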
    def _preprocess_data(self):
        """
        DESCRIPTION:
            Function replaces the existing id column or adds a new id column, and
            removes columns with a single value/same values in the dataset.
        """
        # Get distinct value count of each column
        self._get_distinct_count()

        # Columns to be removed if
        # an id column is detected or the count of distinct values = 1
        columns_to_be_removed = [col for col in self.data.columns if col.lower() == 'id' or self.counts_dict[f'count_{col}'] == 1]

        # Removing id column, if it exists
        if len(columns_to_be_removed) != 0:
            self.data = self.data.drop(columns_to_be_removed, axis=1)
            # Storing irrelevant column list in data transform dictionary
            self.data_transform_dict['drop_irrelevent_columns'] = columns_to_be_removed

        # Adding id column
        obj = FillRowId(data=self.data, row_id_column='id')

        self.data = obj.result

    def _remove_futile_columns(self):
        """
        DESCRIPTION:
            Function removes the futile columns from the dataset.
        """
        self._display_msg(msg="\nHandling less significant features from data ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        start_time = time.time()

        self._preprocess_data()

        # Handling string type target column in classification
        # by performing Ordinal Encoding
        if self.data_types[self.target_column] in ['str']:
            self._ordinal_encoding([self.target_column])

        # Detecting categorical columns
        categorical_columns = [col for col, d_type in self.data._column_names_and_types if d_type == 'str']

        # Detecting and removing futile columns, if categorical columns exist
        if len(categorical_columns) != 0:

            obj = CategoricalSummary(data=self.data,
                                     target_columns=categorical_columns)

            gfc_out = GetFutileColumns(data=self.data,
                                       object=obj,
                                       category_summary_column="ColumnName",
                                       threshold_value=0.7)

            # Extracting futile columns
            f_cols = [row[0] for row in gfc_out.result.itertuples()]

            if len(f_cols) == 0:
                self._display_msg(inline_msg="All categorical columns seem to be significant.",
                                  progress_bar=self.progress_bar)
            else:

                self.data = self.data.drop(f_cols, axis=1)
                # Storing futile column list in data transform dictionary
                self.data_transform_dict['futile_columns'] = f_cols
                self._display_msg(msg='Removing Futile columns:',
                                  col_lst=f_cols,
                                  progress_bar=self.progress_bar)
                self._display_msg(msg='Sample of Data after removing Futile columns:',
                                  data=self.data,
                                  progress_bar=self.progress_bar)
        end_time = time.time()
        self._display_msg(msg="Total time to handle less significant features: {:.2f} sec ".format(end_time - start_time),
                          progress_bar=self.progress_bar,
                          show_data=True)

    def _handle_date_component(self,
                               date_component_columns,
                               date_component):

        """
        DESCRIPTION:
            Function to handle newly generated date components, i.e., day, month and year diff.
            Based on their distinct values, binning is done with a predefined prefix.
            The binned components are used further as categorical features.

        PARAMETERS:
            date_component_columns:
                Required Argument.
                Specifies the list of newly generated date component features.
                Types: list

            date_component:
                Required Argument.
                Specifies the identifier for the different components of date features, i.e., D - Days, M - Months and Y - Year diffs.
                Types: str

        """
        # Check for day
        if date_component == "D":
            prefix_value = "Day_"
        # Check for month
        elif date_component == "M":
            prefix_value = "Month_"
        # Check for year diff
        elif date_component == "Y":
            prefix_value = "Year_diff_"

        # Deciding bins based on the distinct values of the date component features.
        for col in date_component_columns:
            data_size = self.data.drop_duplicate(col).size
            if data_size < 4:
                num_bins = data_size
            else:
                num_bins = 4
            # Performing bincode for converting the date component to specific labels
            fit_params = {
                "data": self.data,
                "target_columns": col,
                "method_type": "Equal-Width",
                "nbins": num_bins,
                "label_prefix": prefix_value
            }
            bin_code_fit = BincodeFit(**fit_params)

            fit_params_map = {"D": "day_component_fit_object",
                              "M": "month_component_fit_object",
                              "Y": "year_diff_component_fit_object"}

            # Storing fit object for each date component in data transform dictionary
            self.data_transform_dict[fit_params_map[date_component]][col] = bin_code_fit.output

            accumulate_columns = self._extract_list(self.data.columns, [col])
            transform_params = {
                "data": self.data,
                "object": bin_code_fit.output,
                "accumulate": accumulate_columns,
                "persist": True
            }
            self.data = BincodeTransform(**transform_params).result

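        # Illustrative note: with label_prefix "Day_", the transformed column
        # carries bin labels prefixed with "Day_" (similarly "Month_" and
        # "Year_diff_"); the bin numbering within each label is assigned by
        # BincodeFit itself.
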
    def _fetch_date_component(self,
                              process,
                              regex_str,
                              columns,
                              date_component):

        """
        DESCRIPTION:
            Function to fetch the newly generated date component features and
            pass them ahead for performing binning.

        PARAMETERS:
            process:
                Required Argument.
                Specifies the date component of the date feature which is going to be fetched and handled.
                Types: str

            regex_str:
                Required Argument.
                Specifies the regular expression for identifying newly generated date component features.
                Types: str

            columns:
                Required Argument.
                Specifies the list of newly generated date component features.
                Types: list

            date_component:
                Required Argument.
                Specifies the identifier for the different components of date features, i.e., D - Days, M - Months and Y - Year diffs.
                Types: str

        """
        date_component_columns = [col for col in columns if re.search(regex_str+"$", col)]
        if len(date_component_columns) != 0:
            self._handle_date_component(date_component_columns, date_component)
            self._display_msg(msg="Useful {} features:".format(process),
                              col_lst=date_component_columns,
                              progress_bar=self.progress_bar)
            self._display_msg(msg="Updated dataset sample:",
                              data=self.data,
                              progress_bar=self.progress_bar)

        else:
            self._display_msg("\nNo useful feature found for {} component:".format(process),
                              progress_bar=self.progress_bar)

        return date_component_columns

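    # Example (hypothetical column names): with regex_str "_day_comp", the
    # anchored search re.search("_day_comp$", col) matches
    # "order_date_day_comp" but not "day_comp_flag", so only the generated
    # suffix-named components are picked up.
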
    def _handle_date_columns_helper(self):

        """
        DESCRIPTION:
            Function for dropping irrelevant date features,
            extracting day, month and year components from relevant date features, and
            passing the extracted components ahead for performing binning.
        """

        # Dropping missing values for all date columns
        self._display_msg(msg="\nDropping missing values for:",
                          col_lst=self.date_column_list,
                          progress_bar=self.progress_bar)

        self.data = self.data.dropna(subset=self.date_column_list)

        # Date columns eligible for dropping from the dataset
        drop_date_cols = []

        # Checking for date columns where every value is unique
        for col in self.date_column_list:
            if self.data.drop_duplicate(col).size == self.data.shape[0]:
                drop_date_cols.append(col)

        if len(drop_date_cols) != 0:
            self.data = self.data.drop(drop_date_cols, axis=1)
            # Storing unique date column list in data transform dictionary
            self.data_transform_dict['drop_unique_date_columns'] = drop_date_cols
            self._display_msg(msg='Dropping date features with all unique values:',
                              col_lst=drop_date_cols,
                              progress_bar=self.progress_bar)

        # Updated date columns list
        self.date_column_list = [item for item in self.date_column_list if item not in drop_date_cols]

        # List for storing newly generated date component features
        new_columns = []

        # Extracting day, month and year difference from date columns
        if len(self.date_column_list) != 0:

            component_param = {}
            for col in self.date_column_list:

                day_column = str(col)+"_day_comp"
                month_column = str(col)+"_month_comp"
                year_diff_column = str(col)+"_year_diff_comp"
                new_columns.extend([day_column, month_column, year_diff_column])
                day_query = ("EXTRACT(DAY FROM {0})".format(col))
                month_query = ("EXTRACT(MONTH FROM {0})".format(col))
                year_query = ("EXTRACT(YEAR FROM CURRENT_DATE) - EXTRACT(YEAR FROM {0})".format(col))
                component_param[day_column] = literal_column(day_query, INTEGER())
                component_param[month_column] = literal_column(month_query, INTEGER())
                component_param[year_diff_column] = literal_column(year_query, INTEGER())

            self.data = self.data.assign(**component_param)
            # Storing newly generated date component list along with parameters in data transform dictionary
            self.data_transform_dict['extract_date_comp_col'] = self.date_column_list
            self.data_transform_dict['extract_date_comp_param'] = component_param

            # Dropping date columns as day, month and year have already been extracted into new columns
            self.data = self.data.drop(self.date_column_list, axis=1)
            self._display_msg(msg='List of newly generated features from existing date features:',
                              col_lst=new_columns,
                              progress_bar=self.progress_bar)
            self._display_msg(msg='Updated dataset sample with newly generated date features:',
                              data=self.data,
                              progress_bar=self.progress_bar)

            drop_cols = []

            for col in new_columns:
                distinct_rows = self.data.drop_duplicate(col).size
                if distinct_rows == self.data.shape[0]:
                    drop_cols.append(col)
                    self._display_msg(msg='Dropping features with all unique values:',
                                      col_lst=col,
                                      progress_bar=self.progress_bar)

                elif distinct_rows == 1:
                    drop_cols.append(col)
                    self._display_msg(msg='Dropping features with a single value:',
                                      col_lst=col,
                                      progress_bar=self.progress_bar)

            # Dropping columns from the drop_cols list
            if len(drop_cols) != 0:
                self.data = self.data.drop(drop_cols, axis=1)
                # Storing extracted date component list for drop in data transform dictionary
                self.data_transform_dict['drop_extract_date_columns'] = drop_cols

            # Extracting all newly generated columns
            new_columns = [item for item in new_columns if item not in drop_cols]

            # Storing each date component transformation fit object in data transform dictionary
            self.data_transform_dict = {**self.data_transform_dict,
                                        'day_component_fit_object': {},
                                        'month_component_fit_object': {},
                                        'year_diff_component_fit_object': {}}
            # Grouping date components based on type, i.e., day, month, and year_diff, for performing binning
            if len(new_columns) != 0:
                self.day_columns = self._fetch_date_component("day", "_day_comp", new_columns, "D")
                self.month_columns = self._fetch_date_component("month", "_month_comp", new_columns, "M")
                self.year_diff_columns = self._fetch_date_component("year_diff", "_year_diff_comp", new_columns, "Y")
            else:
                self._display_msg(inline_msg="No useful date component found",
                                  progress_bar=self.progress_bar)

            self._display_msg(msg='Updated dataset sample after handling date features:',
                              data=self.data,
                              progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="No useful date feature found",
                              progress_bar=self.progress_bar)

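    # Illustrative note (hypothetical column name): a date column "order_date"
    # produces "order_date_day_comp", "order_date_month_comp" and
    # "order_date_year_diff_comp", computed in-database as
    # EXTRACT(DAY FROM order_date), EXTRACT(MONTH FROM order_date) and
    # EXTRACT(YEAR FROM CURRENT_DATE) - EXTRACT(YEAR FROM order_date).
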
    def _handle_date_columns(self):

        """
        DESCRIPTION:
            Function to handle date columns in the dataset, if any, and
            perform the relevant transformation by extracting the different components, i.e., Day, Month and Year.
        """
        self._display_msg(msg="\nHandling Date Features ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        start_time = time.time()

        self.date_column_list = [col for col, d_type in self.data._column_names_and_types \
                                 if d_type in ["datetime.date", "datetime.datetime"]]

        if len(self.date_column_list) == 0:
            self._display_msg(inline_msg="Dataset does not contain any feature related to dates.",
                              progress_bar=self.progress_bar)
        else:
            # Storing date column list in data transform dictionary
            self.data_transform_dict['date_columns'] = self.date_column_list
            self._handle_date_columns_helper()

        end_time = time.time()
        self._display_msg(msg="Total time to handle date features: {:.2f} sec\n".format(end_time-start_time),
                          progress_bar=self.progress_bar,
                          show_data=True)

    def _missing_count_per_column(self):
        """
        DESCRIPTION:
            Function finds and returns a dictionary containing the columns
            with missing values.

        RETURNS:
            dict, where keys represent column names and
            values represent the missing value count of the corresponding column.
        """

        # Removing rows with missing target column value
        self.data = self.data.dropna(subset=[self.target_column])

        obj = ColumnSummary(data=self.data,
                            target_columns=self.data.columns,
                            volatile=True)

        cols_miss_val = {}
        # Iterating over each row in the column summary result
        for row in obj.result.itertuples():
            # Checking if the third element of the row (missing values count) is greater than 0
            if row[3] > 0:
                # If so, add an entry to the 'cols_miss_val' dictionary
                # Key: column name (first element of the row)
                # Value: count of missing values in the column (third element of the row)
                cols_miss_val[row[0]] = row[3]

        return cols_miss_val

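    # Illustrative shape of the returned dictionary (hypothetical values):
    # {'age': 12, 'city': 3} means 'age' has 12 missing values and 'city' has 3.
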
    def _handling_missing_value(self):
        """
        DESCRIPTION:
            Function detects the missing values in each feature of the dataset,
            then performs one of these operations based on the condition met:
                1. deleting rows of the column/feature
                2. dropping the column from the dataset
        """
        self._display_msg(msg="\nChecking Missing values in dataset ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        start_time = time.time()

        # Flag for missing values
        msg_val_found = 0

        # Number of rows
        d_size = self.data.shape[0]

        delete_rows = []
        drop_cols = []
        self.imputation_cols = {}

        cols_miss_val = self._missing_count_per_column()

        if len(cols_miss_val) != 0:
            self._display_msg(msg="Columns with their missing values:",
                              col_lst=cols_miss_val,
                              progress_bar=self.progress_bar)

            # Get distinct value count of each column
            self._get_distinct_count()

            # Iterating over columns with missing values
            for col, val in cols_miss_val.items():

                # Drop column, if count of missing values > 60%
                if val > .6*d_size:
                    drop_cols.append(col)
                    continue

                if self.data_types[col] in ['float', 'int']:
                    corr_df = self.data[col].corr(self.data[self.target_column])
                    corr_val = self.data.assign(True, corr_=corr_df)
                    related = next(corr_val.itertuples())[0]

                    # Delete rows, if count of missing values < 2% and
                    # correlation between target column and numeric column <= .25
                    if val < .02*d_size and related <= .25:
                        delete_rows.append(col)
                        continue

                elif self.data_types[col] in ['str']:
                    # Delete rows, if count of missing values < 4%
                    if val < .04*d_size:
                        delete_rows.append(col)
                        continue
                    # Drop column, if unique count of column > 75%
                    elif self.counts_dict[f'count_{col}'] > .75*(d_size-val):
                        drop_cols.append(col)
                        continue

                # Remaining column for imputation
                self.imputation_cols[col] = val
                # Storing columns with missing values for imputation in data transform dictionary
                self.data_transform_dict['imputation_columns'] = self.imputation_cols

            if len(delete_rows) != 0:
                self.data = self.data.dropna(subset=delete_rows)
                msg_val_found = 1
                self._display_msg(msg='Deleting rows of these columns for handling missing values:',
                                  col_lst=delete_rows,
                                  progress_bar=self.progress_bar)

            if len(drop_cols) != 0:
                self.data = self.data.drop(drop_cols, axis=1)
                msg_val_found = 1
                # Storing columns with missing values for drop in data transform dictionary
                self.data_transform_dict['drop_missing_columns'] = drop_cols
                self._display_msg(msg='Dropping these columns for handling missing values:',
                                  col_lst=drop_cols,
                                  progress_bar=self.progress_bar)

        if len(self.imputation_cols) == 0 and msg_val_found == 0:
            self._display_msg(inline_msg="No Missing Values Detected.",
                              progress_bar=self.progress_bar)

        end_time = time.time()
        self._display_msg(msg="Total time to find missing values in data: {:.2f} sec ".format(end_time - start_time),
                          progress_bar=self.progress_bar,
                          show_data=True)

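    # Worked example of the rules above (hypothetical 1000-row dataset):
    # a column with 700 missing values (>60%) is dropped outright; a numeric
    # column with 15 missing values (<2%) and correlation <= .25 with the
    # target has its rows deleted; everything else that still has missing
    # values is queued in imputation_cols for _impute_missing_value().
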
    def _impute_helper(self):
        """
        DESCRIPTION:
            Function decides the imputation method [mean/median/mode] for columns with missing values
            on the basis of the skewness of the column in the dataset.

        RETURNS:
            A tuple containing,
                col_stat (names of columns with missing values)
                stat (imputation method for the respective columns)
        """
        col_stat = []
        stat = []

        # Converting the output of skew() into a dictionary with key as column name and value as skewness value
        df = self.data.skew()
        skew_data = next(df.itertuples())._asdict()

        # Iterating over columns with missing values
        for key, val in self.imputation_cols.items():

            col_stat.append(key)
            if self.data_types[key] in ['float', 'int']:
                val = skew_data[f'skew_{key}']
                # Median imputation method, if abs(skewness value) > 1
                if abs(val) > 1:
                    stat.append('median')
                # Mean imputation method, if abs(skewness value) <= 1
                else:
                    stat.append('mean')
            # Mode imputation method, if categorical column
            else:
                stat.append('mode')

        self._display_msg(msg="Columns with their imputation method:",
                          col_lst=dict(zip(col_stat, stat)),
                          progress_bar=self.progress_bar)

        return col_stat, stat

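    # Example: a numeric column with skewness 2.3 gets 'median', one with
    # skewness 0.4 gets 'mean', and a categorical column always gets 'mode'.
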
    def _impute_missing_value(self):
        """
        DESCRIPTION:
            Function performs the imputation on columns/features with missing values in the dataset.
        """

        start_time = time.time()
        self._display_msg(msg="\nImputing Missing Values ...",
                          progress_bar=self.progress_bar,
                          show_data=True)

        if len(self.imputation_cols) != 0:

            # List of columns and imputation methods
            col_stat, stat = self._impute_helper()

            fit_obj = SimpleImputeFit(data=self.data,
                                      stats_columns=col_stat,
                                      stats=stat,
                                      volatile=True)

            # Storing fit object for imputation in data transform dictionary
            self.data_transform_dict['imputation_fit_object'] = fit_obj.output
            sm = SimpleImputeTransform(data=self.data,
                                       object=fit_obj,
                                       volatile=True)

            self.data = sm.result
            self._display_msg(msg="Sample of Data after Imputation:",
                              data=self.data,
                              progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="No imputation is required.",
                              progress_bar=self.progress_bar)

        end_time = time.time()
        self._display_msg(msg="Time taken to perform imputation: {:.2f} sec ".format(end_time - start_time),
                          progress_bar=self.progress_bar,
                          show_data=True)

    def _custom_handling_missing_value(self):
        """
        DESCRIPTION:
            Function to perform customized missing value handling on features based on user input.

        """
        # Fetching user input for performing missing value handling
        missing_handling_input = self.custom_data.get("MissingValueHandlingIndicator", False)

        if missing_handling_input:
            # Fetching parameters required for performing missing value handling
            missing_handling_param = self.custom_data.get("MissingValueHandlingParam", None)
            if missing_handling_param:
                # Fetching user input for the different missing value handling methods
                drop_col_ind = missing_handling_param.get("DroppingColumnIndicator", False)
                drop_row_ind = missing_handling_param.get("DroppingRowIndicator", False)
                impute_ind = missing_handling_param.get("ImputeMissingIndicator", False)
                # Checking whether all method indicators in the user input are false
                if not any([drop_col_ind, drop_row_ind, impute_ind]):
                    self._display_msg(inline_msg="No method information provided for performing customized missing value handling. \
                                      AutoML will proceed with default missing value handling method.",
                                      progress_bar=self.progress_bar)

                else:
                    # Checking user input for dropping columns with missing values
                    if drop_col_ind:
                        drop_col_list = missing_handling_param.get("DroppingColumnList", [])
                        # Storing custom columns with missing values for drop in data transform dictionary
                        self.data_transform_dict["custom_drop_missing_columns"] = drop_col_list
                        if len(drop_col_list):
                            # Checking whether the columns are present in the dataset
                            _Validators._validate_dataframe_has_argument_columns(drop_col_list, "DroppingColumnList", self.data, "df")

                            self._display_msg(msg="\nDropping these columns for handling customized missing value:",
                                              col_lst=drop_col_list,
                                              progress_bar=self.progress_bar)
                            self.data = self.data.drop(drop_col_list, axis=1)
                        else:
                            self._display_msg(inline_msg="No information provided for dropping columns containing missing values.",
                                              progress_bar=self.progress_bar)

                    # Checking user input for dropping rows with missing values
                    if drop_row_ind:
                        drop_row_list = missing_handling_param.get("DroppingRowList", [])
                        if len(drop_row_list):
                            # Checking whether the columns are present in the dataset
                            _Validators._validate_dataframe_has_argument_columns(drop_row_list, "DroppingRowList", self.data, "df")

                            self._display_msg(msg="Dropping missing rows in these columns for handling customized missing value:",
                                              col_lst=drop_row_list,
                                              progress_bar=self.progress_bar)
                            self.data = self.data.dropna(subset=drop_row_list)
                        else:
                            self._display_msg(inline_msg="No information provided for dropping rows containing missing values.",
                                              progress_bar=self.progress_bar)
                    # Checking user input for missing value imputation
                    if impute_ind:
                        stat_list = missing_handling_param.get("StatImputeList", None)
                        stat_method = missing_handling_param.get("StatImputeMethod", None)
                        literal_list = missing_handling_param.get("LiteralImputeList", None)
                        literal_value = missing_handling_param.get("LiteralImputeValue", None)

                        # Checking whether the columns are present in the dataset
                        _Validators._validate_dataframe_has_argument_columns(stat_list, "StatImputeList", self.data, "df")

                        _Validators._validate_dataframe_has_argument_columns(literal_list, "LiteralImputeList", self.data, "df")

                        # Creating fit params
                        fit_param = {
                            "data": self.data,
                            "stats_columns": stat_list,
                            "stats": stat_method,
                            "literals_columns": literal_list,
                            "literals": literal_value
                        }
                        # Fitting on the dataset
                        fit_obj = SimpleImputeFit(**fit_param)
                        # Storing custom fit object for imputation in data transform dictionary
                        self.data_transform_dict["custom_imputation_ind"] = True
                        self.data_transform_dict["custom_imputation_fit_object"] = fit_obj.output
                        # Creating transform params
                        transform_param = {
                            "data": self.data,
                            "object": fit_obj.output,
                            "persist": True
                        }
                        # Updating dataset with transform result
                        self.data = SimpleImputeTransform(**transform_param).result
                        self._display_msg(msg="Updated dataset sample after performing customized missing value imputation:",
                                          data=self.data,
                                          progress_bar=self.progress_bar)
            else:
                self._display_msg(inline_msg="No information provided for performing customized missing value handling. \
                                  AutoML will proceed with default missing value handling method.",
                                  progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Proceeding with default option for missing value imputation.",
                              progress_bar=self.progress_bar)

        # Proceeding with default method for handling remaining missing values
        self._display_msg(inline_msg="Proceeding with default option for handling remaining missing values.",
                          progress_bar=self.progress_bar)
        self._handling_missing_value()
        self._impute_missing_value()

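    # Illustrative shape of the custom_data consumed above (hypothetical
    # column names and values; only the keys are taken from the code):
    #   "MissingValueHandlingIndicator": true,
    #   "MissingValueHandlingParam": {
    #       "DroppingColumnIndicator": true, "DroppingColumnList": ["comments"],
    #       "DroppingRowIndicator": true, "DroppingRowList": ["age"],
    #       "ImputeMissingIndicator": true,
    #       "StatImputeList": ["income"], "StatImputeMethod": ["median"],
    #       "LiteralImputeList": ["city"], "LiteralImputeValue": ["unknown"]
    #   }
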
|
918
|
+
    def _bin_code_transformation(self):
        """
        DESCRIPTION:
            Function to perform customized binning on features based on user input.

        """
        # Fetching user input for performing bin code transformation.
        bin_code_input = self.custom_data.get("BincodeIndicator", False)

        if bin_code_input:
            # Storing custom bin code transformation indicator in data transform dictionary
            self.data_transform_dict['custom_bincode_ind'] = True
            # Fetching list required for performing transformation.
            extracted_col = self.custom_data.get("BincodeParam", None)
            if not extracted_col:
                self._display_msg(inline_msg="BincodeParam is empty. Skipping customized bincode transformation.",
                                  progress_bar=self.progress_bar)
            else:
                # Creating lists for storing column and binning information for performing transformation
                equal_width_bin_list = []
                equal_width_bin_columns = []
                var_width_bin_list = []
                var_width_bin_columns = []

                # Checking whether the specified columns are present in the dataset
                _Validators._validate_dataframe_has_argument_columns(list(extracted_col.keys()), "BincodeParam", self.data, "df")

                for col, transform_val in extracted_col.items():
                    # Fetching type of binning to be performed
                    bin_trans_type = transform_val["Type"]
                    # Fetching number of bins to be created
                    num_bin = transform_val["NumOfBins"]
                    # Checking for bin types and adding details into lists for binning
                    if bin_trans_type == "Equal-Width":
                        bins = num_bin
                        equal_width_bin_list.append(bins)
                        equal_width_bin_columns.append(col)
                    elif bin_trans_type == "Variable-Width":
                        var_width_bin_columns.append(col)
                        bins = num_bin
                        for i in range(1, bins + 1):
                            # Forming bin name as per expected input
                            temp = "Bin_" + str(i)
                            # Fetching required details for variable-width binning
                            minval = transform_val[temp]["min_value"]
                            maxval = transform_val[temp]["max_value"]
                            label = transform_val[temp]["label"]
                            # Appending information of each bin
                            var_width_bin_list.append({"ColumnName": col, "MinValue": minval, "MaxValue": maxval, "Label": label})
                # Checking column list for performing binning with Equal-Width.
                if len(equal_width_bin_columns) != 0:
                    # Adding fit parameters for performing binning with Equal-Width.
                    # NOTE: "nbins" holds the bin count read for the last column processed in the loop above.
                    fit_params = {
                        "data": self.data,
                        "target_columns": equal_width_bin_columns,
                        "method_type": "Equal-Width",
                        "nbins": bins
                    }
                    eql_bin_code_fit = BincodeFit(**fit_params)
                    # Storing fit object and column list for Equal-Width binning in data transform dictionary
                    self.data_transform_dict['custom_eql_bincode_col'] = equal_width_bin_columns
                    self.data_transform_dict['custom_eql_bincode_fit_object'] = eql_bin_code_fit.output
                    # Extracting accumulate columns
                    accumulate_columns = self._extract_list(self.data.columns, equal_width_bin_columns)
                    # Adding transform parameters for performing binning with Equal-Width.
                    eql_transform_params = {
                        "data": self.data,
                        "object": eql_bin_code_fit.output,
                        "accumulate": accumulate_columns,
                        "persist": True
                    }
                    self.data = BincodeTransform(**eql_transform_params).result
                    self._display_msg(msg="\nUpdated dataset sample after performing Equal-Width binning:",
                                      data=self.data,
                                      progress_bar=self.progress_bar)
                else:
                    self._display_msg(inline_msg="No information provided for Equal-Width Transformation.",
                                      progress_bar=self.progress_bar)

                if len(var_width_bin_columns) != 0:
                    # Creating a pandas DataFrame and then a teradataml DataFrame for storing binning information
                    var_bin_table = pd.DataFrame(var_width_bin_list, columns=["ColumnName", "MinValue", "MaxValue", "Label"])
                    self._display_msg(msg="Variable-Width binning information:",
                                      data=var_bin_table,
                                      progress_bar=self.progress_bar)
                    copy_to_sql(df=var_bin_table, table_name="automl_bincode_var_fit", temporary=True)
                    var_fit_input = DataFrame.from_table("automl_bincode_var_fit")
                    fit_params = {
                        "data": self.data,
                        "fit_data": var_fit_input,
                        "fit_data_order_column": ["MinValue", "MaxValue"],
                        "target_columns": var_width_bin_columns,
                        "minvalue_column": "MinValue",
                        "maxvalue_column": "MaxValue",
                        "label_column": "Label",
                        "method_type": "Variable-Width",
                        "label_prefix": "label_prefix"
                    }
                    var_bin_code_fit = BincodeFit(**fit_params)
                    # Storing fit object and column list for Variable-Width binning in data transform dictionary
                    self.data_transform_dict['custom_var_bincode_col'] = var_width_bin_columns
                    self.data_transform_dict['custom_var_bincode_fit_object'] = var_bin_code_fit.output
                    accumulate_columns = self._extract_list(self.data.columns, var_width_bin_columns)
                    var_transform_params = {
                        "data": self.data,
                        "object": var_bin_code_fit.output,
                        "object_order_column": "TD_MinValue_BINFIT",
                        "accumulate": accumulate_columns,
                        "persist": True
                    }
                    self.data = BincodeTransform(**var_transform_params).result
                    self._display_msg(msg="Updated dataset sample after performing Variable-Width binning:",
                                      data=self.data,
                                      progress_bar=self.progress_bar)
                else:
                    self._display_msg(inline_msg="No information provided for Variable-Width Transformation.",
                                      progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Skipping customized bincode transformation.",
                              progress_bar=self.progress_bar)
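A minimal sketch (not part of the package source) of the BincodeParam structure the method above walks; it is inferred from the keys read in the loop ("Type", "NumOfBins", and "Bin_<i>" entries with "min_value"/"max_value"/"label"), and the column names and bounds are illustrative:

# Illustrative only: structure inferred from the key lookups above.
bincode_param = {
    "age": {
        "Type": "Equal-Width",
        "NumOfBins": 4
    },
    "income": {
        "Type": "Variable-Width",
        "NumOfBins": 2,
        "Bin_1": {"min_value": 0, "max_value": 50000, "label": "low"},
        "Bin_2": {"min_value": 50001, "max_value": 200000, "label": "high"}
    }
}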
    def _string_manipulation(self):
        """
        DESCRIPTION:
            Function to perform customized string manipulations on categorical features based on user input.

        """
        # Fetching user input for performing string manipulation.
        str_mnpl_input = self.custom_data.get("StringManipulationIndicator", False)
        # Checking user input for string manipulation on categorical features.
        if str_mnpl_input:
            # Storing custom string manipulation indicator in data transform dictionary
            self.data_transform_dict['custom_string_manipulation_ind'] = True
            # Fetching list required for performing operation.
            extracted_col = self.custom_data.get("StringManipulationParam", None)
            if not extracted_col:
                self._display_msg(inline_msg="No information provided for performing string manipulation.",
                                  progress_bar=self.progress_bar)
            else:
                # Checking whether the specified columns are present in the dataset
                _Validators._validate_dataframe_has_argument_columns(list(extracted_col.keys()), "StringManipulationParam", self.data, "df")

                for target_col, transform_val in extracted_col.items():
                    self.data = self._str_method_mapping(target_col, transform_val)
                # Storing custom string manipulation parameters in data transform dictionary
                self.data_transform_dict['custom_string_manipulation_param'] = extracted_col

                self._display_msg(msg="Updated dataset sample after performing string manipulation:",
                                  data=self.data,
                                  progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Skipping customized string manipulation.")
    def _str_method_mapping(self,
                            target_col,
                            transform_val):
        """
        DESCRIPTION:
            Function to map customized parameters according to the passed method and
            perform string manipulation on categorical features.

        PARAMETERS:
            target_col:
                Required Argument.
                Specifies the feature on which string manipulation is applied.
                Types: str

            transform_val:
                Required Argument.
                Specifies the parameters required for applying string manipulation.
                Types: dict

        RETURNS:
            DataFrame containing transformed data after applying string manipulation.

        """
        # Creating list of features to accumulate while performing string manipulation on the target feature
        accumulate_columns = self._extract_list(self.data.columns, [target_col])

        # Fetching required parameters from json object
        string_operation = transform_val["StringOperation"]

        # Storing general parameters for performing string transformation
        fit_params = {
            "data": self.data,
            "target_columns": target_col,
            "string_operation": string_operation,
            "accumulate": accumulate_columns,
            "inplace": True,
            "persist": True
        }
        # Adding additional parameters based on string operation type
        if string_operation in ["StringCon", "StringTrim"]:
            string_argument = transform_val["String"]
            fit_params = {**fit_params,
                          "string": string_argument}
        elif string_operation == "StringPad":
            string_argument = transform_val["String"]
            string_length = transform_val["StringLength"]
            fit_params = {**fit_params,
                          "string": string_argument,
                          "string_length": string_length}
        elif string_operation == "Substring":
            string_index = transform_val["StartIndex"]
            string_length = transform_val["StringLength"]
            fit_params = {**fit_params,
                          "start_index": string_index,
                          "string_length": string_length}

        # Returning dataset after performing string manipulation
        return StrApply(**fit_params).result
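A minimal sketch (not part of the package source) of a StringManipulationParam block that exercises the branches above; the operation names and per-operation keys mirror the lookups in _str_method_mapping, while the column names and values are illustrative:

# Illustrative only: operation names and keys mirror the lookups above.
string_manipulation_param = {
    "name": {"StringOperation": "StringTrim", "String": " "},
    "code": {"StringOperation": "StringPad", "String": "0", "StringLength": 8},
    "zip": {"StringOperation": "Substring", "StartIndex": 1, "StringLength": 5}
}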
    def _one_hot_encoding(self,
                          one_hot_columns,
                          unique_counts):
        """
        DESCRIPTION:
            Function performs one hot encoding on categorical columns/features in the dataset.

        PARAMETERS:
            one_hot_columns:
                Required Argument.
                Specifies the categorical columns for which one hot encoding will be performed.
                Types: str or list of strings (str)

            unique_counts:
                Required Argument.
                Specifies the unique value counts of the categorical columns.
                Types: int or list of integers (int)

        """
        # The TD OneHotEncoding function adds an extra "<column>_other" column for each
        # encoded column, so initializing this list to remove those extra columns afterwards
        drop_lst = [ele + "_other" for ele in one_hot_columns]
        # Adding fit parameters for performing encoding
        fit_params = {
            "data": self.data,
            "approach": "auto",
            "is_input_dense": True,
            "target_column": one_hot_columns,
            "category_counts": unique_counts,
            "other_column": "other"
        }
        # Performing one hot encoding fit on target columns
        fit_obj = OneHotEncodingFit(**fit_params)
        # Storing indicator, fit object and column drop list for one hot encoding in data transform dictionary
        self.data_transform_dict['one_hot_encoding_ind'] = True
        self.data_transform_dict['one_hot_encoding_fit_obj'].update({self.one_hot_obj_count: fit_obj.result})
        self.data_transform_dict['one_hot_encoding_drop_list'].extend(drop_lst)
        self.one_hot_obj_count = self.one_hot_obj_count + 1
        # Adding transform parameters for performing encoding
        transform_params = {
            "data": self.data,
            "object": fit_obj.result,
            "is_input_dense": True,
            "persist": True
        }
        # Performing one hot encoding transformation
        transform_obj = OneHotEncodingTransform(**transform_params)
        self.data = transform_obj.result.drop(drop_lst, axis=1)
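Because each encoded column also yields a "<column>_other" bucket, the drop list built above is a simple comprehension; a standalone illustration (not part of the package source) with made-up column names:

# Illustrative only: made-up column names.
one_hot_columns = ["city", "sex"]
drop_lst = [ele + "_other" for ele in one_hot_columns]
print(drop_lst)  # ['city_other', 'sex_other']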
    def _ordinal_encoding(self,
                          ordinal_columns):
        """
        DESCRIPTION:
            Function performs ordinal encoding on categorical columns or features in the dataset.

        PARAMETERS:
            ordinal_columns:
                Required Argument.
                Specifies the categorical columns for which ordinal encoding will be performed.
                Types: str or list of strings (str)
        """
        # Adding fit parameters for performing encoding
        fit_params = {
            "data": self.data,
            "target_column": ordinal_columns,
            "volatile": True
        }
        # Performing ordinal encoding fit on target columns
        ord_fit_obj = OrdinalEncodingFit(**fit_params)
        # Storing fit object and column list for ordinal encoding in data transform dictionary
        if ordinal_columns[0] != self.target_column:
            self.data_transform_dict["custom_ord_encoding_fit_obj"] = ord_fit_obj.result
            self.data_transform_dict['custom_ord_encoding_col'] = ordinal_columns
        else:
            self.data_transform_dict['target_col_encode_ind'] = True
            self.data_transform_dict['target_col_ord_encoding_fit_obj'] = ord_fit_obj.result
        # Extracting accumulate columns
        accumulate_columns = self._extract_list(self.data.columns, ordinal_columns)
        # Adding transform parameters for performing encoding
        transform_params = {
            "data": self.data,
            "object": ord_fit_obj.result,
            "accumulate": accumulate_columns,
            "persist": True
        }
        # Performing ordinal encoding transformation
        self.data = OrdinalEncodingTransform(**transform_params).result

        # Storing the fit object when the target column itself was encoded
        if len(ordinal_columns) == 1 and ordinal_columns[0] == self.target_column:
            self.target_label = ord_fit_obj
    def _target_encoding(self,
                         target_encoding_list):
        """
        DESCRIPTION:
            Function performs target encoding on categorical columns/features in the dataset.

        PARAMETERS:
            target_encoding_list:
                Required Argument.
                Specifies the categorical columns for which target encoding will be performed.
                Types: str or list of strings (str)
        """
        # Fetching all columns on which target encoding will be performed.
        target_columns = list(target_encoding_list.keys())
        # Checking whether the specified columns are present in the dataset
        _Validators._validate_dataframe_has_argument_columns(target_columns, "TargetEncodingList", self.data, "df")
        # Finding distinct values and counts for columns.
        cat_sum = CategoricalSummary(data=self.data,
                                     target_columns=target_columns)
        category_data = cat_sum.result.groupby("ColumnName").count()
        category_data = category_data.assign(drop_columns=True,
                                             ColumnName=category_data.ColumnName,
                                             CategoryCount=category_data.count_DistinctValue)
        # Storing indicator and fit object for target encoding in data transform dictionary
        self.data_transform_dict["custom_target_encoding_ind"] = True
        self.data_transform_dict["custom_target_encoding_fit_obj"] = {}
        # Fetching required arguments for performing target encoding
        for col, transform_val in target_encoding_list.items():
            encoder_method = transform_val["encoder_method"]
            response_column = transform_val["response_column"]
            # Adding fit parameters for performing encoding
            fit_params = {
                "data": self.data,
                "category_data": category_data,
                "encoder_method": encoder_method,
                "target_columns": col,
                "response_column": response_column
            }
            if encoder_method == "CBM_DIRICHLET":
                num_distinct_responses = transform_val["num_distinct_responses"]
                fit_params = {**fit_params,
                              "num_distinct_responses": num_distinct_responses}
            # Performing target encoding fit on target columns
            tar_fit_obj = TargetEncodingFit(**fit_params)
            # Storing each column fit object for target encoding in data transform dictionary
            self.data_transform_dict["custom_target_encoding_fit_obj"].update({col: tar_fit_obj})
            # Extracting accumulate columns
            accumulate_columns = self._extract_list(self.data.columns, [col])
            # Adding transform parameters for performing encoding
            transform_params = {
                "data": self.data,
                "object": tar_fit_obj,
                "accumulate": accumulate_columns,
                "persist": True
            }
            # Performing target encoding transformation
            self.data = TargetEncodingTransform(**transform_params).result
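A minimal sketch (not part of the package source) of the TargetEncodingList structure the method above iterates; the key names mirror the lookups in the loop, and "num_distinct_responses" is only needed for the CBM_DIRICHLET branch:

# Illustrative only: column and response names are made up.
target_encoding_list = {
    "segment": {
        "encoder_method": "CBM_DIRICHLET",
        "response_column": "target",
        "num_distinct_responses": 3
    }
}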
    def _encoding_categorical_columns(self):
        """
        DESCRIPTION:
            Function detects the categorical columns and performs encoding on them in the dataset.
        """
        self._display_msg(msg="\nPerforming encoding for categorical columns ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        start_time = time.time()

        ohe_col = []
        unique_count = []

        # List of columns before one hot encoding
        col_bf_ohe = self.data.columns

        # Getting the distinct value count of each column
        self._get_distinct_count()

        # Detecting categorical columns along with their unique counts
        for col, d_type in self.data._column_names_and_types:
            if d_type in ['str']:
                ohe_col.append(col)
                unique_count.append(self.counts_dict[f'count_{col}'])

        if len(ohe_col) != 0:
            self._one_hot_encoding(ohe_col, unique_count)

            self._display_msg(msg="ONE HOT Encoding these Columns:",
                              col_lst=ohe_col,
                              progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Encoding not required.",
                              progress_bar=self.progress_bar)

        # List of columns after one hot encoding
        col_af_ohe = self.data.columns

        # List of columns excluded from outlier processing and scaling
        self.excluded_cols = self._extract_list(col_af_ohe, col_bf_ohe)

        end_time = time.time()
        self._display_msg(msg="Time taken to encode the columns: {:.2f} sec".format(end_time - start_time),
                          progress_bar=self.progress_bar,
                          show_data=True)
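The helper _extract_list is used throughout to derive accumulate and excluded-column lists. Its definition is not shown in this diff; a plain-Python sketch of its assumed behavior (members of the first list absent from the second), inferred from the call sites:

# Hypothetical stand-in for _extract_list, inferred from its call sites.
def _extract_list(full_list, remove_list):
    return [item for item in full_list if item not in remove_list]

col_bf_ohe = ["age", "city"]
col_af_ohe = ["age", "city_0", "city_1", "city_other"]
print(_extract_list(col_af_ohe, col_bf_ohe))  # ['city_0', 'city_1', 'city_other']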
    def _custom_categorical_encoding(self):
        """
        DESCRIPTION:
            Function to perform specific encoding on the categorical columns based on user input.
            If validation fails, default encoding is performed on all remaining categorical columns.
        """
        self._display_msg(msg="\nStarting Customized Categorical Feature Encoding ...",
                          progress_bar=self.progress_bar)
        cat_end_input = self.custom_data.get("CategoricalEncodingIndicator", False)
        # Checking user input for categorical encoding
        if cat_end_input:
            # Storing custom categorical encoding indicator in data transform dictionary
            self.data_transform_dict["custom_categorical_encoding_ind"] = True
            # Fetching user input list for performing encoding
            encoding_list = self.custom_data.get("CategoricalEncodingParam", None)
            if encoding_list:
                onehot_encode_ind = encoding_list.get("OneHotEncodingIndicator", False)
                ordinal_encode_ind = encoding_list.get("OrdinalEncodingIndicator", False)
                target_encode_ind = encoding_list.get("TargetEncodingIndicator", False)
                # Checking whether any categorical encoding technique indicator is set
                if not any([onehot_encode_ind, ordinal_encode_ind, target_encode_ind]):
                    self._display_msg(inline_msg="No information provided for any type of customized categorical "
                                                 "encoding techniques. AutoML will proceed with default encoding technique.",
                                      progress_bar=self.progress_bar)
                else:
                    if onehot_encode_ind:
                        unique_count = []
                        ohe_list = encoding_list.get("OneHotEncodingList", None)
                        # Checking for empty list
                        if not ohe_list:
                            self._display_msg(inline_msg="No information provided for customized one hot encoding technique.",
                                              progress_bar=self.progress_bar)
                        else:
                            # Checking whether the specified columns are present in the dataset
                            _Validators._validate_dataframe_has_argument_columns(ohe_list, "OneHotEncodingList", self.data, "df")

                            # Keeping track of existing columns before applying one hot encoding
                            col_bf_ohe = self.data.columns
                            # Fetching the unique counts of the listed categorical columns
                            for col in ohe_list:
                                unique_count.append(self.data.drop_duplicate(col).size)
                            # Performing one hot encoding
                            self._one_hot_encoding(ohe_list, unique_count)
                            # Keeping track of new columns after applying one hot encoding
                            col_af_ohe = self.data.columns
                            # Fetching list of columns on which outlier processing should not be applied
                            self.excluded_cols.extend(self._extract_list(col_af_ohe, col_bf_ohe))

                            self._display_msg(msg="Updated dataset sample after performing one hot encoding:",
                                              data=self.data,
                                              progress_bar=self.progress_bar)

                    if ordinal_encode_ind:
                        ord_list = encoding_list.get("OrdinalEncodingList", None)
                        # Checking for empty list
                        if not ord_list:
                            self._display_msg(inline_msg="No information provided for customized ordinal encoding technique.",
                                              progress_bar=self.progress_bar)
                        else:
                            # Checking whether the specified columns are present in the dataset
                            _Validators._validate_dataframe_has_argument_columns(ord_list, "OrdinalEncodingList", self.data, "df")

                            # Performing ordinal encoding
                            self._ordinal_encoding(ord_list)
                            self._display_msg(msg="Updated dataset sample after performing ordinal encoding:",
                                              data=self.data,
                                              progress_bar=self.progress_bar)

                    if target_encode_ind:
                        tar_list = encoding_list.get("TargetEncodingList", None)
                        if not tar_list:
                            self._display_msg(inline_msg="No information provided for customized target encoding technique.",
                                              progress_bar=self.progress_bar)
                        else:
                            # Performing target encoding
                            self._target_encoding(tar_list)
                            self._display_msg(msg="Updated dataset sample after performing target encoding:",
                                              data=self.data,
                                              progress_bar=self.progress_bar)
            else:
                self._display_msg(inline_msg="No input provided for performing customized categorical encoding. "
                                             "AutoML will proceed with default encoding technique.",
                                  progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="AutoML will proceed with default encoding technique.",
                              progress_bar=self.progress_bar)

        # Performing default encoding on remaining categorical columns
        self._encoding_categorical_columns()
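Putting the three indicator branches together, a minimal sketch (not part of the package source) of a CategoricalEncodingParam block; the key names mirror the .get(...) lookups above and the column names are illustrative:

# Illustrative only: key names mirror the .get(...) lookups above.
custom_data = {
    "CategoricalEncodingIndicator": True,
    "CategoricalEncodingParam": {
        "OneHotEncodingIndicator": True,
        "OneHotEncodingList": ["city"],
        "OrdinalEncodingIndicator": True,
        "OrdinalEncodingList": ["grade"],
        "TargetEncodingIndicator": False
    }
}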
    def _numapply_transformation(self, target_col, transform_val):
        """
        DESCRIPTION:
            Function to perform different numerical transformations using NumApply on numerical features based on user input.

        """
        # Fetching columns for accumulation
        accumulate_columns = self._extract_list(self.data.columns, [target_col])
        apply_method = transform_val["apply_method"]
        # Adding fit parameters for performing transformation
        fit_params = {
            "data": self.data,
            "target_columns": target_col,
            "apply_method": apply_method,
            "inplace": True,
            "persist": True,
            "accumulate": accumulate_columns
        }
        # Adding additional fit parameters in case of sigmoid transformation
        if apply_method == "sigmoid":
            sigmoid_style = transform_val["sigmoid_style"]
            fit_params = {**fit_params, "sigmoid_style": sigmoid_style}
        # Performing transformation on target columns
        return NumApply(**fit_params).result
    def _numerical_transformation(self, target_columns, num_transform_data):
        """
        DESCRIPTION:
            Function to perform different numerical transformations using Fit and Transform on numerical features based on user input.

        """
        # Adding fit parameters for transformation
        fit_params = {
            "data": self.data,
            "object": num_transform_data,
            "object_order_column": "TargetColumn"
        }
        # Performing fit with all arguments.
        num_fit_obj = Fit(**fit_params)
        # Fetching all numerical columns
        numerical_columns = [col for col, d_type in self.data._column_names_and_types if d_type in ["int", "float"]]
        # Extracting id columns, i.e., the numerical columns the transformation should leave untouched
        id_columns = self._extract_list(numerical_columns, target_columns)
        # Storing fit object and id column list for numerical transformation in data transform dictionary
        self.data_transform_dict['custom_numerical_transformation_fit_object'] = num_fit_obj.result
        self.data_transform_dict['custom_numerical_transformation_id_columns'] = id_columns
        # Adding transform parameters for transformation
        transform_params = {
            "data": self.data,
            "object": num_fit_obj.result,
            "id_columns": id_columns,
            "persist": True
        }
        # Performing transformation on target columns
        self.data = Transform(**transform_params).result
        self._display_msg(msg="Updated dataset sample after applying numerical transformation:",
                          data=self.data,
                          progress_bar=self.progress_bar)
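The num_transform_data table consumed above is built by _mathematical_transformation below; a sketch (not part of the package source) of rows in that shape, with made-up columns and values:

# Illustrative only: rows in the shape built by _mathematical_transformation.
import json
import pandas as pd

rows = [
    {"TargetColumn": "income", "DefaultValue": 1,
     "Transformation": "log", "Parameters": json.dumps({"base": 10})},
    {"TargetColumn": "age", "DefaultValue": 1,
     "Transformation": "pow", "Parameters": json.dumps({"exponent": 2})}
]
fit_table = pd.DataFrame(rows, columns=["TargetColumn", "DefaultValue",
                                        "Transformation", "Parameters"])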
    def _mathematical_transformation(self):
        """
        DESCRIPTION:
            Function to perform different mathematical transformations (i.e., log, pow,
            exp, sininv, sigmoid) on numerical features based on user input.
        """
        self._display_msg(msg="\nStarting customized mathematical transformation ...",
                          progress_bar=self.progress_bar,
                          show_data=True)

        mat_transform_input = self.custom_data.get("MathameticalTransformationIndicator", False)
        # Checking user input for mathematical transformations
        if mat_transform_input:
            # Extracting list required for mathematical transformations
            mat_transform_list = self.custom_data.get("MathameticalTransformationParam", None)
            if mat_transform_list:
                # Checking whether the specified columns are present in the dataset
                _Validators._validate_dataframe_has_argument_columns(list(mat_transform_list.keys()),
                                                                     "MathameticalTransformationParam", self.data, "df")

                # Lists for storing target columns and mathematical transformation information
                transform_data = []
                target_columns = []
                # Storing custom mathematical transformation indicator in data transform dictionary
                self.data_transform_dict['custom_mathematical_transformation_ind'] = True
                # Storing custom numapply transformation parameters in data transform dictionary
                self.data_transform_dict['custom_numapply_transformation_param'] = {}

                for col, transform_val in mat_transform_list.items():
                    apply_method = transform_val["apply_method"]
                    if apply_method in ["sininv", "sigmoid"]:
                        # Applying numapply transformation
                        self.data = self._numapply_transformation(col, transform_val)
                        self._display_msg(msg="Updated dataset sample after applying numapply transformation:",
                                          data=self.data,
                                          progress_bar=self.progress_bar)
                        # Updating parameter details for each column
                        self.data_transform_dict['custom_numapply_transformation_param'].update({col: transform_val})
                    else:
                        # Handling specific scenarios for log and pow transformation
                        parameters = ""
                        if apply_method == "log":
                            base = transform_val["base"]
                            parameters = json.dumps({"base": base})
                        elif apply_method == "pow":
                            exponent = transform_val["exponent"]
                            parameters = json.dumps({"exponent": exponent})
                        target_columns.append(col)
                        transform_data.append({"TargetColumn": col, "DefaultValue": 1, "Transformation": apply_method, "Parameters": parameters})
                # Checking for transformation data
                if len(transform_data):
                    # Converting into a pandas and then a teradataml DataFrame for performing further operations
                    transform_data = pd.DataFrame(transform_data, columns=["TargetColumn", "DefaultValue", "Transformation", "Parameters"])
                    self._display_msg(msg="Numerical transformation information:",
                                      data=transform_data,
                                      progress_bar=self.progress_bar)
                    copy_to_sql(df=transform_data, table_name="automl_num_transform_data", temporary=True)
                    num_transform_data = DataFrame.from_table("automl_num_transform_data")
                    # Applying transformation using Fit/Transform functions
                    self._numerical_transformation(target_columns, num_transform_data)
                    # Storing custom numerical transformation parameters and column list in data transform dictionary
                    self.data_transform_dict['custom_numerical_transformation_col'] = target_columns
                    self.data_transform_dict['custom_numerical_transformation_params'] = num_transform_data
            else:
                self._display_msg(inline_msg="No input provided for performing customized mathematical transformation.",
                                  progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Skipping customized mathematical transformation.",
                              progress_bar=self.progress_bar)
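A minimal sketch (not part of the package source) of the MathameticalTransformationParam block the method above dispatches on; note that "Mathametical" is the literal key spelling the code reads, the per-method keys mirror the lookups above, and the sigmoid_style value shown is an assumed example:

# Illustrative only: "logit" as a sigmoid_style is an assumption.
custom_data = {
    "MathameticalTransformationIndicator": True,
    "MathameticalTransformationParam": {
        "income": {"apply_method": "log", "base": 10},
        "age": {"apply_method": "pow", "exponent": 2},
        "ratio": {"apply_method": "sininv"},
        "score": {"apply_method": "sigmoid", "sigmoid_style": "logit"}
    }
}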
    def _non_linear_transformation(self):
        """
        DESCRIPTION:
            Function to perform customized non-linear transformation on numerical features based on user input.

        """
        self._display_msg(msg="\nStarting customized non-linear transformation ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        nl_transform_input = self.custom_data.get("NonLinearTransformationIndicator", False)
        # Checking user input for non-linear transformation
        if nl_transform_input:
            # Extracting list required for non-linear transformation
            nl_transform_list = self.custom_data.get("NonLinearTransformationParam", None)
            if nl_transform_list:
                total_combination = len(nl_transform_list)
                # Generating all possible combination names
                possible_combination = ["Combination_" + str(counter) for counter in range(1, total_combination + 1)]
                self._display_msg(msg="Possible combination:",
                                  col_lst=possible_combination,
                                  progress_bar=self.progress_bar)
                # Storing custom non-linear transformation indicator in data transform dictionary
                self.data_transform_dict['custom_non_linear_transformation_ind'] = True
                # Storing custom non-linear transformation fit objects in data transform dictionary
                self.data_transform_dict['custom_non_linear_transformation_fit_object'] = {}
                # Performing transformation for each combination
                for comb, transform_val in nl_transform_list.items():
                    if comb in possible_combination:
                        target_columns = transform_val["target_columns"]
                        # Checking whether the specified columns are present in the dataset
                        _Validators._validate_dataframe_has_argument_columns(target_columns,
                                                                             "target_columns", self.data, "df")

                        formula = transform_val["formula"]
                        result_column = transform_val["result_column"]
                        # Adding fit params for transformation
                        fit_param = {
                            "data": self.data,
                            "target_columns": target_columns,
                            "formula": formula,
                            "result_column": result_column
                        }
                        # Performing fit on dataset
                        fit_obj = NonLinearCombineFit(**fit_param)
                        # Updating it for each non-linear combination
                        self.data_transform_dict['custom_non_linear_transformation_fit_object'].update({comb: fit_obj})
                        # Adding transform params for transformation
                        transform_params = {
                            "data": self.data,
                            "object": fit_obj,
                            "accumulate": self.data.columns,
                            "persist": True
                        }
                        self.data = NonLinearCombineTransform(**transform_params).result
                    else:
                        self._display_msg(inline_msg="Combinations are not as per expectation.",
                                          progress_bar=self.progress_bar)
                self._display_msg(msg="Updated dataset sample after performing non-linear transformation:",
                                  data=self.data,
                                  progress_bar=self.progress_bar)
            else:
                self._display_msg(inline_msg="No information provided for performing customized non-linear transformation.",
                                  progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Skipping customized non-linear transformation.",
                              progress_bar=self.progress_bar)
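A minimal sketch (not part of the package source) of the NonLinearTransformationParam block the loop above expects; the "Combination_<i>" naming and per-combination keys mirror the code, while the formula syntax shown (Y defined over X0, X1 indexing target_columns) is an assumption about NonLinearCombineFit, not confirmed by this diff:

# Illustrative only: the formula syntax is an assumption.
custom_data = {
    "NonLinearTransformationIndicator": True,
    "NonLinearTransformationParam": {
        "Combination_1": {
            "target_columns": ["height", "weight"],
            "formula": "Y = X1/(X0*X0)",
            "result_column": "bmi"
        }
    }
}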
    def _anti_select_columns(self):
        """
        DESCRIPTION:
            Function to remove specific features from the dataset based on user input.

        """
        self._display_msg(msg="\nStarting customized anti-select columns ...",
                          progress_bar=self.progress_bar,
                          show_data=True)
        anti_select_input = self.custom_data.get("AntiselectIndicator", False)
        # Checking user input for anti-select columns
        if anti_select_input:
            # Extracting list required for anti-select columns
            anti_select_list = self.custom_data.get("AntiselectParam", None)
            if anti_select_list:
                if all(item in self.data.columns for item in anti_select_list):
                    # Storing custom anti-select columns indicator and column list in data transform dictionary
                    self.data_transform_dict['custom_anti_select_columns_ind'] = True
                    self.data_transform_dict['custom_anti_select_columns'] = anti_select_list
                    fit_params = {
                        "data": self.data,
                        "exclude": anti_select_list
                    }
                    # Performing transformation for given user input
                    self.data = Antiselect(**fit_params).result
                    self._display_msg(msg="Updated dataset sample after performing anti-select columns:",
                                      data=self.data,
                                      progress_bar=self.progress_bar)
                else:
                    self._display_msg(msg="Columns provided in list are not present in dataset:",
                                      col_lst=anti_select_list,
                                      progress_bar=self.progress_bar)
            else:
                self._display_msg(inline_msg="No information provided for performing anti-select columns operation.",
                                  progress_bar=self.progress_bar)
        else:
            self._display_msg(inline_msg="Skipping customized anti-select columns.",
                              progress_bar=self.progress_bar)
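Finally, a minimal sketch (not part of the package source) of the anti-select input; AntiselectParam is read above as a plain list of column names handed to Antiselect's exclude argument, and the names here are illustrative:

# Illustrative only: column names are made up.
custom_data = {
    "AntiselectIndicator": True,
    "AntiselectParam": ["row_id", "comments"]
}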