teradataml 17.20.0.7-py3-none-any.whl → 20.0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of teradataml might be problematic.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +1935 -1640
- teradataml/__init__.py +70 -60
- teradataml/_version.py +11 -11
- teradataml/analytics/Transformations.py +2995 -2995
- teradataml/analytics/__init__.py +81 -83
- teradataml/analytics/analytic_function_executor.py +2040 -2010
- teradataml/analytics/analytic_query_generator.py +958 -958
- teradataml/analytics/byom/H2OPredict.py +514 -514
- teradataml/analytics/byom/PMMLPredict.py +437 -437
- teradataml/analytics/byom/__init__.py +14 -14
- teradataml/analytics/json_parser/__init__.py +130 -130
- teradataml/analytics/json_parser/analytic_functions_argument.py +1707 -1707
- teradataml/analytics/json_parser/json_store.py +191 -191
- teradataml/analytics/json_parser/metadata.py +1637 -1637
- teradataml/analytics/json_parser/utils.py +798 -803
- teradataml/analytics/meta_class.py +196 -196
- teradataml/analytics/sqle/DecisionTreePredict.py +455 -470
- teradataml/analytics/sqle/NaiveBayesPredict.py +419 -428
- teradataml/analytics/sqle/__init__.py +97 -110
- teradataml/analytics/sqle/json/decisiontreepredict_sqle.json +78 -78
- teradataml/analytics/sqle/json/naivebayespredict_sqle.json +62 -62
- teradataml/analytics/table_operator/__init__.py +10 -10
- teradataml/analytics/uaf/__init__.py +63 -63
- teradataml/analytics/utils.py +693 -692
- teradataml/analytics/valib.py +1603 -1600
- teradataml/automl/__init__.py +1683 -0
- teradataml/automl/custom_json_utils.py +1270 -0
- teradataml/automl/data_preparation.py +1011 -0
- teradataml/automl/data_transformation.py +789 -0
- teradataml/automl/feature_engineering.py +1580 -0
- teradataml/automl/feature_exploration.py +554 -0
- teradataml/automl/model_evaluation.py +151 -0
- teradataml/automl/model_training.py +1026 -0
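The teradataml/automl/ package above is entirely new in this release (every file is added against an empty base). The diff itself does not document the public interface, so the following is only a minimal sketch of how an AutoML-style workflow in teradataml 20.x might be driven; the AutoML class, the task_type argument, and the fit/leaderboard/predict methods are assumptions inferred from the module layout, not confirmed by this diff, and the table names are illustrative.

```python
# Hypothetical sketch of the new automl workflow. Class and method names
# (AutoML, task_type=, fit, leaderboard, predict) are assumed, not taken
# from this diff -- consult the teradataml 20.x documentation for the real API.
from teradataml import create_context, DataFrame, AutoML

create_context(host="<host>", username="<user>", password="<pass>")

train = DataFrame("housing_train")      # training data already loaded in Vantage
test = DataFrame("housing_test")        # held-out data to score

aml = AutoML(task_type="Regression")    # assumed constructor argument
aml.fit(train, train.price)             # assumed: target supplied as a column
print(aml.leaderboard())                # assumed: ranked candidate models
predictions = aml.predict(test)         # assumed: returns a teradataml DataFrame
```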
- teradataml/catalog/__init__.py +1 -3
- teradataml/catalog/byom.py +1759 -1716
- teradataml/catalog/function_argument_mapper.py +859 -861
- teradataml/catalog/model_cataloging_utils.py +491 -1510
- teradataml/clients/auth_client.py +133 -0
- teradataml/clients/pkce_client.py +481 -481
- teradataml/common/aed_utils.py +7 -2
- teradataml/common/bulk_exposed_utils.py +111 -111
- teradataml/common/constants.py +1438 -1441
- teradataml/common/deprecations.py +160 -0
- teradataml/common/exceptions.py +73 -73
- teradataml/common/formula.py +742 -742
- teradataml/common/garbagecollector.py +597 -635
- teradataml/common/messagecodes.py +424 -431
- teradataml/common/messages.py +228 -231
- teradataml/common/sqlbundle.py +693 -693
- teradataml/common/td_coltype_code_to_tdtype.py +48 -48
- teradataml/common/utils.py +2424 -2500
- teradataml/common/warnings.py +25 -25
- teradataml/common/wrapper_utils.py +1 -110
- teradataml/config/dummy_file1.cfg +4 -4
- teradataml/config/dummy_file2.cfg +2 -2
- teradataml/config/sqlengine_alias_definitions_v1.0 +13 -13
- teradataml/config/sqlengine_alias_definitions_v1.1 +19 -19
- teradataml/config/sqlengine_alias_definitions_v1.3 +18 -18
- teradataml/context/aed_context.py +217 -217
- teradataml/context/context.py +1091 -999
- teradataml/data/A_loan.csv +19 -19
- teradataml/data/BINARY_REALS_LEFT.csv +11 -11
- teradataml/data/BINARY_REALS_RIGHT.csv +11 -11
- teradataml/data/B_loan.csv +49 -49
- teradataml/data/BuoyData2.csv +17 -17
- teradataml/data/CONVOLVE2_COMPLEX_LEFT.csv +5 -5
- teradataml/data/CONVOLVE2_COMPLEX_RIGHT.csv +5 -5
- teradataml/data/Convolve2RealsLeft.csv +5 -5
- teradataml/data/Convolve2RealsRight.csv +5 -5
- teradataml/data/Convolve2ValidLeft.csv +11 -11
- teradataml/data/Convolve2ValidRight.csv +11 -11
- teradataml/data/DFFTConv_Real_8_8.csv +65 -65
- teradataml/data/Orders1_12mf.csv +24 -24
- teradataml/data/Pi_loan.csv +7 -7
- teradataml/data/SMOOTHED_DATA.csv +7 -7
- teradataml/data/TestDFFT8.csv +9 -9
- teradataml/data/TestRiver.csv +109 -109
- teradataml/data/Traindata.csv +28 -28
- teradataml/data/acf.csv +17 -17
- teradataml/data/adaboost_example.json +34 -34
- teradataml/data/adaboostpredict_example.json +24 -24
- teradataml/data/additional_table.csv +10 -10
- teradataml/data/admissions_test.csv +21 -21
- teradataml/data/admissions_train.csv +41 -41
- teradataml/data/admissions_train_nulls.csv +41 -41
- teradataml/data/advertising.csv +201 -0
- teradataml/data/ageandheight.csv +13 -13
- teradataml/data/ageandpressure.csv +31 -31
- teradataml/data/antiselect_example.json +36 -36
- teradataml/data/antiselect_input.csv +8 -8
- teradataml/data/antiselect_input_mixed_case.csv +8 -8
- teradataml/data/applicant_external.csv +6 -6
- teradataml/data/applicant_reference.csv +6 -6
- teradataml/data/arima_example.json +9 -9
- teradataml/data/assortedtext_input.csv +8 -8
- teradataml/data/attribution_example.json +33 -33
- teradataml/data/attribution_sample_table.csv +27 -27
- teradataml/data/attribution_sample_table1.csv +6 -6
- teradataml/data/attribution_sample_table2.csv +11 -11
- teradataml/data/bank_churn.csv +10001 -0
- teradataml/data/bank_marketing.csv +11163 -0
- teradataml/data/bank_web_clicks1.csv +42 -42
- teradataml/data/bank_web_clicks2.csv +91 -91
- teradataml/data/bank_web_url.csv +85 -85
- teradataml/data/barrier.csv +2 -2
- teradataml/data/barrier_new.csv +3 -3
- teradataml/data/betweenness_example.json +13 -13
- teradataml/data/bike_sharing.csv +732 -0
- teradataml/data/bin_breaks.csv +8 -8
- teradataml/data/bin_fit_ip.csv +3 -3
- teradataml/data/binary_complex_left.csv +11 -11
- teradataml/data/binary_complex_right.csv +11 -11
- teradataml/data/binary_matrix_complex_left.csv +21 -21
- teradataml/data/binary_matrix_complex_right.csv +21 -21
- teradataml/data/binary_matrix_real_left.csv +21 -21
- teradataml/data/binary_matrix_real_right.csv +21 -21
- teradataml/data/blood2ageandweight.csv +26 -26
- teradataml/data/bmi.csv +501 -0
- teradataml/data/boston.csv +507 -507
- teradataml/data/boston2cols.csv +721 -0
- teradataml/data/breast_cancer.csv +570 -0
- teradataml/data/buoydata_mix.csv +11 -11
- teradataml/data/burst_data.csv +5 -5
- teradataml/data/burst_example.json +20 -20
- teradataml/data/byom_example.json +17 -17
- teradataml/data/bytes_table.csv +3 -3
- teradataml/data/cal_housing_ex_raw.csv +70 -70
- teradataml/data/callers.csv +7 -7
- teradataml/data/calls.csv +10 -10
- teradataml/data/cars_hist.csv +33 -33
- teradataml/data/cat_table.csv +24 -24
- teradataml/data/ccm_example.json +31 -31
- teradataml/data/ccm_input.csv +91 -91
- teradataml/data/ccm_input2.csv +13 -13
- teradataml/data/ccmexample.csv +101 -101
- teradataml/data/ccmprepare_example.json +8 -8
- teradataml/data/ccmprepare_input.csv +91 -91
- teradataml/data/cfilter_example.json +12 -12
- teradataml/data/changepointdetection_example.json +18 -18
- teradataml/data/changepointdetectionrt_example.json +8 -8
- teradataml/data/chi_sq.csv +2 -2
- teradataml/data/churn_data.csv +14 -14
- teradataml/data/churn_emission.csv +35 -35
- teradataml/data/churn_initial.csv +3 -3
- teradataml/data/churn_state_transition.csv +5 -5
- teradataml/data/citedges_2.csv +745 -745
- teradataml/data/citvertices_2.csv +1210 -1210
- teradataml/data/clicks2.csv +16 -16
- teradataml/data/clickstream.csv +12 -12
- teradataml/data/clickstream1.csv +11 -11
- teradataml/data/closeness_example.json +15 -15
- teradataml/data/complaints.csv +21 -21
- teradataml/data/complaints_mini.csv +3 -3
- teradataml/data/complaints_testtoken.csv +224 -224
- teradataml/data/complaints_tokens_test.csv +353 -353
- teradataml/data/complaints_traintoken.csv +472 -472
- teradataml/data/computers_category.csv +1001 -1001
- teradataml/data/computers_test1.csv +1252 -1252
- teradataml/data/computers_train1.csv +5009 -5009
- teradataml/data/computers_train1_clustered.csv +5009 -5009
- teradataml/data/confusionmatrix_example.json +9 -9
- teradataml/data/conversion_event_table.csv +3 -3
- teradataml/data/corr_input.csv +17 -17
- teradataml/data/correlation_example.json +11 -11
- teradataml/data/coxhazardratio_example.json +39 -39
- teradataml/data/coxph_example.json +15 -15
- teradataml/data/coxsurvival_example.json +28 -28
- teradataml/data/cpt.csv +41 -41
- teradataml/data/credit_ex_merged.csv +45 -45
- teradataml/data/customer_loyalty.csv +301 -301
- teradataml/data/customer_loyalty_newseq.csv +31 -31
- teradataml/data/customer_segmentation_test.csv +2628 -0
- teradataml/data/customer_segmentation_train.csv +8069 -0
- teradataml/data/dataframe_example.json +146 -146
- teradataml/data/decisionforest_example.json +37 -37
- teradataml/data/decisionforestpredict_example.json +38 -38
- teradataml/data/decisiontree_example.json +21 -21
- teradataml/data/decisiontreepredict_example.json +45 -45
- teradataml/data/dfft2_size4_real.csv +17 -17
- teradataml/data/dfft2_test_matrix16.csv +17 -17
- teradataml/data/dfft2conv_real_4_4.csv +65 -65
- teradataml/data/diabetes.csv +443 -443
- teradataml/data/diabetes_test.csv +89 -89
- teradataml/data/dict_table.csv +5 -5
- teradataml/data/docperterm_table.csv +4 -4
- teradataml/data/docs/__init__.py +1 -1
- teradataml/data/docs/byom/docs/DataRobotPredict.py +180 -180
- teradataml/data/docs/byom/docs/DataikuPredict.py +177 -177
- teradataml/data/docs/byom/docs/H2OPredict.py +324 -324
- teradataml/data/docs/byom/docs/ONNXPredict.py +283 -283
- teradataml/data/docs/byom/docs/PMMLPredict.py +277 -277
- teradataml/data/docs/sqle/docs_17_10/Antiselect.py +82 -82
- teradataml/data/docs/sqle/docs_17_10/Attribution.py +199 -199
- teradataml/data/docs/sqle/docs_17_10/BincodeFit.py +171 -171
- teradataml/data/docs/sqle/docs_17_10/BincodeTransform.py +131 -130
- teradataml/data/docs/sqle/docs_17_10/CategoricalSummary.py +86 -86
- teradataml/data/docs/sqle/docs_17_10/ChiSq.py +90 -90
- teradataml/data/docs/sqle/docs_17_10/ColumnSummary.py +85 -85
- teradataml/data/docs/sqle/docs_17_10/ConvertTo.py +95 -95
- teradataml/data/docs/sqle/docs_17_10/DecisionForestPredict.py +139 -139
- teradataml/data/docs/sqle/docs_17_10/DecisionTreePredict.py +151 -151
- teradataml/data/docs/sqle/docs_17_10/FTest.py +160 -160
- teradataml/data/docs/sqle/docs_17_10/FillRowId.py +82 -82
- teradataml/data/docs/sqle/docs_17_10/Fit.py +87 -87
- teradataml/data/docs/sqle/docs_17_10/GLMPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_10/GetRowsWithMissingValues.py +84 -84
- teradataml/data/docs/sqle/docs_17_10/GetRowsWithoutMissingValues.py +81 -81
- teradataml/data/docs/sqle/docs_17_10/Histogram.py +164 -164
- teradataml/data/docs/sqle/docs_17_10/MovingAverage.py +134 -134
- teradataml/data/docs/sqle/docs_17_10/NGramSplitter.py +208 -208
- teradataml/data/docs/sqle/docs_17_10/NPath.py +265 -265
- teradataml/data/docs/sqle/docs_17_10/NaiveBayesPredict.py +116 -116
- teradataml/data/docs/sqle/docs_17_10/NaiveBayesTextClassifierPredict.py +176 -176
- teradataml/data/docs/sqle/docs_17_10/NumApply.py +147 -147
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +134 -132
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +109 -103
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterFit.py +165 -165
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +105 -101
- teradataml/data/docs/sqle/docs_17_10/Pack.py +128 -128
- teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesFit.py +111 -111
- teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesTransform.py +102 -102
- teradataml/data/docs/sqle/docs_17_10/QQNorm.py +104 -104
- teradataml/data/docs/sqle/docs_17_10/RoundColumns.py +109 -109
- teradataml/data/docs/sqle/docs_17_10/RowNormalizeFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_10/RowNormalizeTransform.py +99 -98
- teradataml/data/docs/sqle/docs_17_10/SVMSparsePredict.py +152 -152
- teradataml/data/docs/sqle/docs_17_10/ScaleFit.py +197 -197
- teradataml/data/docs/sqle/docs_17_10/ScaleTransform.py +99 -98
- teradataml/data/docs/sqle/docs_17_10/Sessionize.py +113 -113
- teradataml/data/docs/sqle/docs_17_10/SimpleImputeFit.py +116 -116
- teradataml/data/docs/sqle/docs_17_10/SimpleImputeTransform.py +98 -98
- teradataml/data/docs/sqle/docs_17_10/StrApply.py +187 -187
- teradataml/data/docs/sqle/docs_17_10/StringSimilarity.py +145 -145
- teradataml/data/docs/sqle/docs_17_10/Transform.py +105 -104
- teradataml/data/docs/sqle/docs_17_10/UnivariateStatistics.py +141 -141
- teradataml/data/docs/sqle/docs_17_10/Unpack.py +214 -214
- teradataml/data/docs/sqle/docs_17_10/WhichMax.py +83 -83
- teradataml/data/docs/sqle/docs_17_10/WhichMin.py +83 -83
- teradataml/data/docs/sqle/docs_17_10/ZTest.py +155 -155
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +186 -126
- teradataml/data/docs/sqle/docs_17_20/Antiselect.py +82 -82
- teradataml/data/docs/sqle/docs_17_20/Attribution.py +200 -200
- teradataml/data/docs/sqle/docs_17_20/BincodeFit.py +171 -171
- teradataml/data/docs/sqle/docs_17_20/BincodeTransform.py +139 -138
- teradataml/data/docs/sqle/docs_17_20/CategoricalSummary.py +86 -86
- teradataml/data/docs/sqle/docs_17_20/ChiSq.py +90 -90
- teradataml/data/docs/sqle/docs_17_20/ClassificationEvaluator.py +166 -166
- teradataml/data/docs/sqle/docs_17_20/ColumnSummary.py +85 -85
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +245 -243
- teradataml/data/docs/sqle/docs_17_20/ConvertTo.py +113 -113
- teradataml/data/docs/sqle/docs_17_20/DecisionForest.py +279 -279
- teradataml/data/docs/sqle/docs_17_20/DecisionForestPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_20/DecisionTreePredict.py +135 -135
- teradataml/data/docs/sqle/docs_17_20/FTest.py +239 -160
- teradataml/data/docs/sqle/docs_17_20/FillRowId.py +82 -82
- teradataml/data/docs/sqle/docs_17_20/Fit.py +87 -87
- teradataml/data/docs/sqle/docs_17_20/GLM.py +541 -380
- teradataml/data/docs/sqle/docs_17_20/GLMPerSegment.py +414 -414
- teradataml/data/docs/sqle/docs_17_20/GLMPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_20/GLMPredictPerSegment.py +233 -234
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +125 -123
- teradataml/data/docs/sqle/docs_17_20/GetRowsWithMissingValues.py +108 -108
- teradataml/data/docs/sqle/docs_17_20/GetRowsWithoutMissingValues.py +105 -105
- teradataml/data/docs/sqle/docs_17_20/Histogram.py +223 -223
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +251 -204
- teradataml/data/docs/sqle/docs_17_20/KMeansPredict.py +144 -143
- teradataml/data/docs/sqle/docs_17_20/KNN.py +214 -214
- teradataml/data/docs/sqle/docs_17_20/MovingAverage.py +134 -134
- teradataml/data/docs/sqle/docs_17_20/NGramSplitter.py +208 -208
- teradataml/data/docs/sqle/docs_17_20/NPath.py +265 -265
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesPredict.py +116 -116
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierPredict.py +177 -176
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierTrainer.py +126 -126
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +118 -117
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineTransform.py +112 -112
- teradataml/data/docs/sqle/docs_17_20/NumApply.py +147 -147
- teradataml/data/docs/sqle/docs_17_20/OneClassSVM.py +307 -307
- teradataml/data/docs/sqle/docs_17_20/OneClassSVMPredict.py +185 -184
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +230 -225
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +121 -115
- teradataml/data/docs/sqle/docs_17_20/OrdinalEncodingFit.py +219 -219
- teradataml/data/docs/sqle/docs_17_20/OrdinalEncodingTransform.py +127 -127
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +189 -189
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterTransform.py +117 -112
- teradataml/data/docs/sqle/docs_17_20/Pack.py +128 -128
- teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesFit.py +111 -111
- teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesTransform.py +112 -111
- teradataml/data/docs/sqle/docs_17_20/QQNorm.py +104 -104
- teradataml/data/docs/sqle/docs_17_20/ROC.py +164 -163
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionFit.py +154 -154
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionMinComponents.py +106 -106
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionTransform.py +120 -120
- teradataml/data/docs/sqle/docs_17_20/RegressionEvaluator.py +211 -211
- teradataml/data/docs/sqle/docs_17_20/RoundColumns.py +108 -108
- teradataml/data/docs/sqle/docs_17_20/RowNormalizeFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_20/RowNormalizeTransform.py +111 -110
- teradataml/data/docs/sqle/docs_17_20/SVM.py +413 -413
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +213 -202
- teradataml/data/docs/sqle/docs_17_20/SVMSparsePredict.py +152 -152
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +315 -197
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +202 -109
- teradataml/data/docs/sqle/docs_17_20/SentimentExtractor.py +206 -206
- teradataml/data/docs/sqle/docs_17_20/Sessionize.py +113 -113
- teradataml/data/docs/sqle/docs_17_20/Silhouette.py +152 -152
- teradataml/data/docs/sqle/docs_17_20/SimpleImputeFit.py +116 -116
- teradataml/data/docs/sqle/docs_17_20/SimpleImputeTransform.py +109 -108
- teradataml/data/docs/sqle/docs_17_20/StrApply.py +187 -187
- teradataml/data/docs/sqle/docs_17_20/StringSimilarity.py +145 -145
- teradataml/data/docs/sqle/docs_17_20/TDDecisionForestPredict.py +207 -207
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +333 -171
- teradataml/data/docs/sqle/docs_17_20/TargetEncodingFit.py +266 -266
- teradataml/data/docs/sqle/docs_17_20/TargetEncodingTransform.py +141 -140
- teradataml/data/docs/sqle/docs_17_20/TextParser.py +172 -172
- teradataml/data/docs/sqle/docs_17_20/TrainTestSplit.py +159 -159
- teradataml/data/docs/sqle/docs_17_20/Transform.py +123 -123
- teradataml/data/docs/sqle/docs_17_20/UnivariateStatistics.py +141 -141
- teradataml/data/docs/sqle/docs_17_20/Unpack.py +214 -214
- teradataml/data/docs/sqle/docs_17_20/VectorDistance.py +168 -168
- teradataml/data/docs/sqle/docs_17_20/WhichMax.py +83 -83
- teradataml/data/docs/sqle/docs_17_20/WhichMin.py +83 -83
- teradataml/data/docs/sqle/docs_17_20/WordEmbeddings.py +236 -236
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +361 -353
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +281 -275
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +220 -155
- teradataml/data/docs/tableoperator/docs_17_00/ReadNOS.py +429 -429
- teradataml/data/docs/tableoperator/docs_17_05/ReadNOS.py +429 -429
- teradataml/data/docs/tableoperator/docs_17_05/WriteNOS.py +347 -347
- teradataml/data/docs/tableoperator/docs_17_10/ReadNOS.py +428 -428
- teradataml/data/docs/tableoperator/docs_17_10/WriteNOS.py +347 -347
- teradataml/data/docs/tableoperator/docs_17_20/ReadNOS.py +439 -439
- teradataml/data/docs/tableoperator/docs_17_20/WriteNOS.py +386 -386
- teradataml/data/docs/uaf/docs_17_20/ACF.py +195 -195
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +369 -369
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +142 -142
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +159 -159
- teradataml/data/docs/uaf/docs_17_20/BinaryMatrixOp.py +247 -247
- teradataml/data/docs/uaf/docs_17_20/BinarySeriesOp.py +252 -252
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +177 -177
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +174 -174
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +226 -226
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +214 -214
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +183 -183
- teradataml/data/docs/uaf/docs_17_20/DFFT.py +203 -203
- teradataml/data/docs/uaf/docs_17_20/DFFT2.py +216 -216
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +215 -215
- teradataml/data/docs/uaf/docs_17_20/DFFTConv.py +191 -191
- teradataml/data/docs/uaf/docs_17_20/DTW.py +179 -179
- teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +144 -144
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +183 -183
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +184 -184
- teradataml/data/docs/uaf/docs_17_20/FitMetrics.py +172 -172
- teradataml/data/docs/uaf/docs_17_20/GenseriesFormula.py +205 -205
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +142 -142
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +258 -258
- teradataml/data/docs/uaf/docs_17_20/IDFFT.py +164 -164
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +198 -198
- teradataml/data/docs/uaf/docs_17_20/InputValidator.py +120 -120
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +155 -155
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +214 -214
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +173 -173
- teradataml/data/docs/uaf/docs_17_20/MInfo.py +133 -133
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +135 -135
- teradataml/data/docs/uaf/docs_17_20/MultivarRegr.py +190 -190
- teradataml/data/docs/uaf/docs_17_20/PACF.py +158 -158
- teradataml/data/docs/uaf/docs_17_20/Portman.py +216 -216
- teradataml/data/docs/uaf/docs_17_20/PowerTransform.py +154 -154
- teradataml/data/docs/uaf/docs_17_20/Resample.py +228 -228
- teradataml/data/docs/uaf/docs_17_20/SInfo.py +122 -122
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +165 -165
- teradataml/data/docs/uaf/docs_17_20/SelectionCriteria.py +173 -173
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +170 -170
- teradataml/data/docs/uaf/docs_17_20/SignifResidmean.py +163 -163
- teradataml/data/docs/uaf/docs_17_20/SimpleExp.py +179 -179
- teradataml/data/docs/uaf/docs_17_20/Smoothma.py +207 -207
- teradataml/data/docs/uaf/docs_17_20/TrackingOp.py +150 -150
- teradataml/data/docs/uaf/docs_17_20/UNDIFF.py +171 -171
- teradataml/data/docs/uaf/docs_17_20/Unnormalize.py +201 -201
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +169 -169
- teradataml/data/dtw_example.json +17 -17
- teradataml/data/dtw_t1.csv +11 -11
- teradataml/data/dtw_t2.csv +4 -4
- teradataml/data/dwt2d_example.json +15 -15
- teradataml/data/dwt_example.json +14 -14
- teradataml/data/dwt_filter_dim.csv +5 -5
- teradataml/data/emission.csv +9 -9
- teradataml/data/emp_table_by_dept.csv +19 -19
- teradataml/data/employee_info.csv +4 -4
- teradataml/data/employee_table.csv +6 -6
- teradataml/data/excluding_event_table.csv +2 -2
- teradataml/data/finance_data.csv +6 -6
- teradataml/data/finance_data2.csv +61 -61
- teradataml/data/finance_data3.csv +93 -93
- teradataml/data/fish.csv +160 -0
- teradataml/data/fm_blood2ageandweight.csv +26 -26
- teradataml/data/fmeasure_example.json +11 -11
- teradataml/data/followers_leaders.csv +10 -10
- teradataml/data/fpgrowth_example.json +12 -12
- teradataml/data/frequentpaths_example.json +29 -29
- teradataml/data/friends.csv +9 -9
- teradataml/data/fs_input.csv +33 -33
- teradataml/data/fs_input1.csv +33 -33
- teradataml/data/genData.csv +513 -513
- teradataml/data/geodataframe_example.json +39 -39
- teradataml/data/glass_types.csv +215 -0
- teradataml/data/glm_admissions_model.csv +12 -12
- teradataml/data/glm_example.json +56 -29
- teradataml/data/glml1l2_example.json +28 -28
- teradataml/data/glml1l2predict_example.json +54 -54
- teradataml/data/glmpredict_example.json +54 -54
- teradataml/data/gq_t1.csv +21 -21
- teradataml/data/hconvolve_complex_right.csv +5 -5
- teradataml/data/hconvolve_complex_rightmulti.csv +5 -5
- teradataml/data/histogram_example.json +11 -11
- teradataml/data/hmmdecoder_example.json +78 -78
- teradataml/data/hmmevaluator_example.json +24 -24
- teradataml/data/hmmsupervised_example.json +10 -10
- teradataml/data/hmmunsupervised_example.json +7 -7
- teradataml/data/house_values.csv +12 -12
- teradataml/data/house_values2.csv +13 -13
- teradataml/data/housing_cat.csv +7 -7
- teradataml/data/housing_data.csv +9 -9
- teradataml/data/housing_test.csv +47 -47
- teradataml/data/housing_test_binary.csv +47 -47
- teradataml/data/housing_train.csv +493 -493
- teradataml/data/housing_train_attribute.csv +4 -4
- teradataml/data/housing_train_binary.csv +437 -437
- teradataml/data/housing_train_parameter.csv +2 -2
- teradataml/data/housing_train_response.csv +493 -493
- teradataml/data/housing_train_segment.csv +201 -0
- teradataml/data/ibm_stock.csv +370 -370
- teradataml/data/ibm_stock1.csv +370 -370
- teradataml/data/identitymatch_example.json +21 -21
- teradataml/data/idf_table.csv +4 -4
- teradataml/data/impressions.csv +101 -101
- teradataml/data/inflation.csv +21 -21
- teradataml/data/initial.csv +3 -3
- teradataml/data/insect2Cols.csv +61 -0
- teradataml/data/insect_sprays.csv +12 -12
- teradataml/data/insurance.csv +1339 -1339
- teradataml/data/interpolator_example.json +12 -12
- teradataml/data/iris_altinput.csv +481 -481
- teradataml/data/iris_attribute_output.csv +8 -8
- teradataml/data/iris_attribute_test.csv +121 -121
- teradataml/data/iris_attribute_train.csv +481 -481
- teradataml/data/iris_category_expect_predict.csv +31 -31
- teradataml/data/iris_data.csv +151 -0
- teradataml/data/iris_input.csv +151 -151
- teradataml/data/iris_response_train.csv +121 -121
- teradataml/data/iris_test.csv +31 -31
- teradataml/data/iris_train.csv +121 -121
- teradataml/data/join_table1.csv +4 -4
- teradataml/data/join_table2.csv +4 -4
- teradataml/data/jsons/anly_function_name.json +6 -6
- teradataml/data/jsons/byom/dataikupredict.json +147 -147
- teradataml/data/jsons/byom/datarobotpredict.json +146 -146
- teradataml/data/jsons/byom/h2opredict.json +194 -194
- teradataml/data/jsons/byom/onnxpredict.json +186 -186
- teradataml/data/jsons/byom/pmmlpredict.json +146 -146
- teradataml/data/jsons/paired_functions.json +435 -435
- teradataml/data/jsons/sqle/16.20/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/16.20/Attribution.json +249 -249
- teradataml/data/jsons/sqle/16.20/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/16.20/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/16.20/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/16.20/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/16.20/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/16.20/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/16.20/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/16.20/Pack.json +98 -98
- teradataml/data/jsons/sqle/16.20/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/16.20/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/16.20/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/16.20/Unpack.json +166 -166
- teradataml/data/jsons/sqle/16.20/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.00/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.00/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.00/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/17.00/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/17.00/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/17.00/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.00/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.00/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/17.00/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/17.00/Pack.json +98 -98
- teradataml/data/jsons/sqle/17.00/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/17.00/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.00/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.00/Unpack.json +166 -166
- teradataml/data/jsons/sqle/17.00/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.05/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.05/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.05/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/17.05/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/17.05/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/17.05/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.05/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.05/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/17.05/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/17.05/Pack.json +98 -98
- teradataml/data/jsons/sqle/17.05/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/17.05/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.05/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.05/Unpack.json +166 -166
- teradataml/data/jsons/sqle/17.05/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.10/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.10/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.10/DecisionForestPredict.json +185 -185
- teradataml/data/jsons/sqle/17.10/DecisionTreePredict.json +171 -171
- teradataml/data/jsons/sqle/17.10/GLMPredict.json +151 -151
- teradataml/data/jsons/sqle/17.10/MovingAverage.json +368 -368
- teradataml/data/jsons/sqle/17.10/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.10/NaiveBayesPredict.json +149 -149
- teradataml/data/jsons/sqle/17.10/NaiveBayesTextClassifierPredict.json +288 -288
- teradataml/data/jsons/sqle/17.10/Pack.json +133 -133
- teradataml/data/jsons/sqle/17.10/SVMSparsePredict.json +193 -193
- teradataml/data/jsons/sqle/17.10/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.10/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.10/TD_BinCodeFit.json +239 -239
- teradataml/data/jsons/sqle/17.10/TD_BinCodeTransform.json +70 -70
- teradataml/data/jsons/sqle/17.10/TD_CategoricalSummary.json +53 -53
- teradataml/data/jsons/sqle/17.10/TD_Chisq.json +67 -67
- teradataml/data/jsons/sqle/17.10/TD_ColumnSummary.json +53 -53
- teradataml/data/jsons/sqle/17.10/TD_ConvertTo.json +68 -68
- teradataml/data/jsons/sqle/17.10/TD_FTest.json +187 -187
- teradataml/data/jsons/sqle/17.10/TD_FillRowID.json +51 -51
- teradataml/data/jsons/sqle/17.10/TD_FunctionFit.json +46 -46
- teradataml/data/jsons/sqle/17.10/TD_FunctionTransform.json +72 -71
- teradataml/data/jsons/sqle/17.10/TD_GetRowsWithMissingValues.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_GetRowsWithoutMissingValues.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_Histogram.json +132 -132
- teradataml/data/jsons/sqle/17.10/TD_NumApply.json +147 -147
- teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingFit.json +182 -182
- teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingTransform.json +65 -64
- teradataml/data/jsons/sqle/17.10/TD_OutlierFilterFit.json +196 -196
- teradataml/data/jsons/sqle/17.10/TD_OutlierFilterTransform.json +48 -47
- teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesFit.json +114 -114
- teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesTransform.json +72 -71
- teradataml/data/jsons/sqle/17.10/TD_QQNorm.json +111 -111
- teradataml/data/jsons/sqle/17.10/TD_RoundColumns.json +93 -93
- teradataml/data/jsons/sqle/17.10/TD_RowNormalizeFit.json +127 -127
- teradataml/data/jsons/sqle/17.10/TD_RowNormalizeTransform.json +70 -69
- teradataml/data/jsons/sqle/17.10/TD_ScaleFit.json +156 -156
- teradataml/data/jsons/sqle/17.10/TD_ScaleTransform.json +70 -69
- teradataml/data/jsons/sqle/17.10/TD_SimpleImputeFit.json +147 -147
- teradataml/data/jsons/sqle/17.10/TD_SimpleImputeTransform.json +48 -47
- teradataml/data/jsons/sqle/17.10/TD_StrApply.json +240 -240
- teradataml/data/jsons/sqle/17.10/TD_UnivariateStatistics.json +118 -118
- teradataml/data/jsons/sqle/17.10/TD_WhichMax.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_WhichMin.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_ZTest.json +171 -171
- teradataml/data/jsons/sqle/17.10/Unpack.json +188 -188
- teradataml/data/jsons/sqle/17.10/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.20/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.20/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.20/DecisionForestPredict.json +185 -185
- teradataml/data/jsons/sqle/17.20/DecisionTreePredict.json +172 -172
- teradataml/data/jsons/sqle/17.20/GLMPredict.json +151 -151
- teradataml/data/jsons/sqle/17.20/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.20/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.20/NaiveBayesPredict.json +149 -149
- teradataml/data/jsons/sqle/17.20/NaiveBayesTextClassifierPredict.json +287 -287
- teradataml/data/jsons/sqle/17.20/Pack.json +133 -133
- teradataml/data/jsons/sqle/17.20/SVMSparsePredict.json +192 -192
- teradataml/data/jsons/sqle/17.20/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.20/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +148 -76
- teradataml/data/jsons/sqle/17.20/TD_BinCodeFit.json +239 -239
- teradataml/data/jsons/sqle/17.20/TD_BinCodeTransform.json +71 -71
- teradataml/data/jsons/sqle/17.20/TD_CategoricalSummary.json +53 -53
- teradataml/data/jsons/sqle/17.20/TD_Chisq.json +67 -67
- teradataml/data/jsons/sqle/17.20/TD_ClassificationEvaluator.json +145 -145
- teradataml/data/jsons/sqle/17.20/TD_ColumnSummary.json +53 -53
- teradataml/data/jsons/sqle/17.20/TD_ColumnTransformer.json +218 -218
- teradataml/data/jsons/sqle/17.20/TD_ConvertTo.json +92 -92
- teradataml/data/jsons/sqle/17.20/TD_DecisionForest.json +259 -259
- teradataml/data/jsons/sqle/17.20/TD_DecisionForestPredict.json +139 -139
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +269 -186
- teradataml/data/jsons/sqle/17.20/TD_FillRowID.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_FunctionFit.json +46 -46
- teradataml/data/jsons/sqle/17.20/TD_FunctionTransform.json +72 -72
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +507 -431
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +168 -125
- teradataml/data/jsons/sqle/17.20/TD_GLMPerSegment.json +411 -411
- teradataml/data/jsons/sqle/17.20/TD_GLMPredictPerSegment.json +146 -146
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +93 -91
- teradataml/data/jsons/sqle/17.20/TD_GetRowsWithMissingValues.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_GetRowsWithoutMissingValues.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_Histogram.json +152 -152
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +231 -211
- teradataml/data/jsons/sqle/17.20/TD_KMeansPredict.json +86 -86
- teradataml/data/jsons/sqle/17.20/TD_KNN.json +262 -262
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesTextClassifierTrainer.json +137 -137
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +102 -101
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineTransform.json +71 -71
- teradataml/data/jsons/sqle/17.20/TD_NumApply.json +147 -147
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +315 -315
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVMPredict.json +123 -123
- teradataml/data/jsons/sqle/17.20/TD_OneHotEncodingFit.json +271 -271
- teradataml/data/jsons/sqle/17.20/TD_OneHotEncodingTransform.json +65 -65
- teradataml/data/jsons/sqle/17.20/TD_OrdinalEncodingFit.json +229 -229
- teradataml/data/jsons/sqle/17.20/TD_OrdinalEncodingTransform.json +75 -75
- teradataml/data/jsons/sqle/17.20/TD_OutlierFilterFit.json +217 -217
- teradataml/data/jsons/sqle/17.20/TD_OutlierFilterTransform.json +48 -48
- teradataml/data/jsons/sqle/17.20/TD_PolynomialFeaturesFit.json +114 -114
- teradataml/data/jsons/sqle/17.20/TD_PolynomialFeaturesTransform.json +72 -72
- teradataml/data/jsons/sqle/17.20/TD_QQNorm.json +111 -111
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +178 -177
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionFit.json +178 -178
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionMinComponents.json +73 -73
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionTransform.json +74 -74
- teradataml/data/jsons/sqle/17.20/TD_RegressionEvaluator.json +137 -137
- teradataml/data/jsons/sqle/17.20/TD_RoundColumns.json +93 -93
- teradataml/data/jsons/sqle/17.20/TD_RowNormalizeFit.json +127 -127
- teradataml/data/jsons/sqle/17.20/TD_RowNormalizeTransform.json +70 -70
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +389 -389
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +142 -124
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +309 -156
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +119 -70
- teradataml/data/jsons/sqle/17.20/TD_SentimentExtractor.json +193 -193
- teradataml/data/jsons/sqle/17.20/TD_Silhouette.json +142 -142
- teradataml/data/jsons/sqle/17.20/TD_SimpleImputeFit.json +147 -147
- teradataml/data/jsons/sqle/17.20/TD_SimpleImputeTransform.json +48 -48
- teradataml/data/jsons/sqle/17.20/TD_StrApply.json +240 -240
- teradataml/data/jsons/sqle/17.20/TD_TargetEncodingFit.json +248 -248
- teradataml/data/jsons/sqle/17.20/TD_TargetEncodingTransform.json +75 -75
- teradataml/data/jsons/sqle/17.20/TD_TextParser.json +192 -192
- teradataml/data/jsons/sqle/17.20/TD_TrainTestSplit.json +142 -142
- teradataml/data/jsons/sqle/17.20/TD_UnivariateStatistics.json +117 -117
- teradataml/data/jsons/sqle/17.20/TD_VectorDistance.json +182 -182
- teradataml/data/jsons/sqle/17.20/TD_WhichMax.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_WhichMin.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_WordEmbeddings.json +241 -241
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +330 -312
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +195 -182
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +247 -170
- teradataml/data/jsons/sqle/17.20/Unpack.json +188 -188
- teradataml/data/jsons/sqle/17.20/nPath.json +269 -269
- teradataml/data/jsons/tableoperator/17.00/read_nos.json +197 -197
- teradataml/data/jsons/tableoperator/17.05/read_nos.json +197 -197
- teradataml/data/jsons/tableoperator/17.05/write_nos.json +194 -194
- teradataml/data/jsons/tableoperator/17.10/read_nos.json +183 -183
- teradataml/data/jsons/tableoperator/17.10/write_nos.json +194 -194
- teradataml/data/jsons/tableoperator/17.20/read_nos.json +182 -182
- teradataml/data/jsons/tableoperator/17.20/write_nos.json +223 -223
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +149 -149
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +409 -409
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +79 -79
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +151 -151
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +109 -109
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +107 -107
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +87 -87
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +106 -106
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +80 -80
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +91 -91
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +136 -136
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +148 -148
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +108 -108
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +109 -109
- teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +86 -86
- teradataml/data/jsons/uaf/17.20/TD_DIFF.json +91 -91
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +116 -116
- teradataml/data/jsons/uaf/17.20/TD_DURBIN_WATSON.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_EXTRACT_RESULTS.json +38 -38
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_GENSERIES4FORMULA.json +84 -84
- teradataml/data/jsons/uaf/17.20/TD_GENSERIES4SINUSOIDS.json +70 -70
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +152 -152
- teradataml/data/jsons/uaf/17.20/TD_HOLT_WINTERS_FORECAST.json +313 -313
- teradataml/data/jsons/uaf/17.20/TD_IDFFT.json +57 -57
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +94 -94
- teradataml/data/jsons/uaf/17.20/TD_INPUTVALIDATOR.json +63 -63
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +181 -181
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +102 -102
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +182 -182
- teradataml/data/jsons/uaf/17.20/TD_MATRIXMULTIPLY.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_MINFO.json +66 -66
- teradataml/data/jsons/uaf/17.20/TD_MULTIVAR_REGR.json +178 -178
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +114 -114
- teradataml/data/jsons/uaf/17.20/TD_PORTMAN.json +118 -118
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +175 -175
- teradataml/data/jsons/uaf/17.20/TD_POWERTRANSFORM.json +97 -97
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +173 -173
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +136 -136
- teradataml/data/jsons/uaf/17.20/TD_SELECTION_CRITERIA.json +89 -89
- teradataml/data/jsons/uaf/17.20/TD_SIGNIF_PERIODICITIES.json +79 -79
- teradataml/data/jsons/uaf/17.20/TD_SIGNIF_RESIDMEAN.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +184 -184
- teradataml/data/jsons/uaf/17.20/TD_SINFO.json +57 -57
- teradataml/data/jsons/uaf/17.20/TD_SMOOTHMA.json +162 -162
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +111 -111
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +95 -95
- teradataml/data/jsons/uaf/17.20/TD_WHITES_GENERAL.json +77 -77
- teradataml/data/kmeans_example.json +22 -17
- teradataml/data/kmeans_table.csv +10 -0
- teradataml/data/kmeans_us_arrests_data.csv +0 -0
- teradataml/data/knn_example.json +18 -18
- teradataml/data/knnrecommender_example.json +6 -6
- teradataml/data/knnrecommenderpredict_example.json +12 -12
- teradataml/data/lar_example.json +17 -17
- teradataml/data/larpredict_example.json +30 -30
- teradataml/data/lc_new_predictors.csv +5 -5
- teradataml/data/lc_new_reference.csv +9 -9
- teradataml/data/lda_example.json +8 -8
- teradataml/data/ldainference_example.json +14 -14
- teradataml/data/ldatopicsummary_example.json +8 -8
- teradataml/data/levendist_input.csv +13 -13
- teradataml/data/levenshteindistance_example.json +10 -10
- teradataml/data/linreg_example.json +9 -9
- teradataml/data/load_example_data.py +326 -323
- teradataml/data/loan_prediction.csv +295 -295
- teradataml/data/lungcancer.csv +138 -138
- teradataml/data/mappingdata.csv +12 -12
- teradataml/data/milk_timeseries.csv +157 -157
- teradataml/data/min_max_titanic.csv +4 -4
- teradataml/data/minhash_example.json +6 -6
- teradataml/data/ml_ratings.csv +7547 -7547
- teradataml/data/ml_ratings_10.csv +2445 -2445
- teradataml/data/model1_table.csv +5 -5
- teradataml/data/model2_table.csv +5 -5
- teradataml/data/models/iris_db_glm_model.pmml +56 -56
- teradataml/data/models/iris_db_xgb_model.pmml +4471 -4471
- teradataml/data/modularity_example.json +12 -12
- teradataml/data/movavg_example.json +7 -7
- teradataml/data/mtx1.csv +7 -7
- teradataml/data/mtx2.csv +13 -13
- teradataml/data/multi_model_classification.csv +401 -0
- teradataml/data/multi_model_regression.csv +401 -0
- teradataml/data/mvdfft8.csv +9 -9
- teradataml/data/naivebayes_example.json +9 -9
- teradataml/data/naivebayespredict_example.json +19 -19
- teradataml/data/naivebayestextclassifier2_example.json +6 -6
- teradataml/data/naivebayestextclassifier_example.json +8 -8
- teradataml/data/naivebayestextclassifierpredict_example.json +20 -20
- teradataml/data/name_Find_configure.csv +10 -10
- teradataml/data/namedentityfinder_example.json +14 -14
- teradataml/data/namedentityfinderevaluator_example.json +10 -10
- teradataml/data/namedentityfindertrainer_example.json +6 -6
- teradataml/data/nb_iris_input_test.csv +31 -31
- teradataml/data/nb_iris_input_train.csv +121 -121
- teradataml/data/nbp_iris_model.csv +13 -13
- teradataml/data/ner_extractor_text.csv +2 -2
- teradataml/data/ner_sports_test2.csv +29 -29
- teradataml/data/ner_sports_train.csv +501 -501
- teradataml/data/nerevaluator_example.json +5 -5
- teradataml/data/nerextractor_example.json +18 -18
- teradataml/data/nermem_sports_test.csv +17 -17
- teradataml/data/nermem_sports_train.csv +50 -50
- teradataml/data/nertrainer_example.json +6 -6
- teradataml/data/ngrams_example.json +6 -6
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Aggregate Functions using SQLAlchemy.ipynb +1455 -1455
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Arithmetic Functions Using SQLAlchemy.ipynb +1993 -1993
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Bit-Byte Manipulation Functions using SQLAlchemy.ipynb +1492 -1492
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Built-in functions using SQLAlchemy.ipynb +536 -536
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Regular Expressions Using SQLAlchemy.ipynb +570 -570
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage String Functions Using SQLAlchemy.ipynb +2559 -2559
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Window Aggregate Functions using SQLAlchemy.ipynb +2911 -2911
- teradataml/data/notebooks/sqlalchemy/Using Generic SQLAlchemy ClauseElements teradataml DataFrame assign method.ipynb +698 -698
- teradataml/data/notebooks/sqlalchemy/teradataml filtering using SQLAlchemy ClauseElements.ipynb +784 -784
- teradataml/data/npath_example.json +23 -23
- teradataml/data/ntree_example.json +14 -14
- teradataml/data/numeric_strings.csv +4 -4
- teradataml/data/numerics.csv +4 -4
- teradataml/data/ocean_buoy.csv +17 -17
- teradataml/data/ocean_buoy2.csv +17 -17
- teradataml/data/ocean_buoys.csv +27 -27
- teradataml/data/ocean_buoys2.csv +10 -10
- teradataml/data/ocean_buoys_nonpti.csv +28 -28
- teradataml/data/ocean_buoys_seq.csv +29 -29
- teradataml/data/onehot_encoder_train.csv +4 -0
- teradataml/data/openml_example.json +92 -0
- teradataml/data/optional_event_table.csv +4 -4
- teradataml/data/orders1.csv +11 -11
- teradataml/data/orders1_12.csv +12 -12
- teradataml/data/orders_ex.csv +4 -4
- teradataml/data/pack_example.json +8 -8
- teradataml/data/package_tracking.csv +19 -19
- teradataml/data/package_tracking_pti.csv +18 -18
- teradataml/data/pagerank_example.json +13 -13
- teradataml/data/paragraphs_input.csv +6 -6
- teradataml/data/pathanalyzer_example.json +7 -7
- teradataml/data/pathgenerator_example.json +7 -7
- teradataml/data/phrases.csv +7 -7
- teradataml/data/pivot_example.json +8 -8
- teradataml/data/pivot_input.csv +22 -22
- teradataml/data/playerRating.csv +31 -31
- teradataml/data/postagger_example.json +6 -6
- teradataml/data/posttagger_output.csv +44 -44
- teradataml/data/production_data.csv +16 -16
- teradataml/data/production_data2.csv +7 -7
- teradataml/data/randomsample_example.json +31 -31
- teradataml/data/randomwalksample_example.json +8 -8
- teradataml/data/rank_table.csv +6 -6
- teradataml/data/ref_mobile_data.csv +4 -4
- teradataml/data/ref_mobile_data_dense.csv +2 -2
- teradataml/data/ref_url.csv +17 -17
- teradataml/data/restaurant_reviews.csv +7 -7
- teradataml/data/river_data.csv +145 -145
- teradataml/data/roc_example.json +7 -7
- teradataml/data/roc_input.csv +101 -101
- teradataml/data/rule_inputs.csv +6 -6
- teradataml/data/rule_table.csv +2 -2
- teradataml/data/sales.csv +7 -7
- teradataml/data/sales_transaction.csv +501 -501
- teradataml/data/salesdata.csv +342 -342
- teradataml/data/sample_cities.csv +2 -2
- teradataml/data/sample_shapes.csv +10 -10
- teradataml/data/sample_streets.csv +2 -2
- teradataml/data/sampling_example.json +15 -15
- teradataml/data/sax_example.json +8 -8
- teradataml/data/scale_attributes.csv +3 -0
- teradataml/data/scale_example.json +74 -23
- teradataml/data/scale_housing.csv +11 -11
- teradataml/data/scale_housing_test.csv +6 -6
- teradataml/data/scale_input_part_sparse.csv +31 -0
- teradataml/data/scale_input_partitioned.csv +16 -0
- teradataml/data/scale_input_sparse.csv +11 -0
- teradataml/data/scale_parameters.csv +3 -0
- teradataml/data/scale_stat.csv +11 -11
- teradataml/data/scalebypartition_example.json +13 -13
- teradataml/data/scalemap_example.json +13 -13
- teradataml/data/scalesummary_example.json +12 -12
- teradataml/data/score_category.csv +101 -101
- teradataml/data/score_summary.csv +4 -4
- teradataml/data/script_example.json +9 -9
- teradataml/data/scripts/deploy_script.py +84 -0
- teradataml/data/scripts/mapper.R +20 -0
- teradataml/data/scripts/mapper.py +15 -15
- teradataml/data/scripts/mapper_replace.py +15 -15
- teradataml/data/scripts/sklearn/__init__.py +0 -0
- teradataml/data/scripts/sklearn/sklearn_fit.py +171 -0
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +127 -0
- teradataml/data/scripts/sklearn/sklearn_function.template +108 -0
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +148 -0
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +143 -0
- teradataml/data/scripts/sklearn/sklearn_score.py +119 -0
- teradataml/data/scripts/sklearn/sklearn_transform.py +171 -0
- teradataml/data/seeds.csv +10 -10
- teradataml/data/sentenceextractor_example.json +6 -6
- teradataml/data/sentiment_extract_input.csv +11 -11
- teradataml/data/sentiment_train.csv +16 -16
- teradataml/data/sentiment_word.csv +20 -20
- teradataml/data/sentiment_word_input.csv +19 -19
- teradataml/data/sentimentextractor_example.json +24 -24
- teradataml/data/sentimenttrainer_example.json +8 -8
- teradataml/data/sequence_table.csv +10 -10
- teradataml/data/seriessplitter_example.json +7 -7
- teradataml/data/sessionize_example.json +17 -17
- teradataml/data/sessionize_table.csv +116 -116
- teradataml/data/setop_test1.csv +24 -24
- teradataml/data/setop_test2.csv +22 -22
- teradataml/data/soc_nw_edges.csv +10 -10
- teradataml/data/soc_nw_vertices.csv +7 -7
- teradataml/data/souvenir_timeseries.csv +167 -167
- teradataml/data/sparse_iris_attribute.csv +5 -5
- teradataml/data/sparse_iris_test.csv +121 -121
- teradataml/data/sparse_iris_train.csv +601 -601
- teradataml/data/star1.csv +6 -6
- teradataml/data/state_transition.csv +5 -5
- teradataml/data/stock_data.csv +53 -53
- teradataml/data/stock_movement.csv +11 -11
- teradataml/data/stock_vol.csv +76 -76
- teradataml/data/stop_words.csv +8 -8
- teradataml/data/store_sales.csv +37 -37
- teradataml/data/stringsimilarity_example.json +7 -7
- teradataml/data/strsimilarity_input.csv +13 -13
- teradataml/data/students.csv +101 -101
- teradataml/data/svm_iris_input_test.csv +121 -121
- teradataml/data/svm_iris_input_train.csv +481 -481
- teradataml/data/svm_iris_model.csv +7 -7
- teradataml/data/svmdense_example.json +9 -9
- teradataml/data/svmdensepredict_example.json +18 -18
- teradataml/data/svmsparse_example.json +7 -7
- teradataml/data/svmsparsepredict_example.json +13 -13
- teradataml/data/svmsparsesummary_example.json +7 -7
- teradataml/data/target_mobile_data.csv +13 -13
- teradataml/data/target_mobile_data_dense.csv +5 -5
- teradataml/data/templatedata.csv +1201 -1201
- teradataml/data/templates/open_source_ml.json +9 -0
- teradataml/data/teradataml_example.json +150 -1
- teradataml/data/test_classification.csv +101 -0
- teradataml/data/test_loan_prediction.csv +53 -53
- teradataml/data/test_pacf_12.csv +37 -37
- teradataml/data/test_prediction.csv +101 -0
- teradataml/data/test_regression.csv +101 -0
- teradataml/data/test_river2.csv +109 -109
- teradataml/data/text_inputs.csv +6 -6
- teradataml/data/textchunker_example.json +7 -7
- teradataml/data/textclassifier_example.json +6 -6
- teradataml/data/textclassifier_input.csv +7 -7
- teradataml/data/textclassifiertrainer_example.json +6 -6
- teradataml/data/textmorph_example.json +5 -5
- teradataml/data/textparser_example.json +15 -15
- teradataml/data/texttagger_example.json +11 -11
- teradataml/data/texttokenizer_example.json +6 -6
- teradataml/data/texttrainer_input.csv +11 -11
- teradataml/data/tf_example.json +6 -6
- teradataml/data/tfidf_example.json +13 -13
- teradataml/data/tfidf_input1.csv +201 -201
- teradataml/data/tfidf_train.csv +6 -6
- teradataml/data/time_table1.csv +535 -535
- teradataml/data/time_table2.csv +14 -14
- teradataml/data/timeseriesdata.csv +1601 -1601
- teradataml/data/timeseriesdatasetsd4.csv +105 -105
- teradataml/data/titanic.csv +892 -892
- teradataml/data/token_table.csv +696 -696
- teradataml/data/train_multiclass.csv +101 -0
- teradataml/data/train_regression.csv +101 -0
- teradataml/data/train_regression_multiple_labels.csv +101 -0
- teradataml/data/train_tracking.csv +27 -27
- teradataml/data/transformation_table.csv +5 -5
- teradataml/data/transformation_table_new.csv +1 -1
- teradataml/data/tv_spots.csv +16 -16
- teradataml/data/twod_climate_data.csv +117 -117
- teradataml/data/uaf_example.json +475 -475
- teradataml/data/univariatestatistics_example.json +8 -8
- teradataml/data/unpack_example.json +9 -9
- teradataml/data/unpivot_example.json +9 -9
- teradataml/data/unpivot_input.csv +8 -8
- teradataml/data/us_air_pass.csv +36 -36
- teradataml/data/us_population.csv +624 -624
- teradataml/data/us_states_shapes.csv +52 -52
- teradataml/data/varmax_example.json +17 -17
- teradataml/data/vectordistance_example.json +25 -25
- teradataml/data/ville_climatedata.csv +121 -121
- teradataml/data/ville_tempdata.csv +12 -12
- teradataml/data/ville_tempdata1.csv +12 -12
- teradataml/data/ville_temperature.csv +11 -11
- teradataml/data/waveletTable.csv +1605 -1605
- teradataml/data/waveletTable2.csv +1605 -1605
- teradataml/data/weightedmovavg_example.json +8 -8
- teradataml/data/wft_testing.csv +5 -5
- teradataml/data/wine_data.csv +1600 -0
- teradataml/data/word_embed_input_table1.csv +5 -5
- teradataml/data/word_embed_input_table2.csv +4 -4
- teradataml/data/word_embed_model.csv +22 -22
- teradataml/data/words_input.csv +13 -13
- teradataml/data/xconvolve_complex_left.csv +6 -6
- teradataml/data/xconvolve_complex_leftmulti.csv +6 -6
- teradataml/data/xgboost_example.json +35 -35
- teradataml/data/xgboostpredict_example.json +31 -31
- teradataml/data/ztest_example.json +16 -0
- teradataml/dataframe/copy_to.py +1769 -1698
- teradataml/dataframe/data_transfer.py +2812 -2745
- teradataml/dataframe/dataframe.py +17630 -16946
- teradataml/dataframe/dataframe_utils.py +1875 -1740
- teradataml/dataframe/fastload.py +794 -603
- teradataml/dataframe/indexer.py +424 -424
- teradataml/dataframe/setop.py +1179 -1166
- teradataml/dataframe/sql.py +10174 -6432
- teradataml/dataframe/sql_function_parameters.py +439 -388
- teradataml/dataframe/sql_functions.py +652 -652
- teradataml/dataframe/sql_interfaces.py +220 -220
- teradataml/dataframe/vantage_function_types.py +674 -630
- teradataml/dataframe/window.py +693 -692
- teradataml/dbutils/__init__.py +3 -3
- teradataml/dbutils/dbutils.py +1167 -1150
- teradataml/dbutils/filemgr.py +267 -267
- teradataml/gen_ai/__init__.py +2 -2
- teradataml/gen_ai/convAI.py +472 -472
- teradataml/geospatial/__init__.py +3 -3
- teradataml/geospatial/geodataframe.py +1105 -1094
- teradataml/geospatial/geodataframecolumn.py +392 -387
- teradataml/geospatial/geometry_types.py +925 -925
- teradataml/hyperparameter_tuner/__init__.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +3783 -2993
- teradataml/hyperparameter_tuner/utils.py +281 -187
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/lib/libaed_0_1.dylib +0 -0
- teradataml/lib/libaed_0_1.so +0 -0
- teradataml/libaed_0_1.dylib +0 -0
- teradataml/libaed_0_1.so +0 -0
- teradataml/opensource/__init__.py +1 -0
- teradataml/opensource/sklearn/__init__.py +1 -0
- teradataml/opensource/sklearn/_class.py +255 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +1715 -0
- teradataml/opensource/sklearn/_wrapper_utils.py +268 -0
- teradataml/opensource/sklearn/constants.py +54 -0
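The teradataml/opensource/sklearn/ package listed above is likewise new, backed by the sklearn_*.py driver scripts added under data/scripts/sklearn/. As a rough, assumption-laden sketch (the td_sklearn entry point and the scikit-learn-style fit/predict calls are inferred from the module layout, not documented in this diff), the wrapper appears intended to run scikit-learn estimators against teradataml DataFrame objects in Vantage:

```python
# Assumed usage of the new scikit-learn wrapper; the td_sklearn accessor and
# the exact fit/predict signatures are inferred, not taken from this diff.
# Column and table names are illustrative only.
from teradataml import create_context, DataFrame, td_sklearn as osml

create_context(host="<host>", username="<user>", password="<pass>")

df = DataFrame("iris_input")
X = df.select(["sepal_length", "sepal_width", "petal_length", "petal_width"])
y = df.select(["species"])

clf = osml.DecisionTreeClassifier(max_depth=3)  # scikit-learn-style constructor
clf.fit(X, y)                                   # assumed: training pushed to Vantage
print(clf.predict(X))                           # assumed: predictions come back as a DataFrame
```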
- teradataml/options/__init__.py +130 -124
- teradataml/options/configure.py +358 -336
- teradataml/options/display.py +176 -176
- teradataml/plot/__init__.py +2 -2
- teradataml/plot/axis.py +1388 -1388
- teradataml/plot/constants.py +15 -15
- teradataml/plot/figure.py +398 -398
- teradataml/plot/plot.py +760 -760
- teradataml/plot/query_generator.py +83 -83
- teradataml/plot/subplot.py +216 -216
- teradataml/scriptmgmt/UserEnv.py +3791 -3761
- teradataml/scriptmgmt/__init__.py +3 -3
- teradataml/scriptmgmt/lls_utils.py +1719 -1604
- teradataml/series/series.py +532 -532
- teradataml/series/series_utils.py +71 -71
- teradataml/table_operators/Apply.py +949 -917
- teradataml/table_operators/Script.py +1718 -1982
- teradataml/table_operators/TableOperator.py +1255 -1616
- teradataml/table_operators/__init__.py +2 -3
- teradataml/table_operators/apply_query_generator.py +262 -262
- teradataml/table_operators/query_generator.py +507 -507
- teradataml/table_operators/table_operator_query_generator.py +460 -460
- teradataml/table_operators/table_operator_util.py +631 -639
- teradataml/table_operators/templates/dataframe_apply.template +184 -184
- teradataml/table_operators/templates/dataframe_map.template +176 -176
- teradataml/table_operators/templates/script_executor.template +170 -170
- teradataml/utils/dtypes.py +684 -684
- teradataml/utils/internal_buffer.py +84 -84
- teradataml/utils/print_versions.py +205 -205
- teradataml/utils/utils.py +410 -410
- teradataml/utils/validators.py +2277 -2115
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/METADATA +346 -45
- teradataml-20.0.0.1.dist-info/RECORD +1056 -0
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/WHEEL +1 -1
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/zip-safe +1 -1
- teradataml/analytics/mle/AdaBoost.py +0 -651
- teradataml/analytics/mle/AdaBoostPredict.py +0 -564
- teradataml/analytics/mle/Antiselect.py +0 -342
- teradataml/analytics/mle/Arima.py +0 -641
- teradataml/analytics/mle/ArimaPredict.py +0 -477
- teradataml/analytics/mle/Attribution.py +0 -1070
- teradataml/analytics/mle/Betweenness.py +0 -658
- teradataml/analytics/mle/Burst.py +0 -711
- teradataml/analytics/mle/CCM.py +0 -600
- teradataml/analytics/mle/CCMPrepare.py +0 -324
- teradataml/analytics/mle/CFilter.py +0 -460
- teradataml/analytics/mle/ChangePointDetection.py +0 -572
- teradataml/analytics/mle/ChangePointDetectionRT.py +0 -477
- teradataml/analytics/mle/Closeness.py +0 -737
- teradataml/analytics/mle/ConfusionMatrix.py +0 -420
- teradataml/analytics/mle/Correlation.py +0 -477
- teradataml/analytics/mle/Correlation2.py +0 -573
- teradataml/analytics/mle/CoxHazardRatio.py +0 -679
- teradataml/analytics/mle/CoxPH.py +0 -556
- teradataml/analytics/mle/CoxSurvival.py +0 -478
- teradataml/analytics/mle/CumulativeMovAvg.py +0 -363
- teradataml/analytics/mle/DTW.py +0 -623
- teradataml/analytics/mle/DWT.py +0 -564
- teradataml/analytics/mle/DWT2D.py +0 -599
- teradataml/analytics/mle/DecisionForest.py +0 -716
- teradataml/analytics/mle/DecisionForestEvaluator.py +0 -363
- teradataml/analytics/mle/DecisionForestPredict.py +0 -561
- teradataml/analytics/mle/DecisionTree.py +0 -830
- teradataml/analytics/mle/DecisionTreePredict.py +0 -528
- teradataml/analytics/mle/ExponentialMovAvg.py +0 -418
- teradataml/analytics/mle/FMeasure.py +0 -402
- teradataml/analytics/mle/FPGrowth.py +0 -734
- teradataml/analytics/mle/FrequentPaths.py +0 -695
- teradataml/analytics/mle/GLM.py +0 -558
- teradataml/analytics/mle/GLML1L2.py +0 -547
- teradataml/analytics/mle/GLML1L2Predict.py +0 -519
- teradataml/analytics/mle/GLMPredict.py +0 -529
- teradataml/analytics/mle/HMMDecoder.py +0 -945
- teradataml/analytics/mle/HMMEvaluator.py +0 -901
- teradataml/analytics/mle/HMMSupervised.py +0 -521
- teradataml/analytics/mle/HMMUnsupervised.py +0 -572
- teradataml/analytics/mle/Histogram.py +0 -561
- teradataml/analytics/mle/IDWT.py +0 -476
- teradataml/analytics/mle/IDWT2D.py +0 -493
- teradataml/analytics/mle/IdentityMatch.py +0 -763
- teradataml/analytics/mle/Interpolator.py +0 -918
- teradataml/analytics/mle/KMeans.py +0 -485
- teradataml/analytics/mle/KNN.py +0 -627
- teradataml/analytics/mle/KNNRecommender.py +0 -488
- teradataml/analytics/mle/KNNRecommenderPredict.py +0 -581
- teradataml/analytics/mle/LAR.py +0 -439
- teradataml/analytics/mle/LARPredict.py +0 -478
- teradataml/analytics/mle/LDA.py +0 -548
- teradataml/analytics/mle/LDAInference.py +0 -492
- teradataml/analytics/mle/LDATopicSummary.py +0 -464
- teradataml/analytics/mle/LevenshteinDistance.py +0 -450
- teradataml/analytics/mle/LinReg.py +0 -433
- teradataml/analytics/mle/LinRegPredict.py +0 -438
- teradataml/analytics/mle/MinHash.py +0 -544
- teradataml/analytics/mle/Modularity.py +0 -587
- teradataml/analytics/mle/NEREvaluator.py +0 -410
- teradataml/analytics/mle/NERExtractor.py +0 -595
- teradataml/analytics/mle/NERTrainer.py +0 -458
- teradataml/analytics/mle/NGrams.py +0 -570
- teradataml/analytics/mle/NPath.py +0 -634
- teradataml/analytics/mle/NTree.py +0 -549
- teradataml/analytics/mle/NaiveBayes.py +0 -462
- teradataml/analytics/mle/NaiveBayesPredict.py +0 -513
- teradataml/analytics/mle/NaiveBayesTextClassifier.py +0 -607
- teradataml/analytics/mle/NaiveBayesTextClassifier2.py +0 -531
- teradataml/analytics/mle/NaiveBayesTextClassifierPredict.py +0 -799
- teradataml/analytics/mle/NamedEntityFinder.py +0 -529
- teradataml/analytics/mle/NamedEntityFinderEvaluator.py +0 -414
- teradataml/analytics/mle/NamedEntityFinderTrainer.py +0 -396
- teradataml/analytics/mle/POSTagger.py +0 -417
- teradataml/analytics/mle/Pack.py +0 -411
- teradataml/analytics/mle/PageRank.py +0 -535
- teradataml/analytics/mle/PathAnalyzer.py +0 -426
- teradataml/analytics/mle/PathGenerator.py +0 -367
- teradataml/analytics/mle/PathStart.py +0 -464
- teradataml/analytics/mle/PathSummarizer.py +0 -470
- teradataml/analytics/mle/Pivot.py +0 -471
- teradataml/analytics/mle/ROC.py +0 -425
- teradataml/analytics/mle/RandomSample.py +0 -637
- teradataml/analytics/mle/RandomWalkSample.py +0 -490
- teradataml/analytics/mle/SAX.py +0 -779
- teradataml/analytics/mle/SVMDense.py +0 -677
- teradataml/analytics/mle/SVMDensePredict.py +0 -536
- teradataml/analytics/mle/SVMDenseSummary.py +0 -437
- teradataml/analytics/mle/SVMSparse.py +0 -557
- teradataml/analytics/mle/SVMSparsePredict.py +0 -553
- teradataml/analytics/mle/SVMSparseSummary.py +0 -435
- teradataml/analytics/mle/Sampling.py +0 -549
- teradataml/analytics/mle/Scale.py +0 -565
- teradataml/analytics/mle/ScaleByPartition.py +0 -496
- teradataml/analytics/mle/ScaleMap.py +0 -378
- teradataml/analytics/mle/ScaleSummary.py +0 -320
- teradataml/analytics/mle/SentenceExtractor.py +0 -363
- teradataml/analytics/mle/SentimentEvaluator.py +0 -432
- teradataml/analytics/mle/SentimentExtractor.py +0 -578
- teradataml/analytics/mle/SentimentTrainer.py +0 -405
- teradataml/analytics/mle/SeriesSplitter.py +0 -641
- teradataml/analytics/mle/Sessionize.py +0 -475
- teradataml/analytics/mle/SimpleMovAvg.py +0 -397
- teradataml/analytics/mle/StringSimilarity.py +0 -425
- teradataml/analytics/mle/TF.py +0 -389
- teradataml/analytics/mle/TFIDF.py +0 -504
- teradataml/analytics/mle/TextChunker.py +0 -414
- teradataml/analytics/mle/TextClassifier.py +0 -399
- teradataml/analytics/mle/TextClassifierEvaluator.py +0 -413
- teradataml/analytics/mle/TextClassifierTrainer.py +0 -565
- teradataml/analytics/mle/TextMorph.py +0 -494
- teradataml/analytics/mle/TextParser.py +0 -623
- teradataml/analytics/mle/TextTagger.py +0 -530
- teradataml/analytics/mle/TextTokenizer.py +0 -502
- teradataml/analytics/mle/UnivariateStatistics.py +0 -488
- teradataml/analytics/mle/Unpack.py +0 -526
- teradataml/analytics/mle/Unpivot.py +0 -438
- teradataml/analytics/mle/VarMax.py +0 -776
- teradataml/analytics/mle/VectorDistance.py +0 -762
- teradataml/analytics/mle/WeightedMovAvg.py +0 -400
- teradataml/analytics/mle/XGBoost.py +0 -842
- teradataml/analytics/mle/XGBoostPredict.py +0 -627
- teradataml/analytics/mle/__init__.py +0 -123
- teradataml/analytics/mle/json/adaboost_mle.json +0 -135
- teradataml/analytics/mle/json/adaboostpredict_mle.json +0 -85
- teradataml/analytics/mle/json/antiselect_mle.json +0 -34
- teradataml/analytics/mle/json/antiselect_mle_mle.json +0 -34
- teradataml/analytics/mle/json/arima_mle.json +0 -172
- teradataml/analytics/mle/json/arimapredict_mle.json +0 -52
- teradataml/analytics/mle/json/attribution_mle_mle.json +0 -143
- teradataml/analytics/mle/json/betweenness_mle.json +0 -97
- teradataml/analytics/mle/json/burst_mle.json +0 -140
- teradataml/analytics/mle/json/ccm_mle.json +0 -124
- teradataml/analytics/mle/json/ccmprepare_mle.json +0 -14
- teradataml/analytics/mle/json/cfilter_mle.json +0 -93
- teradataml/analytics/mle/json/changepointdetection_mle.json +0 -92
- teradataml/analytics/mle/json/changepointdetectionrt_mle.json +0 -78
- teradataml/analytics/mle/json/closeness_mle.json +0 -104
- teradataml/analytics/mle/json/confusionmatrix_mle.json +0 -79
- teradataml/analytics/mle/json/correlation_mle.json +0 -86
- teradataml/analytics/mle/json/correlationreduce_mle.json +0 -49
- teradataml/analytics/mle/json/coxhazardratio_mle.json +0 -89
- teradataml/analytics/mle/json/coxph_mle.json +0 -98
- teradataml/analytics/mle/json/coxsurvival_mle.json +0 -79
- teradataml/analytics/mle/json/cumulativemovavg_mle.json +0 -34
- teradataml/analytics/mle/json/decisionforest_mle.json +0 -167
- teradataml/analytics/mle/json/decisionforestevaluator_mle.json +0 -33
- teradataml/analytics/mle/json/decisionforestpredict_mle_mle.json +0 -74
- teradataml/analytics/mle/json/decisiontree_mle.json +0 -194
- teradataml/analytics/mle/json/decisiontreepredict_mle_mle.json +0 -86
- teradataml/analytics/mle/json/dtw_mle.json +0 -97
- teradataml/analytics/mle/json/dwt2d_mle.json +0 -116
- teradataml/analytics/mle/json/dwt_mle.json +0 -101
- teradataml/analytics/mle/json/exponentialmovavg_mle.json +0 -55
- teradataml/analytics/mle/json/fmeasure_mle.json +0 -58
- teradataml/analytics/mle/json/fpgrowth_mle.json +0 -159
- teradataml/analytics/mle/json/frequentpaths_mle.json +0 -129
- teradataml/analytics/mle/json/glm_mle.json +0 -111
- teradataml/analytics/mle/json/glml1l2_mle.json +0 -106
- teradataml/analytics/mle/json/glml1l2predict_mle.json +0 -57
- teradataml/analytics/mle/json/glmpredict_mle_mle.json +0 -74
- teradataml/analytics/mle/json/histogram_mle.json +0 -100
- teradataml/analytics/mle/json/hmmdecoder_mle.json +0 -192
- teradataml/analytics/mle/json/hmmevaluator_mle.json +0 -206
- teradataml/analytics/mle/json/hmmsupervised_mle.json +0 -91
- teradataml/analytics/mle/json/hmmunsupervised_mle.json +0 -114
- teradataml/analytics/mle/json/identitymatch_mle.json +0 -88
- teradataml/analytics/mle/json/idwt2d_mle.json +0 -73
- teradataml/analytics/mle/json/idwt_mle.json +0 -66
- teradataml/analytics/mle/json/interpolator_mle.json +0 -151
- teradataml/analytics/mle/json/kmeans_mle.json +0 -97
- teradataml/analytics/mle/json/knn_mle.json +0 -141
- teradataml/analytics/mle/json/knnrecommender_mle.json +0 -111
- teradataml/analytics/mle/json/knnrecommenderpredict_mle.json +0 -75
- teradataml/analytics/mle/json/lar_mle.json +0 -78
- teradataml/analytics/mle/json/larpredict_mle.json +0 -69
- teradataml/analytics/mle/json/lda_mle.json +0 -130
- teradataml/analytics/mle/json/ldainference_mle.json +0 -78
- teradataml/analytics/mle/json/ldatopicsummary_mle.json +0 -64
- teradataml/analytics/mle/json/levenshteindistance_mle.json +0 -92
- teradataml/analytics/mle/json/linreg_mle.json +0 -42
- teradataml/analytics/mle/json/linregpredict_mle.json +0 -56
- teradataml/analytics/mle/json/minhash_mle.json +0 -113
- teradataml/analytics/mle/json/modularity_mle.json +0 -91
- teradataml/analytics/mle/json/naivebayespredict_mle_mle.json +0 -85
- teradataml/analytics/mle/json/naivebayesreduce_mle.json +0 -52
- teradataml/analytics/mle/json/naivebayestextclassifierpredict_mle_mle.json +0 -147
- teradataml/analytics/mle/json/naivebayestextclassifiertrainer2_mle.json +0 -108
- teradataml/analytics/mle/json/naivebayestextclassifiertrainer_mle.json +0 -102
- teradataml/analytics/mle/json/namedentityfinder_mle.json +0 -84
- teradataml/analytics/mle/json/namedentityfinderevaluatorreduce_mle.json +0 -43
- teradataml/analytics/mle/json/namedentityfindertrainer_mle.json +0 -64
- teradataml/analytics/mle/json/nerevaluator_mle.json +0 -54
- teradataml/analytics/mle/json/nerextractor_mle.json +0 -87
- teradataml/analytics/mle/json/nertrainer_mle.json +0 -89
- teradataml/analytics/mle/json/ngrams_mle.json +0 -137
- teradataml/analytics/mle/json/ngramsplitter_mle_mle.json +0 -137
- teradataml/analytics/mle/json/npath@coprocessor_mle.json +0 -73
- teradataml/analytics/mle/json/ntree@coprocessor_mle.json +0 -123
- teradataml/analytics/mle/json/pack_mle.json +0 -58
- teradataml/analytics/mle/json/pack_mle_mle.json +0 -58
- teradataml/analytics/mle/json/pagerank_mle.json +0 -81
- teradataml/analytics/mle/json/pathanalyzer_mle.json +0 -63
- teradataml/analytics/mle/json/pathgenerator_mle.json +0 -40
- teradataml/analytics/mle/json/pathstart_mle.json +0 -62
- teradataml/analytics/mle/json/pathsummarizer_mle.json +0 -72
- teradataml/analytics/mle/json/pivoting_mle.json +0 -71
- teradataml/analytics/mle/json/postagger_mle.json +0 -51
- teradataml/analytics/mle/json/randomsample_mle.json +0 -131
- teradataml/analytics/mle/json/randomwalksample_mle.json +0 -85
- teradataml/analytics/mle/json/roc_mle.json +0 -73
- teradataml/analytics/mle/json/sampling_mle.json +0 -75
- teradataml/analytics/mle/json/sax_mle.json +0 -154
- teradataml/analytics/mle/json/scale_mle.json +0 -93
- teradataml/analytics/mle/json/scalebypartition_mle.json +0 -89
- teradataml/analytics/mle/json/scalemap_mle.json +0 -44
- teradataml/analytics/mle/json/scalesummary_mle.json +0 -14
- teradataml/analytics/mle/json/sentenceextractor_mle.json +0 -41
- teradataml/analytics/mle/json/sentimentevaluator_mle.json +0 -43
- teradataml/analytics/mle/json/sentimentextractor_mle.json +0 -100
- teradataml/analytics/mle/json/sentimenttrainer_mle.json +0 -68
- teradataml/analytics/mle/json/seriessplitter_mle.json +0 -133
- teradataml/analytics/mle/json/sessionize_mle_mle.json +0 -62
- teradataml/analytics/mle/json/simplemovavg_mle.json +0 -48
- teradataml/analytics/mle/json/stringsimilarity_mle.json +0 -50
- teradataml/analytics/mle/json/stringsimilarity_mle_mle.json +0 -50
- teradataml/analytics/mle/json/svmdense_mle.json +0 -165
- teradataml/analytics/mle/json/svmdensepredict_mle.json +0 -95
- teradataml/analytics/mle/json/svmdensesummary_mle.json +0 -58
- teradataml/analytics/mle/json/svmsparse_mle.json +0 -148
- teradataml/analytics/mle/json/svmsparsepredict_mle_mle.json +0 -103
- teradataml/analytics/mle/json/svmsparsesummary_mle.json +0 -57
- teradataml/analytics/mle/json/textchunker_mle.json +0 -40
- teradataml/analytics/mle/json/textclassifier_mle.json +0 -51
- teradataml/analytics/mle/json/textclassifierevaluator_mle.json +0 -43
- teradataml/analytics/mle/json/textclassifiertrainer_mle.json +0 -103
- teradataml/analytics/mle/json/textmorph_mle.json +0 -63
- teradataml/analytics/mle/json/textparser_mle.json +0 -166
- teradataml/analytics/mle/json/texttagger_mle.json +0 -81
- teradataml/analytics/mle/json/texttokenizer_mle.json +0 -91
- teradataml/analytics/mle/json/tf_mle.json +0 -33
- teradataml/analytics/mle/json/tfidf_mle.json +0 -34
- teradataml/analytics/mle/json/univariatestatistics_mle.json +0 -81
- teradataml/analytics/mle/json/unpack_mle.json +0 -91
- teradataml/analytics/mle/json/unpack_mle_mle.json +0 -91
- teradataml/analytics/mle/json/unpivoting_mle.json +0 -63
- teradataml/analytics/mle/json/varmax_mle.json +0 -176
- teradataml/analytics/mle/json/vectordistance_mle.json +0 -179
- teradataml/analytics/mle/json/weightedmovavg_mle.json +0 -48
- teradataml/analytics/mle/json/xgboost_mle.json +0 -178
- teradataml/analytics/mle/json/xgboostpredict_mle.json +0 -104
- teradataml/analytics/sqle/Antiselect.py +0 -321
- teradataml/analytics/sqle/Attribution.py +0 -603
- teradataml/analytics/sqle/DecisionForestPredict.py +0 -408
- teradataml/analytics/sqle/GLMPredict.py +0 -430
- teradataml/analytics/sqle/MovingAverage.py +0 -543
- teradataml/analytics/sqle/NGramSplitter.py +0 -548
- teradataml/analytics/sqle/NPath.py +0 -632
- teradataml/analytics/sqle/NaiveBayesTextClassifierPredict.py +0 -515
- teradataml/analytics/sqle/Pack.py +0 -388
- teradataml/analytics/sqle/SVMSparsePredict.py +0 -464
- teradataml/analytics/sqle/Sessionize.py +0 -390
- teradataml/analytics/sqle/StringSimilarity.py +0 -400
- teradataml/analytics/sqle/Unpack.py +0 -503
- teradataml/analytics/sqle/json/antiselect_sqle.json +0 -21
- teradataml/analytics/sqle/json/attribution_sqle.json +0 -92
- teradataml/analytics/sqle/json/decisionforestpredict_sqle.json +0 -48
- teradataml/analytics/sqle/json/glmpredict_sqle.json +0 -48
- teradataml/analytics/sqle/json/h2opredict_sqle.json +0 -63
- teradataml/analytics/sqle/json/movingaverage_sqle.json +0 -58
- teradataml/analytics/sqle/json/naivebayestextclassifierpredict_sqle.json +0 -76
- teradataml/analytics/sqle/json/ngramsplitter_sqle.json +0 -126
- teradataml/analytics/sqle/json/npath_sqle.json +0 -67
- teradataml/analytics/sqle/json/pack_sqle.json +0 -47
- teradataml/analytics/sqle/json/pmmlpredict_sqle.json +0 -55
- teradataml/analytics/sqle/json/sessionize_sqle.json +0 -43
- teradataml/analytics/sqle/json/stringsimilarity_sqle.json +0 -39
- teradataml/analytics/sqle/json/svmsparsepredict_sqle.json +0 -74
- teradataml/analytics/sqle/json/unpack_sqle.json +0 -80
- teradataml/catalog/model_cataloging.py +0 -980
- teradataml/config/mlengine_alias_definitions_v1.0 +0 -118
- teradataml/config/mlengine_alias_definitions_v1.1 +0 -127
- teradataml/config/mlengine_alias_definitions_v1.3 +0 -129
- teradataml/table_operators/sandbox_container_util.py +0 -643
- teradataml-17.20.0.7.dist-info/RECORD +0 -1280
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/top_level.txt +0 -0
@@ -1,415 +1,415 @@

def GLMPerSegment(formula=None, data=None, input_columns=None, response_column=None,
                  attribute_data=None, parameter_data=None, family="GAUSSIAN",
                  iter_max=300, batch_size=10, lambda1=0.02, alpha=0.15,
                  iter_num_no_change=50, tolerance=0.001, intercept=True,
                  class_weights="0:1.0, 1:1.0", learning_rate=None, initial_eta=0.05,
                  decay_rate=0.25, decay_steps=5, momentum=0.0, nesterov=True,
                  iteration_mode="BATCH", partition_column=None, **generic_arguments):
    """
    DESCRIPTION:
        The GLM() function is used to train the whole data set as one model. The
        GLMPerSegment() function is a partition-by-key function to create a single
        model for each partition.
        The following operations are supported for GLMPerSegment():
            * Gaussian linear regression.
            * Binomial logistic regression for binary classification.
            * Iteration modes batch and epoch.
            * Regularization using L1, L2 and Elasticnet.
            * Mini-batch gradient descent for the numeric optimization algorithm.
            * Training support with and without intercept.
            * Class-weighted modeling.
            * Learning rate support for mini-batch gradient descent using constant,
              dynamic and hybrid gradients.
            * Learning rate optimization algorithms for mini-batch gradient
              descent using plain, momentum, and nesterov gradients.

        Notes:
            * The order column can optionally be applied to guarantee that the result of each
              run is deterministic. A non-deterministic result in a partition can occur
              if the "batch_size" argument is less than the number of rows in
              the partition. However, adding order columns can influence performance.
            * A model generated by GLMPerSegment() and a model from GLM()
              should be the same when the "batch_size" argument is not less than the
              number of rows in the corresponding partition of the model.
            * GLMPerSegment() takes all features as numeric input. Categorical columns
              must be converted to numeric columns as a preprocessing step, such as using
              OneHotEncodingFit()/OneHotEncodingTransform(), OrdinalEncodingFit()/
              OrdinalEncodingTransform(), and TargetEncodingFit()/TargetEncodingTransform().
            * Any observation with a missing value in an input column is ignored and
              not used for training. You can use an imputation function, such as
              SimpleImputeFit()/SimpleImputeTransform(), to impute missing values.
            * Best practice is to standardize the dataset before using GLMPerSegment().
              Standardization, also known as feature scaling, produces a better model and
              converges more quickly.
            * Model evaluation metrics of MSE, Loglikelihood, AIC, and BIC are generated by
              GLMPerSegment(). For additional regression and classification metrics, you
              should use the RegressionEvaluator(), ClassificationEvaluator() and ROC()
              functions as a post-processing step.
            * GLMPerSegment() supports binary classification only.
            * "response_column" for classification accepts values of 0 and 1 for the two
              classes in the response column.
            * A maximum of 2046 features are supported due to the limitation imposed
              by the maximum number of columns (2048) in the input data.
            * "batch_size" and "learning_rate" are directly related. With a larger "batch_size",
              "learning_rate" can be increased for faster training with fewer iterations.
            * "iter_max" and "iter_num_no_change" are criteria used to stop learning. To force
              the function to run through all iterations, disable "iter_num_no_change"
              by setting it to 0.
            * Users need to try different combinations to find the best values for a particular
              use case.
            * When an unsupported data type is passed in "input_columns" or "response_column",
              the error message is of the following format:
                  Unsupported data type for column index n in argument InputColumns.
              In the message, n refers to the index of the column based on the input to the
              function comprising "input_columns" and "response_column" only. This is because
              the rest of the columns in the input are not needed by the function and the
              internal optimizer does not expose them to it. Due to this, n might be different
              from the actual index in the input data.

    PARAMETERS:
        formula:
            Required Argument when "input_columns" and "response_column" are not
            provided, optional otherwise.
            Specifies a string consisting of "formula", which is the model to be fitted.
            Only basic formulas of the "col1 ~ col2 + col3 +..." form are
            supported and all variables must be from the same teradataml
            DataFrame object.
            Notes:
                * The function only accepts numeric features. Users must convert
                  categorical features to numeric values before passing them to the formula.
                * In case categorical features are passed to the formula, those are ignored,
                  and only numeric features are considered.
                * Provide either the "formula" argument or the "input_columns" and
                  "response_column" arguments.
            Types: str

        data:
            Required Argument.
            Specifies the teradataml DataFrame containing the input data.
            Types: teradataml DataFrame

        attribute_data:
            Optional Argument.
            Specifies the teradataml DataFrame containing a subset of features
            to be used with respect to each partition.
            Types: teradataml DataFrame

        parameter_data:
            Optional Argument.
            Specifies the teradataml DataFrame containing a subset of parameters
            to be used with respect to each partition.
            Types: teradataml DataFrame

        input_columns:
            Required argument when "response_column" is provided and "formula" is not
            provided, optional otherwise.
            Specifies the name(s) of the teradataml DataFrame column(s) that need
            to be used for training the model (predictors, features, or
            independent variables).
            Types: str OR list of Strings (str)

        response_column:
            Required argument when "input_columns" is provided and "formula" is not
            provided, optional otherwise.
            Specifies the name of the column that contains the class label for binary
            classification when "family" is 'Binomial', or the target value (dependent
            variable) for regression when "family" is 'Gaussian'.
            Types: str

        family:
            Optional Argument.
            Specifies the distribution exponential family.
            Permitted Values:
                * BINOMIAL
                * GAUSSIAN
            Default Value: GAUSSIAN
            Types: str

        iter_max:
            Optional Argument.
            Specifies the maximum number of iterations over the training data
            batches. If the batch size is 0, then "iter_max" equals the number of epochs.
            Note:
                * The "iter_max" must be in the range [1, 10000000].
            Default Value: 300
            Types: int

        batch_size:
            Optional Argument.
            Specifies the number of observations (training samples) to be parsed in
            one mini-batch. Must be a non-negative integer value. A value of 0
            indicates no mini-batches, so the entire input is processed in each
            iteration, and the algorithm becomes (Batch) Gradient Descent. A
            value higher than the number of rows in any partition also defaults
            to Batch Gradient Descent.
            Note:
                * The "batch_size" must be in the range [0, 2147483647].
            Default Value: 10
            Types: int

        lambda1:
            Optional Argument.
            Specifies the amount of regularization to be added. The higher the
            value, the stronger the regularization. It is also used to compute
            the learning rate when "learning_rate" is set to 'Optimal'.
            A value of '0' means no regularization.
            Notes:
                * The "lambda1" must be in the range [0, 1e7].
                * The "lambda1" must be a non-negative float value.
            Default Value: 0.02
            Types: float OR int

        alpha:
            Optional Argument.
            Specifies the Elasticnet parameter for penalty computation. It is
            only effective if "lambda1" is greater than 0. The value represents
            the contribution ratio of L1 in the penalty. A value of 1.0 indicates
            L1 (LASSO) only, a value of 0 indicates L2 (Ridge) only, and a value
            in between is a combination of L1 and L2.
            Note:
                * The "alpha" must be in the range [0, 1].
            Default Value: 0.15
            Types: float OR int

        iter_num_no_change:
            Optional Argument.
            Specifies the number of iterations with no improvement in loss, including
            the "tolerance", after which to stop training. A value of 0 indicates no early
            stopping and the algorithm continues until "iter_max" iterations are reached.
            Note:
                * The "iter_num_no_change" must be in the range [0, 2147483647].
            Default Value: 50
            Types: int

        tolerance:
            Optional Argument.
            Specifies the stopping criterion in terms of loss function improvement.
            Training stops when the loss is greater than best_loss - tolerance for
            "iter_num_no_change" times.
            Notes:
                * The "tolerance" must be in the range [1e-7, 1e7].
                * The "tolerance" works only when "iter_num_no_change"
                  is greater than 0.
                * The "tolerance" must be a non-negative value.
            Default Value: 0.001
            Types: float OR int

        intercept:
            Optional Argument.
            Specifies whether the intercept should be estimated, based on whether the
            data is already centered or not.
            Default Value: True
            Types: bool

        class_weights:
            Optional Argument.
            Specifies the weights associated with classes. The format is
            '0:weight,1:weight'. For example, '0:1.0,1:0.5' gives twice
            as much weight to each observation in class 0. If the weight of
            a class is omitted, it is assumed to be 1.0.
            Note:
                * The "class_weights" argument works only when "family" is
                  'Binomial'.
            Default Value: 0:1.0, 1:1.0
            Types: str

        learning_rate:
            Optional Argument.
            Specifies the learning rate algorithm.
            Permitted Values:
                * CONSTANT
                * OPTIMAL
                * INVTIME
                * ADAPTIVE
            Default Value:
                * 'INVTIME' when "family" is set to 'Gaussian'
                * 'OPTIMAL' when "family" is set to 'Binomial'
            Types: str

        initial_eta:
            Optional Argument.
            Specifies the initial value of eta for the learning rate. For a constant
            learning rate, this value is the learning rate for all iterations.
            Note:
                * The "initial_eta" must be in the range [1e-7, 1e7].
            Default Value: 0.05
            Types: float OR int

        decay_rate:
            Optional Argument.
            Specifies the decay rate for the learning rate (invtime and adaptive).
            Note:
                * The "decay_rate" must be in the range [1e-7, 1e7].
            Default Value: 0.25
            Types: float OR int

        decay_steps:
            Optional Argument.
            Specifies the decay steps (number of iterations) for the adaptive learning rate.
            The learning rate changes by the decay rate after this many iterations.
            Note:
                * The "decay_steps" must be in the range [1, 2147483647].
            Default Value: 5
            Types: int

        momentum:
            Optional Argument.
            Specifies the value to use for the momentum learning rate optimizer.
            A larger value indicates a higher momentum contribution. A value of 0
            means the momentum optimizer is disabled. For a good momentum contribution,
            a value between 0.6 and 0.95 is recommended.
            Note:
                * The "momentum" must be in the range [0, 1].
            Default Value: 0.0
            Types: float OR int

        nesterov:
            Optional Argument.
            Specifies whether Nesterov optimization should be applied to the
            momentum optimizer or not. The default is True when "momentum" is
            greater than 0, otherwise False.
            Default Value: True
            Types: bool

        iteration_mode:
            Optional Argument.
            Specifies the iteration mode.
            Permitted Values:
                * Batch: One iteration per batch. After processing the rows in a
                  batch, update the weights of the parameters and proceed to the
                  next iteration.
                * Epoch: One iteration per epoch. After processing all rows
                  in a partition (with one or more batches), update the weights
                  of the parameters and proceed to the next epoch.
            Default Value: Batch
            Types: str OR list of Strings (str)

        partition_column:
            Optional Argument.
            Specifies the name of the input column on which to partition the input.
            The name should be consistent with the "data_partition_column". If the
            "data_partition_column" is unicode with foreign-language characters, then
            it is necessary to specify the "partition_column" argument.
            Types: str

        **generic_arguments:
            Specifies the generic keyword arguments that SQLE functions accept. Below
            are the generic keyword arguments:
            persist:
                Optional Argument.
                Specifies whether to persist the results of the
                function in a table or not. When set to True,
                results are persisted in a table; otherwise,
                results are garbage collected at the end of the
                session.
                Default Value: False
                Types: bool

            volatile:
                Optional Argument.
                Specifies whether to put the results of the
                function in a volatile table or not. When set to
                True, results are stored in a volatile table,
                otherwise not.
                Default Value: False
                Types: bool

            The function allows the user to partition, hash, order or locally
            order the input data. These generic arguments are available
            for each argument that accepts a teradataml DataFrame as
            input and can be accessed as:
                * "<input_data_arg_name>_partition_column" accepts str or
                  list of str (Strings)
                * "<input_data_arg_name>_hash_column" accepts str or list
                  of str (Strings)
                * "<input_data_arg_name>_order_column" accepts str or list
                  of str (Strings)
                * "local_order_<input_data_arg_name>" accepts boolean
            Note:
                These generic arguments are supported by teradataml if
                the underlying SQLE Engine function supports them, else an
                exception is raised.

    RETURNS:
        Instance of GLMPerSegment.
        Output teradataml DataFrames can be accessed using attribute
        references, such as GLMPerSegmentObj.<attribute_name>.
        The output teradataml DataFrame attribute name is:
            result


    RAISES:
        TeradataMlException, TypeError, ValueError


    EXAMPLES:
        # Notes:
        # 1. Get the connection to Vantage to execute the function.
        # 2. One must import the required functions mentioned in
        #    the example from teradataml.
        # 3. The function will raise an error if it is not supported on the
        #    Vantage system the user is connected to.

        # Load the example data.
        load_example_data("decisionforestpredict", ["housing_train"])
        load_example_data("teradataml", ["housing_train_attribute", "housing_train_parameter"])

        # Create teradataml DataFrame objects.
        housing_train = DataFrame.from_table("housing_train")
        housing_train_attribute = DataFrame.from_table("housing_train_attribute")
        housing_train_parameter = DataFrame.from_table("housing_train_parameter")

        # Check the list of available analytic functions.
        display_analytic_functions()

        # Filter the rows from the train dataset with homestyle as Classic and Eclectic.
        binomial_housing_train = DataFrame('housing_train', index_label="homestyle")
        binomial_housing_train = binomial_housing_train.filter(like='ic', axis='rows')

        # GLMPerSegment() requires features in numeric format for processing,
        # so drop the non-numeric columns.
        binomial_housing_train = binomial_housing_train.drop(columns=["driveway", "recroom",
                                                                      "gashw", "airco", "prefarea",
                                                                      "fullbase"])
        gaussian_housing_train = binomial_housing_train.drop(columns="homestyle")

        # Transform the categorical values in the train dataset to encoded values.
        housing_train_ordinal_encodingfit = OrdinalEncodingFit(
                                                target_column='homestyle',
                                                data=binomial_housing_train)

        housing_train_ordinal_encodingtransform = OrdinalEncodingTransform(
                                                       data=binomial_housing_train,
                                                       object=housing_train_ordinal_encodingfit.result,
                                                       accumulate=["sn", "price", "lotsize",
                                                                   "bedrooms", "bathrms", "stories"])

        # Example 1: Train the model using the 'Gaussian' family.
        GLMPerSegment_out_1 = GLMPerSegment(data=gaussian_housing_train,
                                            data_partition_column="stories",
                                            input_columns=['garagepl', 'lotsize', 'bedrooms', 'bathrms'],
                                            response_column="price",
                                            family="Gaussian",
                                            iter_max=1000,
                                            batch_size=9)

        # Print the result DataFrame.
        print(GLMPerSegment_out_1.result)

        # Example 2: Train the model using the 'Binomial' family, the formula argument, and
        #            a subset of features and parameters to be used with respect to
        #            "partition_id".
        formula = "homestyle ~ price + lotsize + bedrooms + bathrms"
        GLMPerSegment_out_2 = GLMPerSegment(data=housing_train_ordinal_encodingtransform.result,
                                            data_partition_column="stories",
                                            formula=formula,
                                            attribute_data=housing_train_attribute,
                                            attribute_data_partition_column="partition_id",
                                            parameter_data=housing_train_parameter,
                                            parameter_data_partition_column="partition_id",
                                            family="Binomial",
                                            iter_max=100)

        # Print the result DataFrame.
        print(GLMPerSegment_out_2.result)
    """
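
The docstring above already contains the package's own examples; the snippet below is only a condensed, end-to-end sketch of the per-partition training flow it documents. It assumes a reachable Vantage system on which GLMPerSegment() is available, that the connection helpers create_context()/remove_context() behave as in other teradataml releases (they are not part of this diff), and that the host and credential values are placeholders.

# Minimal sketch of partition-by-key training with GLMPerSegment(), derived
# from the EXAMPLES section above. Connection details are hypothetical.
from teradataml import create_context, remove_context, DataFrame, load_example_data

create_context(host="<vantage-host>", username="<user>", password="<password>")

# Assumption: analytic functions such as GLMPerSegment are exposed by
# teradataml once a context exists, as implied by the docstring's notes.
from teradataml import GLMPerSegment

# Load and reference the example table used in the docstring.
load_example_data("decisionforestpredict", ["housing_train"])
housing_train = DataFrame.from_table("housing_train")

# GLMPerSegment() accepts numeric features only, so drop the categorical columns.
numeric_train = housing_train.drop(columns=["driveway", "recroom", "gashw",
                                            "airco", "prefarea", "fullbase",
                                            "homestyle"])

# Train one Gaussian model per value of "stories" (one model per partition).
fit = GLMPerSegment(data=numeric_train,
                    data_partition_column="stories",
                    input_columns=["garagepl", "lotsize", "bedrooms", "bathrms"],
                    response_column="price",
                    family="Gaussian",
                    iter_max=1000,
                    batch_size=9)
print(fit.result)

remove_context()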