teradataml 17.20.0.7-py3-none-any.whl → 20.0.0.1-py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of teradataml has been flagged as possibly problematic.
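For reference, a minimal sketch (not part of the diff; assumes a standard pip-managed environment) for confirming which of the two builds is installed locally before and after upgrading:

```python
# Minimal sketch: check the installed teradataml build.
# Upgrade would typically be done with: pip install --upgrade teradataml==20.0.0.1
from importlib.metadata import version

# Expect "17.20.0.7" before the upgrade and "20.0.0.1" after.
print(version("teradataml"))
```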
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +1935 -1640
- teradataml/__init__.py +70 -60
- teradataml/_version.py +11 -11
- teradataml/analytics/Transformations.py +2995 -2995
- teradataml/analytics/__init__.py +81 -83
- teradataml/analytics/analytic_function_executor.py +2040 -2010
- teradataml/analytics/analytic_query_generator.py +958 -958
- teradataml/analytics/byom/H2OPredict.py +514 -514
- teradataml/analytics/byom/PMMLPredict.py +437 -437
- teradataml/analytics/byom/__init__.py +14 -14
- teradataml/analytics/json_parser/__init__.py +130 -130
- teradataml/analytics/json_parser/analytic_functions_argument.py +1707 -1707
- teradataml/analytics/json_parser/json_store.py +191 -191
- teradataml/analytics/json_parser/metadata.py +1637 -1637
- teradataml/analytics/json_parser/utils.py +798 -803
- teradataml/analytics/meta_class.py +196 -196
- teradataml/analytics/sqle/DecisionTreePredict.py +455 -470
- teradataml/analytics/sqle/NaiveBayesPredict.py +419 -428
- teradataml/analytics/sqle/__init__.py +97 -110
- teradataml/analytics/sqle/json/decisiontreepredict_sqle.json +78 -78
- teradataml/analytics/sqle/json/naivebayespredict_sqle.json +62 -62
- teradataml/analytics/table_operator/__init__.py +10 -10
- teradataml/analytics/uaf/__init__.py +63 -63
- teradataml/analytics/utils.py +693 -692
- teradataml/analytics/valib.py +1603 -1600
- teradataml/automl/__init__.py +1683 -0
- teradataml/automl/custom_json_utils.py +1270 -0
- teradataml/automl/data_preparation.py +1011 -0
- teradataml/automl/data_transformation.py +789 -0
- teradataml/automl/feature_engineering.py +1580 -0
- teradataml/automl/feature_exploration.py +554 -0
- teradataml/automl/model_evaluation.py +151 -0
- teradataml/automl/model_training.py +1026 -0
- teradataml/catalog/__init__.py +1 -3
- teradataml/catalog/byom.py +1759 -1716
- teradataml/catalog/function_argument_mapper.py +859 -861
- teradataml/catalog/model_cataloging_utils.py +491 -1510
- teradataml/clients/auth_client.py +133 -0
- teradataml/clients/pkce_client.py +481 -481
- teradataml/common/aed_utils.py +7 -2
- teradataml/common/bulk_exposed_utils.py +111 -111
- teradataml/common/constants.py +1438 -1441
- teradataml/common/deprecations.py +160 -0
- teradataml/common/exceptions.py +73 -73
- teradataml/common/formula.py +742 -742
- teradataml/common/garbagecollector.py +597 -635
- teradataml/common/messagecodes.py +424 -431
- teradataml/common/messages.py +228 -231
- teradataml/common/sqlbundle.py +693 -693
- teradataml/common/td_coltype_code_to_tdtype.py +48 -48
- teradataml/common/utils.py +2424 -2500
- teradataml/common/warnings.py +25 -25
- teradataml/common/wrapper_utils.py +1 -110
- teradataml/config/dummy_file1.cfg +4 -4
- teradataml/config/dummy_file2.cfg +2 -2
- teradataml/config/sqlengine_alias_definitions_v1.0 +13 -13
- teradataml/config/sqlengine_alias_definitions_v1.1 +19 -19
- teradataml/config/sqlengine_alias_definitions_v1.3 +18 -18
- teradataml/context/aed_context.py +217 -217
- teradataml/context/context.py +1091 -999
- teradataml/data/A_loan.csv +19 -19
- teradataml/data/BINARY_REALS_LEFT.csv +11 -11
- teradataml/data/BINARY_REALS_RIGHT.csv +11 -11
- teradataml/data/B_loan.csv +49 -49
- teradataml/data/BuoyData2.csv +17 -17
- teradataml/data/CONVOLVE2_COMPLEX_LEFT.csv +5 -5
- teradataml/data/CONVOLVE2_COMPLEX_RIGHT.csv +5 -5
- teradataml/data/Convolve2RealsLeft.csv +5 -5
- teradataml/data/Convolve2RealsRight.csv +5 -5
- teradataml/data/Convolve2ValidLeft.csv +11 -11
- teradataml/data/Convolve2ValidRight.csv +11 -11
- teradataml/data/DFFTConv_Real_8_8.csv +65 -65
- teradataml/data/Orders1_12mf.csv +24 -24
- teradataml/data/Pi_loan.csv +7 -7
- teradataml/data/SMOOTHED_DATA.csv +7 -7
- teradataml/data/TestDFFT8.csv +9 -9
- teradataml/data/TestRiver.csv +109 -109
- teradataml/data/Traindata.csv +28 -28
- teradataml/data/acf.csv +17 -17
- teradataml/data/adaboost_example.json +34 -34
- teradataml/data/adaboostpredict_example.json +24 -24
- teradataml/data/additional_table.csv +10 -10
- teradataml/data/admissions_test.csv +21 -21
- teradataml/data/admissions_train.csv +41 -41
- teradataml/data/admissions_train_nulls.csv +41 -41
- teradataml/data/advertising.csv +201 -0
- teradataml/data/ageandheight.csv +13 -13
- teradataml/data/ageandpressure.csv +31 -31
- teradataml/data/antiselect_example.json +36 -36
- teradataml/data/antiselect_input.csv +8 -8
- teradataml/data/antiselect_input_mixed_case.csv +8 -8
- teradataml/data/applicant_external.csv +6 -6
- teradataml/data/applicant_reference.csv +6 -6
- teradataml/data/arima_example.json +9 -9
- teradataml/data/assortedtext_input.csv +8 -8
- teradataml/data/attribution_example.json +33 -33
- teradataml/data/attribution_sample_table.csv +27 -27
- teradataml/data/attribution_sample_table1.csv +6 -6
- teradataml/data/attribution_sample_table2.csv +11 -11
- teradataml/data/bank_churn.csv +10001 -0
- teradataml/data/bank_marketing.csv +11163 -0
- teradataml/data/bank_web_clicks1.csv +42 -42
- teradataml/data/bank_web_clicks2.csv +91 -91
- teradataml/data/bank_web_url.csv +85 -85
- teradataml/data/barrier.csv +2 -2
- teradataml/data/barrier_new.csv +3 -3
- teradataml/data/betweenness_example.json +13 -13
- teradataml/data/bike_sharing.csv +732 -0
- teradataml/data/bin_breaks.csv +8 -8
- teradataml/data/bin_fit_ip.csv +3 -3
- teradataml/data/binary_complex_left.csv +11 -11
- teradataml/data/binary_complex_right.csv +11 -11
- teradataml/data/binary_matrix_complex_left.csv +21 -21
- teradataml/data/binary_matrix_complex_right.csv +21 -21
- teradataml/data/binary_matrix_real_left.csv +21 -21
- teradataml/data/binary_matrix_real_right.csv +21 -21
- teradataml/data/blood2ageandweight.csv +26 -26
- teradataml/data/bmi.csv +501 -0
- teradataml/data/boston.csv +507 -507
- teradataml/data/boston2cols.csv +721 -0
- teradataml/data/breast_cancer.csv +570 -0
- teradataml/data/buoydata_mix.csv +11 -11
- teradataml/data/burst_data.csv +5 -5
- teradataml/data/burst_example.json +20 -20
- teradataml/data/byom_example.json +17 -17
- teradataml/data/bytes_table.csv +3 -3
- teradataml/data/cal_housing_ex_raw.csv +70 -70
- teradataml/data/callers.csv +7 -7
- teradataml/data/calls.csv +10 -10
- teradataml/data/cars_hist.csv +33 -33
- teradataml/data/cat_table.csv +24 -24
- teradataml/data/ccm_example.json +31 -31
- teradataml/data/ccm_input.csv +91 -91
- teradataml/data/ccm_input2.csv +13 -13
- teradataml/data/ccmexample.csv +101 -101
- teradataml/data/ccmprepare_example.json +8 -8
- teradataml/data/ccmprepare_input.csv +91 -91
- teradataml/data/cfilter_example.json +12 -12
- teradataml/data/changepointdetection_example.json +18 -18
- teradataml/data/changepointdetectionrt_example.json +8 -8
- teradataml/data/chi_sq.csv +2 -2
- teradataml/data/churn_data.csv +14 -14
- teradataml/data/churn_emission.csv +35 -35
- teradataml/data/churn_initial.csv +3 -3
- teradataml/data/churn_state_transition.csv +5 -5
- teradataml/data/citedges_2.csv +745 -745
- teradataml/data/citvertices_2.csv +1210 -1210
- teradataml/data/clicks2.csv +16 -16
- teradataml/data/clickstream.csv +12 -12
- teradataml/data/clickstream1.csv +11 -11
- teradataml/data/closeness_example.json +15 -15
- teradataml/data/complaints.csv +21 -21
- teradataml/data/complaints_mini.csv +3 -3
- teradataml/data/complaints_testtoken.csv +224 -224
- teradataml/data/complaints_tokens_test.csv +353 -353
- teradataml/data/complaints_traintoken.csv +472 -472
- teradataml/data/computers_category.csv +1001 -1001
- teradataml/data/computers_test1.csv +1252 -1252
- teradataml/data/computers_train1.csv +5009 -5009
- teradataml/data/computers_train1_clustered.csv +5009 -5009
- teradataml/data/confusionmatrix_example.json +9 -9
- teradataml/data/conversion_event_table.csv +3 -3
- teradataml/data/corr_input.csv +17 -17
- teradataml/data/correlation_example.json +11 -11
- teradataml/data/coxhazardratio_example.json +39 -39
- teradataml/data/coxph_example.json +15 -15
- teradataml/data/coxsurvival_example.json +28 -28
- teradataml/data/cpt.csv +41 -41
- teradataml/data/credit_ex_merged.csv +45 -45
- teradataml/data/customer_loyalty.csv +301 -301
- teradataml/data/customer_loyalty_newseq.csv +31 -31
- teradataml/data/customer_segmentation_test.csv +2628 -0
- teradataml/data/customer_segmentation_train.csv +8069 -0
- teradataml/data/dataframe_example.json +146 -146
- teradataml/data/decisionforest_example.json +37 -37
- teradataml/data/decisionforestpredict_example.json +38 -38
- teradataml/data/decisiontree_example.json +21 -21
- teradataml/data/decisiontreepredict_example.json +45 -45
- teradataml/data/dfft2_size4_real.csv +17 -17
- teradataml/data/dfft2_test_matrix16.csv +17 -17
- teradataml/data/dfft2conv_real_4_4.csv +65 -65
- teradataml/data/diabetes.csv +443 -443
- teradataml/data/diabetes_test.csv +89 -89
- teradataml/data/dict_table.csv +5 -5
- teradataml/data/docperterm_table.csv +4 -4
- teradataml/data/docs/__init__.py +1 -1
- teradataml/data/docs/byom/docs/DataRobotPredict.py +180 -180
- teradataml/data/docs/byom/docs/DataikuPredict.py +177 -177
- teradataml/data/docs/byom/docs/H2OPredict.py +324 -324
- teradataml/data/docs/byom/docs/ONNXPredict.py +283 -283
- teradataml/data/docs/byom/docs/PMMLPredict.py +277 -277
- teradataml/data/docs/sqle/docs_17_10/Antiselect.py +82 -82
- teradataml/data/docs/sqle/docs_17_10/Attribution.py +199 -199
- teradataml/data/docs/sqle/docs_17_10/BincodeFit.py +171 -171
- teradataml/data/docs/sqle/docs_17_10/BincodeTransform.py +131 -130
- teradataml/data/docs/sqle/docs_17_10/CategoricalSummary.py +86 -86
- teradataml/data/docs/sqle/docs_17_10/ChiSq.py +90 -90
- teradataml/data/docs/sqle/docs_17_10/ColumnSummary.py +85 -85
- teradataml/data/docs/sqle/docs_17_10/ConvertTo.py +95 -95
- teradataml/data/docs/sqle/docs_17_10/DecisionForestPredict.py +139 -139
- teradataml/data/docs/sqle/docs_17_10/DecisionTreePredict.py +151 -151
- teradataml/data/docs/sqle/docs_17_10/FTest.py +160 -160
- teradataml/data/docs/sqle/docs_17_10/FillRowId.py +82 -82
- teradataml/data/docs/sqle/docs_17_10/Fit.py +87 -87
- teradataml/data/docs/sqle/docs_17_10/GLMPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_10/GetRowsWithMissingValues.py +84 -84
- teradataml/data/docs/sqle/docs_17_10/GetRowsWithoutMissingValues.py +81 -81
- teradataml/data/docs/sqle/docs_17_10/Histogram.py +164 -164
- teradataml/data/docs/sqle/docs_17_10/MovingAverage.py +134 -134
- teradataml/data/docs/sqle/docs_17_10/NGramSplitter.py +208 -208
- teradataml/data/docs/sqle/docs_17_10/NPath.py +265 -265
- teradataml/data/docs/sqle/docs_17_10/NaiveBayesPredict.py +116 -116
- teradataml/data/docs/sqle/docs_17_10/NaiveBayesTextClassifierPredict.py +176 -176
- teradataml/data/docs/sqle/docs_17_10/NumApply.py +147 -147
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +134 -132
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +109 -103
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterFit.py +165 -165
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +105 -101
- teradataml/data/docs/sqle/docs_17_10/Pack.py +128 -128
- teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesFit.py +111 -111
- teradataml/data/docs/sqle/docs_17_10/PolynomialFeaturesTransform.py +102 -102
- teradataml/data/docs/sqle/docs_17_10/QQNorm.py +104 -104
- teradataml/data/docs/sqle/docs_17_10/RoundColumns.py +109 -109
- teradataml/data/docs/sqle/docs_17_10/RowNormalizeFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_10/RowNormalizeTransform.py +99 -98
- teradataml/data/docs/sqle/docs_17_10/SVMSparsePredict.py +152 -152
- teradataml/data/docs/sqle/docs_17_10/ScaleFit.py +197 -197
- teradataml/data/docs/sqle/docs_17_10/ScaleTransform.py +99 -98
- teradataml/data/docs/sqle/docs_17_10/Sessionize.py +113 -113
- teradataml/data/docs/sqle/docs_17_10/SimpleImputeFit.py +116 -116
- teradataml/data/docs/sqle/docs_17_10/SimpleImputeTransform.py +98 -98
- teradataml/data/docs/sqle/docs_17_10/StrApply.py +187 -187
- teradataml/data/docs/sqle/docs_17_10/StringSimilarity.py +145 -145
- teradataml/data/docs/sqle/docs_17_10/Transform.py +105 -104
- teradataml/data/docs/sqle/docs_17_10/UnivariateStatistics.py +141 -141
- teradataml/data/docs/sqle/docs_17_10/Unpack.py +214 -214
- teradataml/data/docs/sqle/docs_17_10/WhichMax.py +83 -83
- teradataml/data/docs/sqle/docs_17_10/WhichMin.py +83 -83
- teradataml/data/docs/sqle/docs_17_10/ZTest.py +155 -155
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +186 -126
- teradataml/data/docs/sqle/docs_17_20/Antiselect.py +82 -82
- teradataml/data/docs/sqle/docs_17_20/Attribution.py +200 -200
- teradataml/data/docs/sqle/docs_17_20/BincodeFit.py +171 -171
- teradataml/data/docs/sqle/docs_17_20/BincodeTransform.py +139 -138
- teradataml/data/docs/sqle/docs_17_20/CategoricalSummary.py +86 -86
- teradataml/data/docs/sqle/docs_17_20/ChiSq.py +90 -90
- teradataml/data/docs/sqle/docs_17_20/ClassificationEvaluator.py +166 -166
- teradataml/data/docs/sqle/docs_17_20/ColumnSummary.py +85 -85
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +245 -243
- teradataml/data/docs/sqle/docs_17_20/ConvertTo.py +113 -113
- teradataml/data/docs/sqle/docs_17_20/DecisionForest.py +279 -279
- teradataml/data/docs/sqle/docs_17_20/DecisionForestPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_20/DecisionTreePredict.py +135 -135
- teradataml/data/docs/sqle/docs_17_20/FTest.py +239 -160
- teradataml/data/docs/sqle/docs_17_20/FillRowId.py +82 -82
- teradataml/data/docs/sqle/docs_17_20/Fit.py +87 -87
- teradataml/data/docs/sqle/docs_17_20/GLM.py +541 -380
- teradataml/data/docs/sqle/docs_17_20/GLMPerSegment.py +414 -414
- teradataml/data/docs/sqle/docs_17_20/GLMPredict.py +144 -144
- teradataml/data/docs/sqle/docs_17_20/GLMPredictPerSegment.py +233 -234
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +125 -123
- teradataml/data/docs/sqle/docs_17_20/GetRowsWithMissingValues.py +108 -108
- teradataml/data/docs/sqle/docs_17_20/GetRowsWithoutMissingValues.py +105 -105
- teradataml/data/docs/sqle/docs_17_20/Histogram.py +223 -223
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +251 -204
- teradataml/data/docs/sqle/docs_17_20/KMeansPredict.py +144 -143
- teradataml/data/docs/sqle/docs_17_20/KNN.py +214 -214
- teradataml/data/docs/sqle/docs_17_20/MovingAverage.py +134 -134
- teradataml/data/docs/sqle/docs_17_20/NGramSplitter.py +208 -208
- teradataml/data/docs/sqle/docs_17_20/NPath.py +265 -265
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesPredict.py +116 -116
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierPredict.py +177 -176
- teradataml/data/docs/sqle/docs_17_20/NaiveBayesTextClassifierTrainer.py +126 -126
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +118 -117
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineTransform.py +112 -112
- teradataml/data/docs/sqle/docs_17_20/NumApply.py +147 -147
- teradataml/data/docs/sqle/docs_17_20/OneClassSVM.py +307 -307
- teradataml/data/docs/sqle/docs_17_20/OneClassSVMPredict.py +185 -184
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +230 -225
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +121 -115
- teradataml/data/docs/sqle/docs_17_20/OrdinalEncodingFit.py +219 -219
- teradataml/data/docs/sqle/docs_17_20/OrdinalEncodingTransform.py +127 -127
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +189 -189
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterTransform.py +117 -112
- teradataml/data/docs/sqle/docs_17_20/Pack.py +128 -128
- teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesFit.py +111 -111
- teradataml/data/docs/sqle/docs_17_20/PolynomialFeaturesTransform.py +112 -111
- teradataml/data/docs/sqle/docs_17_20/QQNorm.py +104 -104
- teradataml/data/docs/sqle/docs_17_20/ROC.py +164 -163
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionFit.py +154 -154
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionMinComponents.py +106 -106
- teradataml/data/docs/sqle/docs_17_20/RandomProjectionTransform.py +120 -120
- teradataml/data/docs/sqle/docs_17_20/RegressionEvaluator.py +211 -211
- teradataml/data/docs/sqle/docs_17_20/RoundColumns.py +108 -108
- teradataml/data/docs/sqle/docs_17_20/RowNormalizeFit.py +117 -117
- teradataml/data/docs/sqle/docs_17_20/RowNormalizeTransform.py +111 -110
- teradataml/data/docs/sqle/docs_17_20/SVM.py +413 -413
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +213 -202
- teradataml/data/docs/sqle/docs_17_20/SVMSparsePredict.py +152 -152
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +315 -197
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +202 -109
- teradataml/data/docs/sqle/docs_17_20/SentimentExtractor.py +206 -206
- teradataml/data/docs/sqle/docs_17_20/Sessionize.py +113 -113
- teradataml/data/docs/sqle/docs_17_20/Silhouette.py +152 -152
- teradataml/data/docs/sqle/docs_17_20/SimpleImputeFit.py +116 -116
- teradataml/data/docs/sqle/docs_17_20/SimpleImputeTransform.py +109 -108
- teradataml/data/docs/sqle/docs_17_20/StrApply.py +187 -187
- teradataml/data/docs/sqle/docs_17_20/StringSimilarity.py +145 -145
- teradataml/data/docs/sqle/docs_17_20/TDDecisionForestPredict.py +207 -207
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +333 -171
- teradataml/data/docs/sqle/docs_17_20/TargetEncodingFit.py +266 -266
- teradataml/data/docs/sqle/docs_17_20/TargetEncodingTransform.py +141 -140
- teradataml/data/docs/sqle/docs_17_20/TextParser.py +172 -172
- teradataml/data/docs/sqle/docs_17_20/TrainTestSplit.py +159 -159
- teradataml/data/docs/sqle/docs_17_20/Transform.py +123 -123
- teradataml/data/docs/sqle/docs_17_20/UnivariateStatistics.py +141 -141
- teradataml/data/docs/sqle/docs_17_20/Unpack.py +214 -214
- teradataml/data/docs/sqle/docs_17_20/VectorDistance.py +168 -168
- teradataml/data/docs/sqle/docs_17_20/WhichMax.py +83 -83
- teradataml/data/docs/sqle/docs_17_20/WhichMin.py +83 -83
- teradataml/data/docs/sqle/docs_17_20/WordEmbeddings.py +236 -236
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +361 -353
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +281 -275
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +220 -155
- teradataml/data/docs/tableoperator/docs_17_00/ReadNOS.py +429 -429
- teradataml/data/docs/tableoperator/docs_17_05/ReadNOS.py +429 -429
- teradataml/data/docs/tableoperator/docs_17_05/WriteNOS.py +347 -347
- teradataml/data/docs/tableoperator/docs_17_10/ReadNOS.py +428 -428
- teradataml/data/docs/tableoperator/docs_17_10/WriteNOS.py +347 -347
- teradataml/data/docs/tableoperator/docs_17_20/ReadNOS.py +439 -439
- teradataml/data/docs/tableoperator/docs_17_20/WriteNOS.py +386 -386
- teradataml/data/docs/uaf/docs_17_20/ACF.py +195 -195
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +369 -369
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +142 -142
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +159 -159
- teradataml/data/docs/uaf/docs_17_20/BinaryMatrixOp.py +247 -247
- teradataml/data/docs/uaf/docs_17_20/BinarySeriesOp.py +252 -252
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +177 -177
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +174 -174
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +226 -226
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +214 -214
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +183 -183
- teradataml/data/docs/uaf/docs_17_20/DFFT.py +203 -203
- teradataml/data/docs/uaf/docs_17_20/DFFT2.py +216 -216
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +215 -215
- teradataml/data/docs/uaf/docs_17_20/DFFTConv.py +191 -191
- teradataml/data/docs/uaf/docs_17_20/DTW.py +179 -179
- teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +144 -144
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +183 -183
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +184 -184
- teradataml/data/docs/uaf/docs_17_20/FitMetrics.py +172 -172
- teradataml/data/docs/uaf/docs_17_20/GenseriesFormula.py +205 -205
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +142 -142
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +258 -258
- teradataml/data/docs/uaf/docs_17_20/IDFFT.py +164 -164
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +198 -198
- teradataml/data/docs/uaf/docs_17_20/InputValidator.py +120 -120
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +155 -155
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +214 -214
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +173 -173
- teradataml/data/docs/uaf/docs_17_20/MInfo.py +133 -133
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +135 -135
- teradataml/data/docs/uaf/docs_17_20/MultivarRegr.py +190 -190
- teradataml/data/docs/uaf/docs_17_20/PACF.py +158 -158
- teradataml/data/docs/uaf/docs_17_20/Portman.py +216 -216
- teradataml/data/docs/uaf/docs_17_20/PowerTransform.py +154 -154
- teradataml/data/docs/uaf/docs_17_20/Resample.py +228 -228
- teradataml/data/docs/uaf/docs_17_20/SInfo.py +122 -122
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +165 -165
- teradataml/data/docs/uaf/docs_17_20/SelectionCriteria.py +173 -173
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +170 -170
- teradataml/data/docs/uaf/docs_17_20/SignifResidmean.py +163 -163
- teradataml/data/docs/uaf/docs_17_20/SimpleExp.py +179 -179
- teradataml/data/docs/uaf/docs_17_20/Smoothma.py +207 -207
- teradataml/data/docs/uaf/docs_17_20/TrackingOp.py +150 -150
- teradataml/data/docs/uaf/docs_17_20/UNDIFF.py +171 -171
- teradataml/data/docs/uaf/docs_17_20/Unnormalize.py +201 -201
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +169 -169
- teradataml/data/dtw_example.json +17 -17
- teradataml/data/dtw_t1.csv +11 -11
- teradataml/data/dtw_t2.csv +4 -4
- teradataml/data/dwt2d_example.json +15 -15
- teradataml/data/dwt_example.json +14 -14
- teradataml/data/dwt_filter_dim.csv +5 -5
- teradataml/data/emission.csv +9 -9
- teradataml/data/emp_table_by_dept.csv +19 -19
- teradataml/data/employee_info.csv +4 -4
- teradataml/data/employee_table.csv +6 -6
- teradataml/data/excluding_event_table.csv +2 -2
- teradataml/data/finance_data.csv +6 -6
- teradataml/data/finance_data2.csv +61 -61
- teradataml/data/finance_data3.csv +93 -93
- teradataml/data/fish.csv +160 -0
- teradataml/data/fm_blood2ageandweight.csv +26 -26
- teradataml/data/fmeasure_example.json +11 -11
- teradataml/data/followers_leaders.csv +10 -10
- teradataml/data/fpgrowth_example.json +12 -12
- teradataml/data/frequentpaths_example.json +29 -29
- teradataml/data/friends.csv +9 -9
- teradataml/data/fs_input.csv +33 -33
- teradataml/data/fs_input1.csv +33 -33
- teradataml/data/genData.csv +513 -513
- teradataml/data/geodataframe_example.json +39 -39
- teradataml/data/glass_types.csv +215 -0
- teradataml/data/glm_admissions_model.csv +12 -12
- teradataml/data/glm_example.json +56 -29
- teradataml/data/glml1l2_example.json +28 -28
- teradataml/data/glml1l2predict_example.json +54 -54
- teradataml/data/glmpredict_example.json +54 -54
- teradataml/data/gq_t1.csv +21 -21
- teradataml/data/hconvolve_complex_right.csv +5 -5
- teradataml/data/hconvolve_complex_rightmulti.csv +5 -5
- teradataml/data/histogram_example.json +11 -11
- teradataml/data/hmmdecoder_example.json +78 -78
- teradataml/data/hmmevaluator_example.json +24 -24
- teradataml/data/hmmsupervised_example.json +10 -10
- teradataml/data/hmmunsupervised_example.json +7 -7
- teradataml/data/house_values.csv +12 -12
- teradataml/data/house_values2.csv +13 -13
- teradataml/data/housing_cat.csv +7 -7
- teradataml/data/housing_data.csv +9 -9
- teradataml/data/housing_test.csv +47 -47
- teradataml/data/housing_test_binary.csv +47 -47
- teradataml/data/housing_train.csv +493 -493
- teradataml/data/housing_train_attribute.csv +4 -4
- teradataml/data/housing_train_binary.csv +437 -437
- teradataml/data/housing_train_parameter.csv +2 -2
- teradataml/data/housing_train_response.csv +493 -493
- teradataml/data/housing_train_segment.csv +201 -0
- teradataml/data/ibm_stock.csv +370 -370
- teradataml/data/ibm_stock1.csv +370 -370
- teradataml/data/identitymatch_example.json +21 -21
- teradataml/data/idf_table.csv +4 -4
- teradataml/data/impressions.csv +101 -101
- teradataml/data/inflation.csv +21 -21
- teradataml/data/initial.csv +3 -3
- teradataml/data/insect2Cols.csv +61 -0
- teradataml/data/insect_sprays.csv +12 -12
- teradataml/data/insurance.csv +1339 -1339
- teradataml/data/interpolator_example.json +12 -12
- teradataml/data/iris_altinput.csv +481 -481
- teradataml/data/iris_attribute_output.csv +8 -8
- teradataml/data/iris_attribute_test.csv +121 -121
- teradataml/data/iris_attribute_train.csv +481 -481
- teradataml/data/iris_category_expect_predict.csv +31 -31
- teradataml/data/iris_data.csv +151 -0
- teradataml/data/iris_input.csv +151 -151
- teradataml/data/iris_response_train.csv +121 -121
- teradataml/data/iris_test.csv +31 -31
- teradataml/data/iris_train.csv +121 -121
- teradataml/data/join_table1.csv +4 -4
- teradataml/data/join_table2.csv +4 -4
- teradataml/data/jsons/anly_function_name.json +6 -6
- teradataml/data/jsons/byom/dataikupredict.json +147 -147
- teradataml/data/jsons/byom/datarobotpredict.json +146 -146
- teradataml/data/jsons/byom/h2opredict.json +194 -194
- teradataml/data/jsons/byom/onnxpredict.json +186 -186
- teradataml/data/jsons/byom/pmmlpredict.json +146 -146
- teradataml/data/jsons/paired_functions.json +435 -435
- teradataml/data/jsons/sqle/16.20/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/16.20/Attribution.json +249 -249
- teradataml/data/jsons/sqle/16.20/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/16.20/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/16.20/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/16.20/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/16.20/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/16.20/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/16.20/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/16.20/Pack.json +98 -98
- teradataml/data/jsons/sqle/16.20/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/16.20/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/16.20/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/16.20/Unpack.json +166 -166
- teradataml/data/jsons/sqle/16.20/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.00/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.00/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.00/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/17.00/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/17.00/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/17.00/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.00/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.00/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/17.00/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/17.00/Pack.json +98 -98
- teradataml/data/jsons/sqle/17.00/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/17.00/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.00/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.00/Unpack.json +166 -166
- teradataml/data/jsons/sqle/17.00/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.05/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.05/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.05/DecisionForestPredict.json +156 -156
- teradataml/data/jsons/sqle/17.05/DecisionTreePredict.json +170 -170
- teradataml/data/jsons/sqle/17.05/GLMPredict.json +122 -122
- teradataml/data/jsons/sqle/17.05/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.05/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.05/NaiveBayesPredict.json +136 -136
- teradataml/data/jsons/sqle/17.05/NaiveBayesTextClassifierPredict.json +235 -235
- teradataml/data/jsons/sqle/17.05/Pack.json +98 -98
- teradataml/data/jsons/sqle/17.05/SVMSparsePredict.json +162 -162
- teradataml/data/jsons/sqle/17.05/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.05/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.05/Unpack.json +166 -166
- teradataml/data/jsons/sqle/17.05/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.10/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.10/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.10/DecisionForestPredict.json +185 -185
- teradataml/data/jsons/sqle/17.10/DecisionTreePredict.json +171 -171
- teradataml/data/jsons/sqle/17.10/GLMPredict.json +151 -151
- teradataml/data/jsons/sqle/17.10/MovingAverage.json +368 -368
- teradataml/data/jsons/sqle/17.10/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.10/NaiveBayesPredict.json +149 -149
- teradataml/data/jsons/sqle/17.10/NaiveBayesTextClassifierPredict.json +288 -288
- teradataml/data/jsons/sqle/17.10/Pack.json +133 -133
- teradataml/data/jsons/sqle/17.10/SVMSparsePredict.json +193 -193
- teradataml/data/jsons/sqle/17.10/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.10/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.10/TD_BinCodeFit.json +239 -239
- teradataml/data/jsons/sqle/17.10/TD_BinCodeTransform.json +70 -70
- teradataml/data/jsons/sqle/17.10/TD_CategoricalSummary.json +53 -53
- teradataml/data/jsons/sqle/17.10/TD_Chisq.json +67 -67
- teradataml/data/jsons/sqle/17.10/TD_ColumnSummary.json +53 -53
- teradataml/data/jsons/sqle/17.10/TD_ConvertTo.json +68 -68
- teradataml/data/jsons/sqle/17.10/TD_FTest.json +187 -187
- teradataml/data/jsons/sqle/17.10/TD_FillRowID.json +51 -51
- teradataml/data/jsons/sqle/17.10/TD_FunctionFit.json +46 -46
- teradataml/data/jsons/sqle/17.10/TD_FunctionTransform.json +72 -71
- teradataml/data/jsons/sqle/17.10/TD_GetRowsWithMissingValues.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_GetRowsWithoutMissingValues.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_Histogram.json +132 -132
- teradataml/data/jsons/sqle/17.10/TD_NumApply.json +147 -147
- teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingFit.json +182 -182
- teradataml/data/jsons/sqle/17.10/TD_OneHotEncodingTransform.json +65 -64
- teradataml/data/jsons/sqle/17.10/TD_OutlierFilterFit.json +196 -196
- teradataml/data/jsons/sqle/17.10/TD_OutlierFilterTransform.json +48 -47
- teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesFit.json +114 -114
- teradataml/data/jsons/sqle/17.10/TD_PolynomialFeaturesTransform.json +72 -71
- teradataml/data/jsons/sqle/17.10/TD_QQNorm.json +111 -111
- teradataml/data/jsons/sqle/17.10/TD_RoundColumns.json +93 -93
- teradataml/data/jsons/sqle/17.10/TD_RowNormalizeFit.json +127 -127
- teradataml/data/jsons/sqle/17.10/TD_RowNormalizeTransform.json +70 -69
- teradataml/data/jsons/sqle/17.10/TD_ScaleFit.json +156 -156
- teradataml/data/jsons/sqle/17.10/TD_ScaleTransform.json +70 -69
- teradataml/data/jsons/sqle/17.10/TD_SimpleImputeFit.json +147 -147
- teradataml/data/jsons/sqle/17.10/TD_SimpleImputeTransform.json +48 -47
- teradataml/data/jsons/sqle/17.10/TD_StrApply.json +240 -240
- teradataml/data/jsons/sqle/17.10/TD_UnivariateStatistics.json +118 -118
- teradataml/data/jsons/sqle/17.10/TD_WhichMax.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_WhichMin.json +52 -52
- teradataml/data/jsons/sqle/17.10/TD_ZTest.json +171 -171
- teradataml/data/jsons/sqle/17.10/Unpack.json +188 -188
- teradataml/data/jsons/sqle/17.10/nPath.json +269 -269
- teradataml/data/jsons/sqle/17.20/Antiselect.json +56 -56
- teradataml/data/jsons/sqle/17.20/Attribution.json +249 -249
- teradataml/data/jsons/sqle/17.20/DecisionForestPredict.json +185 -185
- teradataml/data/jsons/sqle/17.20/DecisionTreePredict.json +172 -172
- teradataml/data/jsons/sqle/17.20/GLMPredict.json +151 -151
- teradataml/data/jsons/sqle/17.20/MovingAverage.json +367 -367
- teradataml/data/jsons/sqle/17.20/NGramSplitter.json +239 -239
- teradataml/data/jsons/sqle/17.20/NaiveBayesPredict.json +149 -149
- teradataml/data/jsons/sqle/17.20/NaiveBayesTextClassifierPredict.json +287 -287
- teradataml/data/jsons/sqle/17.20/Pack.json +133 -133
- teradataml/data/jsons/sqle/17.20/SVMSparsePredict.json +192 -192
- teradataml/data/jsons/sqle/17.20/Sessionize.json +105 -105
- teradataml/data/jsons/sqle/17.20/StringSimilarity.json +86 -86
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +148 -76
- teradataml/data/jsons/sqle/17.20/TD_BinCodeFit.json +239 -239
- teradataml/data/jsons/sqle/17.20/TD_BinCodeTransform.json +71 -71
- teradataml/data/jsons/sqle/17.20/TD_CategoricalSummary.json +53 -53
- teradataml/data/jsons/sqle/17.20/TD_Chisq.json +67 -67
- teradataml/data/jsons/sqle/17.20/TD_ClassificationEvaluator.json +145 -145
- teradataml/data/jsons/sqle/17.20/TD_ColumnSummary.json +53 -53
- teradataml/data/jsons/sqle/17.20/TD_ColumnTransformer.json +218 -218
- teradataml/data/jsons/sqle/17.20/TD_ConvertTo.json +92 -92
- teradataml/data/jsons/sqle/17.20/TD_DecisionForest.json +259 -259
- teradataml/data/jsons/sqle/17.20/TD_DecisionForestPredict.json +139 -139
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +269 -186
- teradataml/data/jsons/sqle/17.20/TD_FillRowID.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_FunctionFit.json +46 -46
- teradataml/data/jsons/sqle/17.20/TD_FunctionTransform.json +72 -72
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +507 -431
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +168 -125
- teradataml/data/jsons/sqle/17.20/TD_GLMPerSegment.json +411 -411
- teradataml/data/jsons/sqle/17.20/TD_GLMPredictPerSegment.json +146 -146
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +93 -91
- teradataml/data/jsons/sqle/17.20/TD_GetRowsWithMissingValues.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_GetRowsWithoutMissingValues.json +76 -76
- teradataml/data/jsons/sqle/17.20/TD_Histogram.json +152 -152
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +231 -211
- teradataml/data/jsons/sqle/17.20/TD_KMeansPredict.json +86 -86
- teradataml/data/jsons/sqle/17.20/TD_KNN.json +262 -262
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesTextClassifierTrainer.json +137 -137
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +102 -101
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineTransform.json +71 -71
- teradataml/data/jsons/sqle/17.20/TD_NumApply.json +147 -147
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +315 -315
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVMPredict.json +123 -123
- teradataml/data/jsons/sqle/17.20/TD_OneHotEncodingFit.json +271 -271
- teradataml/data/jsons/sqle/17.20/TD_OneHotEncodingTransform.json +65 -65
- teradataml/data/jsons/sqle/17.20/TD_OrdinalEncodingFit.json +229 -229
- teradataml/data/jsons/sqle/17.20/TD_OrdinalEncodingTransform.json +75 -75
- teradataml/data/jsons/sqle/17.20/TD_OutlierFilterFit.json +217 -217
- teradataml/data/jsons/sqle/17.20/TD_OutlierFilterTransform.json +48 -48
- teradataml/data/jsons/sqle/17.20/TD_PolynomialFeaturesFit.json +114 -114
- teradataml/data/jsons/sqle/17.20/TD_PolynomialFeaturesTransform.json +72 -72
- teradataml/data/jsons/sqle/17.20/TD_QQNorm.json +111 -111
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +178 -177
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionFit.json +178 -178
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionMinComponents.json +73 -73
- teradataml/data/jsons/sqle/17.20/TD_RandomProjectionTransform.json +74 -74
- teradataml/data/jsons/sqle/17.20/TD_RegressionEvaluator.json +137 -137
- teradataml/data/jsons/sqle/17.20/TD_RoundColumns.json +93 -93
- teradataml/data/jsons/sqle/17.20/TD_RowNormalizeFit.json +127 -127
- teradataml/data/jsons/sqle/17.20/TD_RowNormalizeTransform.json +70 -70
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +389 -389
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +142 -124
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +309 -156
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +119 -70
- teradataml/data/jsons/sqle/17.20/TD_SentimentExtractor.json +193 -193
- teradataml/data/jsons/sqle/17.20/TD_Silhouette.json +142 -142
- teradataml/data/jsons/sqle/17.20/TD_SimpleImputeFit.json +147 -147
- teradataml/data/jsons/sqle/17.20/TD_SimpleImputeTransform.json +48 -48
- teradataml/data/jsons/sqle/17.20/TD_StrApply.json +240 -240
- teradataml/data/jsons/sqle/17.20/TD_TargetEncodingFit.json +248 -248
- teradataml/data/jsons/sqle/17.20/TD_TargetEncodingTransform.json +75 -75
- teradataml/data/jsons/sqle/17.20/TD_TextParser.json +192 -192
- teradataml/data/jsons/sqle/17.20/TD_TrainTestSplit.json +142 -142
- teradataml/data/jsons/sqle/17.20/TD_UnivariateStatistics.json +117 -117
- teradataml/data/jsons/sqle/17.20/TD_VectorDistance.json +182 -182
- teradataml/data/jsons/sqle/17.20/TD_WhichMax.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_WhichMin.json +52 -52
- teradataml/data/jsons/sqle/17.20/TD_WordEmbeddings.json +241 -241
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +330 -312
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +195 -182
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +247 -170
- teradataml/data/jsons/sqle/17.20/Unpack.json +188 -188
- teradataml/data/jsons/sqle/17.20/nPath.json +269 -269
- teradataml/data/jsons/tableoperator/17.00/read_nos.json +197 -197
- teradataml/data/jsons/tableoperator/17.05/read_nos.json +197 -197
- teradataml/data/jsons/tableoperator/17.05/write_nos.json +194 -194
- teradataml/data/jsons/tableoperator/17.10/read_nos.json +183 -183
- teradataml/data/jsons/tableoperator/17.10/write_nos.json +194 -194
- teradataml/data/jsons/tableoperator/17.20/read_nos.json +182 -182
- teradataml/data/jsons/tableoperator/17.20/write_nos.json +223 -223
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +149 -149
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +409 -409
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +79 -79
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +151 -151
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +109 -109
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +107 -107
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +87 -87
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +106 -106
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +80 -80
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +91 -91
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +136 -136
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +148 -148
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +108 -108
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +109 -109
- teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +86 -86
- teradataml/data/jsons/uaf/17.20/TD_DIFF.json +91 -91
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +116 -116
- teradataml/data/jsons/uaf/17.20/TD_DURBIN_WATSON.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_EXTRACT_RESULTS.json +38 -38
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_GENSERIES4FORMULA.json +84 -84
- teradataml/data/jsons/uaf/17.20/TD_GENSERIES4SINUSOIDS.json +70 -70
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +152 -152
- teradataml/data/jsons/uaf/17.20/TD_HOLT_WINTERS_FORECAST.json +313 -313
- teradataml/data/jsons/uaf/17.20/TD_IDFFT.json +57 -57
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +94 -94
- teradataml/data/jsons/uaf/17.20/TD_INPUTVALIDATOR.json +63 -63
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +181 -181
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +102 -102
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +182 -182
- teradataml/data/jsons/uaf/17.20/TD_MATRIXMULTIPLY.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_MINFO.json +66 -66
- teradataml/data/jsons/uaf/17.20/TD_MULTIVAR_REGR.json +178 -178
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +114 -114
- teradataml/data/jsons/uaf/17.20/TD_PORTMAN.json +118 -118
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +175 -175
- teradataml/data/jsons/uaf/17.20/TD_POWERTRANSFORM.json +97 -97
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +173 -173
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +136 -136
- teradataml/data/jsons/uaf/17.20/TD_SELECTION_CRITERIA.json +89 -89
- teradataml/data/jsons/uaf/17.20/TD_SIGNIF_PERIODICITIES.json +79 -79
- teradataml/data/jsons/uaf/17.20/TD_SIGNIF_RESIDMEAN.json +67 -67
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +184 -184
- teradataml/data/jsons/uaf/17.20/TD_SINFO.json +57 -57
- teradataml/data/jsons/uaf/17.20/TD_SMOOTHMA.json +162 -162
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +100 -100
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +111 -111
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +95 -95
- teradataml/data/jsons/uaf/17.20/TD_WHITES_GENERAL.json +77 -77
- teradataml/data/kmeans_example.json +22 -17
- teradataml/data/kmeans_table.csv +10 -0
- teradataml/data/kmeans_us_arrests_data.csv +0 -0
- teradataml/data/knn_example.json +18 -18
- teradataml/data/knnrecommender_example.json +6 -6
- teradataml/data/knnrecommenderpredict_example.json +12 -12
- teradataml/data/lar_example.json +17 -17
- teradataml/data/larpredict_example.json +30 -30
- teradataml/data/lc_new_predictors.csv +5 -5
- teradataml/data/lc_new_reference.csv +9 -9
- teradataml/data/lda_example.json +8 -8
- teradataml/data/ldainference_example.json +14 -14
- teradataml/data/ldatopicsummary_example.json +8 -8
- teradataml/data/levendist_input.csv +13 -13
- teradataml/data/levenshteindistance_example.json +10 -10
- teradataml/data/linreg_example.json +9 -9
- teradataml/data/load_example_data.py +326 -323
- teradataml/data/loan_prediction.csv +295 -295
- teradataml/data/lungcancer.csv +138 -138
- teradataml/data/mappingdata.csv +12 -12
- teradataml/data/milk_timeseries.csv +157 -157
- teradataml/data/min_max_titanic.csv +4 -4
- teradataml/data/minhash_example.json +6 -6
- teradataml/data/ml_ratings.csv +7547 -7547
- teradataml/data/ml_ratings_10.csv +2445 -2445
- teradataml/data/model1_table.csv +5 -5
- teradataml/data/model2_table.csv +5 -5
- teradataml/data/models/iris_db_glm_model.pmml +56 -56
- teradataml/data/models/iris_db_xgb_model.pmml +4471 -4471
- teradataml/data/modularity_example.json +12 -12
- teradataml/data/movavg_example.json +7 -7
- teradataml/data/mtx1.csv +7 -7
- teradataml/data/mtx2.csv +13 -13
- teradataml/data/multi_model_classification.csv +401 -0
- teradataml/data/multi_model_regression.csv +401 -0
- teradataml/data/mvdfft8.csv +9 -9
- teradataml/data/naivebayes_example.json +9 -9
- teradataml/data/naivebayespredict_example.json +19 -19
- teradataml/data/naivebayestextclassifier2_example.json +6 -6
- teradataml/data/naivebayestextclassifier_example.json +8 -8
- teradataml/data/naivebayestextclassifierpredict_example.json +20 -20
- teradataml/data/name_Find_configure.csv +10 -10
- teradataml/data/namedentityfinder_example.json +14 -14
- teradataml/data/namedentityfinderevaluator_example.json +10 -10
- teradataml/data/namedentityfindertrainer_example.json +6 -6
- teradataml/data/nb_iris_input_test.csv +31 -31
- teradataml/data/nb_iris_input_train.csv +121 -121
- teradataml/data/nbp_iris_model.csv +13 -13
- teradataml/data/ner_extractor_text.csv +2 -2
- teradataml/data/ner_sports_test2.csv +29 -29
- teradataml/data/ner_sports_train.csv +501 -501
- teradataml/data/nerevaluator_example.json +5 -5
- teradataml/data/nerextractor_example.json +18 -18
- teradataml/data/nermem_sports_test.csv +17 -17
- teradataml/data/nermem_sports_train.csv +50 -50
- teradataml/data/nertrainer_example.json +6 -6
- teradataml/data/ngrams_example.json +6 -6
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Aggregate Functions using SQLAlchemy.ipynb +1455 -1455
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Arithmetic Functions Using SQLAlchemy.ipynb +1993 -1993
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Bit-Byte Manipulation Functions using SQLAlchemy.ipynb +1492 -1492
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Built-in functions using SQLAlchemy.ipynb +536 -536
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Regular Expressions Using SQLAlchemy.ipynb +570 -570
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage String Functions Using SQLAlchemy.ipynb +2559 -2559
- teradataml/data/notebooks/sqlalchemy/Teradata Vantage Window Aggregate Functions using SQLAlchemy.ipynb +2911 -2911
- teradataml/data/notebooks/sqlalchemy/Using Generic SQLAlchemy ClauseElements teradataml DataFrame assign method.ipynb +698 -698
- teradataml/data/notebooks/sqlalchemy/teradataml filtering using SQLAlchemy ClauseElements.ipynb +784 -784
- teradataml/data/npath_example.json +23 -23
- teradataml/data/ntree_example.json +14 -14
- teradataml/data/numeric_strings.csv +4 -4
- teradataml/data/numerics.csv +4 -4
- teradataml/data/ocean_buoy.csv +17 -17
- teradataml/data/ocean_buoy2.csv +17 -17
- teradataml/data/ocean_buoys.csv +27 -27
- teradataml/data/ocean_buoys2.csv +10 -10
- teradataml/data/ocean_buoys_nonpti.csv +28 -28
- teradataml/data/ocean_buoys_seq.csv +29 -29
- teradataml/data/onehot_encoder_train.csv +4 -0
- teradataml/data/openml_example.json +92 -0
- teradataml/data/optional_event_table.csv +4 -4
- teradataml/data/orders1.csv +11 -11
- teradataml/data/orders1_12.csv +12 -12
- teradataml/data/orders_ex.csv +4 -4
- teradataml/data/pack_example.json +8 -8
- teradataml/data/package_tracking.csv +19 -19
- teradataml/data/package_tracking_pti.csv +18 -18
- teradataml/data/pagerank_example.json +13 -13
- teradataml/data/paragraphs_input.csv +6 -6
- teradataml/data/pathanalyzer_example.json +7 -7
- teradataml/data/pathgenerator_example.json +7 -7
- teradataml/data/phrases.csv +7 -7
- teradataml/data/pivot_example.json +8 -8
- teradataml/data/pivot_input.csv +22 -22
- teradataml/data/playerRating.csv +31 -31
- teradataml/data/postagger_example.json +6 -6
- teradataml/data/posttagger_output.csv +44 -44
- teradataml/data/production_data.csv +16 -16
- teradataml/data/production_data2.csv +7 -7
- teradataml/data/randomsample_example.json +31 -31
- teradataml/data/randomwalksample_example.json +8 -8
- teradataml/data/rank_table.csv +6 -6
- teradataml/data/ref_mobile_data.csv +4 -4
- teradataml/data/ref_mobile_data_dense.csv +2 -2
- teradataml/data/ref_url.csv +17 -17
- teradataml/data/restaurant_reviews.csv +7 -7
- teradataml/data/river_data.csv +145 -145
- teradataml/data/roc_example.json +7 -7
- teradataml/data/roc_input.csv +101 -101
- teradataml/data/rule_inputs.csv +6 -6
- teradataml/data/rule_table.csv +2 -2
- teradataml/data/sales.csv +7 -7
- teradataml/data/sales_transaction.csv +501 -501
- teradataml/data/salesdata.csv +342 -342
- teradataml/data/sample_cities.csv +2 -2
- teradataml/data/sample_shapes.csv +10 -10
- teradataml/data/sample_streets.csv +2 -2
- teradataml/data/sampling_example.json +15 -15
- teradataml/data/sax_example.json +8 -8
- teradataml/data/scale_attributes.csv +3 -0
- teradataml/data/scale_example.json +74 -23
- teradataml/data/scale_housing.csv +11 -11
- teradataml/data/scale_housing_test.csv +6 -6
- teradataml/data/scale_input_part_sparse.csv +31 -0
- teradataml/data/scale_input_partitioned.csv +16 -0
- teradataml/data/scale_input_sparse.csv +11 -0
- teradataml/data/scale_parameters.csv +3 -0
- teradataml/data/scale_stat.csv +11 -11
- teradataml/data/scalebypartition_example.json +13 -13
- teradataml/data/scalemap_example.json +13 -13
- teradataml/data/scalesummary_example.json +12 -12
- teradataml/data/score_category.csv +101 -101
- teradataml/data/score_summary.csv +4 -4
- teradataml/data/script_example.json +9 -9
- teradataml/data/scripts/deploy_script.py +84 -0
- teradataml/data/scripts/mapper.R +20 -0
- teradataml/data/scripts/mapper.py +15 -15
- teradataml/data/scripts/mapper_replace.py +15 -15
- teradataml/data/scripts/sklearn/__init__.py +0 -0
- teradataml/data/scripts/sklearn/sklearn_fit.py +171 -0
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +127 -0
- teradataml/data/scripts/sklearn/sklearn_function.template +108 -0
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +148 -0
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +143 -0
- teradataml/data/scripts/sklearn/sklearn_score.py +119 -0
- teradataml/data/scripts/sklearn/sklearn_transform.py +171 -0
- teradataml/data/seeds.csv +10 -10
- teradataml/data/sentenceextractor_example.json +6 -6
- teradataml/data/sentiment_extract_input.csv +11 -11
- teradataml/data/sentiment_train.csv +16 -16
- teradataml/data/sentiment_word.csv +20 -20
- teradataml/data/sentiment_word_input.csv +19 -19
- teradataml/data/sentimentextractor_example.json +24 -24
- teradataml/data/sentimenttrainer_example.json +8 -8
- teradataml/data/sequence_table.csv +10 -10
- teradataml/data/seriessplitter_example.json +7 -7
- teradataml/data/sessionize_example.json +17 -17
- teradataml/data/sessionize_table.csv +116 -116
- teradataml/data/setop_test1.csv +24 -24
- teradataml/data/setop_test2.csv +22 -22
- teradataml/data/soc_nw_edges.csv +10 -10
- teradataml/data/soc_nw_vertices.csv +7 -7
- teradataml/data/souvenir_timeseries.csv +167 -167
- teradataml/data/sparse_iris_attribute.csv +5 -5
- teradataml/data/sparse_iris_test.csv +121 -121
- teradataml/data/sparse_iris_train.csv +601 -601
- teradataml/data/star1.csv +6 -6
- teradataml/data/state_transition.csv +5 -5
- teradataml/data/stock_data.csv +53 -53
- teradataml/data/stock_movement.csv +11 -11
- teradataml/data/stock_vol.csv +76 -76
- teradataml/data/stop_words.csv +8 -8
- teradataml/data/store_sales.csv +37 -37
- teradataml/data/stringsimilarity_example.json +7 -7
- teradataml/data/strsimilarity_input.csv +13 -13
- teradataml/data/students.csv +101 -101
- teradataml/data/svm_iris_input_test.csv +121 -121
- teradataml/data/svm_iris_input_train.csv +481 -481
- teradataml/data/svm_iris_model.csv +7 -7
- teradataml/data/svmdense_example.json +9 -9
- teradataml/data/svmdensepredict_example.json +18 -18
- teradataml/data/svmsparse_example.json +7 -7
- teradataml/data/svmsparsepredict_example.json +13 -13
- teradataml/data/svmsparsesummary_example.json +7 -7
- teradataml/data/target_mobile_data.csv +13 -13
- teradataml/data/target_mobile_data_dense.csv +5 -5
- teradataml/data/templatedata.csv +1201 -1201
- teradataml/data/templates/open_source_ml.json +9 -0
- teradataml/data/teradataml_example.json +150 -1
- teradataml/data/test_classification.csv +101 -0
- teradataml/data/test_loan_prediction.csv +53 -53
- teradataml/data/test_pacf_12.csv +37 -37
- teradataml/data/test_prediction.csv +101 -0
- teradataml/data/test_regression.csv +101 -0
- teradataml/data/test_river2.csv +109 -109
- teradataml/data/text_inputs.csv +6 -6
- teradataml/data/textchunker_example.json +7 -7
- teradataml/data/textclassifier_example.json +6 -6
- teradataml/data/textclassifier_input.csv +7 -7
- teradataml/data/textclassifiertrainer_example.json +6 -6
- teradataml/data/textmorph_example.json +5 -5
- teradataml/data/textparser_example.json +15 -15
- teradataml/data/texttagger_example.json +11 -11
- teradataml/data/texttokenizer_example.json +6 -6
- teradataml/data/texttrainer_input.csv +11 -11
- teradataml/data/tf_example.json +6 -6
- teradataml/data/tfidf_example.json +13 -13
- teradataml/data/tfidf_input1.csv +201 -201
- teradataml/data/tfidf_train.csv +6 -6
- teradataml/data/time_table1.csv +535 -535
- teradataml/data/time_table2.csv +14 -14
- teradataml/data/timeseriesdata.csv +1601 -1601
- teradataml/data/timeseriesdatasetsd4.csv +105 -105
- teradataml/data/titanic.csv +892 -892
- teradataml/data/token_table.csv +696 -696
- teradataml/data/train_multiclass.csv +101 -0
- teradataml/data/train_regression.csv +101 -0
- teradataml/data/train_regression_multiple_labels.csv +101 -0
- teradataml/data/train_tracking.csv +27 -27
- teradataml/data/transformation_table.csv +5 -5
- teradataml/data/transformation_table_new.csv +1 -1
- teradataml/data/tv_spots.csv +16 -16
- teradataml/data/twod_climate_data.csv +117 -117
- teradataml/data/uaf_example.json +475 -475
- teradataml/data/univariatestatistics_example.json +8 -8
- teradataml/data/unpack_example.json +9 -9
- teradataml/data/unpivot_example.json +9 -9
- teradataml/data/unpivot_input.csv +8 -8
- teradataml/data/us_air_pass.csv +36 -36
- teradataml/data/us_population.csv +624 -624
- teradataml/data/us_states_shapes.csv +52 -52
- teradataml/data/varmax_example.json +17 -17
- teradataml/data/vectordistance_example.json +25 -25
- teradataml/data/ville_climatedata.csv +121 -121
- teradataml/data/ville_tempdata.csv +12 -12
- teradataml/data/ville_tempdata1.csv +12 -12
- teradataml/data/ville_temperature.csv +11 -11
- teradataml/data/waveletTable.csv +1605 -1605
- teradataml/data/waveletTable2.csv +1605 -1605
- teradataml/data/weightedmovavg_example.json +8 -8
- teradataml/data/wft_testing.csv +5 -5
- teradataml/data/wine_data.csv +1600 -0
- teradataml/data/word_embed_input_table1.csv +5 -5
- teradataml/data/word_embed_input_table2.csv +4 -4
- teradataml/data/word_embed_model.csv +22 -22
- teradataml/data/words_input.csv +13 -13
- teradataml/data/xconvolve_complex_left.csv +6 -6
- teradataml/data/xconvolve_complex_leftmulti.csv +6 -6
- teradataml/data/xgboost_example.json +35 -35
- teradataml/data/xgboostpredict_example.json +31 -31
- teradataml/data/ztest_example.json +16 -0
- teradataml/dataframe/copy_to.py +1769 -1698
- teradataml/dataframe/data_transfer.py +2812 -2745
- teradataml/dataframe/dataframe.py +17630 -16946
- teradataml/dataframe/dataframe_utils.py +1875 -1740
- teradataml/dataframe/fastload.py +794 -603
- teradataml/dataframe/indexer.py +424 -424
- teradataml/dataframe/setop.py +1179 -1166
- teradataml/dataframe/sql.py +10174 -6432
- teradataml/dataframe/sql_function_parameters.py +439 -388
- teradataml/dataframe/sql_functions.py +652 -652
- teradataml/dataframe/sql_interfaces.py +220 -220
- teradataml/dataframe/vantage_function_types.py +674 -630
- teradataml/dataframe/window.py +693 -692
- teradataml/dbutils/__init__.py +3 -3
- teradataml/dbutils/dbutils.py +1167 -1150
- teradataml/dbutils/filemgr.py +267 -267
- teradataml/gen_ai/__init__.py +2 -2
- teradataml/gen_ai/convAI.py +472 -472
- teradataml/geospatial/__init__.py +3 -3
- teradataml/geospatial/geodataframe.py +1105 -1094
- teradataml/geospatial/geodataframecolumn.py +392 -387
- teradataml/geospatial/geometry_types.py +925 -925
- teradataml/hyperparameter_tuner/__init__.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +3783 -2993
- teradataml/hyperparameter_tuner/utils.py +281 -187
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/lib/libaed_0_1.dylib +0 -0
- teradataml/lib/libaed_0_1.so +0 -0
- teradataml/libaed_0_1.dylib +0 -0
- teradataml/libaed_0_1.so +0 -0
- teradataml/opensource/__init__.py +1 -0
- teradataml/opensource/sklearn/__init__.py +1 -0
- teradataml/opensource/sklearn/_class.py +255 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +1715 -0
- teradataml/opensource/sklearn/_wrapper_utils.py +268 -0
- teradataml/opensource/sklearn/constants.py +54 -0
- teradataml/options/__init__.py +130 -124
- teradataml/options/configure.py +358 -336
- teradataml/options/display.py +176 -176
- teradataml/plot/__init__.py +2 -2
- teradataml/plot/axis.py +1388 -1388
- teradataml/plot/constants.py +15 -15
- teradataml/plot/figure.py +398 -398
- teradataml/plot/plot.py +760 -760
- teradataml/plot/query_generator.py +83 -83
- teradataml/plot/subplot.py +216 -216
- teradataml/scriptmgmt/UserEnv.py +3791 -3761
- teradataml/scriptmgmt/__init__.py +3 -3
- teradataml/scriptmgmt/lls_utils.py +1719 -1604
- teradataml/series/series.py +532 -532
- teradataml/series/series_utils.py +71 -71
- teradataml/table_operators/Apply.py +949 -917
- teradataml/table_operators/Script.py +1718 -1982
- teradataml/table_operators/TableOperator.py +1255 -1616
- teradataml/table_operators/__init__.py +2 -3
- teradataml/table_operators/apply_query_generator.py +262 -262
- teradataml/table_operators/query_generator.py +507 -507
- teradataml/table_operators/table_operator_query_generator.py +460 -460
- teradataml/table_operators/table_operator_util.py +631 -639
- teradataml/table_operators/templates/dataframe_apply.template +184 -184
- teradataml/table_operators/templates/dataframe_map.template +176 -176
- teradataml/table_operators/templates/script_executor.template +170 -170
- teradataml/utils/dtypes.py +684 -684
- teradataml/utils/internal_buffer.py +84 -84
- teradataml/utils/print_versions.py +205 -205
- teradataml/utils/utils.py +410 -410
- teradataml/utils/validators.py +2277 -2115
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/METADATA +346 -45
- teradataml-20.0.0.1.dist-info/RECORD +1056 -0
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/WHEEL +1 -1
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/zip-safe +1 -1
- teradataml/analytics/mle/AdaBoost.py +0 -651
- teradataml/analytics/mle/AdaBoostPredict.py +0 -564
- teradataml/analytics/mle/Antiselect.py +0 -342
- teradataml/analytics/mle/Arima.py +0 -641
- teradataml/analytics/mle/ArimaPredict.py +0 -477
- teradataml/analytics/mle/Attribution.py +0 -1070
- teradataml/analytics/mle/Betweenness.py +0 -658
- teradataml/analytics/mle/Burst.py +0 -711
- teradataml/analytics/mle/CCM.py +0 -600
- teradataml/analytics/mle/CCMPrepare.py +0 -324
- teradataml/analytics/mle/CFilter.py +0 -460
- teradataml/analytics/mle/ChangePointDetection.py +0 -572
- teradataml/analytics/mle/ChangePointDetectionRT.py +0 -477
- teradataml/analytics/mle/Closeness.py +0 -737
- teradataml/analytics/mle/ConfusionMatrix.py +0 -420
- teradataml/analytics/mle/Correlation.py +0 -477
- teradataml/analytics/mle/Correlation2.py +0 -573
- teradataml/analytics/mle/CoxHazardRatio.py +0 -679
- teradataml/analytics/mle/CoxPH.py +0 -556
- teradataml/analytics/mle/CoxSurvival.py +0 -478
- teradataml/analytics/mle/CumulativeMovAvg.py +0 -363
- teradataml/analytics/mle/DTW.py +0 -623
- teradataml/analytics/mle/DWT.py +0 -564
- teradataml/analytics/mle/DWT2D.py +0 -599
- teradataml/analytics/mle/DecisionForest.py +0 -716
- teradataml/analytics/mle/DecisionForestEvaluator.py +0 -363
- teradataml/analytics/mle/DecisionForestPredict.py +0 -561
- teradataml/analytics/mle/DecisionTree.py +0 -830
- teradataml/analytics/mle/DecisionTreePredict.py +0 -528
- teradataml/analytics/mle/ExponentialMovAvg.py +0 -418
- teradataml/analytics/mle/FMeasure.py +0 -402
- teradataml/analytics/mle/FPGrowth.py +0 -734
- teradataml/analytics/mle/FrequentPaths.py +0 -695
- teradataml/analytics/mle/GLM.py +0 -558
- teradataml/analytics/mle/GLML1L2.py +0 -547
- teradataml/analytics/mle/GLML1L2Predict.py +0 -519
- teradataml/analytics/mle/GLMPredict.py +0 -529
- teradataml/analytics/mle/HMMDecoder.py +0 -945
- teradataml/analytics/mle/HMMEvaluator.py +0 -901
- teradataml/analytics/mle/HMMSupervised.py +0 -521
- teradataml/analytics/mle/HMMUnsupervised.py +0 -572
- teradataml/analytics/mle/Histogram.py +0 -561
- teradataml/analytics/mle/IDWT.py +0 -476
- teradataml/analytics/mle/IDWT2D.py +0 -493
- teradataml/analytics/mle/IdentityMatch.py +0 -763
- teradataml/analytics/mle/Interpolator.py +0 -918
- teradataml/analytics/mle/KMeans.py +0 -485
- teradataml/analytics/mle/KNN.py +0 -627
- teradataml/analytics/mle/KNNRecommender.py +0 -488
- teradataml/analytics/mle/KNNRecommenderPredict.py +0 -581
- teradataml/analytics/mle/LAR.py +0 -439
- teradataml/analytics/mle/LARPredict.py +0 -478
- teradataml/analytics/mle/LDA.py +0 -548
- teradataml/analytics/mle/LDAInference.py +0 -492
- teradataml/analytics/mle/LDATopicSummary.py +0 -464
- teradataml/analytics/mle/LevenshteinDistance.py +0 -450
- teradataml/analytics/mle/LinReg.py +0 -433
- teradataml/analytics/mle/LinRegPredict.py +0 -438
- teradataml/analytics/mle/MinHash.py +0 -544
- teradataml/analytics/mle/Modularity.py +0 -587
- teradataml/analytics/mle/NEREvaluator.py +0 -410
- teradataml/analytics/mle/NERExtractor.py +0 -595
- teradataml/analytics/mle/NERTrainer.py +0 -458
- teradataml/analytics/mle/NGrams.py +0 -570
- teradataml/analytics/mle/NPath.py +0 -634
- teradataml/analytics/mle/NTree.py +0 -549
- teradataml/analytics/mle/NaiveBayes.py +0 -462
- teradataml/analytics/mle/NaiveBayesPredict.py +0 -513
- teradataml/analytics/mle/NaiveBayesTextClassifier.py +0 -607
- teradataml/analytics/mle/NaiveBayesTextClassifier2.py +0 -531
- teradataml/analytics/mle/NaiveBayesTextClassifierPredict.py +0 -799
- teradataml/analytics/mle/NamedEntityFinder.py +0 -529
- teradataml/analytics/mle/NamedEntityFinderEvaluator.py +0 -414
- teradataml/analytics/mle/NamedEntityFinderTrainer.py +0 -396
- teradataml/analytics/mle/POSTagger.py +0 -417
- teradataml/analytics/mle/Pack.py +0 -411
- teradataml/analytics/mle/PageRank.py +0 -535
- teradataml/analytics/mle/PathAnalyzer.py +0 -426
- teradataml/analytics/mle/PathGenerator.py +0 -367
- teradataml/analytics/mle/PathStart.py +0 -464
- teradataml/analytics/mle/PathSummarizer.py +0 -470
- teradataml/analytics/mle/Pivot.py +0 -471
- teradataml/analytics/mle/ROC.py +0 -425
- teradataml/analytics/mle/RandomSample.py +0 -637
- teradataml/analytics/mle/RandomWalkSample.py +0 -490
- teradataml/analytics/mle/SAX.py +0 -779
- teradataml/analytics/mle/SVMDense.py +0 -677
- teradataml/analytics/mle/SVMDensePredict.py +0 -536
- teradataml/analytics/mle/SVMDenseSummary.py +0 -437
- teradataml/analytics/mle/SVMSparse.py +0 -557
- teradataml/analytics/mle/SVMSparsePredict.py +0 -553
- teradataml/analytics/mle/SVMSparseSummary.py +0 -435
- teradataml/analytics/mle/Sampling.py +0 -549
- teradataml/analytics/mle/Scale.py +0 -565
- teradataml/analytics/mle/ScaleByPartition.py +0 -496
- teradataml/analytics/mle/ScaleMap.py +0 -378
- teradataml/analytics/mle/ScaleSummary.py +0 -320
- teradataml/analytics/mle/SentenceExtractor.py +0 -363
- teradataml/analytics/mle/SentimentEvaluator.py +0 -432
- teradataml/analytics/mle/SentimentExtractor.py +0 -578
- teradataml/analytics/mle/SentimentTrainer.py +0 -405
- teradataml/analytics/mle/SeriesSplitter.py +0 -641
- teradataml/analytics/mle/Sessionize.py +0 -475
- teradataml/analytics/mle/SimpleMovAvg.py +0 -397
- teradataml/analytics/mle/StringSimilarity.py +0 -425
- teradataml/analytics/mle/TF.py +0 -389
- teradataml/analytics/mle/TFIDF.py +0 -504
- teradataml/analytics/mle/TextChunker.py +0 -414
- teradataml/analytics/mle/TextClassifier.py +0 -399
- teradataml/analytics/mle/TextClassifierEvaluator.py +0 -413
- teradataml/analytics/mle/TextClassifierTrainer.py +0 -565
- teradataml/analytics/mle/TextMorph.py +0 -494
- teradataml/analytics/mle/TextParser.py +0 -623
- teradataml/analytics/mle/TextTagger.py +0 -530
- teradataml/analytics/mle/TextTokenizer.py +0 -502
- teradataml/analytics/mle/UnivariateStatistics.py +0 -488
- teradataml/analytics/mle/Unpack.py +0 -526
- teradataml/analytics/mle/Unpivot.py +0 -438
- teradataml/analytics/mle/VarMax.py +0 -776
- teradataml/analytics/mle/VectorDistance.py +0 -762
- teradataml/analytics/mle/WeightedMovAvg.py +0 -400
- teradataml/analytics/mle/XGBoost.py +0 -842
- teradataml/analytics/mle/XGBoostPredict.py +0 -627
- teradataml/analytics/mle/__init__.py +0 -123
- teradataml/analytics/mle/json/adaboost_mle.json +0 -135
- teradataml/analytics/mle/json/adaboostpredict_mle.json +0 -85
- teradataml/analytics/mle/json/antiselect_mle.json +0 -34
- teradataml/analytics/mle/json/antiselect_mle_mle.json +0 -34
- teradataml/analytics/mle/json/arima_mle.json +0 -172
- teradataml/analytics/mle/json/arimapredict_mle.json +0 -52
- teradataml/analytics/mle/json/attribution_mle_mle.json +0 -143
- teradataml/analytics/mle/json/betweenness_mle.json +0 -97
- teradataml/analytics/mle/json/burst_mle.json +0 -140
- teradataml/analytics/mle/json/ccm_mle.json +0 -124
- teradataml/analytics/mle/json/ccmprepare_mle.json +0 -14
- teradataml/analytics/mle/json/cfilter_mle.json +0 -93
- teradataml/analytics/mle/json/changepointdetection_mle.json +0 -92
- teradataml/analytics/mle/json/changepointdetectionrt_mle.json +0 -78
- teradataml/analytics/mle/json/closeness_mle.json +0 -104
- teradataml/analytics/mle/json/confusionmatrix_mle.json +0 -79
- teradataml/analytics/mle/json/correlation_mle.json +0 -86
- teradataml/analytics/mle/json/correlationreduce_mle.json +0 -49
- teradataml/analytics/mle/json/coxhazardratio_mle.json +0 -89
- teradataml/analytics/mle/json/coxph_mle.json +0 -98
- teradataml/analytics/mle/json/coxsurvival_mle.json +0 -79
- teradataml/analytics/mle/json/cumulativemovavg_mle.json +0 -34
- teradataml/analytics/mle/json/decisionforest_mle.json +0 -167
- teradataml/analytics/mle/json/decisionforestevaluator_mle.json +0 -33
- teradataml/analytics/mle/json/decisionforestpredict_mle_mle.json +0 -74
- teradataml/analytics/mle/json/decisiontree_mle.json +0 -194
- teradataml/analytics/mle/json/decisiontreepredict_mle_mle.json +0 -86
- teradataml/analytics/mle/json/dtw_mle.json +0 -97
- teradataml/analytics/mle/json/dwt2d_mle.json +0 -116
- teradataml/analytics/mle/json/dwt_mle.json +0 -101
- teradataml/analytics/mle/json/exponentialmovavg_mle.json +0 -55
- teradataml/analytics/mle/json/fmeasure_mle.json +0 -58
- teradataml/analytics/mle/json/fpgrowth_mle.json +0 -159
- teradataml/analytics/mle/json/frequentpaths_mle.json +0 -129
- teradataml/analytics/mle/json/glm_mle.json +0 -111
- teradataml/analytics/mle/json/glml1l2_mle.json +0 -106
- teradataml/analytics/mle/json/glml1l2predict_mle.json +0 -57
- teradataml/analytics/mle/json/glmpredict_mle_mle.json +0 -74
- teradataml/analytics/mle/json/histogram_mle.json +0 -100
- teradataml/analytics/mle/json/hmmdecoder_mle.json +0 -192
- teradataml/analytics/mle/json/hmmevaluator_mle.json +0 -206
- teradataml/analytics/mle/json/hmmsupervised_mle.json +0 -91
- teradataml/analytics/mle/json/hmmunsupervised_mle.json +0 -114
- teradataml/analytics/mle/json/identitymatch_mle.json +0 -88
- teradataml/analytics/mle/json/idwt2d_mle.json +0 -73
- teradataml/analytics/mle/json/idwt_mle.json +0 -66
- teradataml/analytics/mle/json/interpolator_mle.json +0 -151
- teradataml/analytics/mle/json/kmeans_mle.json +0 -97
- teradataml/analytics/mle/json/knn_mle.json +0 -141
- teradataml/analytics/mle/json/knnrecommender_mle.json +0 -111
- teradataml/analytics/mle/json/knnrecommenderpredict_mle.json +0 -75
- teradataml/analytics/mle/json/lar_mle.json +0 -78
- teradataml/analytics/mle/json/larpredict_mle.json +0 -69
- teradataml/analytics/mle/json/lda_mle.json +0 -130
- teradataml/analytics/mle/json/ldainference_mle.json +0 -78
- teradataml/analytics/mle/json/ldatopicsummary_mle.json +0 -64
- teradataml/analytics/mle/json/levenshteindistance_mle.json +0 -92
- teradataml/analytics/mle/json/linreg_mle.json +0 -42
- teradataml/analytics/mle/json/linregpredict_mle.json +0 -56
- teradataml/analytics/mle/json/minhash_mle.json +0 -113
- teradataml/analytics/mle/json/modularity_mle.json +0 -91
- teradataml/analytics/mle/json/naivebayespredict_mle_mle.json +0 -85
- teradataml/analytics/mle/json/naivebayesreduce_mle.json +0 -52
- teradataml/analytics/mle/json/naivebayestextclassifierpredict_mle_mle.json +0 -147
- teradataml/analytics/mle/json/naivebayestextclassifiertrainer2_mle.json +0 -108
- teradataml/analytics/mle/json/naivebayestextclassifiertrainer_mle.json +0 -102
- teradataml/analytics/mle/json/namedentityfinder_mle.json +0 -84
- teradataml/analytics/mle/json/namedentityfinderevaluatorreduce_mle.json +0 -43
- teradataml/analytics/mle/json/namedentityfindertrainer_mle.json +0 -64
- teradataml/analytics/mle/json/nerevaluator_mle.json +0 -54
- teradataml/analytics/mle/json/nerextractor_mle.json +0 -87
- teradataml/analytics/mle/json/nertrainer_mle.json +0 -89
- teradataml/analytics/mle/json/ngrams_mle.json +0 -137
- teradataml/analytics/mle/json/ngramsplitter_mle_mle.json +0 -137
- teradataml/analytics/mle/json/npath@coprocessor_mle.json +0 -73
- teradataml/analytics/mle/json/ntree@coprocessor_mle.json +0 -123
- teradataml/analytics/mle/json/pack_mle.json +0 -58
- teradataml/analytics/mle/json/pack_mle_mle.json +0 -58
- teradataml/analytics/mle/json/pagerank_mle.json +0 -81
- teradataml/analytics/mle/json/pathanalyzer_mle.json +0 -63
- teradataml/analytics/mle/json/pathgenerator_mle.json +0 -40
- teradataml/analytics/mle/json/pathstart_mle.json +0 -62
- teradataml/analytics/mle/json/pathsummarizer_mle.json +0 -72
- teradataml/analytics/mle/json/pivoting_mle.json +0 -71
- teradataml/analytics/mle/json/postagger_mle.json +0 -51
- teradataml/analytics/mle/json/randomsample_mle.json +0 -131
- teradataml/analytics/mle/json/randomwalksample_mle.json +0 -85
- teradataml/analytics/mle/json/roc_mle.json +0 -73
- teradataml/analytics/mle/json/sampling_mle.json +0 -75
- teradataml/analytics/mle/json/sax_mle.json +0 -154
- teradataml/analytics/mle/json/scale_mle.json +0 -93
- teradataml/analytics/mle/json/scalebypartition_mle.json +0 -89
- teradataml/analytics/mle/json/scalemap_mle.json +0 -44
- teradataml/analytics/mle/json/scalesummary_mle.json +0 -14
- teradataml/analytics/mle/json/sentenceextractor_mle.json +0 -41
- teradataml/analytics/mle/json/sentimentevaluator_mle.json +0 -43
- teradataml/analytics/mle/json/sentimentextractor_mle.json +0 -100
- teradataml/analytics/mle/json/sentimenttrainer_mle.json +0 -68
- teradataml/analytics/mle/json/seriessplitter_mle.json +0 -133
- teradataml/analytics/mle/json/sessionize_mle_mle.json +0 -62
- teradataml/analytics/mle/json/simplemovavg_mle.json +0 -48
- teradataml/analytics/mle/json/stringsimilarity_mle.json +0 -50
- teradataml/analytics/mle/json/stringsimilarity_mle_mle.json +0 -50
- teradataml/analytics/mle/json/svmdense_mle.json +0 -165
- teradataml/analytics/mle/json/svmdensepredict_mle.json +0 -95
- teradataml/analytics/mle/json/svmdensesummary_mle.json +0 -58
- teradataml/analytics/mle/json/svmsparse_mle.json +0 -148
- teradataml/analytics/mle/json/svmsparsepredict_mle_mle.json +0 -103
- teradataml/analytics/mle/json/svmsparsesummary_mle.json +0 -57
- teradataml/analytics/mle/json/textchunker_mle.json +0 -40
- teradataml/analytics/mle/json/textclassifier_mle.json +0 -51
- teradataml/analytics/mle/json/textclassifierevaluator_mle.json +0 -43
- teradataml/analytics/mle/json/textclassifiertrainer_mle.json +0 -103
- teradataml/analytics/mle/json/textmorph_mle.json +0 -63
- teradataml/analytics/mle/json/textparser_mle.json +0 -166
- teradataml/analytics/mle/json/texttagger_mle.json +0 -81
- teradataml/analytics/mle/json/texttokenizer_mle.json +0 -91
- teradataml/analytics/mle/json/tf_mle.json +0 -33
- teradataml/analytics/mle/json/tfidf_mle.json +0 -34
- teradataml/analytics/mle/json/univariatestatistics_mle.json +0 -81
- teradataml/analytics/mle/json/unpack_mle.json +0 -91
- teradataml/analytics/mle/json/unpack_mle_mle.json +0 -91
- teradataml/analytics/mle/json/unpivoting_mle.json +0 -63
- teradataml/analytics/mle/json/varmax_mle.json +0 -176
- teradataml/analytics/mle/json/vectordistance_mle.json +0 -179
- teradataml/analytics/mle/json/weightedmovavg_mle.json +0 -48
- teradataml/analytics/mle/json/xgboost_mle.json +0 -178
- teradataml/analytics/mle/json/xgboostpredict_mle.json +0 -104
- teradataml/analytics/sqle/Antiselect.py +0 -321
- teradataml/analytics/sqle/Attribution.py +0 -603
- teradataml/analytics/sqle/DecisionForestPredict.py +0 -408
- teradataml/analytics/sqle/GLMPredict.py +0 -430
- teradataml/analytics/sqle/MovingAverage.py +0 -543
- teradataml/analytics/sqle/NGramSplitter.py +0 -548
- teradataml/analytics/sqle/NPath.py +0 -632
- teradataml/analytics/sqle/NaiveBayesTextClassifierPredict.py +0 -515
- teradataml/analytics/sqle/Pack.py +0 -388
- teradataml/analytics/sqle/SVMSparsePredict.py +0 -464
- teradataml/analytics/sqle/Sessionize.py +0 -390
- teradataml/analytics/sqle/StringSimilarity.py +0 -400
- teradataml/analytics/sqle/Unpack.py +0 -503
- teradataml/analytics/sqle/json/antiselect_sqle.json +0 -21
- teradataml/analytics/sqle/json/attribution_sqle.json +0 -92
- teradataml/analytics/sqle/json/decisionforestpredict_sqle.json +0 -48
- teradataml/analytics/sqle/json/glmpredict_sqle.json +0 -48
- teradataml/analytics/sqle/json/h2opredict_sqle.json +0 -63
- teradataml/analytics/sqle/json/movingaverage_sqle.json +0 -58
- teradataml/analytics/sqle/json/naivebayestextclassifierpredict_sqle.json +0 -76
- teradataml/analytics/sqle/json/ngramsplitter_sqle.json +0 -126
- teradataml/analytics/sqle/json/npath_sqle.json +0 -67
- teradataml/analytics/sqle/json/pack_sqle.json +0 -47
- teradataml/analytics/sqle/json/pmmlpredict_sqle.json +0 -55
- teradataml/analytics/sqle/json/sessionize_sqle.json +0 -43
- teradataml/analytics/sqle/json/stringsimilarity_sqle.json +0 -39
- teradataml/analytics/sqle/json/svmsparsepredict_sqle.json +0 -74
- teradataml/analytics/sqle/json/unpack_sqle.json +0 -80
- teradataml/catalog/model_cataloging.py +0 -980
- teradataml/config/mlengine_alias_definitions_v1.0 +0 -118
- teradataml/config/mlengine_alias_definitions_v1.1 +0 -127
- teradataml/config/mlengine_alias_definitions_v1.3 +0 -129
- teradataml/table_operators/sandbox_container_util.py +0 -643
- teradataml-17.20.0.7.dist-info/RECORD +0 -1280
- {teradataml-17.20.0.7.dist-info → teradataml-20.0.0.1.dist-info}/top_level.txt +0 -0

@@ -0,0 +1,84 @@
+import sys
+import numpy as np
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.svm import SVC
+import base64, pickle
+
+DELIMITER = "\t"
+
+def get_value(value):
+    ret_val = value
+    try:
+        ret_val = float(value.replace(' ', ''))
+    except Exception as ex:
+        # If the value can't be converted to float, then it is a string.
+        pass
+    return ret_val
+
+
+def get_values_list(values, ignore_none=True):
+    ret_vals = []
+    for val in values:
+        if val == "" and ignore_none:
+            # Empty cell value in the database table.
+            continue
+        ret_vals.append(get_value(val))
+
+    return ret_vals
+
+if len(sys.argv) != 2:
+    sys.exit("Script command format: python deploy_script.py <enterprise/lake>")
+
+vantage_type = sys.argv[1]
+
+data_partition_column_values = []
+data_partition_column_indices = [5, 6]
+
+features = []
+labels = []
+
+while 1:
+    try:
+        line = input()
+        if line == '':  # Exit if user provides blank line
+            break
+        else:
+            values = line.split(DELIMITER)
+
+            features.append(get_values_list(values[:4]))
+            labels.append(get_values_list([values[4]]))
+            if not data_partition_column_values:
+                # Partition column values are the same for all rows. Hence, only read once.
+                for i, val in enumerate(data_partition_column_indices):
+                    data_partition_column_values.append(int(values[val]))
+
+    except EOFError:  # Exit if reached EOF or CTRL-D
+        break
+
+if not len(features):
+    sys.exit(0)
+
+X = np.array(features)
+y = np.array(labels)
+
+clf = make_pipeline(StandardScaler(), SVC(gamma='auto'))
+clf.fit(X, y)
+
+model_str = pickle.dumps(clf)
+
+# Prepare the corresponding model file name and extract model.
+partition_join = "_".join([str(x) for x in data_partition_column_values])
+# Replace '-' with '_' because partition column values can be negative.
+partition_join = partition_join.replace("-", "_")
+
+if vantage_type == "lake":
+    model = f"/tmp/sklearn_model_{partition_join}.pickle"
+    with open(model, "wb") as fp:
+        fp.write(model_str)
+elif vantage_type == "enterprise":
+    model = base64.b64encode(model_str)
+else:
+    sys.exit("Invalid vantage type. Use either 'lake' or 'enterprise'.")
+
+print(*(data_partition_column_values + [model]), sep=DELIMITER)
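
Aside from the diff itself: below is a minimal sketch of how a per-partition training script like the one above could be smoke-tested locally by piping tab-delimited rows into it. The file name "train_svc.py", the sample rows, and the use of plain "python" are assumptions for illustration only; the row layout (four feature columns, one label column, then the partition columns at indices 5 and 6) follows the script.

    import subprocess

    # Tab-delimited rows: 4 features, 1 label, then the two partition columns (indices 5 and 6).
    rows = [
        "5.1\t3.5\t1.4\t0.2\t0\t1\t10",
        "6.2\t2.9\t4.3\t1.3\t1\t1\t10",
        "5.9\t3.0\t5.1\t1.8\t2\t1\t10",
    ]
    proc = subprocess.run(
        ["python", "train_svc.py", "enterprise"],  # "train_svc.py" is a placeholder name
        input="\n".join(rows) + "\n",
        capture_output=True,
        text=True,
    )
    # Expected stdout: the partition values followed by the base64-encoded pickled
    # pipeline, separated by tabs.
    print(proc.stdout)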

@@ -0,0 +1,20 @@
+#!/usr/bin/Rscript
+
+# Read input from STDIN (standard input)
+input <- file("stdin", open = "r")
+while (length(line <- readLines(input, n = 1)) > 0) {
+  # Remove leading and trailing whitespace
+  line <- trimws(line)
+  # Split the line into words
+  words <- unlist(strsplit(line, "\\s+"))
+  # Increase counters
+  for (word in words) {
+    # Write the results to STDOUT (standard output);
+    # what we output here will be the input for the
+    # Reduce step, i.e., the input for reducer.py
+    #
+    # tab-delimited; the trivial word count is 1
+    cat(paste(word,"\t","1\n"))
+  }
+}
+close(input)

@@ -1,16 +1,16 @@
-#!/usr/bin/python
-import sys
-# input comes from STDIN (standard input)
-for line in sys.stdin:
-    # remove leading and trailing whitespace
-    line = line.strip()
-    # split the line into words
-    words = line.split()
-    # increase counters
-    for word in words:
-        # write the results to STDOUT (standard output);
-        # what we output here will be the input for the
-        # Reduce step, i.e. the input for reducer.py
-        #
-        # tab-delimited; the trivial word count is 1
+#!/usr/bin/python
+import sys
+# input comes from STDIN (standard input)
+for line in sys.stdin:
+    # remove leading and trailing whitespace
+    line = line.strip()
+    # split the line into words
+    words = line.split()
+    # increase counters
+    for word in words:
+        # write the results to STDOUT (standard output);
+        # what we output here will be the input for the
+        # Reduce step, i.e. the input for reducer.py
+        #
+        # tab-delimited; the trivial word count is 1
         print ('%s\t%s' % (word, 1))

@@ -1,16 +1,16 @@
-#!/usr/bin/python
-import sys
-# input comes from STDIN (standard input)
-for line in sys.stdin:
-    # remove leading and trailing whitespace
-    line = line.strip()
-    # split the line into words
-    words = line.split()
-    # increase counters
-    for newword in words:
-        # write the results to STDOUT (standard output);
-        # what we output here will be the input for the
-        # Reduce step, i.e. the input for reducer.py
-        #
-        # tab-delimited; the trivial word count is 1
+#!/usr/bin/python
+import sys
+# input comes from STDIN (standard input)
+for line in sys.stdin:
+    # remove leading and trailing whitespace
+    line = line.strip()
+    # split the line into words
+    words = line.split()
+    # increase counters
+    for newword in words:
+        # write the results to STDOUT (standard output);
+        # what we output here will be the input for the
+        # Reduce step, i.e. the input for reducer.py
+        #
+        # tab-delimited; the trivial word count is 1
         print ('%s,%s' % (newword, 1))
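
For orientation only (this is not part of the package diff): word-count mappers like the two above are typically wired into the Script table operator. The sketch below is an assumption-laden outline of that wiring; the table name "barrier", the database "mydb", the column names and sizes, and the exact parameter set may differ from the installed teradataml API. It also assumes an active connection created earlier with create_context() and that mapper.py has already been installed in the target database.

    from collections import OrderedDict
    from teradatasqlalchemy import VARCHAR
    from teradataml import DataFrame, Script

    # "barrier" and "mydb" are placeholders; a connection is assumed to exist already.
    text_df = DataFrame("barrier")
    sto = Script(
        data=text_df,
        script_name="mapper.py",
        files_local_path=".",
        script_command="python3 ./mydb/mapper.py",
        returns=OrderedDict([("word", VARCHAR(32)), ("count_input", VARCHAR(10))]),
    )
    result = sto.execute_script()  # returns a teradataml DataFrame of (word, count) rows
    print(result)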
File without changes

@@ -0,0 +1,171 @@
+import sys
+import numpy as np
+import pickle
+import base64
+
+DELIMITER = '\t'
+
+def get_values_list(values, types, model_obj):
+    ret_vals = []
+    for i, val in enumerate(values):
+        if type(model_obj).__name__ == "MultiLabelBinarizer" and val == "":
+            continue
+        ret_vals.append(convert_to_type(val, types[i]))
+    return ret_vals
+
+def convert_to_type(val, typee):
+    if typee == 'int':
+        return int(val) if val != "" else np.nan
+    if typee == 'float':
+        if isinstance(val, str):
+            val = val.replace(' ', '')
+        return float(val) if val != "" else np.nan
+    if typee == 'bool':
+        return eval(val) if val != "" else None
+    return str(val) if val != "" else None
+
+def get_classes_as_list(classes, actual_type):
+    if classes == "None":
+        return None
+    if actual_type == "None":
+        sys.exit("type of class elements is None where class elements exists.")
+
+    # separated by '--'
+    classes = classes.split("--")
+
+    for idx, cls in enumerate(classes):
+        classes[idx] = convert_to_type(cls, actual_type)
+
+    return classes
+
+
+def splitter(strr, delim=",", convert_to="str"):
+    """
+    Split the string based on delimiter and convert to the type specified.
+    """
+    if strr == "None":
+        return []
+    return [convert_to_type(i, convert_to) for i in strr.split(delim)]
+
+# Arguments to the Script
+if len(sys.argv) != 10:
+    # 10 command line arguments should be passed to this file.
+    # 1: file to be run
+    # 2. function name
+    # 3. No of feature columns.
+    # 4. No of class labels.
+    # 5. Comma separated indices of partition columns.
+    # 6. Comma separated types of all the data columns.
+    # 7. Model file prefix to generate the model file using partition columns.
+    # 8. classes (separated by '--') - should be converted to list. "None" if no classes exists.
+    # 9. type of elements in passed in classes. "None" if no classes exists.
+    # 10. Flag to check the system type. True means Lake, Enterprise otherwise.
+    sys.exit("10 arguments command line arguments should be passed: file to be run,"
+             " function name, no of feature columns, no of class labels, comma separated indices"
+             " of partition columns, comma separated types of all columns, model file prefix ,"
+             " classes, type of elements in classes and flag to check lake or enterprise.")
+
+is_lake_system = eval(sys.argv[9])
+if not is_lake_system:
+    db = sys.argv[0].split("/")[1]
+function_name = sys.argv[1]
+n_f_cols = int(sys.argv[2])
+n_c_labels = int(sys.argv[3])
+data_column_types = splitter(sys.argv[5], delim="--")
+data_partition_column_indices = splitter(sys.argv[4], convert_to="int")  # indices are integers.
+model_file_prefix = sys.argv[6]
+class_type = sys.argv[8]
+classes = get_classes_as_list(sys.argv[7], class_type)
+
+data_partition_column_types = [data_column_types[idx] for idx in data_partition_column_indices]
+
+model = None
+
+# Data Format (n_features, k_labels, one data_partition_column):
+# feature1, feature2, ..., featuren, label1, label2, ... labelk, data_partition_column1, ...,
+# data_partition_columnn
+# There can be no labels also.
+
+# Read data from table through STO and build features and labels.
+features = []
+labels = []
+data_partition_column_values = []
+
+
+while 1:
+    try:
+        line = input()
+        if line == '':  # Exit if user provides blank line
+            break
+        else:
+            values = line.split(DELIMITER)
+
+            if not data_partition_column_values:
+                # Partition column values are the same for all rows. Hence, only read once.
+                for i, val in enumerate(data_partition_column_indices):
+                    data_partition_column_values.append(
+                        convert_to_type(values[val], typee=data_partition_column_types[i])
+                    )
+
+                # Prepare the corresponding model file name and extract model.
+                partition_join = "_".join([str(x) for x in data_partition_column_values])
+                # Replace '-' with '_' because partition column values can be negative.
+                partition_join = partition_join.replace("-", "_")
+
+                model_file_path = f"{model_file_prefix}_{partition_join}"\
+                    if is_lake_system else \
+                    f"./{db}/{model_file_prefix}_{partition_join}"
+
+                with open(model_file_path, "rb") as fp:
+                    model = pickle.loads(fp.read())
+
+                if model is None:
+                    sys.exit("Model file is not installed in Vantage.")
+
+            values = get_values_list(values, data_column_types, model)
+            values = values[:-len(data_partition_column_indices)]  # Already processed partition columns.
+            features.append(values[:n_f_cols])
+            if n_c_labels > 0:
+                labels.append(values[n_f_cols:(n_f_cols+n_c_labels)])
+
+
+    except EOFError:  # Exit if reached EOF or CTRL-D
+        break
+
+if not len(features):
+    sys.exit(0)
+
+# Fit/partial_fit the model to the data.
+if function_name == "partial_fit":
+    if labels and classes:
+        model.partial_fit(np.array(features), np.array(labels), classes=classes)
+    elif labels:
+        model.partial_fit(np.array(features), np.array(labels))
+    elif classes:
+        model.partial_fit(np.array(features), classes=classes)
+    else:
+        model.partial_fit(np.array(features))
+elif function_name == "fit":
+    # For IsotonicRegression, fit() accepts training target as
+    # y: array-like of shape (n_samples,).
+    if labels:
+        labels = np.array(labels).reshape(-1) \
+            if model.__class__.__name__ == "IsotonicRegression" else np.array(labels)
+        model.fit(np.array(features), labels)
+    else:
+        model.fit(np.array(features))
+
+model_str = pickle.dumps(model)
+
+if is_lake_system:
+    model_file_path = f"/tmp/{model_file_prefix}_{partition_join}.pickle"
+
+    # Write to file in Vantage, to be used in predict/scoring.
+    with open(model_file_path, "wb") as fp:
+        fp.write(model_str)
+
+model_data = model_file_path if is_lake_system \
+    else base64.b64encode(model_str)
+
+# Print the model to be read from script.
+print(*(data_partition_column_values + [model_data]), sep=DELIMITER)
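
To make the 10-argument contract above concrete, here is a hypothetical assembly of the command line for a table laid out as four float feature columns, one integer label column and one integer partition column at index 5. The path "./mydb/sklearn_fit.py", the prefix "sklearn_model" and every other value are assumptions for illustration, not the arguments teradataml actually generates.

    # Hypothetical argument list mirroring the contract documented in the script above.
    argv = [
        "./mydb/sklearn_fit.py",                  # argv[0]; on Enterprise the database name is parsed from this path
        "fit",                                    # function to call on the unpickled estimator
        "4",                                      # number of feature columns
        "1",                                      # number of label columns
        "5",                                      # comma separated partition column indices
        "float--float--float--float--int--int",  # '--' separated types of all six data columns
        "sklearn_model",                          # model file prefix
        "None",                                   # classes, or "None" when not applicable
        "None",                                   # type of the class elements, or "None"
        "False",                                  # is_lake_system flag (False -> Enterprise)
    ]
    print(" ".join(argv))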

@@ -0,0 +1,127 @@
+import sys
+import numpy as np
+import pickle
+import math
+
+DELIMITER = '\t'
+
+def get_values_list(values, types):
+    ret_vals = []
+    for i, val in enumerate(values):
+        ret_vals.append(convert_to_type(val, types[i]))
+    return ret_vals
+
+def convert_to_type(val, typee):
+    if typee == 'int':
+        return int(val) if val != "" else np.nan
+    if typee == 'float':
+        if isinstance(val, str):
+            val = val.replace(' ', '')
+        return float(val) if val != "" else np.nan
+    if typee == 'bool':
+        return eval(val) if val != "" else None
+    return str(val) if val != "" else None
+
+def splitter(strr, delim=",", convert_to="str"):
+    """
+    Split the string based on delimiter and convert to the type specified.
+    """
+    if strr == "None":
+        return []
+    return [convert_to_type(i, convert_to) for i in strr.split(delim)]
+
+# Arguments to the Script
+if len(sys.argv) != 7:
+    # 7 command line arguments should be passed to this file.
+    # 1: file to be run
+    # 2. No of feature columns.
+    # 3. No of class labels.
+    # 4. Comma separated indices of partition columns.
+    # 5. Comma separated types of all the data columns.
+    # 6. Model file prefix to generate the model file using partition columns.
+    # 7. Flag to check the system type. True means Lake, Enterprise otherwise.
+    sys.exit("7 arguments should be passed to this file - file to be run, "\
+             "no of feature columns, no of class labels, comma separated indices of partition "
+             "columns, comma separated types of all columns, model file prefix to generate model "
+             "file using partition columns and flag to check lake or enterprise.")
+
+is_lake_system = eval(sys.argv[6])
+if not is_lake_system:
+    db = sys.argv[0].split("/")[1]
+n_f_cols = int(sys.argv[1])
+n_c_labels = int(sys.argv[2])
+model_file_prefix = sys.argv[5]
+data_column_types = splitter(sys.argv[4], delim="--")
+data_partition_column_indices = splitter(sys.argv[3], convert_to="int")  # indices are integers.
+
+data_partition_column_types = [data_column_types[idx] for idx in data_partition_column_indices]
+
+model = None
+
+# Data Format (n_features, k_labels, one data_partition_columns):
+# feature1, feature2, ..., featuren, label1, label2, ... labelk, data_partition_column1, ...,
+# data_partition_columnn.
+# There can be no labels also.
+
+# Read data from table through STO and build features and labels.
+features = []
+labels = []
+data_partition_column_values = []
+
+
+while 1:
+    try:
+        line = input()
+        if line == '':  # Exit if user provides blank line
+            break
+        else:
+            values = line.split(DELIMITER)
+            values = get_values_list(values, data_column_types)
+            features.append(values[:n_f_cols])
+            if n_c_labels > 0:
+                labels.append(values[n_f_cols:(n_f_cols+n_c_labels)])
+            if not data_partition_column_values:
+                # Partition column values are the same for all rows. Hence, only read once.
+                for i, val in enumerate(data_partition_column_indices):
+                    data_partition_column_values.append(
+                        convert_to_type(values[val], typee=data_partition_column_types[i])
+                    )
+
+                # Prepare the corresponding model file name and extract model.
+                partition_join = "_".join([str(x) for x in data_partition_column_values])
+                # Replace '-' with '_' because partition column values can be negative.
+                partition_join = partition_join.replace("-", "_")
+
+                model_file_path = f"{model_file_prefix}_{partition_join}" \
+                    if is_lake_system else \
+                    f"./{db}/{model_file_prefix}_{partition_join}"
+
+                with open(model_file_path, "rb") as fp:
+                    model = pickle.loads(fp.read())
+
+                if model is None:
+                    sys.exit("Model file is not installed in Vantage.")
+
+    except EOFError:  # Exit if reached EOF or CTRL-D
+        break
+
+if not len(features):
+    sys.exit(0)
+
+# Call fit_predict with features and labels when n_c_labels > 0.
+if n_c_labels > 0:
+    predictions = model.fit_predict(np.array(features), np.array(labels))
+else:
+    predictions = model.fit_predict(np.array(features))
+
+# Export results to the database through standard output.
+for i in range(len(predictions)):
+    if n_c_labels > 0:
+        # Add labels into output, if user passes it.
+        result_list = features[i] + labels[i] + [predictions[i]]
+    else:
+        result_list = features[i] + [predictions[i]]
+    print(*(data_partition_column_values +
+            ['' if (val is None or (not isinstance(val, str) and (math.isnan(val) or math.isinf(val))))
+             else val for val in result_list]),
+          sep=DELIMITER)
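
A small, self-contained illustration of the output masking used at the end of the script above: values that are None, NaN or Inf are emitted as empty fields so the database reads them as NULLs. The sample values here are made up.

    import math

    DELIMITER = "\t"
    partition_values = [1]
    result_list = [5.1, float("nan"), "setosa", None, float("inf"), 0]

    # Non-string values that are None, NaN or Inf are replaced with an empty field.
    print(*(partition_values +
            ['' if (val is None or (not isinstance(val, str) and (math.isnan(val) or math.isinf(val))))
             else val for val in result_list]),
          sep=DELIMITER)
    # Prints 1, 5.1, "setosa" and 0 separated by tabs, with empty fields where the
    # NaN, None and Inf values were.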

@@ -0,0 +1,108 @@
+import sys, json
+import pickle, base64, importlib, numpy as np
+from collections import OrderedDict
+
+func_name = "<func_name>"
+module_name = "<module_name>"
+params = json.loads('<params>')
+
+DELIMITER = '\t'
+
+def convert_to_type(val, typee):
+    if typee == 'int':
+        return int(val) if val != "" else np.nan
+    if typee == 'float':
+        if isinstance(val, str):
+            val = val.replace(' ', '')
+        return float(val) if val != "" else np.nan
+    if typee == 'bool':
+        return eval(val) if val != "" else None
+    return str(val) if val != "" else None
+
+def splitter(strr, delim=",", convert_to="str"):
+    """
+    Split the string based on delimiter and convert to the type specified.
+    """
+    if strr == "None":
+        return []
+    return [convert_to_type(i, convert_to) for i in strr.split(delim)]
+
+# Arguments to the Script.
+if len(sys.argv) != 4:
+    # 4 command line arguments should be passed to this file.
+    # 1: file to be run
+    # 2. Comma separated indices of partition columns.
+    # 3. Comma separated types of all the data columns.
+    # 4. Data columns information separated by "--" where each data column information is in the form
+    # "<arg_name>-<comma separated data indices>-<comma separated data types>".
+    sys.exit("4 arguments command line arguments should be passed: file to be run,"
+             " comma separated indices of partition columns, comma separated types of all columns,"
+             " data columns information separated by '--' where each data column information is"
+             " in the form '<arg_name>-<comma separated data indices>-<comma separated data types>'.")
+
+db = sys.argv[0].split("/")[1]
+data_partition_column_indices = splitter(sys.argv[1], convert_to="int")  # indices are integers.
+data_column_types = splitter(sys.argv[2], delim="--")
+
+data_partition_column_types = [data_column_types[idx] for idx in data_partition_column_indices]
+
+# Data related arguments information of indices and types.
+data_args_indices_types = OrderedDict()
+
+# Data related arguments values - prepare dictionary and populate data later.
+data_args_values = {}
+
+for data_arg in sys.argv[3].split("--"):
+    arg_name, indices, types = data_arg.split("-")
+    indices = splitter(indices, convert_to="int")
+    types = splitter(types)
+
+    data_args_indices_types[arg_name] = {"indices": indices, "types": types}
+    data_args_values[arg_name] = []  # Keeping empty for each data arg name and populate data later.
+
+data_partition_column_values = []
+data_present = False
+
+# Read data - columns information is passed as command line argument and stored in
+# data_args_indices_types dictionary.
+while 1:
+    try:
+        line = input()
+        if line == '':  # Exit if user provides blank line
+            break
+        else:
+            data_present = True
+            values = line.split(DELIMITER)
+            if not data_partition_column_values:
+                # Partition column values are the same for all rows. Hence, only read once.
+                for i, val in enumerate(data_partition_column_indices):
+                    data_partition_column_values.append(
+                        convert_to_type(values[val], typee=data_partition_column_types[i])
+                    )
+
+            # Prepare data dictionary containing only arguments related to data.
+            for arg_name in data_args_values:
+                data_indices = data_args_indices_types[arg_name]["indices"]
+                types = data_args_indices_types[arg_name]["types"]
+                cur_row = []
+                for idx, data_idx in enumerate(data_indices):
+                    cur_row.append(convert_to_type(values[data_idx], types[idx]))
+                data_args_values[arg_name].append(cur_row)
+    except EOFError:  # Exit if reached EOF or CTRL-D
+        break
+
+if not data_present:
+    sys.exit(0)
+
+# Update data as numpy arrays.
+for arg_name in data_args_values:
+    np_values = np.array(data_args_values[arg_name])
+    data_args_values[arg_name] = np_values
+
+# Combine all arguments.
+all_args = {**data_args_values, **params}
+
+module_ = importlib.import_module(module_name)
+sklearn_model = getattr(module_, func_name)(**all_args)
+
+print(*(data_partition_column_values + [base64.b64encode(pickle.dumps(sklearn_model))]), sep=DELIMITER)
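
As a final aside (not part of the diff), the core of the template above reduces to instantiating an estimator by module and function name and shipping it back pickled and base64-encoded. The sketch below fills the <module_name>, <func_name> and <params> placeholders with assumed example values purely for illustration.

    import base64
    import importlib
    import json
    import pickle

    # Assumed example values standing in for the template placeholders.
    module_name = "sklearn.linear_model"
    func_name = "LinearRegression"
    params = json.loads('{"fit_intercept": true}')

    # Resolve the callable and instantiate it with the JSON-decoded keyword arguments.
    module_ = importlib.import_module(module_name)
    estimator = getattr(module_, func_name)(**params)

    # The on-database script prints this alongside the partition key values.
    encoded = base64.b64encode(pickle.dumps(estimator))
    print(encoded[:40])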