teradataml 20.0.0.0__py3-none-any.whl → 20.0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +183 -0
- teradataml/__init__.py +6 -3
- teradataml/_version.py +2 -2
- teradataml/analytics/__init__.py +3 -2
- teradataml/analytics/analytic_function_executor.py +275 -40
- teradataml/analytics/analytic_query_generator.py +92 -0
- teradataml/analytics/byom/__init__.py +3 -2
- teradataml/analytics/json_parser/metadata.py +1 -0
- teradataml/analytics/json_parser/utils.py +17 -21
- teradataml/analytics/meta_class.py +40 -1
- teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
- teradataml/analytics/sqle/__init__.py +10 -2
- teradataml/analytics/table_operator/__init__.py +3 -2
- teradataml/analytics/uaf/__init__.py +21 -2
- teradataml/analytics/utils.py +62 -1
- teradataml/analytics/valib.py +1 -1
- teradataml/automl/__init__.py +1553 -319
- teradataml/automl/custom_json_utils.py +139 -61
- teradataml/automl/data_preparation.py +276 -319
- teradataml/automl/data_transformation.py +163 -81
- teradataml/automl/feature_engineering.py +402 -239
- teradataml/automl/feature_exploration.py +9 -2
- teradataml/automl/model_evaluation.py +48 -51
- teradataml/automl/model_training.py +291 -189
- teradataml/catalog/byom.py +8 -8
- teradataml/catalog/model_cataloging_utils.py +1 -1
- teradataml/clients/auth_client.py +133 -0
- teradataml/clients/pkce_client.py +1 -1
- teradataml/common/aed_utils.py +3 -2
- teradataml/common/constants.py +48 -6
- teradataml/common/deprecations.py +13 -7
- teradataml/common/garbagecollector.py +156 -120
- teradataml/common/messagecodes.py +6 -1
- teradataml/common/messages.py +3 -1
- teradataml/common/sqlbundle.py +1 -1
- teradataml/common/utils.py +103 -11
- teradataml/common/wrapper_utils.py +1 -1
- teradataml/context/context.py +121 -31
- teradataml/data/advertising.csv +201 -0
- teradataml/data/bank_marketing.csv +11163 -0
- teradataml/data/bike_sharing.csv +732 -0
- teradataml/data/boston2cols.csv +721 -0
- teradataml/data/breast_cancer.csv +570 -0
- teradataml/data/complaints_test_tokenized.csv +353 -0
- teradataml/data/complaints_tokens_model.csv +348 -0
- teradataml/data/covid_confirm_sd.csv +83 -0
- teradataml/data/customer_segmentation_test.csv +2628 -0
- teradataml/data/customer_segmentation_train.csv +8069 -0
- teradataml/data/dataframe_example.json +10 -0
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +3 -1
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +5 -1
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +61 -1
- teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/FTest.py +105 -26
- teradataml/data/docs/sqle/docs_17_20/GLM.py +162 -1
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
- teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
- teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +119 -1
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +93 -1
- teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +163 -1
- teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
- teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
- teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +12 -4
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +7 -1
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +72 -7
- teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
- teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
- teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
- teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
- teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
- teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
- teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
- teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
- teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
- teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
- teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
- teradataml/data/dwt2d_dataTable.csv +65 -0
- teradataml/data/dwt_dataTable.csv +8 -0
- teradataml/data/dwt_filterTable.csv +3 -0
- teradataml/data/finance_data4.csv +13 -0
- teradataml/data/glm_example.json +28 -1
- teradataml/data/grocery_transaction.csv +19 -0
- teradataml/data/housing_train_segment.csv +201 -0
- teradataml/data/idwt2d_dataTable.csv +5 -0
- teradataml/data/idwt_dataTable.csv +8 -0
- teradataml/data/idwt_filterTable.csv +3 -0
- teradataml/data/insect2Cols.csv +61 -0
- teradataml/data/interval_data.csv +5 -0
- teradataml/data/jsons/paired_functions.json +14 -0
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +99 -27
- teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +166 -83
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +90 -14
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +48 -5
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +5 -3
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +31 -11
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +3 -2
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
- teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +2 -1
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +16 -16
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +19 -1
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +168 -15
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +50 -1
- teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
- teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
- teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +25 -7
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +17 -4
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +157 -80
- teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
- teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
- teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
- teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
- teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
- teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
- teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
- teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
- teradataml/data/kmeans_example.json +5 -0
- teradataml/data/kmeans_table.csv +10 -0
- teradataml/data/load_example_data.py +8 -2
- teradataml/data/naivebayestextclassifier_example.json +1 -1
- teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
- teradataml/data/onehot_encoder_train.csv +4 -0
- teradataml/data/openml_example.json +29 -0
- teradataml/data/peppers.png +0 -0
- teradataml/data/real_values.csv +14 -0
- teradataml/data/sax_example.json +8 -0
- teradataml/data/scale_attributes.csv +3 -0
- teradataml/data/scale_example.json +52 -1
- teradataml/data/scale_input_part_sparse.csv +31 -0
- teradataml/data/scale_input_partitioned.csv +16 -0
- teradataml/data/scale_input_sparse.csv +11 -0
- teradataml/data/scale_parameters.csv +3 -0
- teradataml/data/scripts/deploy_script.py +21 -2
- teradataml/data/scripts/sklearn/sklearn_fit.py +40 -37
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +22 -30
- teradataml/data/scripts/sklearn/sklearn_function.template +42 -24
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +23 -33
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +19 -28
- teradataml/data/scripts/sklearn/sklearn_score.py +32 -32
- teradataml/data/scripts/sklearn/sklearn_transform.py +85 -42
- teradataml/data/star_pivot.csv +8 -0
- teradataml/data/templates/open_source_ml.json +2 -1
- teradataml/data/teradataml_example.json +97 -1
- teradataml/data/timestamp_data.csv +4 -0
- teradataml/data/titanic_dataset_unpivoted.csv +19 -0
- teradataml/data/uaf_example.json +55 -1
- teradataml/data/unpivot_example.json +15 -0
- teradataml/data/url_data.csv +9 -0
- teradataml/data/windowdfft.csv +16 -0
- teradataml/data/ztest_example.json +16 -0
- teradataml/dataframe/copy_to.py +9 -4
- teradataml/dataframe/data_transfer.py +125 -64
- teradataml/dataframe/dataframe.py +575 -57
- teradataml/dataframe/dataframe_utils.py +47 -9
- teradataml/dataframe/fastload.py +273 -90
- teradataml/dataframe/functions.py +339 -0
- teradataml/dataframe/row.py +160 -0
- teradataml/dataframe/setop.py +2 -2
- teradataml/dataframe/sql.py +740 -18
- teradataml/dataframe/window.py +1 -1
- teradataml/dbutils/dbutils.py +324 -18
- teradataml/geospatial/geodataframe.py +1 -1
- teradataml/geospatial/geodataframecolumn.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +13 -13
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +254 -122
- teradataml/options/__init__.py +16 -5
- teradataml/options/configure.py +39 -6
- teradataml/options/display.py +2 -2
- teradataml/plot/axis.py +4 -4
- teradataml/scriptmgmt/UserEnv.py +26 -19
- teradataml/scriptmgmt/lls_utils.py +120 -16
- teradataml/table_operators/Script.py +4 -5
- teradataml/table_operators/TableOperator.py +160 -26
- teradataml/table_operators/table_operator_util.py +88 -41
- teradataml/table_operators/templates/dataframe_udf.template +63 -0
- teradataml/telemetry_utils/__init__.py +0 -0
- teradataml/telemetry_utils/queryband.py +52 -0
- teradataml/utils/validators.py +41 -3
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +191 -6
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +263 -185
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
teradataml/data/docs/sqle/docs_17_20/ScaleFit.py

@@ -1,5 +1,9 @@
 def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP",
-             global_scale=False, multiplier='1', intercept='0', **generic_arguments):
+             global_scale=False, multiplier='1', intercept='0',
+             parameter_data=None, attribute_data=None, partition_columns=None,
+             ignoreinvalid_locationscale=False, unused_attributes="UNSCALED",
+             attribute_name_column=None, attribute_value_column=None, target_attributes=None,
+             **generic_arguments):
     """
     DESCRIPTION:
         ScaleFit() function outputs statistics to input to ScaleTransform() function,
@@ -15,6 +19,9 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
             Required Argument.
             Specifies the input teradataml DataFrame column(s) for which to output statistics.
             The columns must contain numeric data in the range (-1e³⁰⁸, 1e³⁰⁸).
+            Note:
+                * This argument cannot be used with "target_attributes", "attribute_name_column",
+                  "attribute_value_column".
             Types: str OR list of Strings (str)

         scale_method:
@@ -124,6 +131,60 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
             Default Value: "0"
             Types: str OR list of String (str)

+        parameter_data:
+            Optional Argument.
+            Specifies the input teradataml DataFrame containing the parameters.
+            Note:
+                * This is valid when "data_partition_column" is used.
+            Types: teradataml DataFrame
+
+        attribute_data:
+            Optional Argument.
+            Specifies the input teradataml DataFrame containing the attributes.
+            Note:
+                * This is valid when "data_partition_column" is used.
+            Types: teradataml DataFrame
+
+        partition_columns:
+            Optional Argument.
+            Specifies the column name in the "data" to partition the input.
+            Types: str OR list of Strings (str)
+
+        ignoreinvalid_locationscale:
+            Optional Argument.
+            Specifies whether to ignore invalid values of location and scale parameters.
+            Default Value: False
+            Types: bool
+
+        unused_attributes:
+            Optional Argument.
+            Specifies whether to emit out unused attributes of different partitions
+            as unscaled values or NULLs (for dense input).
+            Permitted Values: 'NULLIFY', 'UNSCALED'
+            Default Value: 'UNSCALED'
+            Types: str
+
+        attribute_name_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute names.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute values.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        target_attributes:
+            Optional Argument.
+            Specifies the attributes for which scaling should be performed.
+            Note:
+                * This is required for sparse input.
+            Types: str OR list of Strings (str)
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept.
             Below are the generic keyword arguments:
@@ -174,9 +235,16 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP

         # Load the example data.
         load_example_data("teradataml", ["scale_housing"])
+        load_example_data('scale', ["scale_attributes", "scale_parameters",
+                                    "scale_input_partitioned", "scale_input_sparse", "scale_input_part_sparse"])

         # Create teradataml DataFrame.
         scaling_house = DataFrame.from_table("scale_housing")
+        scale_attribute = DataFrame.from_table("scale_attributes")
+        scale_parameter = DataFrame.from_table("scale_parameters")
+        scale_inp_part = DataFrame.from_table("scale_input_partitioned")
+        scale_inp_sparse = DataFrame.from_table("scale_input_sparse")
+        scale_inp_part_sparse = DataFrame.from_table("scale_input_part_sparse")

         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -194,4 +262,54 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
         # Print the result DataFrame.
         print(fit_obj.output)
         print(fit_obj.output_data)
+
+        # Example 2: Create statistics to scale "fare" and "age" columns
+        #            with respect to maximum absolute value with partition column
+        #            for dense input.
+        fit_obj = ScaleFit(data=scale_inp_part,
+                           attribute_data=scale_attribute,
+                           parameter_data=scale_parameter,
+                           target_columns=['fare', 'age'],
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
+
+        # Example 3: Create statistics to scale "fare" column with respect to
+        #            range for sparse input.
+        fit_obj = ScaleFit(data=scale_inp_sparse,
+                           target_attribute=['fare'],
+                           scale_method="range",
+                           miss_value="keep",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
+
+        # Example 4: Create statistics to scale "fare" column with respect to
+        #            maximum absolute value for sparse input with partition column.
+        fit_obj = ScaleFit(data=scale_inp_part_sparse,
+                           parameter_data=scale_parameter,
+                           attribute_data=scale_attribute,
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value',
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
     """
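The ScaleFit() changes above split the API along a dense/sparse axis: dense input keeps one column per feature and is addressed with "target_columns", while sparse input stores one (attribute, value) pair per row and is addressed with "target_attributes", "attribute_name_column" and "attribute_value_column". Below is a minimal sketch of the two layouts, not part of the diff; the column names mirror the docstring examples ("pid", "passenger", "attribute_column", "attribute_value") and the values are made up for illustration.

    import pandas as pd

    # Dense layout: one column per feature; scaled via "target_columns".
    dense = pd.DataFrame({
        "pid":       [1, 1, 2, 2],                  # partition key -> "data_partition_column"
        "passenger": [10, 11, 12, 13],              # row identifier
        "fare":      [7.25, 71.28, 8.05, 53.10],    # illustrative values
        "age":       [22.0, 38.0, 26.0, 35.0],
    })

    # Sparse layout: one (name, value) pair per row; ScaleFit() is pointed at it
    # via "attribute_name_column", "attribute_value_column" and "target_attributes".
    sparse = dense.melt(
        id_vars=["pid", "passenger"],
        var_name="attribute_column",    # -> attribute_name_column
        value_name="attribute_value",   # -> attribute_value_column
    )
    print(sparse.head())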
teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py

@@ -1,4 +1,5 @@
-def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments):
+def ScaleTransform(data=None, object=None, accumulate=None, attribute_name_column=None,
+                   attribute_value_column=None, **generic_arguments):
     """
     DESCRIPTION:
         ScaleTransform() function scales specified columns in input data, using ScaleFit() function output.
@@ -21,6 +22,20 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)
             Specifies the names of input teradataml DataFrame columns to copy to the output.
             Types: str OR list of Strings (str)

+        attribute_name_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute names.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute values.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept.
             Below are the generic keyword arguments:
@@ -70,9 +85,16 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)

         # Load the example data.
         load_example_data("teradataml", ["scale_housing"])
+        load_example_data('scale', ["scale_attributes", "scale_parameters",
+                                    "scale_input_partitioned", "scale_input_sparse", "scale_input_part_sparse"])

         # Create teradataml DataFrame.
         scaling_house = DataFrame.from_table("scale_housing")
+        scale_attribute = DataFrame.from_table("scale_attributes")
+        scale_parameter = DataFrame.from_table("scale_parameters")
+        scale_inp_part = DataFrame.from_table("scale_input_partitioned")
+        scale_inp_sparse = DataFrame.from_table("scale_input_sparse")
+        scale_inp_part_sparse = DataFrame.from_table("scale_input_part_sparse")

         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -107,4 +129,74 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)

         # Print the result DataFrame.
         print(obj1.result)
+
+        # Example 3: Create statistics to scale "fare" and "age" columns with respect to
+        #            maximum absolute value for partitioned input.
+        fit_obj = ScaleFit(data=scale_inp_part,
+                           attribute_data=scale_attribute,
+                           parameter_data=scale_parameter,
+                           target_columns=['fare', 'age'],
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        obj = ScaleTransform(data=scale_inp_part,
+                             object=fit_obj.output,
+                             accumulate=['pid', 'passenger'],
+                             data_partition_column='pid',
+                             object_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
+
+        # Example 4: Create statistics to scale "fare" column with respect to
+        #            range for sparse input.
+        fit_obj = ScaleFit(data=scale_inp_sparse,
+                           target_attribute=['fare'],
+                           scale_method="range",
+                           miss_value="keep",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value')
+
+        obj = ScaleTransform(data=scale_inp_sparse,
+                             object=fit_obj.output,
+                             accumulate=['passenger'],
+                             attribute_name_column='attribute_column',
+                             attribute_value_column='attribute_value'
+                             )
+
+        # Print the result DataFrame.
+        print(obj.result)
+
+
+        # Example 5: Create statistics to scale "fare" column with respect to
+        #            maximum absolute value for sparse input with partition column.
+        fit_obj = ScaleFit(data=scale_inp_part_sparse,
+                           parameter_data=scale_parameter,
+                           attribute_data=scale_attribute,
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value',
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        obj = ScaleTransform(data=scale_inp_part_sparse,
+                             object=fit_obj.output,
+                             accumulate=["passenger", 'pid'],
+                             attribute_name_column='attribute_column',
+                             attribute_value_column='attribute_value',
+                             object_partition_column='pid',
+                             data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
     """
teradataml/data/docs/sqle/docs_17_20/Shap.py (new file)

@@ -0,0 +1,197 @@
+def Shap(data = None, object = None, training_function = "TD_GLM",
+         model_type = "Regression", input_columns = None, detailed = False,
+         accumulate = None, num_parallel_trees = 1000, num_boost_rounds = 10,
+         **generic_arguments):
+
+    """
+    DESCRIPTION:
+        Function to get explanation for individual predictions
+        (feature contributions) in a machine learning model based on the
+        co-operative game theory optimal Shapley values.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        object:
+            Required Argument.
+            Specifies the teradataml DataFrame containing the model data.
+            Types: teradataml DataFrame
+
+        training_function:
+            Required Argument.
+            Specifies the model type name.
+            Default Value: "TD_GLM"
+            Permitted Values: TD_GLM, TD_DECISIONFOREST, TD_XGBOOST
+            Types: str
+
+        model_type:
+            Required Argument.
+            Specifies the operation to be performed on input data.
+            Default Value: "Regression"
+            Permitted Values: Regression, Classification
+            Types: str
+
+        input_columns:
+            Required Argument.
+            Specifies the names of the columns in "data" used for
+            training the model (predictors, features or independent variables).
+            Types: str OR list of Strings (str)
+
+        detailed:
+            Optional Argument.
+            Specifies whether to output detailed shap information about the
+            forest trees.
+            Default Value: False
+            Types: bool
+
+        accumulate:
+            Optional Argument.
+            Specifies the names of the input columns to copy to the output teradataml DataFrame.
+            Types: str OR list of Strings (str)
+
+        num_parallel_trees:
+            Optional Argument.
+            Specifies the number of parallel boosted trees. Each boosted tree
+            operates on a sample of data that fits in an AMP's memory.
+            Note:
+                * By default, "num_parallel_trees" is chosen equal to the number of AMPs with
+                  data.
+            Default Value: 1000
+            Types: int
+
+        num_boost_rounds:
+            Optional Argument.
+            Specifies the number of iterations to boost the weak classifiers. The
+            iterations must be an int in the range [1, 100000].
+            Default Value: 10
+            Types: int
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Shap.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as ShapObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            1. output
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        Vantage user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        Vantage user connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("byom", "iris_input")
+        load_example_data("teradataml", ["cal_housing_ex_raw"])
+
+        # Create teradataml DataFrame objects.
+        iris_input = DataFrame("iris_input")
+        data_input = DataFrame.from_table("cal_housing_ex_raw")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Shap.
+        from teradataml import Shap, XGBoost, DecisionForest, SVM
+
+        # Example 1: Shap for classification model.
+        XGBoost_out = XGBoost(data=iris_input,
+                              input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                              response_column='species',
+                              model_type='Classification',
+                              iter_num=25)
+
+        Shap_out = Shap(data=iris_input,
+                        object=XGBoost_out.result,
+                        id_column='id',
+                        training_function="TD_XGBOOST",
+                        model_type="Classification",
+                        input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                        detailed=True)
+        # Print the result DataFrame.
+        print(Shap_out.output_data)
+
+        # Example 2: Shap for regression model.
+
+        from teradataml import ScaleFit, ScaleTransform
+
+        # Scale "target_columns" with respect to 'STD' value of the column.
+        fit_obj = ScaleFit(data=data_input,
+                           target_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                           'AveBedrms', 'Population', 'AveOccup',
+                                           'Latitude', 'Longitude'],
+                           scale_method="STD")
+
+        # Transform the data.
+        transform_obj = ScaleTransform(data=data_input,
+                                       object=fit_obj.output,
+                                       accumulate=["id", "MedHouseVal"])
+
+        decision_forest_out = DecisionForest(data=transform_obj.result,
+                                             input_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                                            'AveBedrms', 'Population', 'AveOccup',
+                                                            'Latitude', 'Longitude'],
+                                             response_column="MedHouseVal",
+                                             model_type="Regression",
+                                             max_depth=10
+                                             )
+        Shap_out2 = Shap(data=transform_obj.result,
+                         object=decision_forest_out.result,
+                         id_column='id',
+                         training_function="TD_DECISIONFOREST",
+                         model_type="Regression",
+                         input_columns=['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude'],
+                         detailed=True)
+
+        # Print the result DataFrame.
+        print(Shap_out2.output_data)
+    """
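Shap() emits per-row feature contributions, so a natural follow-up is ranking features by mean absolute contribution. The sketch below is not part of the diff and is hedged: it assumes the TD_SHAP output carries one contribution column per entry in "input_columns" (verify the actual schema via Shap_out.output_data.columns), and it uses stand-in numbers client-side.

    import pandas as pd

    def rank_features(shap_df: pd.DataFrame, feature_cols: list) -> pd.Series:
        """Mean |contribution| per feature, largest first."""
        return shap_df[feature_cols].abs().mean().sort_values(ascending=False)

    # In practice: shap_pdf = Shap_out.output_data.to_pandas()
    shap_pdf = pd.DataFrame({   # stand-in contribution values for illustration
        "sepal_length": [0.12, -0.05], "sepal_width": [0.01, 0.02],
        "petal_length": [0.80, 0.65], "petal_width": [0.40, -0.55],
    })
    print(rank_features(shap_pdf, list(shap_pdf.columns)))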
teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py

@@ -1,5 +1,6 @@
 def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, output_prob=False,
-                 output_responses=None, **generic_arguments):
+                 output_responses=None, partition_column=None, family="GAUSSIAN",
+                 **generic_arguments):
     """
     DESCRIPTION:
         The TDGLMPredict() function predicts target values (regression) and class labels
@@ -57,6 +58,18 @@ def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, out
             Note:
                 Only applicable if "output_prob" is True.
             Types: str OR list of strs
+
+        partition_column:
+            Optional Argument.
+            Specifies the column names of "data" on which to partition the input.
+            Types: str OR list of Strings (str)
+
+        family:
+            Optional Argument.
+            Specifies the distribution exponential family.
+            Permitted Values: 'GAUSSIAN', 'BINOMIAL'
+            Default Value: 'GAUSSIAN'
+            Types: str

         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
@@ -168,4 +181,153 @@ def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, out

         # Print the result DataFrame.
         print(TDGLMPredict_out1.result)
+
+        # Example 3: TDGLMPredict() predicts the 'medv' using generated regression model by GLM
+        #            using stepwise regression algorithm.
+        #            This example uses the boston dataset and scales the data.
+        #            Scaled data is used as input data to generate the GLM model and predict the target values.
+
+        # loading the example data
+        load_example_data("decisionforest", ["boston"])
+        load_example_data('glm', ['housing_train_segment', 'housing_train_parameter', 'housing_train_attribute'])
+
+        # Create teradataml DataFrame objects.
+        boston_df = DataFrame('boston')
+        housing_seg = DataFrame('housing_train_segment')
+        housing_parameter = DataFrame('housing_train_parameter')
+        housing_attribute = DataFrame('housing_train_attribute')
+
+        # scaling the data
+        # Scale "target_columns" with respect to 'STD' value of the column.
+        fit_obj = ScaleFit(data=boston_df,
+                           target_columns=['crim','zn','indus','chas','nox','rm','age','dis','rad','tax','ptratio','black','lstat'],
+                           scale_method="STD")
+
+        # Scale values specified in the input data using the fit data generated by the ScaleFit() function above.
+        obj = ScaleTransform(object=fit_obj.output,
+                             data=boston_df,
+                             accumulate=["id", "medv"])
+
+        boston = obj.result
+
+        # Generate generalized linear model (GLM) using stepwise regression algorithm.
+        glm_1 = GLM(data=boston,
+                    input_columns=['indus','chas','nox','rm'],
+                    response_column='medv',
+                    family='GAUSSIAN',
+                    lambda1=0.02,
+                    alpha=0.33,
+                    batch_size=10,
+                    learning_rate='optimal',
+                    iter_max=36,
+                    iter_num_no_change=100,
+                    tolerance=0.0001,
+                    initial_eta=0.02,
+                    stepwise_direction='backward',
+                    max_steps_num=10)
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="id",
+                           newdata=boston,
+                           object=glm_1,
+                           accumulate='medv')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 4: TDGLMPredict() predicts the 'medv' using generated regression model by GLM
+        #            stepwise regression algorithm with initial_stepwise_columns.
+        glm_2 = GLM(data=boston,
+                    input_columns=['crim','zn','indus','chas','nox','rm','age','dis','rad','tax','ptratio','black','lstat'],
+                    response_column='medv',
+                    family='GAUSSIAN',
+                    lambda1=0.02,
+                    alpha=0.33,
+                    batch_size=10,
+                    learning_rate='optimal',
+                    iter_max=36,
+                    iter_num_no_change=100,
+                    tolerance=0.0001,
+                    initial_eta=0.02,
+                    stepwise_direction='bidirectional',
+                    max_steps_num=10,
+                    initial_stepwise_columns=['rad','tax']
+                    )
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=boston,
+                           object=glm_2,
+                           accumulate='medv')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 5: TDGLMPredict() predicts the 'price' using generated regression model by GLM
+        #            using partition by key.
+        glm_3 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom', 'fullbase', 'gashw', 'airco'],
+                    response_column='price',
+                    family='GAUSSIAN',
+                    batch_size=10,
+                    iter_max=1000,
+                    data_partition_column='partition_id'
+                    )
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_3,
+                           accumulate='price',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 6: TDGLMPredict() predicts the 'price' using generated regression model by GLM
+        #            using partition by key with attribute data.
+        glm_4 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom', 'fullbase', 'gashw', 'airco'],
+                    response_column='price',
+                    family='GAUSSIAN',
+                    batch_size=10,
+                    iter_max=1000,
+                    data_partition_column='partition_id',
+                    attribute_data=housing_attribute,
+                    attribute_data_partition_column='partition_id'
+                    )
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_4,
+                           accumulate='price',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 7: TDGLMPredict() predicts the 'homestyle' using generated generalized linear model by GLM
+        #            using partition by key with parameter data.
+        glm_5 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom', 'fullbase', 'gashw', 'airco'],
+                    response_column='homestyle',
+                    family='binomial',
+                    iter_max=1000,
+                    data_partition_column='partition_id',
+                    parameter_data=housing_parameter,
+                    parameter_data_partition_column='partition_id'
+                    )
+
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_5,
+                           accumulate='homestyle',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
     """
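The new "family" argument pairs with the existing "output_prob"/"output_responses" options when the fitted model is binomial. A hedged sketch continuing Example 7 above, not part of the diff; the argument spellings follow the docstring, and the labels passed to "output_responses" are an assumption that depends on how "homestyle" was encoded during training.

    res_prob = TDGLMPredict(id_column="sn",
                            newdata=housing_seg,
                            object=glm_5,
                            family="BINOMIAL",            # added in this release
                            accumulate="homestyle",
                            output_prob=True,             # emit class probabilities
                            output_responses=["0", "1"],  # assumed label set
                            newdata_partition_column="partition_id",
                            object_partition_column="partition_id")
    print(res_prob.result)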