teradataml 20.0.0.0-py3-none-any.whl → 20.0.0.2-py3-none-any.whl
This diff shows the content of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Potentially problematic release: this version of teradataml has been flagged for review.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +183 -0
- teradataml/__init__.py +6 -3
- teradataml/_version.py +2 -2
- teradataml/analytics/__init__.py +3 -2
- teradataml/analytics/analytic_function_executor.py +275 -40
- teradataml/analytics/analytic_query_generator.py +92 -0
- teradataml/analytics/byom/__init__.py +3 -2
- teradataml/analytics/json_parser/metadata.py +1 -0
- teradataml/analytics/json_parser/utils.py +17 -21
- teradataml/analytics/meta_class.py +40 -1
- teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
- teradataml/analytics/sqle/__init__.py +10 -2
- teradataml/analytics/table_operator/__init__.py +3 -2
- teradataml/analytics/uaf/__init__.py +21 -2
- teradataml/analytics/utils.py +62 -1
- teradataml/analytics/valib.py +1 -1
- teradataml/automl/__init__.py +1553 -319
- teradataml/automl/custom_json_utils.py +139 -61
- teradataml/automl/data_preparation.py +276 -319
- teradataml/automl/data_transformation.py +163 -81
- teradataml/automl/feature_engineering.py +402 -239
- teradataml/automl/feature_exploration.py +9 -2
- teradataml/automl/model_evaluation.py +48 -51
- teradataml/automl/model_training.py +291 -189
- teradataml/catalog/byom.py +8 -8
- teradataml/catalog/model_cataloging_utils.py +1 -1
- teradataml/clients/auth_client.py +133 -0
- teradataml/clients/pkce_client.py +1 -1
- teradataml/common/aed_utils.py +3 -2
- teradataml/common/constants.py +48 -6
- teradataml/common/deprecations.py +13 -7
- teradataml/common/garbagecollector.py +156 -120
- teradataml/common/messagecodes.py +6 -1
- teradataml/common/messages.py +3 -1
- teradataml/common/sqlbundle.py +1 -1
- teradataml/common/utils.py +103 -11
- teradataml/common/wrapper_utils.py +1 -1
- teradataml/context/context.py +121 -31
- teradataml/data/advertising.csv +201 -0
- teradataml/data/bank_marketing.csv +11163 -0
- teradataml/data/bike_sharing.csv +732 -0
- teradataml/data/boston2cols.csv +721 -0
- teradataml/data/breast_cancer.csv +570 -0
- teradataml/data/complaints_test_tokenized.csv +353 -0
- teradataml/data/complaints_tokens_model.csv +348 -0
- teradataml/data/covid_confirm_sd.csv +83 -0
- teradataml/data/customer_segmentation_test.csv +2628 -0
- teradataml/data/customer_segmentation_train.csv +8069 -0
- teradataml/data/dataframe_example.json +10 -0
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +3 -1
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +5 -1
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +61 -1
- teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/FTest.py +105 -26
- teradataml/data/docs/sqle/docs_17_20/GLM.py +162 -1
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
- teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
- teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +119 -1
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +93 -1
- teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +163 -1
- teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
- teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
- teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +12 -4
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +7 -1
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +72 -7
- teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
- teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
- teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
- teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
- teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
- teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
- teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
- teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
- teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
- teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
- teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
- teradataml/data/dwt2d_dataTable.csv +65 -0
- teradataml/data/dwt_dataTable.csv +8 -0
- teradataml/data/dwt_filterTable.csv +3 -0
- teradataml/data/finance_data4.csv +13 -0
- teradataml/data/glm_example.json +28 -1
- teradataml/data/grocery_transaction.csv +19 -0
- teradataml/data/housing_train_segment.csv +201 -0
- teradataml/data/idwt2d_dataTable.csv +5 -0
- teradataml/data/idwt_dataTable.csv +8 -0
- teradataml/data/idwt_filterTable.csv +3 -0
- teradataml/data/insect2Cols.csv +61 -0
- teradataml/data/interval_data.csv +5 -0
- teradataml/data/jsons/paired_functions.json +14 -0
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +99 -27
- teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +166 -83
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +90 -14
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +48 -5
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +5 -3
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +31 -11
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +3 -2
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
- teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +2 -1
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +16 -16
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +19 -1
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +168 -15
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +50 -1
- teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
- teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
- teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +25 -7
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +17 -4
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +157 -80
- teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
- teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
- teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
- teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
- teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
- teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
- teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
- teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
- teradataml/data/kmeans_example.json +5 -0
- teradataml/data/kmeans_table.csv +10 -0
- teradataml/data/load_example_data.py +8 -2
- teradataml/data/naivebayestextclassifier_example.json +1 -1
- teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
- teradataml/data/onehot_encoder_train.csv +4 -0
- teradataml/data/openml_example.json +29 -0
- teradataml/data/peppers.png +0 -0
- teradataml/data/real_values.csv +14 -0
- teradataml/data/sax_example.json +8 -0
- teradataml/data/scale_attributes.csv +3 -0
- teradataml/data/scale_example.json +52 -1
- teradataml/data/scale_input_part_sparse.csv +31 -0
- teradataml/data/scale_input_partitioned.csv +16 -0
- teradataml/data/scale_input_sparse.csv +11 -0
- teradataml/data/scale_parameters.csv +3 -0
- teradataml/data/scripts/deploy_script.py +21 -2
- teradataml/data/scripts/sklearn/sklearn_fit.py +40 -37
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +22 -30
- teradataml/data/scripts/sklearn/sklearn_function.template +42 -24
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +23 -33
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +19 -28
- teradataml/data/scripts/sklearn/sklearn_score.py +32 -32
- teradataml/data/scripts/sklearn/sklearn_transform.py +85 -42
- teradataml/data/star_pivot.csv +8 -0
- teradataml/data/templates/open_source_ml.json +2 -1
- teradataml/data/teradataml_example.json +97 -1
- teradataml/data/timestamp_data.csv +4 -0
- teradataml/data/titanic_dataset_unpivoted.csv +19 -0
- teradataml/data/uaf_example.json +55 -1
- teradataml/data/unpivot_example.json +15 -0
- teradataml/data/url_data.csv +9 -0
- teradataml/data/windowdfft.csv +16 -0
- teradataml/data/ztest_example.json +16 -0
- teradataml/dataframe/copy_to.py +9 -4
- teradataml/dataframe/data_transfer.py +125 -64
- teradataml/dataframe/dataframe.py +575 -57
- teradataml/dataframe/dataframe_utils.py +47 -9
- teradataml/dataframe/fastload.py +273 -90
- teradataml/dataframe/functions.py +339 -0
- teradataml/dataframe/row.py +160 -0
- teradataml/dataframe/setop.py +2 -2
- teradataml/dataframe/sql.py +740 -18
- teradataml/dataframe/window.py +1 -1
- teradataml/dbutils/dbutils.py +324 -18
- teradataml/geospatial/geodataframe.py +1 -1
- teradataml/geospatial/geodataframecolumn.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +13 -13
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +254 -122
- teradataml/options/__init__.py +16 -5
- teradataml/options/configure.py +39 -6
- teradataml/options/display.py +2 -2
- teradataml/plot/axis.py +4 -4
- teradataml/scriptmgmt/UserEnv.py +26 -19
- teradataml/scriptmgmt/lls_utils.py +120 -16
- teradataml/table_operators/Script.py +4 -5
- teradataml/table_operators/TableOperator.py +160 -26
- teradataml/table_operators/table_operator_util.py +88 -41
- teradataml/table_operators/templates/dataframe_udf.template +63 -0
- teradataml/telemetry_utils/__init__.py +0 -0
- teradataml/telemetry_utils/queryband.py +52 -0
- teradataml/utils/validators.py +41 -3
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +191 -6
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +263 -185
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
@@ -1,4 +1,4 @@
-def GetFutileColumns(data=None, object=None, category_summary_column=
+def GetFutileColumns(data=None, object=None, category_summary_column='ColumnName', threshold_value=0.95, **generic_arguments):
     """
     DESCRIPTION:
         GetFutileColumns() function returns the futile column names if either
@@ -31,14 +31,16 @@ def GetFutileColumns(data=None, object=None, category_summary_column=None, thres
             Types: teradataml DataFrame or CategoricalSummary

         category_summary_column:
-
+            Optional Argument.
             Specifies the column from categorical summary DataFrame which provides names of
             the columns in "data".
+            Default Value: 'ColumnName'
             Types: str

         threshold_value:
-
+            Optional Argument.
             Specifies the threshold value for the columns in "data".
+            Default Value: 0.95
             Types: float

         **generic_arguments:
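Net effect: "category_summary_column" and "threshold_value" are now optional. A minimal usage sketch, assuming a connected Vantage session; the table and column names are illustrative, not taken from the diff:

    from teradataml import DataFrame, CategoricalSummary, GetFutileColumns

    df = DataFrame("titanic")  # hypothetical input table

    # GetFutileColumns consumes the output of CategoricalSummary.
    cat_summary = CategoricalSummary(data=df,
                                     target_columns=["sex", "cabin", "embarked"])

    # Both tuning arguments can now be omitted; they fall back to the
    # new defaults 'ColumnName' and 0.95 introduced in this release.
    futile = GetFutileColumns(data=df, object=cat_summary)
    print(futile.result)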
teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
@@ -1,6 +1,6 @@
 def KMeans(data=None, centroids_data=None, id_column=None, target_columns=None, num_clusters=None,
            seed=None, threshold=0.0395, iter_max=10, num_init=1, output_cluster_assignment=False,
-           **generic_arguments):
+           initialcentroids_method="RANDOM", **generic_arguments):
     """
     DESCRIPTION:
         The K-means() function groups a set of observations into k clusters
@@ -45,6 +45,10 @@ def KMeans(data=None, centroids_data=None, id_column=None, target_columns=None,
             Optional Argument.
             Specifies the input teradataml DataFrame containing
             set of initial centroids.
+            Note:
+                * This argument is not required if "num_clusters" provided.
+                * If provided, the function uses the initial centroids
+                  from this input.
             Types: teradataml DataFrame

         id_column:
@@ -105,6 +109,15 @@ def KMeans(data=None, centroids_data=None, id_column=None, target_columns=None,
             Specifies whether to output Cluster Assignment information.
             Default Value: False
             Types: bool
+
+        initialcentroids_method:
+            Optional Argument.
+            Specifies the initialization method to be used for selecting initial set of centroids.
+            Permitted Values: 'RANDOM', 'KMEANS++'
+            Default Value: 'RANDOM'
+            Note:
+                * This argument is not required if "centroids_data" is provided.
+            Types: str

         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
@@ -167,9 +180,11 @@ def KMeans(data=None, centroids_data=None, id_column=None, target_columns=None,

         # Load the example data.
         load_example_data("kmeans", "computers_train1")
+        load_example_data("kmeans", 'kmeans_table')

         # Create teradataml DataFrame objects.
         computers_train1 = DataFrame.from_table("computers_train1")
+        kmeans_tab = DataFrame('kmeans_table')

         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -191,6 +206,7 @@ def KMeans(data=None, centroids_data=None, id_column=None, target_columns=None,
         # Get the set of initial centroids by accessing the group of rows
         # from input data.
         kmeans_initial_centroids_table = computers_train1.loc[[19, 97]]
+        kmeans_initial_centroids = kmeans_tab.loc[[2, 4]]

         KMeans_out_1 = KMeans(id_column="id",
                               target_columns=['price', 'speed'],
@@ -201,4 +217,35 @@ def KMeans(data=None, centroids_data=None, id_column=None, target_columns=None,
         print(KMeans_out_1.result)
         print(KMeans_out_1.model_data)

+        # Example 3 : Grouping a set of observations into 2 clusters by
+        #             specifying the number of clusters and seed value
+        #             with output cluster assignment information.
+        obj = KMeans(data=kmeans_tab,
+                     id_column='id',
+                     target_columns=['c1', 'c2'],
+                     threshold=0.0395,
+                     iter_max=3,
+                     centroids_data=kmeans_initial_centroids,
+                     output_cluster_assignment=True
+                     )
+
+        # Print the result DataFrames.
+        print(obj.result)
+
+        # Example 4 : Grouping a set of observations into 3 clusters by
+        #             specifying the number of clusters for initial centroids
+        #             method as KMEANS++.
+        obj = KMeans(data=kmeans_tab,
+                     id_column='id',
+                     target_columns=['c1', 'c2'],
+                     threshold=0.0395,
+                     iter_max=3,
+                     num_clusters=3,
+                     output_cluster_assignment=True,
+                     initialcentroids_method="KMEANS++"
+                     )
+
+        # Print the result DataFrames.
+        print(obj.result)
+
     """
teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
@@ -0,0 +1,162 @@
+def NaiveBayes(data = None, response_column = None, numeric_inputs = None,
+               categorical_inputs = None, attribute_name_column = None,
+               attribute_value_column = None, attribute_type = None,
+               numeric_attributes = None, categorical_attributes = None,
+               **generic_arguments):
+    """
+    DESCRIPTION:
+        Function generates classification model using NaiveBayes
+        algorithm.
+        The Naive Bayes classification algorithm uses a training dataset with known discrete outcomes
+        and either discrete or continuous numeric input variables, along with categorical variables, to generate a model.
+        This model can then be used to predict the outcomes of future observations based on their input variable values.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        response_column:
+            Required Argument.
+            Specifies the name of the column in "data" containing response values.
+            Types: str
+
+        numeric_inputs:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing numeric attributes values.
+            Types: str OR list of Strings (str)
+
+        categorical_inputs:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing categorical attributes values.
+            Types: str OR list of Strings (str)
+
+        attribute_name_column:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing attributes names.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing attributes values.
+            Types: str
+
+        attribute_type:
+            Optional Argument, Required if "data" is in sparse format and
+            both "numeric_attributes" and "categorical_attributes" are not provided.
+            Specifies the attribute type.
+            Permitted Values:
+                * ALLNUMERIC - if all the attributes are of numeric type.
+                * ALLCATEGORICAL - if all the attributes are of categorical type.
+            Types: str
+
+        numeric_attributes:
+            Optional Argument.
+            Specifies the numeric attributes names.
+            Types: str OR list of strs
+
+        categorical_attributes:
+            Optional Argument.
+            Specifies the categorical attributes names.
+            Types: str OR list of strs
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+                persist:
+                    Optional Argument.
+                    Specifies whether to persist the results of the
+                    function in a table or not. When set to True,
+                    results are persisted in a table; otherwise,
+                    results are garbage collected at the end of the
+                    session.
+                    Default Value: False
+                    Types: bool
+
+                volatile:
+                    Optional Argument.
+                    Specifies whether to put the results of the
+                    function in a volatile table or not. When set to
+                    True, results are stored in a volatile table,
+                    otherwise not.
+                    Default Value: False
+                    Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of NaiveBayes.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as NaiveBayesObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        Vantage user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        Vantage user connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("decisionforestpredict", ["housing_train", "housing_test"])
+
+        # Create teradataml DataFrame objects.
+        housing_train = DataFrame.from_table("housing_train")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function NaiveBayes.
+        from teradataml import NaiveBayes
+
+        # Example 1: NaiveBayes function to generate classification model using Dense input.
+        NaiveBayes_out = NaiveBayes(data=housing_train, response_column='homestyle',
+                                    numeric_inputs=['price','lotsize','bedrooms','bathrms','stories','garagepl'],
+                                    categorical_inputs=['driveway','recroom','fullbase','gashw','airco','prefarea'])
+
+        # Print the result DataFrame.
+        print(NaiveBayes_out.result)
+
+        # Example 2: NaiveBayes function to generate classification model using Sparse input.
+
+        # Unpivoting the data for sparse input to naive bayes.
+        upvt_data = Unpivoting(data = housing_train, id_column = 'sn',
+                               target_columns = ['price','lotsize','bedrooms','bathrms','stories','garagepl','driveway',
+                                                 'recroom','fullbase','gashw','airco','prefarea'],
+                               attribute_column = "AttributeName", value_column = "AttributeValue",
+                               accumulate = 'homestyle')
+
+        NaiveBayes_out = NaiveBayes(data=upvt_data.result,
+                                    response_column='homestyle',
+                                    attribute_name_column='AttributeName',
+                                    attribute_value_column='AttributeValue',
+                                    numeric_attributes=['price','lotsize','bedrooms','bathrms','stories','garagepl'],
+                                    categorical_attributes=['driveway','recroom','fullbase','gashw','airco','prefarea'])
+
+        # Print the result DataFrame.
+        print(NaiveBayes_out.result)
+    """
teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
@@ -1,5 +1,5 @@
 def NonLinearCombineFit(data=None, target_columns=None, formula=None,
-                        result_column=
+                        result_column='TD_CombinedValue', **generic_arguments):
     """
     DESCRIPTION:
         The NonLinearCombineFit() function returns the target columns and a
@@ -31,9 +31,10 @@ def NonLinearCombineFit(data=None, target_columns=None, formula=None,
             Types: str

         result_column:
-
+            Optional Argument.
             Specifies the name of the new feature column generated by the Transform function.
             This function saves the specified formula in this column.
+            Default Value: 'TD_CombinedValue'
             Types: str

         **generic_arguments:
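Net effect: "result_column" now defaults to 'TD_CombinedValue'. A minimal sketch of a fit/transform round trip, assuming a connected session; the table, columns, accumulate value, and formula are illustrative:

    from teradataml import DataFrame, NonLinearCombineFit, NonLinearCombineTransform

    titanic = DataFrame("titanic")  # hypothetical input table

    # The derived feature lands in 'TD_CombinedValue' unless result_column is given.
    fit_obj = NonLinearCombineFit(data=titanic,
                                  target_columns=["age", "fare"],
                                  formula="Y=(X0+X1)*X1")

    out = NonLinearCombineTransform(data=titanic,
                                    object=fit_obj.result,
                                    accumulate="passenger")
    print(out.result)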
teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
@@ -12,6 +12,11 @@ def OneHotEncodingFit(data=None, category_data=None, target_column=None,
         * This function requires the UTF8 client character set for UNICODE data.
         * This function does not support Pass Through Characters (PTCs).
         * This function does not support KanjiSJIS or Graphic data types.
+        * For input to be considered as sparse input, column names should be
+          provided for 'data_partition_column' argument.
+        * In case of dense input, only allowed value for 'data_partition_column'
+          is PartitionKind.ANY and that for 'category_data_partition_column' is
+          PartitionKind.DIMENSION.

     PARAMETERS:
         data:
teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
@@ -3,6 +3,12 @@ def OneHotEncodingTransform(data=None, object=None, is_input_dense=None, **gener
     DESCRIPTION:
         Function encodes specified attributes and categorical values as one-hot numeric vectors,
         using OneHotEncodingFit() function output.
+        Notes:
+            * In case of sparse input, neither 'data_partition_column' nor
+              'object_partition_column' can be used independently.
+            * In case of dense input, if 'data_partition_column' is having value
+              PartitionKind.ANY, then 'object_partition_column' should have value
+              PartitionKind.DIMENSION.


     PARAMETERS:
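The two OneHotEncoding notes above describe a paired constraint on the partition-kind generic arguments for dense input. A sketch of that pairing, assuming PartitionKind is importable from teradataml as the notes imply; the table and column names are illustrative:

    from teradataml import DataFrame, OneHotEncodingFit, OneHotEncodingTransform
    from teradataml import PartitionKind  # assumed export, per the notes above

    titanic = DataFrame("titanic")  # hypothetical input table

    fit_obj = OneHotEncodingFit(data=titanic,
                                is_input_dense=True,
                                target_column="sex",
                                categorical_values=["male", "female"],
                                other_column="other")

    # Dense input: ANY on the data side pairs with DIMENSION on the fit-object side.
    out = OneHotEncodingTransform(data=titanic,
                                  object=fit_obj.result,
                                  is_input_dense=True,
                                  data_partition_column=PartitionKind.ANY,
                                  object_partition_column=PartitionKind.DIMENSION)
    print(out.result)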
teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
@@ -16,6 +16,8 @@ def OutlierFilterFit(data=None, target_columns=None, group_columns=None, lower_p
         * For information about PTCs, see Teradata Vantage™ - Analytics
           Database International Character Set Support.
         * This function does not support KanjiSJIS or Graphic data types.
+        * This function does not support "data_partition_column" and "data_order_column"
+          if the corresponding Vantage version is greater than or equal to 17.20.03.20.


     PARAMETERS:
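Given the new restriction, the portable pattern is to omit the partition/order generic arguments entirely. A minimal sketch with illustrative columns and thresholds:

    from teradataml import DataFrame, OutlierFilterFit, OutlierFilterTransform

    titanic = DataFrame("titanic")  # hypothetical input table

    # No data_partition_column / data_order_column, so the call also works
    # on Vantage versions >= 17.20.03.20, where those arguments are rejected.
    fit_obj = OutlierFilterFit(data=titanic,
                               target_columns="fare",
                               lower_percentile=0.1,
                               upper_percentile=0.9,
                               outlier_method="PERCENTILE",
                               replacement_value="MEDIAN",
                               percentile_method="PERCENTILECONT")

    out = OutlierFilterTransform(data=titanic, object=fit_obj.result)
    print(out.result)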
teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
@@ -0,0 +1,279 @@
+def Pivoting(data = None, partition_columns = None, target_columns = None,
+             accumulate = None, rows_per_partition = None, pivot_column = None,
+             pivot_keys = None, pivot_keys_alias = None, default_pivot_values = None,
+             aggregation = None, delimiters = None, combined_column_sizes = None,
+             truncate_columns = None, output_column_names = None,
+             **generic_arguments):
+
+
+    """
+    DESCRIPTION:
+        Function pivots the data, that is, changes the data from
+        sparse format to dense format.
+        Notes:
+            * 'data_partition_column' is required argument for partitioning the input data.
+            * Provide either the 'rows_per_partition', 'pivot_column', or 'aggregation' arguments
+              along with required arguments.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame to be pivoted.
+            Types: teradataml DataFrame
+
+        partition_columns:
+            Required Argument.
+            Specifies the name of the column(s) in "data" on which to partition the
+            input.
+            Types: str OR list of Strings (str)
+
+        target_columns:
+            Required Argument.
+            Specifies the name of the column(s) in "data" which contains the data for
+            pivoting.
+            Types: str OR list of Strings (str)
+
+        accumulate:
+            Optional Argument.
+            Specifies the name of the column(s) in "data" to copy to the output.
+            By default, the function copies no input table columns to the output.
+            Types: str OR list of Strings (str)
+
+        rows_per_partition:
+            Optional Argument.
+            Specifies the maximum number of rows in the partition.
+            Types: int
+
+        pivot_column:
+            Optional Argument.
+            Specifies the name of the column in "data" that contains the pivot keys.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str
+
+        pivot_keys:
+            Optional Argument.
+            Specifies the names of the pivot keys, if "pivot_column" is specified.
+            Notes:
+                * This argument is not needed when 'rows_per_partition' is provided.
+                * 'pivot_keys' are required when 'pivot_column' is specified.
+            Types: str OR list of Strings (str)
+
+        pivot_keys_alias:
+            Optional Argument.
+            Specifies the alias names of the pivot keys, if 'pivot_column' is specified.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str OR list of Strings (str)
+
+        default_pivot_values:
+            Optional Argument.
+            Specifies one default value for each pivot_key. The nth
+            default_pivot_value applies to the nth pivot_key.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str OR list of Strings (str)
+
+        aggregation:
+            Optional Argument.
+            Specifies the aggregation for the target columns.
+            Provide a single value {CONCAT | UNIQUE_CONCAT | SUM |
+            MIN | MAX | AVG} which will be applicable to all target columns or
+            specify multiple values for multiple target columns in
+            following format: ['ColumnName:{CONCAT|UNIQUE_CONCAT|SUM|MIN|MAX|AVG}',...].
+            Types: str OR list of Strings (str)
+
+        delimiters:
+            Optional Argument.
+            Specifies the delimiter to be used for concatenating the values of a target column.
+            Provide a single delimiter value applicable to all target columns or
+            specify multiple delimiter values for multiple target columns
+            in the following format: ['ColumnName:single_char',...].
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: str OR list of Strings (str)
+
+        combined_column_sizes:
+            Optional Argument.
+            Specifies the maximum size of the concatenated string.
+            Provide a single integer value that applies to all target columns or
+            specify multiple size values for multiple target columns
+            in the following format ['ColumnName:size_value',...].
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: int OR str OR list of Strings (str)
+
+        truncate_columns:
+            Optional Argument.
+            Specifies columns from the target columns for which
+            to truncate the concatenated string if it exceeds the specified size.
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: str OR list of Strings (str)
+
+        output_column_names:
+            Optional Argument.
+            Specifies the column name to be used for the output column. The nth
+            column name value applies to the nth output column.
+            Types: str OR list of Strings (str)
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+                persist:
+                    Optional Argument.
+                    Specifies whether to persist the results of the
+                    function in a table or not. When set to True,
+                    results are persisted in a table; otherwise,
+                    results are garbage collected at the end of the
+                    session.
+                    Default Value: False
+                    Types: bool
+
+                volatile:
+                    Optional Argument.
+                    Specifies whether to put the results of the
+                    function in a volatile table or not. When set to
+                    True, results are stored in a volatile table,
+                    otherwise not.
+                    Default Value: False
+                    Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Pivoting.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as PivotingObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        Vantage user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        Vantage user connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data('unpivot', 'titanic_dataset_unpivoted')
+        load_example_data('unpivot', 'star_pivot')
+
+        # Create teradataml DataFrame objects.
+        titanic_unpvt = DataFrame.from_table('titanic_dataset_unpivoted')
+        star = DataFrame.from_table('star_pivot')
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Pivoting.
+        from teradataml import Pivoting
+
+        # Example 1 : Pivot the input data using 'rows_per_partition'.
+        pvt1 = Pivoting(data = titanic_unpvt,
+                        partition_columns = 'passenger',
+                        target_columns = 'AttributeValue',
+                        accumulate = 'survived',
+                        rows_per_partition = 2,
+                        data_partition_column='passenger',
+                        data_order_column='AttributeName')
+
+        # Print the result DataFrame.
+        print(pvt1.result)
+
+        # Example 2 : Pivot the input data using 'pivot_column' and 'pivot_keys'.
+        pvt2 = Pivoting(data = titanic_unpvt,
+                        partition_columns = 'passenger',
+                        target_columns = 'AttributeValue',
+                        accumulate = 'survived',
+                        pivot_column = 'AttributeName',
+                        pivot_keys = ['pclass','gender'],
+                        data_partition_column = 'passenger')
+
+        # Print the result DataFrame.
+        print(pvt2.result)
+
+        # Example 3 : Pivot the input data with multiple target columns and
+        #             multiple aggregation functions.
+        pvt3 = Pivoting(data = star,
+                        partition_columns = ['country', 'state'],
+                        target_columns = ['sales', 'cogs', 'rating'],
+                        accumulate = 'yr',
+                        pivot_column = 'qtr',
+                        pivot_keys = ['Q1','Q2','Q3'],
+                        aggregation = ['sales:SUM','cogs:AVG','rating:CONCAT'],
+                        delimiters = '|',
+                        combined_column_sizes = 64001,
+                        data_partition_column = ['country', 'state'],
+                        data_order_column = ['qtr'])
+
+        # Print the result DataFrame.
+        print(pvt3.result)
+
+        # Example 4 : Pivot the input data with multiple target columns and
+        #             multiple aggregation functions.
+        pvt4 = Pivoting(data = star,
+                        partition_columns = 'country',
+                        target_columns = ['sales', 'cogs', 'state','rating'],
+                        accumulate = 'yr',
+                        aggregation = ['sales:SUM','cogs:AVG','state:UNIQUE_CONCAT','rating:CONCAT'],
+                        delimiters = '|',
+                        combined_column_sizes = ['state:5', 'rating:10'],
+                        data_partition_column='country',
+                        data_order_column='state')
+
+        # Print the result DataFrame.
+        print(pvt4.result)
+
+        # Example 5 : Pivot the input data with truncate columns.
+        pvt5 = Pivoting(data = star,
+                        partition_columns = ['state'],
+                        target_columns = ['country', 'rating'],
+                        accumulate = 'yr',
+                        pivot_column = 'qtr',
+                        pivot_keys = ['Q1','Q2','Q3'],
+                        aggregation = 'CONCAT',
+                        combined_column_sizes = 10,
+                        truncate_columns = 'country',
+                        data_partition_column = 'qtr',
+                        data_order_column='state')
+
+        # Print the result DataFrame.
+        print(pvt5.result)
+
+        # Example 6 : Pivot the input data with output column names.
+        pvt6 = Pivoting(data = star,
+                        partition_columns = ['country','state'],
+                        target_columns = ['sales', 'cogs', 'rating'],
+                        accumulate = 'yr',
+                        rows_per_partition = 3,
+                        output_column_names=['sales_q1','sales_q2','sales_q3','cogs_q1','cogs_q2',
+                                             'cogs_q3','rating_q1','rating_q2','rating_q3'],
+                        data_partition_column = 'qtr',
+                        data_order_column=['country','state'])
+
+        # Print the result DataFrame.
+        print(pvt6.result)
+    """
teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
@@ -1,4 +1,4 @@
-def ROC(data=None, probability_column=None, observation_column=None, model_id_column=None,
+def ROC(data=None, probability_column=None, observation_column=None, model_id_column=None, positive_class='1',
         num_thresholds=50, auc=True, gini=True, **generic_arguments):
     """
     DESCRIPTION:
@@ -48,8 +48,9 @@ def ROC(data=None, probability_column=None, observation_column=None, model_id_co
             Types: str

         positive_class:
-
+            Optional Argument.
             Specifies the label of the positive class.
+            Default Value: '1'
             Types: str

         num_thresholds:
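Net effect: "positive_class" is optional and assumed to be '1'. A minimal sketch, assuming a table of scored predictions with illustrative table and column names:

    from teradataml import DataFrame, ROC

    scores = DataFrame("model_scores")  # hypothetical predictions table

    # positive_class is omitted here and defaults to '1' per this release.
    roc_out = ROC(data=scores,
                  probability_column="prob",
                  observation_column="label",
                  num_thresholds=50)

    print(roc_out.result)       # AUC / Gini summary
    print(roc_out.output_data)  # per-threshold ROC points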
teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
@@ -1,5 +1,6 @@
 def SVMPredict(object=None, newdata=None, id_column=None, accumulate=None,
-               output_prob=False, output_responses=None,
+               output_prob=False, output_responses=None, model_type='Classification',
+               **generic_arguments):
     """
     DESCRIPTION:
         The SVMPredict() function uses the model generated by the function SVM() to
@@ -57,6 +58,15 @@ def SVMPredict(object=None, newdata=None, id_column=None, accumulate=None,
             Note:
                 Only applicable when "output_prob" is 'True'.
             Types: str OR list of strs
+
+        model_type:
+            Optional Argument.
+            Specifies the type of the analysis.
+            Note:
+                * Required for Regression problem.
+            Permitted Values: 'Classification', 'Regression'
+            Default Value: 'Classification'
+            Types: str

         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
@@ -155,7 +165,8 @@ def SVMPredict(object=None, newdata=None, id_column=None, accumulate=None,
         SVMPredict_out1 = SVMPredict(newdata=transform_obj.result,
                                      object=svm_obj1.result,
                                      id_column="id",
-                                     accumulate="MedHouseVal"
+                                     accumulate="MedHouseVal",
+                                     model_type="Regression"
                                      )

         # Print the result DataFrame.