teradataml-20.0.0.1-py3-none-any.whl → teradataml-20.0.0.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of teradataml might be problematic.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +306 -0
- teradataml/__init__.py +10 -3
- teradataml/_version.py +1 -1
- teradataml/analytics/__init__.py +3 -2
- teradataml/analytics/analytic_function_executor.py +299 -16
- teradataml/analytics/analytic_query_generator.py +92 -0
- teradataml/analytics/byom/__init__.py +3 -2
- teradataml/analytics/json_parser/metadata.py +13 -3
- teradataml/analytics/json_parser/utils.py +13 -6
- teradataml/analytics/meta_class.py +40 -1
- teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
- teradataml/analytics/sqle/__init__.py +11 -2
- teradataml/analytics/table_operator/__init__.py +4 -3
- teradataml/analytics/uaf/__init__.py +21 -2
- teradataml/analytics/utils.py +66 -1
- teradataml/analytics/valib.py +1 -1
- teradataml/automl/__init__.py +1502 -323
- teradataml/automl/custom_json_utils.py +139 -61
- teradataml/automl/data_preparation.py +247 -307
- teradataml/automl/data_transformation.py +32 -12
- teradataml/automl/feature_engineering.py +325 -86
- teradataml/automl/model_evaluation.py +44 -35
- teradataml/automl/model_training.py +122 -153
- teradataml/catalog/byom.py +8 -8
- teradataml/clients/pkce_client.py +1 -1
- teradataml/common/__init__.py +2 -1
- teradataml/common/constants.py +72 -0
- teradataml/common/deprecations.py +13 -7
- teradataml/common/garbagecollector.py +152 -120
- teradataml/common/messagecodes.py +11 -2
- teradataml/common/messages.py +4 -1
- teradataml/common/sqlbundle.py +26 -4
- teradataml/common/utils.py +225 -14
- teradataml/common/wrapper_utils.py +1 -1
- teradataml/context/context.py +82 -2
- teradataml/data/SQL_Fundamentals.pdf +0 -0
- teradataml/data/complaints_test_tokenized.csv +353 -0
- teradataml/data/complaints_tokens_model.csv +348 -0
- teradataml/data/covid_confirm_sd.csv +83 -0
- teradataml/data/dataframe_example.json +27 -1
- teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
- teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
- teradataml/data/docs/sqle/docs_17_20/Shap.py +203 -0
- teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
- teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
- teradataml/data/docs/sqle/docs_17_20/TextParser.py +3 -3
- teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
- teradataml/data/docs/tableoperator/docs_17_20/Image2Matrix.py +118 -0
- teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
- teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
- teradataml/data/docs/uaf/docs_17_20/CopyArt.py +145 -0
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
- teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
- teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
- teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +18 -21
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
- teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
- teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
- teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
- teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
- teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
- teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
- teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
- teradataml/data/dwt2d_dataTable.csv +65 -0
- teradataml/data/dwt_dataTable.csv +8 -0
- teradataml/data/dwt_filterTable.csv +3 -0
- teradataml/data/finance_data4.csv +13 -0
- teradataml/data/grocery_transaction.csv +19 -0
- teradataml/data/idwt2d_dataTable.csv +5 -0
- teradataml/data/idwt_dataTable.csv +8 -0
- teradataml/data/idwt_filterTable.csv +3 -0
- teradataml/data/interval_data.csv +5 -0
- teradataml/data/jsons/paired_functions.json +14 -0
- teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
- teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
- teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
- teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
- teradataml/data/jsons/sqle/17.20/TD_TextParser.json +1 -1
- teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
- teradataml/data/jsons/sqle/20.00/TD_KMeans.json +250 -0
- teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +266 -0
- teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +278 -0
- teradataml/data/jsons/storedprocedure/17.20/TD_COPYART.json +71 -0
- teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
- teradataml/data/jsons/tableoperator/17.20/IMAGE2MATRIX.json +53 -0
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
- teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +10 -19
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
- teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
- teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
- teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
- teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
- teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
- teradataml/data/jsons/uaf/17.20/TD_SAX.json +210 -0
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +410 -0
- teradataml/data/load_example_data.py +8 -2
- teradataml/data/medical_readings.csv +101 -0
- teradataml/data/naivebayestextclassifier_example.json +1 -1
- teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
- teradataml/data/patient_profile.csv +101 -0
- teradataml/data/peppers.png +0 -0
- teradataml/data/real_values.csv +14 -0
- teradataml/data/sax_example.json +8 -0
- teradataml/data/scripts/deploy_script.py +1 -1
- teradataml/data/scripts/lightgbm/dataset.template +157 -0
- teradataml/data/scripts/lightgbm/lightgbm_class_functions.template +247 -0
- teradataml/data/scripts/lightgbm/lightgbm_function.template +216 -0
- teradataml/data/scripts/lightgbm/lightgbm_sklearn.template +159 -0
- teradataml/data/scripts/sklearn/sklearn_fit.py +194 -160
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +136 -115
- teradataml/data/scripts/sklearn/sklearn_function.template +34 -16
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +155 -137
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +1 -1
- teradataml/data/scripts/sklearn/sklearn_score.py +12 -3
- teradataml/data/scripts/sklearn/sklearn_transform.py +162 -24
- teradataml/data/star_pivot.csv +8 -0
- teradataml/data/target_udt_data.csv +8 -0
- teradataml/data/templates/open_source_ml.json +3 -1
- teradataml/data/teradataml_example.json +20 -1
- teradataml/data/timestamp_data.csv +4 -0
- teradataml/data/titanic_dataset_unpivoted.csv +19 -0
- teradataml/data/uaf_example.json +55 -1
- teradataml/data/unpivot_example.json +15 -0
- teradataml/data/url_data.csv +9 -0
- teradataml/data/vectordistance_example.json +4 -0
- teradataml/data/windowdfft.csv +16 -0
- teradataml/dataframe/copy_to.py +1 -1
- teradataml/dataframe/data_transfer.py +5 -3
- teradataml/dataframe/dataframe.py +1002 -201
- teradataml/dataframe/fastload.py +3 -3
- teradataml/dataframe/functions.py +867 -0
- teradataml/dataframe/row.py +160 -0
- teradataml/dataframe/setop.py +2 -2
- teradataml/dataframe/sql.py +840 -33
- teradataml/dataframe/window.py +1 -1
- teradataml/dbutils/dbutils.py +878 -34
- teradataml/dbutils/filemgr.py +48 -1
- teradataml/geospatial/geodataframe.py +1 -1
- teradataml/geospatial/geodataframecolumn.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +13 -13
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/__init__.py +1 -1
- teradataml/opensource/{sklearn/_class.py → _class.py} +102 -17
- teradataml/opensource/_lightgbm.py +950 -0
- teradataml/opensource/{sklearn/_wrapper_utils.py → _wrapper_utils.py} +1 -2
- teradataml/opensource/{sklearn/constants.py → constants.py} +13 -10
- teradataml/opensource/sklearn/__init__.py +0 -1
- teradataml/opensource/sklearn/_sklearn_wrapper.py +1019 -574
- teradataml/options/__init__.py +9 -23
- teradataml/options/configure.py +42 -4
- teradataml/options/display.py +2 -2
- teradataml/plot/axis.py +4 -4
- teradataml/scriptmgmt/UserEnv.py +13 -9
- teradataml/scriptmgmt/lls_utils.py +77 -23
- teradataml/store/__init__.py +13 -0
- teradataml/store/feature_store/__init__.py +0 -0
- teradataml/store/feature_store/constants.py +291 -0
- teradataml/store/feature_store/feature_store.py +2223 -0
- teradataml/store/feature_store/models.py +1505 -0
- teradataml/store/vector_store/__init__.py +1586 -0
- teradataml/table_operators/Script.py +2 -2
- teradataml/table_operators/TableOperator.py +106 -20
- teradataml/table_operators/query_generator.py +3 -0
- teradataml/table_operators/table_operator_query_generator.py +3 -1
- teradataml/table_operators/table_operator_util.py +102 -56
- teradataml/table_operators/templates/dataframe_register.template +69 -0
- teradataml/table_operators/templates/dataframe_udf.template +63 -0
- teradataml/telemetry_utils/__init__.py +0 -0
- teradataml/telemetry_utils/queryband.py +52 -0
- teradataml/utils/dtypes.py +4 -2
- teradataml/utils/validators.py +34 -2
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/METADATA +311 -3
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/RECORD +240 -157
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/zip-safe +0 -0
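Of the additions above, the most user-visible are the new SQLE analytic functions whose documentation modules are reproduced below (Pivoting, Shap, TDNaiveBayesPredict), the new teradataml/store feature-store and vector-store packages, and the LightGBM open-source wrapper. A minimal sketch of picking up the new release — the host name and credentials are placeholders, and a reachable Vantage system is assumed:

    # pip install --upgrade teradataml==20.0.0.3
    from teradataml import create_context, display_analytic_functions

    # Placeholder connection details; replace with a real Vantage host and user.
    create_context(host="vantage.example.com", username="alice", password="***")

    # List the analytic functions the connected Vantage release actually exposes,
    # which is how the docstrings below recommend verifying availability.
    display_analytic_functions()

    # Per the docstring notes below, import the new functions only after connecting.
    from teradataml import Pivoting, Shap, TDNaiveBayesPredict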
teradataml/data/docs/sqle/docs_17_20/Pivoting.py

@@ -0,0 +1,279 @@
+def Pivoting(data = None, partition_columns = None, target_columns = None,
+             accumulate = None, rows_per_partition = None, pivot_column = None,
+             pivot_keys = None, pivot_keys_alias = None, default_pivot_values = None,
+             aggregation = None, delimiters = None, combined_column_sizes = None,
+             truncate_columns = None, output_column_names = None,
+             **generic_arguments):
+
+
+    """
+    DESCRIPTION:
+        Function pivots the data, that is, changes the data from
+        sparse format to dense format.
+        Notes:
+            * 'data_partition_column' is a required argument for partitioning the input data.
+            * Provide either the 'rows_per_partition', 'pivot_column', or 'aggregation' argument
+              along with the required arguments.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame to be pivoted.
+            Types: teradataml DataFrame
+
+        partition_columns:
+            Required Argument.
+            Specifies the name of the column(s) in "data" on which to partition the
+            input.
+            Types: str OR list of Strings (str)
+
+        target_columns:
+            Required Argument.
+            Specifies the name of the column(s) in "data" which contain the data for
+            pivoting.
+            Types: str OR list of Strings (str)
+
+        accumulate:
+            Optional Argument.
+            Specifies the name of the column(s) in "data" to copy to the output.
+            By default, the function copies no input table columns to the output.
+            Types: str OR list of Strings (str)
+
+        rows_per_partition:
+            Optional Argument.
+            Specifies the maximum number of rows in the partition.
+            Types: int
+
+        pivot_column:
+            Optional Argument.
+            Specifies the name of the column in "data" that contains the pivot keys.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str
+
+        pivot_keys:
+            Optional Argument.
+            Specifies the names of the pivot keys, if "pivot_column" is specified.
+            Notes:
+                * This argument is not needed when 'rows_per_partition' is provided.
+                * 'pivot_keys' are required when 'pivot_column' is specified.
+            Types: str OR list of Strings (str)
+
+        pivot_keys_alias:
+            Optional Argument.
+            Specifies the alias names of the pivot keys, if 'pivot_column' is specified.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str OR list of Strings (str)
+
+        default_pivot_values:
+            Optional Argument.
+            Specifies one default value for each pivot key. The nth
+            default pivot value applies to the nth pivot key.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str OR list of Strings (str)
+
+        aggregation:
+            Optional Argument.
+            Specifies the aggregation for the target columns.
+            Provide a single value {CONCAT | UNIQUE_CONCAT | SUM |
+            MIN | MAX | AVG} that applies to all target columns, or
+            specify multiple values for multiple target columns in the
+            following format: ['ColumnName:{CONCAT|UNIQUE_CONCAT|SUM|MIN|MAX|AVG}', ...].
+            Types: str OR list of Strings (str)
+
+        delimiters:
+            Optional Argument.
+            Specifies the delimiter to be used for concatenating the values of a target column.
+            Provide a single delimiter value applicable to all target columns, or
+            specify multiple delimiter values for multiple target columns
+            in the following format: ['ColumnName:single_char', ...].
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: str OR list of Strings (str)
+
+        combined_column_sizes:
+            Optional Argument.
+            Specifies the maximum size of the concatenated string.
+            Provide a single integer value that applies to all target columns, or
+            specify multiple size values for multiple target columns
+            in the following format: ['ColumnName:size_value', ...].
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: int OR str OR list of Strings (str)
+
+        truncate_columns:
+            Optional Argument.
+            Specifies the columns from the target columns for which
+            to truncate the concatenated string if it exceeds the specified size.
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: str OR list of Strings (str)
+
+        output_column_names:
+            Optional Argument.
+            Specifies the column names to be used for the output columns. The nth
+            column name applies to the nth output column.
+            Types: str OR list of Strings (str)
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports them, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Pivoting.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as PivotingObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        the Vantage the user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        the Vantage the user is connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data('unpivot', 'titanic_dataset_unpivoted')
+        load_example_data('unpivot', 'star_pivot')
+
+        # Create teradataml DataFrame objects.
+        titanic_unpvt = DataFrame.from_table('titanic_dataset_unpivoted')
+        star = DataFrame.from_table('star_pivot')
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Pivoting.
+        from teradataml import Pivoting
+
+        # Example 1: Pivot the input data using 'rows_per_partition'.
+        pvt1 = Pivoting(data=titanic_unpvt,
+                        partition_columns='passenger',
+                        target_columns='AttributeValue',
+                        accumulate='survived',
+                        rows_per_partition=2,
+                        data_partition_column='passenger',
+                        data_order_column='AttributeName')
+
+        # Print the result DataFrame.
+        print(pvt1.result)
+
+        # Example 2: Pivot the input data using 'pivot_column' and 'pivot_keys'.
+        pvt2 = Pivoting(data=titanic_unpvt,
+                        partition_columns='passenger',
+                        target_columns='AttributeValue',
+                        accumulate='survived',
+                        pivot_column='AttributeName',
+                        pivot_keys=['pclass', 'gender'],
+                        data_partition_column='passenger')
+
+        # Print the result DataFrame.
+        print(pvt2.result)
+
+        # Example 3: Pivot the input data with multiple target columns and
+        #            multiple aggregation functions.
+        pvt3 = Pivoting(data=star,
+                        partition_columns=['country', 'state'],
+                        target_columns=['sales', 'cogs', 'rating'],
+                        accumulate='yr',
+                        pivot_column='qtr',
+                        pivot_keys=['Q1', 'Q2', 'Q3'],
+                        aggregation=['sales:SUM', 'cogs:AVG', 'rating:CONCAT'],
+                        delimiters='|',
+                        combined_column_sizes=64001,
+                        data_partition_column=['country', 'state'],
+                        data_order_column=['qtr'])
+
+        # Print the result DataFrame.
+        print(pvt3.result)
+
+        # Example 4: Pivot the input data with multiple target columns and
+        #            multiple aggregation functions.
+        pvt4 = Pivoting(data=star,
+                        partition_columns='country',
+                        target_columns=['sales', 'cogs', 'state', 'rating'],
+                        accumulate='yr',
+                        aggregation=['sales:SUM', 'cogs:AVG', 'state:UNIQUE_CONCAT', 'rating:CONCAT'],
+                        delimiters='|',
+                        combined_column_sizes=['state:5', 'rating:10'],
+                        data_partition_column='country',
+                        data_order_column='state')
+
+        # Print the result DataFrame.
+        print(pvt4.result)
+
+        # Example 5: Pivot the input data with truncate columns.
+        pvt5 = Pivoting(data=star,
+                        partition_columns=['state'],
+                        target_columns=['country', 'rating'],
+                        accumulate='yr',
+                        pivot_column='qtr',
+                        pivot_keys=['Q1', 'Q2', 'Q3'],
+                        aggregation='CONCAT',
+                        combined_column_sizes=10,
+                        truncate_columns='country',
+                        data_partition_column='qtr',
+                        data_order_column='state')
+
+        # Print the result DataFrame.
+        print(pvt5.result)
+
+        # Example 6: Pivot the input data with output column names.
+        pvt6 = Pivoting(data=star,
+                        partition_columns=['country', 'state'],
+                        target_columns=['sales', 'cogs', 'rating'],
+                        accumulate='yr',
+                        rows_per_partition=3,
+                        output_column_names=['sales_q1', 'sales_q2', 'sales_q3', 'cogs_q1', 'cogs_q2',
+                                             'cogs_q3', 'rating_q1', 'rating_q2', 'rating_q3'],
+                        data_partition_column='qtr',
+                        data_order_column=['country', 'state'])
+
+        # Print the result DataFrame.
+        print(pvt6.result)
+    """
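A condensed, runnable version of the docstring's Example 1 above — a sketch only, assuming create_context has already been called against a Vantage system that exposes the Pivoting SQLE function:

    from teradataml import DataFrame, Pivoting, load_example_data

    # Load the packaged example data and wrap it in a teradataml DataFrame.
    load_example_data('unpivot', 'titanic_dataset_unpivoted')
    titanic_unpvt = DataFrame.from_table('titanic_dataset_unpivoted')

    # Collapse two attribute rows per passenger into one dense output row.
    pvt = Pivoting(data=titanic_unpvt,
                   partition_columns='passenger',
                   target_columns='AttributeValue',
                   accumulate='survived',
                   rows_per_partition=2,
                   data_partition_column='passenger',
                   data_order_column='AttributeName')
    print(pvt.result)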
teradataml/data/docs/sqle/docs_17_20/Shap.py

@@ -0,0 +1,203 @@
+def Shap(data = None, object = None, id_column = None, training_function = "TD_GLM",
+         model_type = "Regression", input_columns = None, detailed = False,
+         accumulate = None, num_parallel_trees = 1000, num_boost_rounds = 10,
+         **generic_arguments):
+
+    """
+    DESCRIPTION:
+        Function to get explanations for individual predictions
+        (feature contributions) in a machine learning model, based on the
+        cooperative game theory optimal Shapley values.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        object:
+            Required Argument.
+            Specifies the teradataml DataFrame containing the model data.
+            Types: teradataml DataFrame
+
+        id_column:
+            Required Argument.
+            Specifies the input data column name that has the unique identifier
+            for each row in the "data".
+            Types: str
+
+        training_function:
+            Required Argument.
+            Specifies the name of the function that trained the model.
+            Default Value: "TD_GLM"
+            Permitted Values: TD_GLM, TD_DECISIONFOREST, TD_XGBOOST
+            Types: str
+
+        model_type:
+            Required Argument.
+            Specifies the type of operation to be performed on the input data.
+            Default Value: "Regression"
+            Permitted Values: Regression, Classification
+            Types: str
+
+        input_columns:
+            Required Argument.
+            Specifies the names of the columns in "data" used for
+            training the model (predictors, features or independent variables).
+            Types: str OR list of Strings (str)
+
+        detailed:
+            Optional Argument.
+            Specifies whether to output detailed SHAP information about the
+            forest trees.
+            Default Value: False
+            Types: bool
+
+        accumulate:
+            Optional Argument.
+            Specifies the names of the input columns to copy to the output teradataml DataFrame.
+            Types: str OR list of Strings (str)
+
+        num_parallel_trees:
+            Optional Argument.
+            Specifies the number of parallel boosted trees. Each boosted tree
+            operates on a sample of data that fits in an AMP's memory.
+            Note:
+                * By default, "num_parallel_trees" is chosen equal to the number of AMPs with
+                  data.
+            Default Value: 1000
+            Types: int
+
+        num_boost_rounds:
+            Optional Argument.
+            Specifies the number of iterations to boost the weak classifiers. The
+            number of iterations must be an int in the range [1, 100000].
+            Default Value: 10
+            Types: int
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports them, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Shap.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as ShapObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            1. output
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        the Vantage the user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        the Vantage the user is connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("byom", "iris_input")
+        load_example_data("teradataml", ["cal_housing_ex_raw"])
+
+        # Create teradataml DataFrame objects.
+        iris_input = DataFrame("iris_input")
+        data_input = DataFrame.from_table("cal_housing_ex_raw")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Shap.
+        from teradataml import Shap, XGBoost, DecisionForest, SVM
+
+        # Example 1: Shap for a classification model.
+        XGBoost_out = XGBoost(data=iris_input,
+                              input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                              response_column='species',
+                              model_type='Classification',
+                              iter_num=25)
+
+        Shap_out = Shap(data=iris_input,
+                        object=XGBoost_out.result,
+                        id_column='id',
+                        training_function="TD_XGBOOST",
+                        model_type="Classification",
+                        input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                        detailed=True)
+
+        # Print the result DataFrame.
+        print(Shap_out.output_data)
+
+        # Example 2: Shap for a regression model.
+        from teradataml import ScaleFit, ScaleTransform
+
+        # Scale "target_columns" with respect to the 'STD' value of the column.
+        fit_obj = ScaleFit(data=data_input,
+                           target_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                           'AveBedrms', 'Population', 'AveOccup',
+                                           'Latitude', 'Longitude'],
+                           scale_method="STD")
+
+        # Transform the data.
+        transform_obj = ScaleTransform(data=data_input,
+                                       object=fit_obj.output,
+                                       accumulate=["id", "MedHouseVal"])
+
+        decision_forest_out = DecisionForest(data=transform_obj.result,
+                                             input_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                                            'AveBedrms', 'Population', 'AveOccup',
+                                                            'Latitude', 'Longitude'],
+                                             response_column="MedHouseVal",
+                                             model_type="Regression",
+                                             max_depth=10)
+
+        Shap_out2 = Shap(data=transform_obj.result,
+                         object=decision_forest_out.result,
+                         id_column='id',
+                         training_function="TD_DECISIONFOREST",
+                         model_type="Regression",
+                         input_columns=['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms',
+                                        'Population', 'AveOccup', 'Latitude', 'Longitude'],
+                         detailed=True)
+
+        # Print the result DataFrame.
+        print(Shap_out2.output_data)
+    """
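Likewise for Shap, a condensed sketch of the docstring's classification example above, assuming an open Vantage connection and the example data installed:

    from teradataml import DataFrame, XGBoost, Shap, load_example_data

    load_example_data("byom", "iris_input")
    iris_input = DataFrame("iris_input")
    features = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']

    # Train an in-database XGBoost classifier, then attribute its predictions
    # to the input features with Shapley values.
    model = XGBoost(data=iris_input, input_columns=features,
                    response_column='species', model_type='Classification',
                    iter_num=25)
    shap_out = Shap(data=iris_input, object=model.result, id_column='id',
                    training_function="TD_XGBOOST", model_type="Classification",
                    input_columns=features, detailed=True)
    print(shap_out.output_data)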
teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py

@@ -0,0 +1,189 @@
+def TDNaiveBayesPredict(data = None, object = None, id_column = None,
+                        numeric_inputs = None, categorical_inputs = None,
+                        attribute_name_column = None, attribute_value_column = None,
+                        responses = None, output_prob = False, accumulate = None,
+                        **generic_arguments):
+    """
+    DESCRIPTION:
+        Function predicts classification labels for a test set of data using
+        the model generated by the NaiveBayes function.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        object:
+            Required Argument.
+            Specifies the teradataml DataFrame containing the model data,
+            or an instance of NaiveBayes.
+            Types: teradataml DataFrame or NaiveBayes
+
+        id_column:
+            Required Argument.
+            Specifies the name of the column that uniquely identifies an
+            observation in the "data".
+            Types: str
+
+        numeric_inputs:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing numeric attribute values.
+            Types: str OR list of Strings (str)
+
+        categorical_inputs:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing categorical attribute values.
+            Types: str OR list of Strings (str)
+
+        attribute_name_column:
+            Optional Argument.
+            Specifies the name of the column in "data" containing the attribute names.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the name of the column in "data" containing the attribute values.
+            Types: str
+
+        responses:
+            Optional Argument.
+            Specifies a list of responses to output.
+            Types: str OR list of Strings (str)
+
+        output_prob:
+            Optional Argument.
+            Specifies whether to output the probability for each response.
+            Default Value: False
+            Types: bool
+
+        accumulate:
+            Optional Argument.
+            Specifies the names of the columns in "data" to copy
+            from the input to the output teradataml DataFrame.
+            Types: str OR list of Strings (str)
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports them, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of NaiveBayesPredict.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as NaiveBayesPredictObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        the Vantage the user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        the Vantage the user is connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("decisionforestpredict", ["housing_train", "housing_test"])
+
+        # Create teradataml DataFrame objects.
+        housing_train = DataFrame.from_table("housing_train")
+        housing_test = DataFrame.from_table("housing_test")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function TDNaiveBayesPredict.
+        from teradataml import TDNaiveBayesPredict, NaiveBayes, Unpivoting
+
+        # Example 1: TDNaiveBayesPredict function to predict the classification label using dense input.
+        NaiveBayes_out = NaiveBayes(data=housing_train, response_column='homestyle',
+                                    numeric_inputs=['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl'],
+                                    categorical_inputs=['driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea'])
+
+        NaiveBayesPredict_out = TDNaiveBayesPredict(data=housing_test, object=NaiveBayes_out.result, id_column='sn',
+                                                    numeric_inputs=['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl'],
+                                                    categorical_inputs=['driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea'],
+                                                    responses=['Classic', 'Eclectic', 'bungalow'],
+                                                    accumulate='homestyle',
+                                                    output_prob=True)
+
+        # Print the result DataFrame.
+        print(NaiveBayesPredict_out.result)
+
+        # Example 2: TDNaiveBayesPredict function to predict the classification label using sparse input.
+
+        # Unpivot the data for sparse input to NaiveBayes.
+        upvt_train = Unpivoting(data=housing_train, id_column='sn',
+                                target_columns=['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl',
+                                                'driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea'],
+                                attribute_column="AttributeName",
+                                value_column="AttributeValue",
+                                accumulate='homestyle')
+
+        upvt_test = Unpivoting(data=housing_test, id_column='sn',
+                               target_columns=['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl',
+                                               'driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea'],
+                               attribute_column="AttributeName", value_column="AttributeValue",
+                               accumulate='homestyle')
+
+        NaiveBayes_out1 = NaiveBayes(data=upvt_train.result,
+                                     response_column='homestyle',
+                                     attribute_name_column='AttributeName',
+                                     attribute_value_column='AttributeValue',
+                                     numeric_attributes=['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl'],
+                                     categorical_attributes=['driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea'])
+
+        NaiveBayesPredict_out1 = TDNaiveBayesPredict(data=upvt_test.result, object=NaiveBayes_out1, id_column='sn',
+                                                     attribute_name_column='AttributeName',
+                                                     attribute_value_column='AttributeValue',
+                                                     responses=['Classic', 'Eclectic', 'bungalow'],
+                                                     accumulate='homestyle',
+                                                     output_prob=True)
+
+        # Print the result DataFrame.
+        print(NaiveBayesPredict_out1.result)
+    """
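And for TDNaiveBayesPredict, a condensed sketch of the dense-input flow from Example 1 above, again assuming an open Vantage connection:

    from teradataml import DataFrame, NaiveBayes, TDNaiveBayesPredict, load_example_data

    load_example_data("decisionforestpredict", ["housing_train", "housing_test"])
    housing_train = DataFrame.from_table("housing_train")
    housing_test = DataFrame.from_table("housing_test")

    numeric = ['price', 'lotsize', 'bedrooms', 'bathrms', 'stories', 'garagepl']
    categorical = ['driveway', 'recroom', 'fullbase', 'gashw', 'airco', 'prefarea']

    # Fit the model on the training split, then score the held-out split,
    # emitting a probability column per requested response.
    nb = NaiveBayes(data=housing_train, response_column='homestyle',
                    numeric_inputs=numeric, categorical_inputs=categorical)
    pred = TDNaiveBayesPredict(data=housing_test, object=nb.result, id_column='sn',
                               numeric_inputs=numeric, categorical_inputs=categorical,
                               responses=['Classic', 'Eclectic', 'bungalow'],
                               accumulate='homestyle', output_prob=True)
    print(pred.result)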