teradataml-20.0.0.1-py3-none-any.whl → teradataml-20.0.0.2-py3-none-any.whl
This diff compares the content of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
This version of teradataml has been flagged as potentially problematic.
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +112 -0
- teradataml/__init__.py +6 -3
- teradataml/_version.py +1 -1
- teradataml/analytics/__init__.py +3 -2
- teradataml/analytics/analytic_function_executor.py +224 -16
- teradataml/analytics/analytic_query_generator.py +92 -0
- teradataml/analytics/byom/__init__.py +3 -2
- teradataml/analytics/json_parser/metadata.py +1 -0
- teradataml/analytics/json_parser/utils.py +6 -4
- teradataml/analytics/meta_class.py +40 -1
- teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
- teradataml/analytics/sqle/__init__.py +10 -2
- teradataml/analytics/table_operator/__init__.py +3 -2
- teradataml/analytics/uaf/__init__.py +21 -2
- teradataml/analytics/utils.py +62 -1
- teradataml/analytics/valib.py +1 -1
- teradataml/automl/__init__.py +1502 -323
- teradataml/automl/custom_json_utils.py +139 -61
- teradataml/automl/data_preparation.py +245 -306
- teradataml/automl/data_transformation.py +32 -12
- teradataml/automl/feature_engineering.py +313 -82
- teradataml/automl/model_evaluation.py +44 -35
- teradataml/automl/model_training.py +109 -146
- teradataml/catalog/byom.py +8 -8
- teradataml/clients/pkce_client.py +1 -1
- teradataml/common/constants.py +37 -0
- teradataml/common/deprecations.py +13 -7
- teradataml/common/garbagecollector.py +151 -120
- teradataml/common/messagecodes.py +4 -1
- teradataml/common/messages.py +2 -1
- teradataml/common/sqlbundle.py +1 -1
- teradataml/common/utils.py +97 -11
- teradataml/common/wrapper_utils.py +1 -1
- teradataml/context/context.py +72 -2
- teradataml/data/complaints_test_tokenized.csv +353 -0
- teradataml/data/complaints_tokens_model.csv +348 -0
- teradataml/data/covid_confirm_sd.csv +83 -0
- teradataml/data/dataframe_example.json +10 -0
- teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
- teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
- teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
- teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
- teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
- teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
- teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
- teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
- teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
- teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
- teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
- teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
- teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
- teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
- teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
- teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
- teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
- teradataml/data/dwt2d_dataTable.csv +65 -0
- teradataml/data/dwt_dataTable.csv +8 -0
- teradataml/data/dwt_filterTable.csv +3 -0
- teradataml/data/finance_data4.csv +13 -0
- teradataml/data/grocery_transaction.csv +19 -0
- teradataml/data/idwt2d_dataTable.csv +5 -0
- teradataml/data/idwt_dataTable.csv +8 -0
- teradataml/data/idwt_filterTable.csv +3 -0
- teradataml/data/interval_data.csv +5 -0
- teradataml/data/jsons/paired_functions.json +14 -0
- teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
- teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
- teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
- teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
- teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
- teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
- teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
- teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
- teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
- teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
- teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
- teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
- teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
- teradataml/data/load_example_data.py +8 -2
- teradataml/data/naivebayestextclassifier_example.json +1 -1
- teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
- teradataml/data/peppers.png +0 -0
- teradataml/data/real_values.csv +14 -0
- teradataml/data/sax_example.json +8 -0
- teradataml/data/scripts/deploy_script.py +1 -1
- teradataml/data/scripts/sklearn/sklearn_fit.py +17 -10
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +2 -2
- teradataml/data/scripts/sklearn/sklearn_function.template +30 -7
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +1 -1
- teradataml/data/scripts/sklearn/sklearn_score.py +12 -3
- teradataml/data/scripts/sklearn/sklearn_transform.py +55 -4
- teradataml/data/star_pivot.csv +8 -0
- teradataml/data/templates/open_source_ml.json +2 -1
- teradataml/data/teradataml_example.json +20 -1
- teradataml/data/timestamp_data.csv +4 -0
- teradataml/data/titanic_dataset_unpivoted.csv +19 -0
- teradataml/data/uaf_example.json +55 -1
- teradataml/data/unpivot_example.json +15 -0
- teradataml/data/url_data.csv +9 -0
- teradataml/data/windowdfft.csv +16 -0
- teradataml/dataframe/copy_to.py +1 -1
- teradataml/dataframe/data_transfer.py +5 -3
- teradataml/dataframe/dataframe.py +474 -41
- teradataml/dataframe/fastload.py +3 -3
- teradataml/dataframe/functions.py +339 -0
- teradataml/dataframe/row.py +160 -0
- teradataml/dataframe/setop.py +2 -2
- teradataml/dataframe/sql.py +658 -20
- teradataml/dataframe/window.py +1 -1
- teradataml/dbutils/dbutils.py +322 -16
- teradataml/geospatial/geodataframe.py +1 -1
- teradataml/geospatial/geodataframecolumn.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +13 -13
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +154 -69
- teradataml/options/__init__.py +3 -1
- teradataml/options/configure.py +14 -2
- teradataml/options/display.py +2 -2
- teradataml/plot/axis.py +4 -4
- teradataml/scriptmgmt/UserEnv.py +10 -6
- teradataml/scriptmgmt/lls_utils.py +3 -2
- teradataml/table_operators/Script.py +2 -2
- teradataml/table_operators/TableOperator.py +106 -20
- teradataml/table_operators/table_operator_util.py +88 -41
- teradataml/table_operators/templates/dataframe_udf.template +63 -0
- teradataml/telemetry_utils/__init__.py +0 -0
- teradataml/telemetry_utils/queryband.py +52 -0
- teradataml/utils/validators.py +1 -1
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +115 -2
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +200 -140
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
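Before the per-file diffs below: once the 20.0.0.2 wheel is installed (for example via `pip install teradataml==20.0.0.2`), the version bump recorded in teradataml/_version.py is visible from Python. A minimal sketch of checking it; the `__version__` attribute and the exact formatting of the version string are assumptions rather than something this diff shows.

# Confirm which of the two releases is active in the current environment.
import teradataml
print(teradataml.__version__)   # expected to report the 20.0.0.2 release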
teradataml/data/docs/uaf/docs_17_20/Convolve.py

@@ -41,7 +41,7 @@ def Convolve(data1=None, data1_filter_expr=None, data2=None,
         data2:
             Required Argument.
             Specifies the actual filter kernel.
-
+            The time series have the following TDSeries characteristics.
             1. "payload_content" must have one of these values:
                * REAL
                * COMPLEX

@@ -64,18 +64,21 @@ def Convolve(data1=None, data1_filter_expr=None, data2=None,
 
         algorithm:
             Optional Argument.
-            Specifies the options to use for convolving.
-
-
-
-
-
-
+            Specifies the options to use for convolving.
+            By default, the function selects the best option based
+            on the number of entries present in the two inputs,
+            and their types ( REAL, COMPLEX, and so on.)
+            CONV_SUMMATION only supports:
+                * REAL, REAL
+                * REAL, MULTIVAR_REAL
+                * MULTIVAR_REAL, REAL
+                * MULTIVAR_REAL, MULTIVAR_REAL
             Note:
                 * This parameter is usually used for testing.
                   If this parameter is not included, the internal
-                  planning logic selects the best option based
-
+                  planning logic selects the best option based on
+                  the number of entries present in the two inputs,
+                  and their types ( REAL, COMPLEX, and so on.)
             Permitted Values: CONV_SUMMATION, CONV_DFFT
             Types: str
 
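The clarified "algorithm" text above is easier to see in a concrete call. A minimal sketch, assuming a REAL-payload source series and a REAL-payload filter-kernel series, one of the pairings CONV_SUMMATION supports; the table and column names are hypothetical and a Vantage connection is assumed to exist.

# Sketch: force the summation-based algorithm instead of letting the planner choose.
from teradataml import DataFrame, TDSeries, Convolve

# Source series and filter kernel, both with REAL payloads (hypothetical tables/columns).
source_series = TDSeries(data=DataFrame.from_table("signal_table"),
                         id="id", row_index="t", row_index_style="SEQUENCE",
                         payload_field="value", payload_content="REAL")
kernel_series = TDSeries(data=DataFrame.from_table("kernel_table"),
                         id="id", row_index="t", row_index_style="SEQUENCE",
                         payload_field="weight", payload_content="REAL")

# The docstring notes this override is mostly useful for testing.
uaf_out = Convolve(data1=source_series,
                   data2=kernel_series,
                   algorithm="CONV_SUMMATION")
print(uaf_out.result)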
teradataml/data/docs/uaf/docs_17_20/Convolve2.py

@@ -154,7 +154,6 @@ def Convolve2(data1=None, data1_filter_expr=None, data2=None,
         data3 = DataFrame.from_table("Convolve2RealsLeft")
         data4 = DataFrame.from_table("Convolve2RealsLeft")
 
-
         # Example 1: Apply the Convolve2() function when payload fields of two matrices
         # are the different to convolve two matrices into a new source
         # image matrix.

@@ -168,6 +167,7 @@ def Convolve2(data1=None, data1_filter_expr=None, data2=None,
                                    column_index='column_i',
                                    payload_field=["B"],
                                    payload_content="REAL")
+
         data2_matrix_df = TDMatrix(data=data2,
                                    id='id',
                                    row_index_style="sequence",

@@ -176,6 +176,7 @@ def Convolve2(data1=None, data1_filter_expr=None, data2=None,
                                    column_index='column_i',
                                    payload_field=["A"],
                                    payload_content="REAL")
+
         # Convolve the "data1_matrix_df" and "data2_matrix_df" matrices using the Convolve2() function.
         uaf_out1 = Convolve2(data1=data1_matrix_df,
                              data2=data2_matrix_df,

@@ -196,6 +197,7 @@ def Convolve2(data1=None, data1_filter_expr=None, data2=None,
                                    column_index='col_seq',
                                    payload_field=["A"],
                                    payload_content="REAL")
+
         data4_matrix_df = TDMatrix(data=data4,
                                    id='id',
                                    row_index_style="sequence",

@@ -204,6 +206,7 @@ def Convolve2(data1=None, data1_filter_expr=None, data2=None,
                                    column_index='col_seq',
                                    payload_field=["A"],
                                    payload_content="REAL")
+
         # Convolve the "data3_matrix_df" and "data4_matrix_df" matrices using the Convolve2() function.
         uaf_out2 = Convolve2(data1=data3_matrix_df,
                              data2=data4_matrix_df,
teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py

@@ -21,7 +21,7 @@ def CumulPeriodogram(data=None, data_filter_expr=None,
         2. Use ArimaValidate() to validate spectral candidates.
         4. Execute CumulPeriodogram() using the residuals.
         5. See the null hypothesis result from CumulPeriodogram().
-        6. Use
+        6. Use DataFrame.plot() to plot the results.
 
     PARAMETERS:
         data:

@@ -143,7 +143,6 @@ def CumulPeriodogram(data=None, data_filter_expr=None,
                                        fit_metrics=True,
                                        residuals=True)
 
-
         # Example 1: Perform statistical test using CumulPeriodogram()
         #            with input as TDSeries object created over the 'fitresiduals'
         #            attribute of arima_validate generated by running ArimaValidate() and

@@ -158,7 +157,8 @@ def CumulPeriodogram(data=None, data_filter_expr=None,
                                    payload_field="RESIDUAL",
                                    payload_content="REAL")
 
-        uaf_out = CumulPeriodogram(data=data_series_df,
+        uaf_out = CumulPeriodogram(data=data_series_df,
+                                   significance_level=0.05)
 
         # Print the result DataFrames.
         print(uaf_out.result)

@@ -174,7 +174,8 @@ def CumulPeriodogram(data=None, data_filter_expr=None,
         # generated by ArimaValidate() function with layer as 'ARTFITRESIDUALS'.
         art_df = TDAnalyticResult(data=arima_validate.result, layer="ARTFITRESIDUALS")
 
-        uaf_out = CumulPeriodogram(data=art_df,
+        uaf_out = CumulPeriodogram(data=art_df,
+                                   significance_level=0.05)
 
         # Print the result DataFrames.
         print(uaf_out.result)
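The reworded step 6 above points at DataFrame.plot(). A rough sketch of that final step, continuing from the CumulPeriodogram() calls in the hunks above; the result-column names, the x/y/kind keywords, and the show() call are assumptions about the teradataml plotting API, not something this diff confirms.

# Sketch: plot the cumulative periodogram against frequency.
res = uaf_out.result
# Column names here are hypothetical; inspect res.columns for the actual names.
plot = res.plot(x=res.FREQ, y=res.CUMUL_PERIODOGRAM, kind="line")
plot.show()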
teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py

@@ -174,10 +174,10 @@ def DFFT2Conv(data=None, data_filter_expr=None, conv=None,
         #            input matrix with real numbers only for the matrix id 33.
         filter_expr = td_matrix.id==33
         dfft2_out = DFFT2(data=td_matrix,
-
-
-
-
+                          data_filter_expr=filter_expr,
+                          freq_style="K_INTEGRAL",
+                          human_readable=False,
+                          output_fmt_content="COMPLEX")
 
         # Example 1: Convert the complex(REAL,IMAGINARY) output of DFFT2() to
         #            polar(AMPLITUDE,PHASE) in RADIAN format using TDMatrix
teradataml/data/docs/uaf/docs_17_20/DWT.py (new file)

@@ -0,0 +1,235 @@
+def DWT(data1=None, data1_filter_expr=None, data2=None,
+        data2_filter_expr=None, wavelet=None, mode="symmetric",
+        level=1, part=None, input_fmt_input_mode=None,
+        output_fmt_index_style="NUMERICAL_SEQUENCE",
+        **generic_arguments):
+    """
+    DESCRIPTION:
+        DWT() is a function that performs discrete wavelet
+        transform (DWT).
+
+    PARAMETERS:
+        data1:
+            Required Argument.
+            Specifies the series to be used as an input.
+            Multiple payloads are supported, and each payload column is
+            transformed independently. Only REAL or MULTIVAR_REAL
+            payload content types are supported.
+            Types: TDSeries
+
+        data1_filter_expr:
+            Optional Argument.
+            Specifies the filter expression for "data1".
+            Types: ColumnExpression
+
+        data2:
+            Optional Argument.
+            Specifies the series to be used as an input. The
+            series specifies the filter. It should have two payload
+            columns corresponding to low and high pass
+            filters. Only MULTIVAR_REAL payload content type is
+            supported.
+            Types: TDSeries
+
+        data2_filter_expr:
+            Optional Argument.
+            Specifies the filter expression for "data2".
+            Types: ColumnExpression
+
+        wavelet:
+            Optional Argument.
+            Specifies the name of the wavelet.
+            Option families and names are:
+                * Daubechies: 'db1' or 'haar', 'db2', 'db3', .... ,'db38'
+                * Coiflets: 'coif1', 'coif2', ... , 'coif17'
+                * Symlets: 'sym2', 'sym3', ... ,' sym20'
+                * Discrete Meyer: 'dmey'
+                * Biorthogonal: 'bior1.1', 'bior1.3', 'bior1.5',
+                                'bior2.2', 'bior2.4', 'bior2.6',
+                                'bior2.8', 'bior3.1', 'bior3.3',
+                                'bior3.5', 'bior3.7', 'bior3.9',
+                                'bior4.4', 'bior5.5', 'bior6.8'
+                * Reverse Biorthogonal: 'rbio1.1', 'rbio1.3',
+                                        'rbio1.5' 'rbio2.2',
+                                        'rbio2.4', 'rbio2.6',
+                                        'rbio2.8', 'rbio3.1',
+                                        'rbio3.3', 'rbio3.5',
+                                        'rbio3.7','rbio3.9',
+                                        'rbio4.4', 'rbio5.5',
+                                        'rbio6.8'
+            Note:
+                * If 'wavelet' is specified, do not include a second
+                  input series for the function. Otherwise, include
+                  a second input series to provide the filter.
+                * Data type is case-sensitive.
+            Types: str
+
+        mode:
+            Optional Argument.
+            Specifies the signal extension mode. Data type is
+            case-insensitive.
+            Permitted Values:
+                * symmetric, sym, symh
+                * reflect, symw
+                * smooth, spd, sp1
+                * constant, sp0
+                * zero, zpd
+                * periodic, ppd
+                * periodization, per
+                * antisymmetric, asym, asymh
+                * antireflect, asymw
+            Default Value: symmetric
+            Types: str
+
+        level:
+            Optional Argument.
+            Specifies the level of decomposition.
+            Valid values are [1,15].
+            Default Value: 1
+            Types: int
+
+        part:
+            Optional Argument.
+            Specifies the indicator that the input is partial decomposition
+            result.
+            Note:
+                Data type is case-insensitive.
+            Permitted Values:
+                * a - the approximation
+                * d - the detail of decomposition of result.
+            Types: str
+
+        input_fmt_input_mode:
+            Optional Argument.
+            Specifies the input mode supported by the function.
+            When there are two input series, then the input_fmt_input_mode
+            specification is mandatory.
+            Permitted Values:
+                The input_fmt_input_mode parameter has the following options:
+                * ONE2ONE: Both the primary and secondary series
+                           specifications contain a series name which
+                           identifies the two series in the function.
+                * MANY2ONE: The MANY specification is the primary series
+                            declaration. The secondary series specification
+                            contains a series name that identifies the single
+                            secondary series.
+                * MATCH: Both series are defined by their respective series
+                         specification instance name declarations.
+            Types: str
+
+        output_fmt_index_style:
+            Optional Argument.
+            Specifies the index style of the output format.
+            Permitted Values: NUMERICAL_SEQUENCE
+            Default Value: NUMERICAL_SEQUENCE
+            Types: str
+
+        **generic_arguments:
+            Specifies the generic keyword arguments of UAF functions.
+            Below are the generic keyword arguments:
+                persist:
+                    Optional Argument.
+                    Specifies whether to persist the results of the
+                    function in a table or not. When set to True,
+                    results are persisted in a table; otherwise,
+                    results are garbage collected at the end of the
+                    session.
+                    Note that, when UAF function is executed, an
+                    analytic result table (ART) is created.
+                    Default Value: False
+                    Types: bool
+
+                volatile:
+                    Optional Argument.
+                    Specifies whether to put the results of the
+                    function in a volatile ART or not. When set to
+                    True, results are stored in a volatile ART,
+                    otherwise not.
+                    Default Value: False
+                    Types: bool
+
+                output_table_name:
+                    Optional Argument.
+                    Specifies the name of the table to store results.
+                    If not specified, a unique table name is internally
+                    generated.
+                    Types: str
+
+                output_db_name:
+                    Optional Argument.
+                    Specifies the name of the database to create output
+                    table into. If not specified, table is created into
+                    database specified by the user at the time of context
+                    creation or configuration parameter. Argument is ignored,
+                    if "output_table_name" is not specified.
+                    Types: str
+
+
+    RETURNS:
+        Instance of DWT.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as DWT_obj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            1. result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        Vantage user is connected to.
+        #     3. To check the list of UAF analytic functions available
+        #        on Vantage user connected to, use
+        #        "display_analytic_functions()".
+
+        # Check the list of available UAF analytic functions.
+        display_analytic_functions(type="UAF")
+
+        # Import function DWT.
+        from teradataml import DWT
+
+        # Load the example data.
+        load_example_data("uaf", ["dwt_dataTable", "dwt_filterTable"])
+
+        # Create teradataml DataFrame objects.
+        data1 = DataFrame.from_table("dwt_dataTable")
+        data2 = DataFrame.from_table("dwt_filterTable")
+
+        # Create teradataml TDSeries objects.
+        data1_series_df = TDSeries(data=data1,
+                                   id="id",
+                                   row_index="rowi",
+                                   row_index_style="SEQUENCE",
+                                   payload_field="v",
+                                   payload_content="REAL")
+
+        data2_series_df = TDSeries(data=data2,
+                                   id="id",
+                                   row_index="seq",
+                                   row_index_style="SEQUENCE",
+                                   payload_field=["lo", "hi"],
+                                   payload_content="MULTIVAR_REAL")
+
+        # Example 1: Perform discrete wavelet transform using two series as input.
+        uaf_out = DWT(data1=data1_series_df,
+                      data2=data2_series_df,
+                      data2_filter_expr=data2_series_df.id==1,
+                      input_fmt_input_mode='MANY2ONE')
+
+        # Print the result DataFrame.
+        print(uaf_out.result)
+
+        # Example 2: Perform discrete wavelet transform using single series as input and the wavelet parameter.
+        uaf_out = DWT(data1=data1_series_df,
+                      wavelet='haar')
+
+        # Print the result DataFrame.
+        print(uaf_out.result)
+
+    """
+
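Beyond the two docstring examples, the generic UAF arguments documented above can be combined with the wavelet form of the call. A minimal sketch reusing the docstring's data1_series_df; the wavelet/level choice is illustrative and the output table name is hypothetical.

# Sketch: two-level 'db2' decomposition, persisted to a named ART instead of
# being garbage collected at the end of the session.
uaf_out = DWT(data1=data1_series_df,
              wavelet='db2',
              level=2,
              persist=True,
              output_table_name='dwt_db2_level2')   # hypothetical table name
print(uaf_out.result)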
teradataml/data/docs/uaf/docs_17_20/DWT2D.py (new file)

@@ -0,0 +1,214 @@
+def DWT2D(data1=None, data1_filter_expr=None, data2=None,
+          data2_filter_expr=None, wavelet=None, mode="symmetric",
+          level=1, input_fmt_input_mode=None,
+          output_fmt_index_style="NUMERICAL_SEQUENCE",
+          **generic_arguments):
+    """
+    DESCRIPTION:
+        DWT2D() function performs discrete wavelet transform (DWT) for
+        two-dimensional data. The algorithm is applied first
+        vertically by column axis, then horizontally by row axis.
+
+
+    PARAMETERS:
+        data1:
+            Required Argument.
+            Specifies the input matrix. Multiple payloads are supported,
+            and each payload column is transformed independently.
+            Only REAL or MULTIVAR_REAL payload content types are supported.
+            Types: TDMatrix
+
+        data1_filter_expr:
+            Optional Argument.
+            Specifies the filter expression for "data1".
+            Types: ColumnExpression
+
+        data2:
+            Optional Argument.
+            Specifies the input series. The series specifies the filter.
+            It should have two payload columns corresponding to low and high
+            pass filters. Only MULTIVAR_REAL payload content type is supported.
+            Types: TDSeries
+
+        data2_filter_expr:
+            Optional Argument.
+            Specifies the filter expression for "data2".
+            Types: ColumnExpression
+
+        wavelet:
+            Optional Argument.
+            Specifies the name of the wavelet.
+            Permitted families and names are:
+                * Daubechies: 'db1' or 'haar', 'db2', 'db3', .... ,'db38'
+                * Coiflets: 'coif1', 'coif2', ... , 'coif17'
+                * Symlets: 'sym2', 'sym3', ... ,' sym20'
+                * Discrete Meyer: 'dmey'
+                * Biorthogonal: 'bior1.1', 'bior1.3', 'bior1.5', 'bior2.2',
+                                'bior2.4', 'bior2.6', 'bior2.8', 'bior3.1',
+                                'bior3.3', 'bior3.5', 'bior3.7', 'bior3.9',
+                                'bior4.4', 'bior5.5', 'bior6.8'
+                * Reverse Biorthogonal: 'rbio1.1', 'rbio1.3', 'rbio1.5'
+                                        'rbio2.2', 'rbio2.4', 'rbio2.6',
+                                        'rbio2.8', 'rbio3.1', 'rbio3.3',
+                                        'rbio3.5', 'rbio3.7','rbio3.9',
+                                        'rbio4.4', 'rbio5.5', 'rbio6.8'
+            Note:
+                * If 'wavelet' is specified, do not include a second
+                  input series for the function. Otherwise, include
+                  a second input series to provide the filter.
+                * Data type is case-sensitive.
+            Types: str
+
+        mode:
+            Optional Argument.
+            Specifies the signal extension mode. Data type is case-insensitive.
+            Permitted Values:
+                * symmetric, sym, symh
+                * reflect, symw
+                * smooth, spd, sp1
+                * constant, sp0
+                * zero, zpd
+                * periodic, ppd
+                * periodization, per
+                * antisymmetric, asym, asymh
+                * antireflect, asymw
+            Default Value: symmetric
+            Types: str
+
+        level:
+            Optional Argument.
+            Specifies the level of decomposition. Valid values are [1,15].
+            Default Value: 1
+            Types: int
+
+        input_fmt_input_mode:
+            Optional Argument.
+            Specifies the input mode supported by the function.
+            When there are two input series, then the "input_fmt_input_mode"
+            specification is mandatory.
+            Permitted Values:
+                * ONE2ONE: Both the primary and secondary series specifications
+                           contain a series name which identifies the two series
+                           in the function.
+                * MANY2ONE: The MANY specification is the primary series
+                            declaration. The secondary series specification
+                            contains a series name that identifies the single
+                            secondary series.
+                * MATCH: Both series are defined by their respective series
+                         specification instance name declarations.
+            Types: str
+
+        output_fmt_index_style:
+            Optional Argument.
+            Specifies the index style of the output format.
+            Permitted Values: NUMERICAL_SEQUENCE
+            Default Value: NUMERICAL_SEQUENCE
+            Types: str
+
+        **generic_arguments:
+            Specifies the generic keyword arguments of UAF functions.
+            Below are the generic keyword arguments:
+                persist:
+                    Optional Argument.
+                    Specifies whether to persist the results of the
+                    function in a table or not. When set to True,
+                    results are persisted in a table; otherwise,
+                    results are garbage collected at the end of the
+                    session.
+                    Note that, when UAF function is executed, an
+                    analytic result table (ART) is created.
+                    Default Value: False
+                    Types: bool
+
+                volatile:
+                    Optional Argument.
+                    Specifies whether to put the results of the
+                    function in a volatile ART or not. When set to
+                    True, results are stored in a volatile ART,
+                    otherwise not.
+                    Default Value: False
+                    Types: bool
+
+                output_table_name:
+                    Optional Argument.
+                    Specifies the name of the table to store results.
+                    If not specified, a unique table name is internally
+                    generated.
+                    Types: str
+
+                output_db_name:
+                    Optional Argument.
+                    Specifies the name of the database to create output
+                    table into. If not specified, table is created into
+                    database specified by the user at the time of context
+                    creation or configuration parameter. Argument is ignored,
+                    if "output_table_name" is not specified.
+                    Types: str
+
+
+    RETURNS:
+        Instance of DWT2D.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as DWT2D_obj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            1. result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        Vantage user is connected to.
+        #     3. To check the list of UAF analytic functions available
+        #        on Vantage user connected to, use
+        #        "display_analytic_functions()".
+
+        # Check the list of available UAF analytic functions.
+        display_analytic_functions(type="UAF")
+
+        # Load the example data.
+        load_example_data("uaf", ["dwt2d_dataTable", "dwt_filterTable"])
+
+        # Create teradataml DataFrame objects.
+        data1 = DataFrame.from_table("dwt2d_dataTable")
+        data2 = DataFrame.from_table("dwt_filterTable")
+
+        # Create teradataml TDSeries object.
+        data2_series_df = TDSeries(data=data2,
+                                   id="id",
+                                   row_index="seq",
+                                   row_index_style="SEQUENCE",
+                                   payload_field=["lo", "hi"],
+                                   payload_content="MULTIVAR_REAL")
+
+        # Create teradataml TDMatrix object.
+        data1_matrix_df = TDMatrix(data=data1,
+                                   id="id",
+                                   row_index="y",
+                                   row_index_style="SEQUENCE",
+                                   column_index="x",
+                                   column_index_style="SEQUENCE",
+                                   payload_field="v",
+                                   payload_content="REAL")
+
+        # Example 1: Perform discrete wavelet transform (DWT) for two-dimensional data using both inputs.
+        uaf_out = DWT2D(data1=data1_matrix_df,
+                        data2=data2_series_df,
+                        data2_filter_expr=data2.id==1,
+                        input_fmt_input_mode="MANY2ONE")
+
+        # Example 1: Perform discrete wavelet transform (DWT) for two-dimensional data
+        #            using only one matrix as input and wavelet as 'haar'.
+        uaf_out = DWT2D(data1=data1_matrix_df,
+                        wavelet='haar')
+
+        # Print the result DataFrame.
+        print(uaf_out.result)
+
+    """
+
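As with DWT(), the mode and level arguments documented above can be applied to the matrix input. A minimal sketch reusing the docstring's data1_matrix_df; the choice of wavelet, mode, and level is illustrative only.

# Sketch: two-level decomposition of the matrix with periodic signal extension.
uaf_out = DWT2D(data1=data1_matrix_df,
                wavelet='db2',
                mode='periodic',
                level=2)
print(uaf_out.result)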
teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py

@@ -28,7 +28,7 @@ def DurbinWatson(data=None, data_filter_expr=None, explanatory_count=None,
         explanatory_count:
             Required Argument.
             Specifies the number of explanatory variables in the original regression.
-            The number of explanatory variables along with the "
+            The number of explanatory variables along with the "include_constant"
            information is needed to perform the lookup in the Durbin-Watson data.
            Types: int
 
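The corrected sentence ties "explanatory_count" to the "include_constant" flag. A minimal sketch of how the two travel together in a call; the TDSeries wrapping, the table and column names, and the existence of a boolean "include_constant" argument (implied by the docstring wording but not shown in this hunk) are all assumptions.

# Sketch: Durbin-Watson test on regression residuals.
from teradataml import DataFrame, TDSeries, DurbinWatson

# Residuals from an ordinary least squares fit (hypothetical table/columns).
residual_series = TDSeries(data=DataFrame.from_table("ols_residuals"),
                           id="id", row_index="t", row_index_style="SEQUENCE",
                           payload_field="residual", payload_content="REAL")

# The lookup in the Durbin-Watson data needs both the number of explanatory
# variables and whether the regression included a constant term.
uaf_out = DurbinWatson(data=residual_series,
                       explanatory_count=2,
                       include_constant=True)   # assumed boolean argument
print(uaf_out.result)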
@@ -10,7 +10,7 @@ def ExtractResults(data=None, data_filter_expr=None, **generic_arguments):
|
|
|
10
10
|
|
|
11
11
|
The functions that have multiple layers are shown in the table.
|
|
12
12
|
Layers of each function can be extracted from the function output,
|
|
13
|
-
i.e
|
|
13
|
+
i.e., "result" attribute, using the layer name specified below:
|
|
14
14
|
|
|
15
15
|
------------------------------------------------------------------
|
|
16
16
|
| Function | Layers |
|