teradataml 20.0.0.1__py3-none-any.whl → 20.0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of teradataml might be problematic.

Files changed (240)
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/LICENSE.pdf +0 -0
  3. teradataml/README.md +306 -0
  4. teradataml/__init__.py +10 -3
  5. teradataml/_version.py +1 -1
  6. teradataml/analytics/__init__.py +3 -2
  7. teradataml/analytics/analytic_function_executor.py +299 -16
  8. teradataml/analytics/analytic_query_generator.py +92 -0
  9. teradataml/analytics/byom/__init__.py +3 -2
  10. teradataml/analytics/json_parser/metadata.py +13 -3
  11. teradataml/analytics/json_parser/utils.py +13 -6
  12. teradataml/analytics/meta_class.py +40 -1
  13. teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
  14. teradataml/analytics/sqle/__init__.py +11 -2
  15. teradataml/analytics/table_operator/__init__.py +4 -3
  16. teradataml/analytics/uaf/__init__.py +21 -2
  17. teradataml/analytics/utils.py +66 -1
  18. teradataml/analytics/valib.py +1 -1
  19. teradataml/automl/__init__.py +1502 -323
  20. teradataml/automl/custom_json_utils.py +139 -61
  21. teradataml/automl/data_preparation.py +247 -307
  22. teradataml/automl/data_transformation.py +32 -12
  23. teradataml/automl/feature_engineering.py +325 -86
  24. teradataml/automl/model_evaluation.py +44 -35
  25. teradataml/automl/model_training.py +122 -153
  26. teradataml/catalog/byom.py +8 -8
  27. teradataml/clients/pkce_client.py +1 -1
  28. teradataml/common/__init__.py +2 -1
  29. teradataml/common/constants.py +72 -0
  30. teradataml/common/deprecations.py +13 -7
  31. teradataml/common/garbagecollector.py +152 -120
  32. teradataml/common/messagecodes.py +11 -2
  33. teradataml/common/messages.py +4 -1
  34. teradataml/common/sqlbundle.py +26 -4
  35. teradataml/common/utils.py +225 -14
  36. teradataml/common/wrapper_utils.py +1 -1
  37. teradataml/context/context.py +82 -2
  38. teradataml/data/SQL_Fundamentals.pdf +0 -0
  39. teradataml/data/complaints_test_tokenized.csv +353 -0
  40. teradataml/data/complaints_tokens_model.csv +348 -0
  41. teradataml/data/covid_confirm_sd.csv +83 -0
  42. teradataml/data/dataframe_example.json +27 -1
  43. teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
  44. teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
  45. teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
  46. teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
  47. teradataml/data/docs/sqle/docs_17_20/Shap.py +203 -0
  48. teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
  49. teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
  50. teradataml/data/docs/sqle/docs_17_20/TextParser.py +3 -3
  51. teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
  52. teradataml/data/docs/tableoperator/docs_17_20/Image2Matrix.py +118 -0
  53. teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
  54. teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
  55. teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
  56. teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
  57. teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
  58. teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
  59. teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
  60. teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
  61. teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
  62. teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
  63. teradataml/data/docs/uaf/docs_17_20/CopyArt.py +145 -0
  64. teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
  65. teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
  66. teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
  67. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
  68. teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +18 -21
  69. teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
  70. teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
  71. teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
  72. teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
  73. teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
  74. teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
  75. teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
  76. teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
  77. teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
  78. teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
  79. teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
  80. teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
  81. teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
  82. teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
  83. teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
  84. teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
  85. teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
  86. teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
  87. teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
  88. teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
  89. teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
  90. teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
  91. teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
  92. teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
  93. teradataml/data/dwt2d_dataTable.csv +65 -0
  94. teradataml/data/dwt_dataTable.csv +8 -0
  95. teradataml/data/dwt_filterTable.csv +3 -0
  96. teradataml/data/finance_data4.csv +13 -0
  97. teradataml/data/grocery_transaction.csv +19 -0
  98. teradataml/data/idwt2d_dataTable.csv +5 -0
  99. teradataml/data/idwt_dataTable.csv +8 -0
  100. teradataml/data/idwt_filterTable.csv +3 -0
  101. teradataml/data/interval_data.csv +5 -0
  102. teradataml/data/jsons/paired_functions.json +14 -0
  103. teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
  104. teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
  105. teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
  106. teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
  107. teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
  108. teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
  109. teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
  110. teradataml/data/jsons/sqle/17.20/TD_TextParser.json +1 -1
  111. teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
  112. teradataml/data/jsons/sqle/20.00/TD_KMeans.json +250 -0
  113. teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +266 -0
  114. teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +278 -0
  115. teradataml/data/jsons/storedprocedure/17.20/TD_COPYART.json +71 -0
  116. teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
  117. teradataml/data/jsons/tableoperator/17.20/IMAGE2MATRIX.json +53 -0
  118. teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
  119. teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
  120. teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
  121. teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
  122. teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
  123. teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
  124. teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
  125. teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
  126. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
  127. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
  128. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
  129. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
  130. teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
  131. teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
  132. teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
  133. teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
  134. teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
  135. teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +10 -19
  136. teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
  137. teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
  138. teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
  139. teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
  140. teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
  141. teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
  142. teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
  143. teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
  144. teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
  145. teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
  146. teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
  147. teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
  148. teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
  149. teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
  150. teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
  151. teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
  152. teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
  153. teradataml/data/jsons/uaf/17.20/TD_SAX.json +210 -0
  154. teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
  155. teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
  156. teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
  157. teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
  158. teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
  159. teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +410 -0
  160. teradataml/data/load_example_data.py +8 -2
  161. teradataml/data/medical_readings.csv +101 -0
  162. teradataml/data/naivebayestextclassifier_example.json +1 -1
  163. teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
  164. teradataml/data/patient_profile.csv +101 -0
  165. teradataml/data/peppers.png +0 -0
  166. teradataml/data/real_values.csv +14 -0
  167. teradataml/data/sax_example.json +8 -0
  168. teradataml/data/scripts/deploy_script.py +1 -1
  169. teradataml/data/scripts/lightgbm/dataset.template +157 -0
  170. teradataml/data/scripts/lightgbm/lightgbm_class_functions.template +247 -0
  171. teradataml/data/scripts/lightgbm/lightgbm_function.template +216 -0
  172. teradataml/data/scripts/lightgbm/lightgbm_sklearn.template +159 -0
  173. teradataml/data/scripts/sklearn/sklearn_fit.py +194 -160
  174. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +136 -115
  175. teradataml/data/scripts/sklearn/sklearn_function.template +34 -16
  176. teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +155 -137
  177. teradataml/data/scripts/sklearn/sklearn_neighbors.py +1 -1
  178. teradataml/data/scripts/sklearn/sklearn_score.py +12 -3
  179. teradataml/data/scripts/sklearn/sklearn_transform.py +162 -24
  180. teradataml/data/star_pivot.csv +8 -0
  181. teradataml/data/target_udt_data.csv +8 -0
  182. teradataml/data/templates/open_source_ml.json +3 -1
  183. teradataml/data/teradataml_example.json +20 -1
  184. teradataml/data/timestamp_data.csv +4 -0
  185. teradataml/data/titanic_dataset_unpivoted.csv +19 -0
  186. teradataml/data/uaf_example.json +55 -1
  187. teradataml/data/unpivot_example.json +15 -0
  188. teradataml/data/url_data.csv +9 -0
  189. teradataml/data/vectordistance_example.json +4 -0
  190. teradataml/data/windowdfft.csv +16 -0
  191. teradataml/dataframe/copy_to.py +1 -1
  192. teradataml/dataframe/data_transfer.py +5 -3
  193. teradataml/dataframe/dataframe.py +1002 -201
  194. teradataml/dataframe/fastload.py +3 -3
  195. teradataml/dataframe/functions.py +867 -0
  196. teradataml/dataframe/row.py +160 -0
  197. teradataml/dataframe/setop.py +2 -2
  198. teradataml/dataframe/sql.py +840 -33
  199. teradataml/dataframe/window.py +1 -1
  200. teradataml/dbutils/dbutils.py +878 -34
  201. teradataml/dbutils/filemgr.py +48 -1
  202. teradataml/geospatial/geodataframe.py +1 -1
  203. teradataml/geospatial/geodataframecolumn.py +1 -1
  204. teradataml/hyperparameter_tuner/optimizer.py +13 -13
  205. teradataml/lib/aed_0_1.dll +0 -0
  206. teradataml/opensource/__init__.py +1 -1
  207. teradataml/opensource/{sklearn/_class.py → _class.py} +102 -17
  208. teradataml/opensource/_lightgbm.py +950 -0
  209. teradataml/opensource/{sklearn/_wrapper_utils.py → _wrapper_utils.py} +1 -2
  210. teradataml/opensource/{sklearn/constants.py → constants.py} +13 -10
  211. teradataml/opensource/sklearn/__init__.py +0 -1
  212. teradataml/opensource/sklearn/_sklearn_wrapper.py +1019 -574
  213. teradataml/options/__init__.py +9 -23
  214. teradataml/options/configure.py +42 -4
  215. teradataml/options/display.py +2 -2
  216. teradataml/plot/axis.py +4 -4
  217. teradataml/scriptmgmt/UserEnv.py +13 -9
  218. teradataml/scriptmgmt/lls_utils.py +77 -23
  219. teradataml/store/__init__.py +13 -0
  220. teradataml/store/feature_store/__init__.py +0 -0
  221. teradataml/store/feature_store/constants.py +291 -0
  222. teradataml/store/feature_store/feature_store.py +2223 -0
  223. teradataml/store/feature_store/models.py +1505 -0
  224. teradataml/store/vector_store/__init__.py +1586 -0
  225. teradataml/table_operators/Script.py +2 -2
  226. teradataml/table_operators/TableOperator.py +106 -20
  227. teradataml/table_operators/query_generator.py +3 -0
  228. teradataml/table_operators/table_operator_query_generator.py +3 -1
  229. teradataml/table_operators/table_operator_util.py +102 -56
  230. teradataml/table_operators/templates/dataframe_register.template +69 -0
  231. teradataml/table_operators/templates/dataframe_udf.template +63 -0
  232. teradataml/telemetry_utils/__init__.py +0 -0
  233. teradataml/telemetry_utils/queryband.py +52 -0
  234. teradataml/utils/dtypes.py +4 -2
  235. teradataml/utils/validators.py +34 -2
  236. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/METADATA +311 -3
  237. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/RECORD +240 -157
  238. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/WHEEL +0 -0
  239. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/top_level.txt +0 -0
  240. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/zip-safe +0 -0
@@ -0,0 +1,214 @@
+ def DWT2D(data1=None, data1_filter_expr=None, data2=None,
+ data2_filter_expr=None, wavelet=None, mode="symmetric",
+ level=1, input_fmt_input_mode=None,
+ output_fmt_index_style="NUMERICAL_SEQUENCE",
+ **generic_arguments):
+ """
+ DESCRIPTION:
+ DWT2D() function performs discrete wavelet transform (DWT) for
+ two-dimensional data. The algorithm is applied first
+ vertically by column axis, then horizontally by row axis.
+
+
+ PARAMETERS:
+ data1:
+ Required Argument.
+ Specifies the input matrix. Multiple payloads are supported,
+ and each payload column is transformed independently.
+ Only REAL or MULTIVAR_REAL payload content types are supported.
+ Types: TDMatrix
+
+ data1_filter_expr:
+ Optional Argument.
+ Specifies the filter expression for "data1".
+ Types: ColumnExpression
+
+ data2:
+ Optional Argument.
+ Specifies the input series. The series specifies the filter.
+ It should have two payload columns corresponding to low and high
+ pass filters. Only MULTIVAR_REAL payload content type is supported.
+ Types: TDSeries
+
+ data2_filter_expr:
+ Optional Argument.
+ Specifies the filter expression for "data2".
+ Types: ColumnExpression
+
+ wavelet:
+ Optional Argument.
+ Specifies the name of the wavelet.
+ Permitted families and names are:
+ * Daubechies: 'db1' or 'haar', 'db2', 'db3', ..., 'db38'
+ * Coiflets: 'coif1', 'coif2', ..., 'coif17'
+ * Symlets: 'sym2', 'sym3', ..., 'sym20'
+ * Discrete Meyer: 'dmey'
+ * Biorthogonal: 'bior1.1', 'bior1.3', 'bior1.5', 'bior2.2',
+ 'bior2.4', 'bior2.6', 'bior2.8', 'bior3.1',
+ 'bior3.3', 'bior3.5', 'bior3.7', 'bior3.9',
+ 'bior4.4', 'bior5.5', 'bior6.8'
+ * Reverse Biorthogonal: 'rbio1.1', 'rbio1.3', 'rbio1.5',
+ 'rbio2.2', 'rbio2.4', 'rbio2.6',
+ 'rbio2.8', 'rbio3.1', 'rbio3.3',
+ 'rbio3.5', 'rbio3.7', 'rbio3.9',
+ 'rbio4.4', 'rbio5.5', 'rbio6.8'
+ Note:
+ * If 'wavelet' is specified, do not include a second
+ input series for the function. Otherwise, include
+ a second input series to provide the filter.
+ * Data type is case-sensitive.
+ Types: str
+
+ mode:
+ Optional Argument.
+ Specifies the signal extension mode. Data type is case-insensitive.
+ Permitted Values:
+ * symmetric, sym, symh
+ * reflect, symw
+ * smooth, spd, sp1
+ * constant, sp0
+ * zero, zpd
+ * periodic, ppd
+ * periodization, per
+ * antisymmetric, asym, asymh
+ * antireflect, asymw
+ Default Value: symmetric
+ Types: str
+
+ level:
+ Optional Argument.
+ Specifies the level of decomposition. Valid values are [1,15].
+ Default Value: 1
+ Types: int
+
+ input_fmt_input_mode:
+ Optional Argument.
+ Specifies the input mode supported by the function.
+ When there are two input series, then the "input_fmt_input_mode"
+ specification is mandatory.
+ Permitted Values:
+ * ONE2ONE: Both the primary and secondary series specifications
+ contain a series name which identifies the two series
+ in the function.
+ * MANY2ONE: The MANY specification is the primary series
+ declaration. The secondary series specification
+ contains a series name that identifies the single
+ secondary series.
+ * MATCH: Both series are defined by their respective series
+ specification instance name declarations.
+ Types: str
+
+ output_fmt_index_style:
+ Optional Argument.
+ Specifies the index style of the output format.
+ Permitted Values: NUMERICAL_SEQUENCE
+ Default Value: NUMERICAL_SEQUENCE
+ Types: str
+
+ **generic_arguments:
+ Specifies the generic keyword arguments of UAF functions.
+ Below are the generic keyword arguments:
+ persist:
+ Optional Argument.
+ Specifies whether to persist the results of the
+ function in a table or not. When set to True,
+ results are persisted in a table; otherwise,
+ results are garbage collected at the end of the
+ session.
+ Note that, when UAF function is executed, an
+ analytic result table (ART) is created.
+ Default Value: False
+ Types: bool
+
+ volatile:
+ Optional Argument.
+ Specifies whether to put the results of the
+ function in a volatile ART or not. When set to
+ True, results are stored in a volatile ART,
+ otherwise not.
+ Default Value: False
+ Types: bool
+
+ output_table_name:
+ Optional Argument.
+ Specifies the name of the table to store results.
+ If not specified, a unique table name is internally
+ generated.
+ Types: str
+
+ output_db_name:
+ Optional Argument.
+ Specifies the name of the database to create output
+ table into. If not specified, table is created into
+ database specified by the user at the time of context
+ creation or configuration parameter. Argument is ignored,
+ if "output_table_name" is not specified.
+ Types: str
+
+
+ RETURNS:
+ Instance of DWT2D.
+ Output teradataml DataFrames can be accessed using attribute
+ references, such as DWT2D_obj.<attribute_name>.
+ Output teradataml DataFrame attribute name is:
+ 1. result
+
+
+ RAISES:
+ TeradataMlException, TypeError, ValueError
+
+
+ EXAMPLES:
+ # Notes:
+ # 1. Get the connection to Vantage, before importing the
+ # function in user space.
+ # 2. User can import the function, if it is available on
+ # Vantage user is connected to.
+ # 3. To check the list of UAF analytic functions available
+ # on Vantage user connected to, use
+ # "display_analytic_functions()".
+
+ # Check the list of available UAF analytic functions.
+ display_analytic_functions(type="UAF")
+
+ # Load the example data.
+ load_example_data("uaf", ["dwt2d_dataTable", "dwt_filterTable"])
+
+ # Create teradataml DataFrame objects.
+ data1 = DataFrame.from_table("dwt2d_dataTable")
+ data2 = DataFrame.from_table("dwt_filterTable")
+
+ # Create teradataml TDSeries object.
+ data2_series_df = TDSeries(data=data2,
+ id="id",
+ row_index="seq",
+ row_index_style="SEQUENCE",
+ payload_field=["lo", "hi"],
+ payload_content="MULTIVAR_REAL")
+
+ # Create teradataml TDMatrix object.
+ data1_matrix_df = TDMatrix(data=data1,
+ id="id",
+ row_index="y",
+ row_index_style="SEQUENCE",
+ column_index="x",
+ column_index_style="SEQUENCE",
+ payload_field="v",
+ payload_content="REAL")
+
+ # Example 1: Perform discrete wavelet transform (DWT) for two-dimensional data using both inputs.
+ uaf_out = DWT2D(data1=data1_matrix_df,
+ data2=data2_series_df,
+ data2_filter_expr=data2.id==1,
+ input_fmt_input_mode="MANY2ONE")
+
+ # Example 2: Perform discrete wavelet transform (DWT) for two-dimensional data
+ # using only one matrix as input and wavelet as 'haar'.
+ uaf_out = DWT2D(data1=data1_matrix_df,
+ wavelet='haar')
+
+ # Print the result DataFrame.
+ print(uaf_out.result)
+
+ """
+
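The bundled examples exercise the two mutually exclusive input styles (filter series vs. named wavelet) but not the extension mode or a deeper decomposition. Below is a minimal, hedged sketch based only on the parameter list above; the wavelet, mode, and level values are illustrative, and the TDMatrix object is assumed to be built as in the examples.

# Sketch: deeper decomposition with an explicit extension mode.
# 'db2' and 'periodic' come from the permitted-value lists above; level must be in [1, 15].
uaf_out = DWT2D(data1=data1_matrix_df,
                wavelet='db2',
                mode='periodic',
                level=2,
                persist=True)   # generic UAF argument documented above: keep the result ART
print(uaf_out.result)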
@@ -1,5 +1,5 @@
  def DickeyFuller(data=None, data_filter_expr=None, algorithm=None,
- max_lags=None, drift_trend_formula=None,
+ max_lags=0,
  **generic_arguments):
  """
  DESCRIPTION:
@@ -10,18 +10,23 @@ def DickeyFuller(data=None, data_filter_expr=None, algorithm=None,
  other factors.

  The following procedure is an example of how to use DickeyFuller() function:
- * Run regression tests.
- * Determine the algorithm for Dickey Fuller statistic data.
- * Run DickeyFuller() function using the algorithm.
- * (Result shows series contains unit roots) Use DIFF() and
- SeasonalNormalize() functions to remove unit roots.
+ * Run DickeyFuller() on the time series being modeled.
+ * Retrieve the results of the DickeyFuller() test to determine if the
+ time series contains any unit roots.
+ * If unit roots are present, use a technique such as differencing, for example Diff(),
+ or seasonal normalization, for example SeasonalNormalize(), to create a new series,
+ then rerun the DickeyFuller() test to verify that the unit roots of the
+ differenced or seasonally-normalized series have been removed.
+ * If the result shows unit roots, use Diff() and SeasonalNormalize()
+ to remove unit roots.


  PARAMETERS:
  data:
  Required Argument.
- Specifies a single logical-runtime series as an input.
- Types: TDSeries
+ Specifies a single logical-runtime series as an input or TDAnalyticResult which
+ contains ARTFITRESIDUALS layer.
+ Types: TDSeries, TDAnalyticResult

  data_filter_expr:
  Optional Argument.
@@ -34,26 +39,18 @@ def DickeyFuller(data=None, data_filter_expr=None, algorithm=None,
  Permitted Values:
  * NONE: Random walk
  * DRIFT: Random walk with drift
- * TREND: Random walk with linear trend
  * DRIFTNTREND: Random walk with drift and trend
- * FORMULA: Random walk with selected drift, trend and
- auxiliary lags
+ * SQUARED: Random walk with drift, trend, and
+ quadratic trend.
  Types: str

  max_lags:
  Optional Argument.
  Specifies the maximum number of lags to use with the regression
- equation.
+ equation. Range is [0, 100].
+ Default Value: 0
  Types: int

- drift_trend_formula:
- Optional Argument.
- Specifies the formula used to represent the drift and trend portions
- of the regression.
- Note:
- * Valid only when "algorithm" is set to 'formula'.
- Types: str
-
  **generic_arguments:
  Specifies the generic keyword arguments of UAF functions.
  Below are the generic keyword arguments:
@@ -136,7 +133,7 @@ def DickeyFuller(data=None, data_filter_expr=None, algorithm=None,
  # for the presence of the unit roots using random walk with
  # linear trend for regression.
  uaf_out = DickeyFuller(data=data_series_df,
- algorithm='TREND')
+ algorithm='DRIFT')

  # Print the result DataFrame.
  print(uaf_out.result)
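Because this release removes the "drift_trend_formula" argument and the 'TREND'/'FORMULA' algorithm values, existing calls may need updating. A hedged migration sketch, inferred only from the signature change above (not from the package's own examples):

# 20.0.0.1-style call, shown only for contrast; the argument and algorithm value no longer exist:
# uaf_out = DickeyFuller(data=data_series_df, algorithm='TREND', drift_trend_formula='...')

# 20.0.0.3-style call: pick one of the remaining algorithm values
# (NONE, DRIFT, DRIFTNTREND, SQUARED) and, optionally, bound the lag count.
uaf_out = DickeyFuller(data=data_series_df,
                       algorithm='DRIFT',
                       max_lags=4)   # illustrative value within the documented [0, 100] range
print(uaf_out.result)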
@@ -28,7 +28,7 @@ def DurbinWatson(data=None, data_filter_expr=None, explanatory_count=None,
  explanatory_count:
  Required Argument.
  Specifies the number of explanatory variables in the original regression.
- The number of explanatory variables along with the "include_contant"
+ The number of explanatory variables along with the "include_constant"
  information is needed to perform the lookup in the Durbin-Watson data.
  Types: int

@@ -10,7 +10,7 @@ def ExtractResults(data=None, data_filter_expr=None, **generic_arguments):

  The functions that have multiple layers are shown in the table.
  Layers of each function can be extracted from the function output,
- i.e. "result" attribute, using the layer name specified below:
+ i.e., "result" attribute, using the layer name specified below:

  ------------------------------------------------------------------
  | Function | Layers |
@@ -0,0 +1,160 @@
+ def FilterFactory1d(filter_id=None, filter_type=None,
+ window_type=None, filter_length=None,
+ transition_bandwidth=None, low_cutoff=None,
+ high_cutoff=None, sampling_frequency=None,
+ filter_description=None, **generic_arguments):
+ """
+ DESCRIPTION:
+ FilterFactory1d() function creates finite impulse response (FIR)
+ filter coefficients. The filters are based on certain parameters
+ and stored into a common table for reuse.
+ Note:
+ User needs the EXECUTE PROCEDURE privilege on SYSLIB.
+
+ PARAMETERS:
+ filter_id:
+ Required Argument.
+ Specifies the filter identifier, based on filter coefficients
+ stored in the table.
+ Types: int
+
+ filter_type:
+ Required Argument.
+ Specifies the type of filter to generate.
+ Permitted Values:
+ * LOWPASS - To remove frequencies above low_cutoff.
+ * HIGHPASS - To remove frequencies below high_cutoff.
+ * BANDPASS - To remove frequencies below low_cutoff and
+ above high_cutoff.
+ * BANDSTOP - To remove frequencies between low_cutoff
+ and high_cutoff.
+ Types: str
+
+ window_type:
+ Optional Argument.
+ Specifies the window function to apply to the filter, which maintains a
+ smooth drop-off to zero, and avoids extra artifacts in the
+ frequency domain. The default is to leave the filter
+ coefficients as they are, and not apply any windowing function.
+ Permitted Values: BLACKMAN, HAMMING, HANNING, BARTLETT
+ Types: str
+
+ filter_length:
+ Optional Argument.
+ Specifies the length of the filter to generate.
+ Overrides "transition_bandwidth" argument if both are supplied,
+ and renders the other an optional argument.
+ Default is approximately 4/("transition_bandwidth"/
+ "sampling_frequency").
+ Types: int
+
+ transition_bandwidth:
+ Optional Argument.
+ Specifies the maximum allowed size for the range of
+ frequencies for filter transitions between a passband and stopband.
+ This also determines the number of coefficients to be generated.
+ Value must be greater than 0.
+ A smaller value produces faster drop off at the cost of more coefficients.
+ Not used when "filter_length" is supplied.
+ Default is bandwidth from "filter_length".
+ Types: float
+
+ low_cutoff:
+ Optional Argument.
+ Specifies the lower frequency at which the change between a passband
+ and stopband occurs. It must be greater
+ than 0. It is not used by default with the 'HIGHPASS' filter.
+ Types: float
+
+ high_cutoff:
+ Optional Argument.
+ Specifies the higher frequency at which the change
+ between a passband and stopband occurs. It must be greater
+ than 0 and is not used by default with the 'LOWPASS' filter.
+ Types: float
+
+ sampling_frequency:
+ Required Argument.
+ Specifies the frequency at which the data to be filtered was
+ sampled. It must be greater than 0.
+ Types: float
+
+ filter_description:
+ Optional Argument.
+ Specifies the description for the filter coefficients
+ that contain the same filter ID. Description is only
+ written to one row for each filter generated, and
+ ROW_I is 0. Default is a string describing parameters.
+ Types: str
+
+ **generic_arguments:
+ Specifies the generic keyword arguments of UAF functions.
+ Below are the generic keyword arguments:
+ persist:
+ Optional Argument.
+ Specifies whether to persist the results of the
+ function in a table or not. When set to True,
+ results are persisted in a table; otherwise,
+ results are garbage collected at the end of the
+ session.
+ Note that, when UAF function is executed, an
+ analytic result table (ART) is created.
+ Default Value: False
+ Types: bool
+
+ volatile:
+ Optional Argument.
+ Specifies whether to put the results of the
+ function in a volatile ART or not. When set to
+ True, results are stored in a volatile ART,
+ otherwise not.
+ Default Value: False
+ Types: bool
+
+ output_table_name:
+ Optional Argument.
+ Specifies the name of the table to store results.
+ If not specified, a unique table name is internally
+ generated.
+ Types: str
+
+ output_db_name:
+ Optional Argument.
+ Specifies the name of the database to create output
+ table into. If not specified, table is created into
+ database specified by the user at the time of context
+ creation or configuration parameter. Argument is ignored,
+ if "output_table_name" is not specified.
+ Types: str
+
+ RAISES:
+ TeradataMlException, TypeError, ValueError
+
+
+ EXAMPLES:
+ # Notes:
+ # 1. Get the connection to Vantage, before importing the
+ # function in user space.
+ # 2. User can import the function, if it is available on
+ # Vantage user is connected to.
+ # 3. To check the list of UAF analytic functions available
+ # on Vantage user connected to, use
+ # "display_analytic_functions()".
+
+ # Check the list of available UAF analytic functions.
+ display_analytic_functions(type="UAF")
+
+ # Import function FilterFactory1d.
+ from teradataml import FilterFactory1d
+
+ # Example 1: Create finite impulse response (FIR) filter coefficients.
+ res = FilterFactory1d(filter_id = 33,
+ filter_type = 'lowpass',
+ window_type = 'blackman',
+ transition_bandwidth = 20.0,
+ low_cutoff = 40.0,
+ sampling_frequency = 200)
+ print(res.result)
+
+ """
+
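The bundled example builds a LOWPASS filter. The sketch below assumes the same argument semantics described in the docstring and shows a BANDPASS variant, which needs both cutoffs; all values are illustrative only.

# Sketch: a band-pass filter keeps frequencies between low_cutoff and high_cutoff;
# both must be greater than 0 per the parameter notes above.
res = FilterFactory1d(filter_id=34,
                      filter_type='bandpass',
                      window_type='hamming',
                      transition_bandwidth=10.0,
                      low_cutoff=20.0,
                      high_cutoff=60.0,
                      sampling_frequency=200)
print(res.result)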
@@ -19,7 +19,7 @@ def GenseriesSinusoids(data=None, data_filter_expr=None, periodicities=None,
  exclude from the data set.
  * Use the BinarySeriesOp() function to subtract the generated series
  from the original series using "mathop" argument value as 'SUB'.
- * User the PowerSpec() function to verify that target periodicities
+ * Use the PowerSpec() function to verify that target periodicities
  have been removed from the original series.

@@ -1,6 +1,6 @@
  def GoldfeldQuandt(data=None, data_filter_expr=None,
- orig_regr_paramcnt=None, weights=False, formula=None,
- algorithm=None, start_idx=None, omit=None,
+ const_term=True, algorithm=None,
+ start_idx=None, omit=None,
  significance_level=None, test="GREATER",
  **generic_arguments):
  """
@@ -24,35 +24,15 @@ def GoldfeldQuandt(data=None, data_filter_expr=None,
  Specifies the filter expression for "data".
  Types: ColumnExpression

- orig_regr_paramcnt:
- Required Argument.
- Specifies the number of responses and explanatory variables
- present in the original regression.
- Types: int
-
- weights:
+ const_term:
  Optional Argument.
- Specifies whether the last series found in the payload
- is to be interpreted as a series of weights that can
- be used to perform a weighted least squares regression
- solution. When set to True, the last series is interpreted
- as series of weights, otherwise not.
- Default Value: False
+ Specifies whether the regression performed should
+ use a Y-intercept coefficient.
+ When set to True, the regression is performed on “Y=C+aX1+bX2+…”.
+ When set to False, the regression is performed on “Y=aX1+bX2+…”.
+ Default Value: True
  Types: bool

- formula:
- Required Argument.
- Specifies the formula used in the regression operation.
- The name of the response variable must always be Y,
- and the name of the explanatory variable must always be X1.
- For example, "Y = B0 + B1 * X1".
- Notes:
- * The "formula" argument must be specified along with the
- "algorithm" argument.
- * Use the following link to refer the formula rules:
- "https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-Unbounded-Array-Framework-Time-Series-Reference-17.20/Mathematic-Operators-and-Functions/Formula-Rules"
- Types: str
-
  algorithm:
  Required Argument.
  Specifies the algorithm used for the regression.
@@ -205,12 +185,10 @@ def GoldfeldQuandt(data=None, data_filter_expr=None,
  payload_field=["y1", "x1"],
  payload_content="MULTIVAR_REAL")

- # Execute GoldfeldQuandt for TDGenSeries.
+ # Execute GoldfeldQuandt.
  uaf_out = GoldfeldQuandt(data=data_series_df,
- formula="Y = B0 + B1*X1",
  omit=2.0,
  significance_level=0.05,
- orig_regr_paramcnt=2,
  algorithm="QR")

  # Print the result DataFrame.
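The updated example above already drops "formula" and "orig_regr_paramcnt"; the only new knob is "const_term". A hedged sketch of a call that suppresses the Y-intercept, reusing the call shape from the example and with all other values illustrative:

# Sketch: const_term=False fits Y = aX1 + bX2 + ... with no intercept,
# per the parameter description added in this release.
uaf_out = GoldfeldQuandt(data=data_series_df,
                         const_term=False,
                         omit=2.0,
                         significance_level=0.05,
                         algorithm="QR")
print(uaf_out.result)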
@@ -1,5 +1,5 @@
  def HoltWintersForecaster(data=None, data_filter_expr=None, forecast_periods=None,
- alpha=None, beta=None, gamma=None, seasonal_periods=1,
+ alpha=None, beta=None, gamma=None, seasonal_periods=None,
  init_level=None, init_trend=None, init_season=None,
  model=None, fit_percentage=100,
  prediction_intervals="BOTH", fit_metrics=False,
@@ -67,7 +67,6 @@ def HoltWintersForecaster(data=None, data_filter_expr=None, forecast_periods=Non
  parameter is 3. Value must be greater than or equal to 1.
  Note:
  Required when "gamma" or "init_season" is specified.
- Default Value: 1
  Types: int

  init_level:
@@ -248,10 +247,13 @@ def HoltWintersForecaster(data=None, data_filter_expr=None, forecast_periods=Non

  # Print the result DataFrames.
  print(uaf_out.result)
+
  # Print the model statistics result.
  print(uaf_out.fitmetadata)
+
  # Print the selection metrics result.
  print(uaf_out.selmetrics)
+
  # Print the residuals statistics result.
  print(uaf_out.fitresiduals)

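Because the default for "seasonal_periods" changes from 1 to None in this release, callers relying on the seasonal smoothing path should now pass it explicitly whenever "gamma" or "init_season" is used. A hedged sketch, with argument values illustrative and the TDSeries assumed to be built as in the function's EXAMPLES section:

# Sketch: with 20.0.0.3 the seasonal period is no longer implied, so state it.
uaf_out = HoltWintersForecaster(data=data_series_df,
                                forecast_periods=12,
                                gamma=0.1,
                                seasonal_periods=4,   # required once gamma is specified
                                fit_metrics=True)
print(uaf_out.result)
print(uaf_out.fitmetadata)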
@@ -1,5 +1,5 @@
  def IDFFT2(data=None, data_filter_expr=None, human_readable=True,
- output_fmt_content=None, output_fmt_row_major=1,
+ output_fmt_content=None,
  **generic_arguments):
  """
  DESCRIPTION:
@@ -71,13 +71,6 @@ def IDFFT2(data=None, data_filter_expr=None, human_readable=True,
  MULTIVAR_AMPL_PHASE
  Types: str

- output_fmt_row_major:
- Optional Argument.
- Specifies whether the matrix output should be in a row-major-centric
- or column-major-centric manner.
- Default Value: True
- Types: bool
-
  **generic_arguments:
  Specifies the generic keyword arguments of UAF functions.
  Below are the generic keyword arguments:
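With "output_fmt_content" retained but "output_fmt_row_major" removed from the signature, any existing call that passed the removed keyword should simply drop it. A hedged before/after sketch based only on the signature diff above; the input object name is hypothetical:

# 20.0.0.1-style call (keyword now removed), shown only for contrast:
# uaf_out = IDFFT2(data=idfft2_input, output_fmt_content='MULTIVAR_AMPL_PHASE',
#                  output_fmt_row_major=1)

# 20.0.0.3-style call: drop the removed keyword, everything else stays the same.
uaf_out = IDFFT2(data=idfft2_input,   # hypothetical input built elsewhere
                 output_fmt_content='MULTIVAR_AMPL_PHASE')
print(uaf_out.result)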