teradataml 20.0.0.0-py3-none-any.whl → 20.0.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of teradataml might be problematic.

Files changed (263)
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/LICENSE.pdf +0 -0
  3. teradataml/README.md +183 -0
  4. teradataml/__init__.py +6 -3
  5. teradataml/_version.py +2 -2
  6. teradataml/analytics/__init__.py +3 -2
  7. teradataml/analytics/analytic_function_executor.py +275 -40
  8. teradataml/analytics/analytic_query_generator.py +92 -0
  9. teradataml/analytics/byom/__init__.py +3 -2
  10. teradataml/analytics/json_parser/metadata.py +1 -0
  11. teradataml/analytics/json_parser/utils.py +17 -21
  12. teradataml/analytics/meta_class.py +40 -1
  13. teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
  14. teradataml/analytics/sqle/__init__.py +10 -2
  15. teradataml/analytics/table_operator/__init__.py +3 -2
  16. teradataml/analytics/uaf/__init__.py +21 -2
  17. teradataml/analytics/utils.py +62 -1
  18. teradataml/analytics/valib.py +1 -1
  19. teradataml/automl/__init__.py +1553 -319
  20. teradataml/automl/custom_json_utils.py +139 -61
  21. teradataml/automl/data_preparation.py +276 -319
  22. teradataml/automl/data_transformation.py +163 -81
  23. teradataml/automl/feature_engineering.py +402 -239
  24. teradataml/automl/feature_exploration.py +9 -2
  25. teradataml/automl/model_evaluation.py +48 -51
  26. teradataml/automl/model_training.py +291 -189
  27. teradataml/catalog/byom.py +8 -8
  28. teradataml/catalog/model_cataloging_utils.py +1 -1
  29. teradataml/clients/auth_client.py +133 -0
  30. teradataml/clients/pkce_client.py +1 -1
  31. teradataml/common/aed_utils.py +3 -2
  32. teradataml/common/constants.py +48 -6
  33. teradataml/common/deprecations.py +13 -7
  34. teradataml/common/garbagecollector.py +156 -120
  35. teradataml/common/messagecodes.py +6 -1
  36. teradataml/common/messages.py +3 -1
  37. teradataml/common/sqlbundle.py +1 -1
  38. teradataml/common/utils.py +103 -11
  39. teradataml/common/wrapper_utils.py +1 -1
  40. teradataml/context/context.py +121 -31
  41. teradataml/data/advertising.csv +201 -0
  42. teradataml/data/bank_marketing.csv +11163 -0
  43. teradataml/data/bike_sharing.csv +732 -0
  44. teradataml/data/boston2cols.csv +721 -0
  45. teradataml/data/breast_cancer.csv +570 -0
  46. teradataml/data/complaints_test_tokenized.csv +353 -0
  47. teradataml/data/complaints_tokens_model.csv +348 -0
  48. teradataml/data/covid_confirm_sd.csv +83 -0
  49. teradataml/data/customer_segmentation_test.csv +2628 -0
  50. teradataml/data/customer_segmentation_train.csv +8069 -0
  51. teradataml/data/dataframe_example.json +10 -0
  52. teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +3 -1
  53. teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +6 -0
  54. teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +5 -1
  55. teradataml/data/docs/sqle/docs_17_20/ANOVA.py +61 -1
  56. teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
  57. teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +2 -0
  58. teradataml/data/docs/sqle/docs_17_20/FTest.py +105 -26
  59. teradataml/data/docs/sqle/docs_17_20/GLM.py +162 -1
  60. teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
  61. teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
  62. teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
  63. teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
  64. teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
  65. teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
  66. teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
  67. teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
  68. teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
  69. teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
  70. teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +119 -1
  71. teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +93 -1
  72. teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
  73. teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +163 -1
  74. teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
  75. teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
  76. teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
  77. teradataml/data/docs/sqle/docs_17_20/XGBoost.py +12 -4
  78. teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +7 -1
  79. teradataml/data/docs/sqle/docs_17_20/ZTest.py +72 -7
  80. teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
  81. teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
  82. teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
  83. teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
  84. teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
  85. teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
  86. teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
  87. teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
  88. teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
  89. teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
  90. teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
  91. teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
  92. teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
  93. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
  94. teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
  95. teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
  96. teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
  97. teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
  98. teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
  99. teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
  100. teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
  101. teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
  102. teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
  103. teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
  104. teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
  105. teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
  106. teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
  107. teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
  108. teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
  109. teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
  110. teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
  111. teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
  112. teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
  113. teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
  114. teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
  115. teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
  116. teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
  117. teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
  118. teradataml/data/dwt2d_dataTable.csv +65 -0
  119. teradataml/data/dwt_dataTable.csv +8 -0
  120. teradataml/data/dwt_filterTable.csv +3 -0
  121. teradataml/data/finance_data4.csv +13 -0
  122. teradataml/data/glm_example.json +28 -1
  123. teradataml/data/grocery_transaction.csv +19 -0
  124. teradataml/data/housing_train_segment.csv +201 -0
  125. teradataml/data/idwt2d_dataTable.csv +5 -0
  126. teradataml/data/idwt_dataTable.csv +8 -0
  127. teradataml/data/idwt_filterTable.csv +3 -0
  128. teradataml/data/insect2Cols.csv +61 -0
  129. teradataml/data/interval_data.csv +5 -0
  130. teradataml/data/jsons/paired_functions.json +14 -0
  131. teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +99 -27
  132. teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
  133. teradataml/data/jsons/sqle/17.20/TD_FTest.json +166 -83
  134. teradataml/data/jsons/sqle/17.20/TD_GLM.json +90 -14
  135. teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +48 -5
  136. teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +5 -3
  137. teradataml/data/jsons/sqle/17.20/TD_KMeans.json +31 -11
  138. teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
  139. teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
  140. teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +3 -2
  141. teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
  142. teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
  143. teradataml/data/jsons/sqle/17.20/TD_ROC.json +2 -1
  144. teradataml/data/jsons/sqle/17.20/TD_SVM.json +16 -16
  145. teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +19 -1
  146. teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +168 -15
  147. teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +50 -1
  148. teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
  149. teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
  150. teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
  151. teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +25 -7
  152. teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +17 -4
  153. teradataml/data/jsons/sqle/17.20/TD_ZTest.json +157 -80
  154. teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
  155. teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
  156. teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
  157. teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
  158. teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
  159. teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
  160. teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
  161. teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
  162. teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
  163. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
  164. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
  165. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
  166. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
  167. teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
  168. teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
  169. teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
  170. teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
  171. teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
  172. teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
  173. teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
  174. teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
  175. teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
  176. teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
  177. teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
  178. teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
  179. teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
  180. teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
  181. teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
  182. teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
  183. teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
  184. teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
  185. teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
  186. teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
  187. teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
  188. teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
  189. teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
  190. teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
  191. teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
  192. teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
  193. teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
  194. teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
  195. teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
  196. teradataml/data/kmeans_example.json +5 -0
  197. teradataml/data/kmeans_table.csv +10 -0
  198. teradataml/data/load_example_data.py +8 -2
  199. teradataml/data/naivebayestextclassifier_example.json +1 -1
  200. teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
  201. teradataml/data/onehot_encoder_train.csv +4 -0
  202. teradataml/data/openml_example.json +29 -0
  203. teradataml/data/peppers.png +0 -0
  204. teradataml/data/real_values.csv +14 -0
  205. teradataml/data/sax_example.json +8 -0
  206. teradataml/data/scale_attributes.csv +3 -0
  207. teradataml/data/scale_example.json +52 -1
  208. teradataml/data/scale_input_part_sparse.csv +31 -0
  209. teradataml/data/scale_input_partitioned.csv +16 -0
  210. teradataml/data/scale_input_sparse.csv +11 -0
  211. teradataml/data/scale_parameters.csv +3 -0
  212. teradataml/data/scripts/deploy_script.py +21 -2
  213. teradataml/data/scripts/sklearn/sklearn_fit.py +40 -37
  214. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +22 -30
  215. teradataml/data/scripts/sklearn/sklearn_function.template +42 -24
  216. teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +23 -33
  217. teradataml/data/scripts/sklearn/sklearn_neighbors.py +19 -28
  218. teradataml/data/scripts/sklearn/sklearn_score.py +32 -32
  219. teradataml/data/scripts/sklearn/sklearn_transform.py +85 -42
  220. teradataml/data/star_pivot.csv +8 -0
  221. teradataml/data/templates/open_source_ml.json +2 -1
  222. teradataml/data/teradataml_example.json +97 -1
  223. teradataml/data/timestamp_data.csv +4 -0
  224. teradataml/data/titanic_dataset_unpivoted.csv +19 -0
  225. teradataml/data/uaf_example.json +55 -1
  226. teradataml/data/unpivot_example.json +15 -0
  227. teradataml/data/url_data.csv +9 -0
  228. teradataml/data/windowdfft.csv +16 -0
  229. teradataml/data/ztest_example.json +16 -0
  230. teradataml/dataframe/copy_to.py +9 -4
  231. teradataml/dataframe/data_transfer.py +125 -64
  232. teradataml/dataframe/dataframe.py +575 -57
  233. teradataml/dataframe/dataframe_utils.py +47 -9
  234. teradataml/dataframe/fastload.py +273 -90
  235. teradataml/dataframe/functions.py +339 -0
  236. teradataml/dataframe/row.py +160 -0
  237. teradataml/dataframe/setop.py +2 -2
  238. teradataml/dataframe/sql.py +740 -18
  239. teradataml/dataframe/window.py +1 -1
  240. teradataml/dbutils/dbutils.py +324 -18
  241. teradataml/geospatial/geodataframe.py +1 -1
  242. teradataml/geospatial/geodataframecolumn.py +1 -1
  243. teradataml/hyperparameter_tuner/optimizer.py +13 -13
  244. teradataml/lib/aed_0_1.dll +0 -0
  245. teradataml/opensource/sklearn/_sklearn_wrapper.py +254 -122
  246. teradataml/options/__init__.py +16 -5
  247. teradataml/options/configure.py +39 -6
  248. teradataml/options/display.py +2 -2
  249. teradataml/plot/axis.py +4 -4
  250. teradataml/scriptmgmt/UserEnv.py +26 -19
  251. teradataml/scriptmgmt/lls_utils.py +120 -16
  252. teradataml/table_operators/Script.py +4 -5
  253. teradataml/table_operators/TableOperator.py +160 -26
  254. teradataml/table_operators/table_operator_util.py +88 -41
  255. teradataml/table_operators/templates/dataframe_udf.template +63 -0
  256. teradataml/telemetry_utils/__init__.py +0 -0
  257. teradataml/telemetry_utils/queryband.py +52 -0
  258. teradataml/utils/validators.py +41 -3
  259. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +191 -6
  260. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +263 -185
  261. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
  262. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
  263. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
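Docstring diffs for four of the changed doc files (ScaleFit, ScaleTransform, Shap, TDGLMPredict) follow. As a quick sanity check after upgrading, the version string comes from the updated teradataml/_version.py; this is a minimal sketch and assumes the package exposes __version__ at the top level, which is common but worth verifying in your environment:

    # Hedged sketch: confirm which of the two wheels is active before relying
    # on 20.0.0.2 additions such as Shap or the sparse ScaleFit arguments.
    import teradataml

    print(teradataml.__version__)  # expected: '20.0.0.2' after the upgrade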
teradataml/data/docs/sqle/docs_17_20/ScaleFit.py (+119 -1)

@@ -1,5 +1,9 @@
 def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP",
-             global_scale=False, multiplier='1', intercept='0', **generic_arguments):
+             global_scale=False, multiplier='1', intercept='0',
+             parameter_data=None, attribute_data=None, partition_columns=None,
+             ignoreinvalid_locationscale=False, unused_attributes="UNSCALED",
+             attribute_name_column=None, attribute_value_column=None, target_attributes=None,
+             **generic_arguments):
     """
     DESCRIPTION:
         ScaleFit() function outputs statistics to input to ScaleTransform() function,
@@ -15,6 +19,9 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
             Required Argument.
             Specifies the input teradataml DataFrame column(s) for which to output statistics.
             The columns must contain numeric data in the range (-1e³⁰⁸, 1e³⁰⁸).
+            Note:
+                * This argument cannot be used with "target_attributes", "attribute_name_column",
+                  "attribute_value_column".
             Types: str OR list of Strings (str)
 
         scale_method:
@@ -124,6 +131,60 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
             Default Value: "0"
             Types: str OR list of String (str)
 
+        parameter_data:
+            Optional Argument.
+            Specifies the input teradataml DataFrame containing the parameters.
+            Note:
+                * This is valid when "data_partition_column" is used.
+            Types: teradataml DataFrame
+
+        attribute_data:
+            Optional Argument.
+            Specifies the input teradataml DataFrame containing the attributes.
+            Note:
+                * This is valid when "data_partition_column" is used.
+            Types: teradataml DataFrame
+
+        partition_columns:
+            Optional Argument.
+            Specifies the column name in the "data" to partition the input.
+            Types: str OR list of Strings (str)
+
+        ignoreinvalid_locationscale:
+            Optional Argument.
+            Specifies whether to ignore invalid values of location and scale parameters.
+            Default Value: False
+            Types: bool
+
+        unused_attributes:
+            Optional Argument.
+            Specifies whether to emit out unused attributes of different partitions
+            as unscaled values or NULLs (for dense input).
+            Permitted Values: 'NULLIFY', 'UNSCALED'
+            Default Value: 'UNSCALED'
+            Types: str
+
+        attribute_name_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute names.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute values.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        target_attributes:
+            Optional Argument.
+            Specifies the attributes for which scaling should be performed.
+            Note:
+                * This is required for sparse input.
+            Types: str OR list of Strings (str)
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept.
             Below are the generic keyword arguments:
@@ -174,9 +235,16 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
 
         # Load the example data.
         load_example_data("teradataml", ["scale_housing"])
+        load_example_data('scale', ["scale_attributes", "scale_parameters",
+                                    "scale_input_partitioned", "scale_input_sparse", "scale_input_part_sparse"])
 
         # Create teradataml DataFrame.
         scaling_house = DataFrame.from_table("scale_housing")
+        scale_attribute = DataFrame.from_table("scale_attributes")
+        scale_parameter = DataFrame.from_table("scale_parameters")
+        scale_inp_part = DataFrame.from_table("scale_input_partitioned")
+        scale_inp_sparse = DataFrame.from_table("scale_input_sparse")
+        scale_inp_part_sparse = DataFrame.from_table("scale_input_part_sparse")
 
         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -194,4 +262,54 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
         # Print the result DataFrame.
         print(fit_obj.output)
         print(fit_obj.output_data)
+
+        # Example 2: Create statistics to scale "fare" and "age" columns
+        # with respect to maximum absolute value with partition column
+        # for dense input.
+        fit_obj = ScaleFit(data=scale_inp_part,
+                           attribute_data=scale_attribute,
+                           parameter_data=scale_parameter,
+                           target_columns=['fare', 'age'],
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
+
+        # Example 3: Create statistics to scale "fare" column with respect to
+        # range for sparse input.
+        fit_obj = ScaleFit(data=scale_inp_sparse,
+                           target_attribute=['fare'],
+                           scale_method="range",
+                           miss_value="keep",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
+
+        # Example 4: Create statistics to scale "fare" column with respect to
+        # maximum absolute value for sparse input with partition column.
+        fit_obj = ScaleFit(data=scale_inp_part_sparse,
+                           parameter_data=scale_parameter,
+                           attribute_data=scale_attribute,
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value',
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
     """
teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py (+93 -1)

@@ -1,4 +1,5 @@
-def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments):
+def ScaleTransform(data=None, object=None, accumulate=None, attribute_name_column=None,
+                   attribute_value_column=None, **generic_arguments):
     """
     DESCRIPTION:
         ScaleTransform() function scales specified columns in input data, using ScaleFit() function output.
@@ -21,6 +22,20 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)
             Specifies the names of input teradataml DataFrame columns to copy to the output.
             Types: str OR list of Strings (str)
 
+        attribute_name_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute names.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute values.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept.
             Below are the generic keyword arguments:
@@ -70,9 +85,16 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)
 
         # Load the example data.
         load_example_data("teradataml", ["scale_housing"])
+        load_example_data('scale', ["scale_attributes", "scale_parameters",
+                                    "scale_input_partitioned", "scale_input_sparse", "scale_input_part_sparse"])
 
         # Create teradataml DataFrame.
         scaling_house = DataFrame.from_table("scale_housing")
+        scale_attribute = DataFrame.from_table("scale_attributes")
+        scale_parameter = DataFrame.from_table("scale_parameters")
+        scale_inp_part = DataFrame.from_table("scale_input_partitioned")
+        scale_inp_sparse = DataFrame.from_table("scale_input_sparse")
+        scale_inp_part_sparse = DataFrame.from_table("scale_input_part_sparse")
 
         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -107,4 +129,74 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)
 
         # Print the result DataFrame.
         print(obj1.result)
+
+        # Example 3: Create statistics to scale "fare" and "age" columns with respect to
+        # maximum absolute value for partitioned input.
+        fit_obj = ScaleFit(data=scale_inp_part,
+                           attribute_data=scale_attribute,
+                           parameter_data=scale_parameter,
+                           target_columns=['fare', 'age'],
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        obj = ScaleTransform(data=scale_inp_part,
+                             object=fit_obj.output,
+                             accumulate=['pid', 'passenger'],
+                             data_partition_column='pid',
+                             object_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
+        # Example 4: Create statistics to scale "fare" column with respect to
+        # range for sparse input.
+        fit_obj = ScaleFit(data=scale_inp_sparse,
+                           target_attribute=['fare'],
+                           scale_method="range",
+                           miss_value="keep",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value')
+
+        obj = ScaleTransform(data=scale_inp_sparse,
+                             object=fit_obj.output,
+                             accumulate=['passenger'],
+                             attribute_name_column='attribute_column',
+                             attribute_value_column='attribute_value')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
+        # Example 5: Create statistics to scale "fare" column with respect to
+        # maximum absolute value for sparse input with partition column.
+        fit_obj = ScaleFit(data=scale_inp_part_sparse,
+                           parameter_data=scale_parameter,
+                           attribute_data=scale_attribute,
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value',
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        obj = ScaleTransform(data=scale_inp_part_sparse,
+                             object=fit_obj.output,
+                             accumulate=["passenger", 'pid'],
+                             attribute_name_column='attribute_column',
+                             attribute_value_column='attribute_value',
+                             object_partition_column='pid',
+                             data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
     """
teradataml/data/docs/sqle/docs_17_20/Shap.py (new file, +197 -0)

@@ -0,0 +1,197 @@
+def Shap(data=None, object=None, training_function="TD_GLM",
+         model_type="Regression", input_columns=None, detailed=False,
+         accumulate=None, num_parallel_trees=1000, num_boost_rounds=10,
+         **generic_arguments):
+
+    """
+    DESCRIPTION:
+        Function to get explanation for individual predictions
+        (feature contributions) in a machine learning model based on the
+        co-operative game theory optimal Shapley values.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        object:
+            Required Argument.
+            Specifies the teradataml DataFrame containing the model data.
+            Types: teradataml DataFrame
+
+        training_function:
+            Required Argument.
+            Specifies the model type name.
+            Default Value: "TD_GLM"
+            Permitted Values: TD_GLM, TD_DECISIONFOREST, TD_XGBOOST
+            Types: str
+
+        model_type:
+            Required Argument.
+            Specifies the operation to be performed on input data.
+            Default Value: "Regression"
+            Permitted Values: Regression, Classification
+            Types: str
+
+        input_columns:
+            Required Argument.
+            Specifies the names of the columns in "data" used for
+            training the model (predictors, features or independent variables).
+            Types: str OR list of Strings (str)
+
+        detailed:
+            Optional Argument.
+            Specifies whether to output detailed shap information about the
+            forest trees.
+            Default Value: False
+            Types: bool
+
+        accumulate:
+            Optional Argument.
+            Specifies the names of the input columns to copy to the output teradataml DataFrame.
+            Types: str OR list of Strings (str)
+
+        num_parallel_trees:
+            Optional Argument.
+            Specifies the number of parallel boosted trees. Each boosted tree
+            operates on a sample of data that fits in an AMP's memory.
+            Note:
+                * By default, "num_parallel_trees" is chosen equal to the number of AMPs with
+                  data.
+            Default Value: 1000
+            Types: int
+
+        num_boost_rounds:
+            Optional Argument.
+            Specifies the number of iterations to boost the weak classifiers. The
+            iterations must be an int in the range [1, 100000].
+            Default Value: 10
+            Types: int
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Shap.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as ShapObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            1. output
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage, before importing the
+        #        function in user space.
+        #     2. User can import the function, if it is available on
+        #        Vantage user is connected to.
+        #     3. To check the list of analytic functions available on
+        #        Vantage user connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("byom", "iris_input")
+        load_example_data("teradataml", ["cal_housing_ex_raw"])
+
+        # Create teradataml DataFrame objects.
+        iris_input = DataFrame("iris_input")
+        data_input = DataFrame.from_table("cal_housing_ex_raw")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Shap.
+        from teradataml import Shap, XGBoost, DecisionForest, SVM
+
+        # Example 1: Shap for classification model.
+        XGBoost_out = XGBoost(data=iris_input,
+                              input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                              response_column='species',
+                              model_type='Classification',
+                              iter_num=25)
+
+        Shap_out = Shap(data=iris_input,
+                        object=XGBoost_out.result,
+                        id_column='id',
+                        training_function="TD_XGBOOST",
+                        model_type="Classification",
+                        input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                        detailed=True)
+
+        # Print the result DataFrame.
+        print(Shap_out.output_data)
+
+        # Example 2: Shap for regression model.
+        from teradataml import ScaleFit, ScaleTransform
+
+        # Scale "target_columns" with respect to 'STD' value of the column.
+        fit_obj = ScaleFit(data=data_input,
+                           target_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                           'AveBedrms', 'Population', 'AveOccup',
+                                           'Latitude', 'Longitude'],
+                           scale_method="STD")
+
+        # Transform the data.
+        transform_obj = ScaleTransform(data=data_input,
+                                       object=fit_obj.output,
+                                       accumulate=["id", "MedHouseVal"])
+
+        decision_forest_out = DecisionForest(data=transform_obj.result,
+                                             input_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                                            'AveBedrms', 'Population', 'AveOccup',
+                                                            'Latitude', 'Longitude'],
+                                             response_column="MedHouseVal",
+                                             model_type="Regression",
+                                             max_depth=10)
+
+        Shap_out2 = Shap(data=transform_obj.result,
+                         object=decision_forest_out.result,
+                         id_column='id',
+                         training_function="TD_DECISIONFOREST",
+                         model_type="Regression",
+                         input_columns=['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms',
+                                        'Population', 'AveOccup', 'Latitude', 'Longitude'],
+                         detailed=True)
+
+        # Print the result DataFrame.
+        print(Shap_out2.output_data)
+    """
teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py (+163 -1)

@@ -1,5 +1,6 @@
 def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, output_prob=False,
-                 output_responses=None, **generic_arguments):
+                 output_responses=None, partition_column=None, family="GAUSSIAN",
+                 **generic_arguments):
     """
     DESCRIPTION:
         The TDGLMPredict() function predicts target values (regression) and class labels
@@ -57,6 +58,18 @@ def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, out
             Note:
                 Only applicable if "output_prob" is True.
             Types: str OR list of strs
+
+        partition_column:
+            Optional Argument.
+            Specifies the column names of "data" on which to partition the input.
+            Types: str OR list of Strings (str)
+
+        family:
+            Optional Argument.
+            Specifies the distribution exponential family.
+            Permitted Values: 'GAUSSIAN', 'BINOMIAL'
+            Default Value: 'GAUSSIAN'
+            Types: str
 
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
@@ -168,4 +181,153 @@ def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, out
 
         # Print the result DataFrame.
         print(TDGLMPredict_out1.result)
+
+        # Example 3: TDGLMPredict() predicts the 'medv' using the regression model generated
+        # by GLM with the stepwise regression algorithm.
+        # This example uses the boston dataset and scales the data.
+        # Scaled data is used as input data to generate the GLM model and predict the target values.
+
+        # Load the example data.
+        load_example_data("decisionforest", ["boston"])
+        load_example_data('glm', ['housing_train_segment', 'housing_train_parameter', 'housing_train_attribute'])
+
+        # Create teradataml DataFrame objects.
+        boston_df = DataFrame('boston')
+        housing_seg = DataFrame('housing_train_segment')
+        housing_parameter = DataFrame('housing_train_parameter')
+        housing_attribute = DataFrame('housing_train_attribute')
+
+        # Scale the data.
+        # Scale "target_columns" with respect to 'STD' value of the column.
+        fit_obj = ScaleFit(data=boston_df,
+                           target_columns=['crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age',
+                                           'dis', 'rad', 'tax', 'ptratio', 'black', 'lstat'],
+                           scale_method="STD")
+
+        # Scale values specified in the input data using the fit data generated by the ScaleFit() function above.
+        obj = ScaleTransform(object=fit_obj.output,
+                             data=boston_df,
+                             accumulate=["id", "medv"])
+
+        boston = obj.result
+
+        # Generate generalized linear model (GLM) using the stepwise regression algorithm.
+        glm_1 = GLM(data=boston,
+                    input_columns=['indus', 'chas', 'nox', 'rm'],
+                    response_column='medv',
+                    family='GAUSSIAN',
+                    lambda1=0.02,
+                    alpha=0.33,
+                    batch_size=10,
+                    learning_rate='optimal',
+                    iter_max=36,
+                    iter_num_no_change=100,
+                    tolerance=0.0001,
+                    initial_eta=0.02,
+                    stepwise_direction='backward',
+                    max_steps_num=10)
+
+        # Predict target values using the regression model generated by GLM and newdata.
+        res = TDGLMPredict(id_column="id",
+                           newdata=boston,
+                           object=glm_1,
+                           accumulate='medv')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 4: TDGLMPredict() predicts the 'medv' using the regression model generated
+        # by the GLM stepwise regression algorithm with initial_stepwise_columns.
+        glm_2 = GLM(data=boston,
+                    input_columns=['crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age',
+                                   'dis', 'rad', 'tax', 'ptratio', 'black', 'lstat'],
+                    response_column='medv',
+                    family='GAUSSIAN',
+                    lambda1=0.02,
+                    alpha=0.33,
+                    batch_size=10,
+                    learning_rate='optimal',
+                    iter_max=36,
+                    iter_num_no_change=100,
+                    tolerance=0.0001,
+                    initial_eta=0.02,
+                    stepwise_direction='bidirectional',
+                    max_steps_num=10,
+                    initial_stepwise_columns=['rad', 'tax'])
+
+        # Predict target values using the regression model generated by GLM and newdata.
+        res = TDGLMPredict(id_column="id",
+                           newdata=boston,
+                           object=glm_2,
+                           accumulate='medv')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 5: TDGLMPredict() predicts the 'price' using the regression model generated
+        # by GLM using partition by key.
+        glm_3 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom',
+                                   'fullbase', 'gashw', 'airco'],
+                    response_column='price',
+                    family='GAUSSIAN',
+                    batch_size=10,
+                    iter_max=1000,
+                    data_partition_column='partition_id')
+
+        # Predict target values using the regression model generated by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_3,
+                           accumulate='price',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 6: TDGLMPredict() predicts the 'price' using the regression model generated
+        # by GLM using partition by key with attribute data.
+        glm_4 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom',
+                                   'fullbase', 'gashw', 'airco'],
+                    response_column='price',
+                    family='GAUSSIAN',
+                    batch_size=10,
+                    iter_max=1000,
+                    data_partition_column='partition_id',
+                    attribute_data=housing_attribute,
+                    attribute_data_partition_column='partition_id')
+
+        # Predict target values using the regression model generated by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_4,
+                           accumulate='price',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 7: TDGLMPredict() predicts the 'homestyle' using the generalized linear model
+        # generated by GLM using partition by key with parameter data.
+        glm_5 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom',
+                                   'fullbase', 'gashw', 'airco'],
+                    response_column='homestyle',
+                    family='binomial',
+                    iter_max=1000,
+                    data_partition_column='partition_id',
+                    parameter_data=housing_parameter,
+                    parameter_data_partition_column='partition_id')
+
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_5,
+                           accumulate='homestyle',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
     """