teradataml 20.0.0.1__py3-none-any.whl → 20.0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of teradataml might be problematic.

Files changed (200)
  1. teradataml/LICENSE.pdf +0 -0
  2. teradataml/README.md +112 -0
  3. teradataml/__init__.py +6 -3
  4. teradataml/_version.py +1 -1
  5. teradataml/analytics/__init__.py +3 -2
  6. teradataml/analytics/analytic_function_executor.py +224 -16
  7. teradataml/analytics/analytic_query_generator.py +92 -0
  8. teradataml/analytics/byom/__init__.py +3 -2
  9. teradataml/analytics/json_parser/metadata.py +1 -0
  10. teradataml/analytics/json_parser/utils.py +6 -4
  11. teradataml/analytics/meta_class.py +40 -1
  12. teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
  13. teradataml/analytics/sqle/__init__.py +10 -2
  14. teradataml/analytics/table_operator/__init__.py +3 -2
  15. teradataml/analytics/uaf/__init__.py +21 -2
  16. teradataml/analytics/utils.py +62 -1
  17. teradataml/analytics/valib.py +1 -1
  18. teradataml/automl/__init__.py +1502 -323
  19. teradataml/automl/custom_json_utils.py +139 -61
  20. teradataml/automl/data_preparation.py +245 -306
  21. teradataml/automl/data_transformation.py +32 -12
  22. teradataml/automl/feature_engineering.py +313 -82
  23. teradataml/automl/model_evaluation.py +44 -35
  24. teradataml/automl/model_training.py +109 -146
  25. teradataml/catalog/byom.py +8 -8
  26. teradataml/clients/pkce_client.py +1 -1
  27. teradataml/common/constants.py +37 -0
  28. teradataml/common/deprecations.py +13 -7
  29. teradataml/common/garbagecollector.py +151 -120
  30. teradataml/common/messagecodes.py +4 -1
  31. teradataml/common/messages.py +2 -1
  32. teradataml/common/sqlbundle.py +1 -1
  33. teradataml/common/utils.py +97 -11
  34. teradataml/common/wrapper_utils.py +1 -1
  35. teradataml/context/context.py +72 -2
  36. teradataml/data/complaints_test_tokenized.csv +353 -0
  37. teradataml/data/complaints_tokens_model.csv +348 -0
  38. teradataml/data/covid_confirm_sd.csv +83 -0
  39. teradataml/data/dataframe_example.json +10 -0
  40. teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
  41. teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
  42. teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
  43. teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
  44. teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
  45. teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
  46. teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
  47. teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
  48. teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
  49. teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
  50. teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
  51. teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
  52. teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
  53. teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
  54. teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
  55. teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
  56. teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
  57. teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
  58. teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
  59. teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
  60. teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
  61. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
  62. teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
  63. teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
  64. teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
  65. teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
  66. teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
  67. teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
  68. teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
  69. teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
  70. teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
  71. teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
  72. teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
  73. teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
  74. teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
  75. teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
  76. teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
  77. teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
  78. teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
  79. teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
  80. teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
  81. teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
  82. teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
  83. teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
  84. teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
  85. teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
  86. teradataml/data/dwt2d_dataTable.csv +65 -0
  87. teradataml/data/dwt_dataTable.csv +8 -0
  88. teradataml/data/dwt_filterTable.csv +3 -0
  89. teradataml/data/finance_data4.csv +13 -0
  90. teradataml/data/grocery_transaction.csv +19 -0
  91. teradataml/data/idwt2d_dataTable.csv +5 -0
  92. teradataml/data/idwt_dataTable.csv +8 -0
  93. teradataml/data/idwt_filterTable.csv +3 -0
  94. teradataml/data/interval_data.csv +5 -0
  95. teradataml/data/jsons/paired_functions.json +14 -0
  96. teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
  97. teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
  98. teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
  99. teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
  100. teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
  101. teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
  102. teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
  103. teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
  104. teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
  105. teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
  106. teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
  107. teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
  108. teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
  109. teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
  110. teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
  111. teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
  112. teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
  113. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
  114. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
  115. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
  116. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
  117. teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
  118. teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
  119. teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
  120. teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
  121. teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
  122. teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
  123. teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
  124. teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
  125. teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
  126. teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
  127. teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
  128. teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
  129. teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
  130. teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
  131. teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
  132. teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
  133. teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
  134. teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
  135. teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
  136. teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
  137. teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
  138. teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
  139. teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
  140. teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
  141. teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
  142. teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
  143. teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
  144. teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
  145. teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
  146. teradataml/data/load_example_data.py +8 -2
  147. teradataml/data/naivebayestextclassifier_example.json +1 -1
  148. teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
  149. teradataml/data/peppers.png +0 -0
  150. teradataml/data/real_values.csv +14 -0
  151. teradataml/data/sax_example.json +8 -0
  152. teradataml/data/scripts/deploy_script.py +1 -1
  153. teradataml/data/scripts/sklearn/sklearn_fit.py +17 -10
  154. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +2 -2
  155. teradataml/data/scripts/sklearn/sklearn_function.template +30 -7
  156. teradataml/data/scripts/sklearn/sklearn_neighbors.py +1 -1
  157. teradataml/data/scripts/sklearn/sklearn_score.py +12 -3
  158. teradataml/data/scripts/sklearn/sklearn_transform.py +55 -4
  159. teradataml/data/star_pivot.csv +8 -0
  160. teradataml/data/templates/open_source_ml.json +2 -1
  161. teradataml/data/teradataml_example.json +20 -1
  162. teradataml/data/timestamp_data.csv +4 -0
  163. teradataml/data/titanic_dataset_unpivoted.csv +19 -0
  164. teradataml/data/uaf_example.json +55 -1
  165. teradataml/data/unpivot_example.json +15 -0
  166. teradataml/data/url_data.csv +9 -0
  167. teradataml/data/windowdfft.csv +16 -0
  168. teradataml/dataframe/copy_to.py +1 -1
  169. teradataml/dataframe/data_transfer.py +5 -3
  170. teradataml/dataframe/dataframe.py +474 -41
  171. teradataml/dataframe/fastload.py +3 -3
  172. teradataml/dataframe/functions.py +339 -0
  173. teradataml/dataframe/row.py +160 -0
  174. teradataml/dataframe/setop.py +2 -2
  175. teradataml/dataframe/sql.py +658 -20
  176. teradataml/dataframe/window.py +1 -1
  177. teradataml/dbutils/dbutils.py +322 -16
  178. teradataml/geospatial/geodataframe.py +1 -1
  179. teradataml/geospatial/geodataframecolumn.py +1 -1
  180. teradataml/hyperparameter_tuner/optimizer.py +13 -13
  181. teradataml/lib/aed_0_1.dll +0 -0
  182. teradataml/opensource/sklearn/_sklearn_wrapper.py +154 -69
  183. teradataml/options/__init__.py +3 -1
  184. teradataml/options/configure.py +14 -2
  185. teradataml/options/display.py +2 -2
  186. teradataml/plot/axis.py +4 -4
  187. teradataml/scriptmgmt/UserEnv.py +10 -6
  188. teradataml/scriptmgmt/lls_utils.py +3 -2
  189. teradataml/table_operators/Script.py +2 -2
  190. teradataml/table_operators/TableOperator.py +106 -20
  191. teradataml/table_operators/table_operator_util.py +88 -41
  192. teradataml/table_operators/templates/dataframe_udf.template +63 -0
  193. teradataml/telemetry_utils/__init__.py +0 -0
  194. teradataml/telemetry_utils/queryband.py +52 -0
  195. teradataml/utils/validators.py +1 -1
  196. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +115 -2
  197. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +200 -140
  198. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
  199. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
  200. {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
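
The release itself is a routine version bump: teradataml/_version.py (file 4 above) changes by a single line and the dist-info directory is renamed from 20.0.0.1 to 20.0.0.2. A minimal sketch for confirming which side of this diff is installed, assuming the package exposes __version__ the way a one-line _version.py conventionally wires it up (not confirmed by this diff):

    # Minimal sketch: confirm the installed teradataml build before relying on
    # functions new in 20.0.0.2 (NaiveBayes, Pivoting, Shap, and so on).
    import teradataml

    print(teradataml.__version__)  # expect '20.0.0.2' after upgrading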
@@ -0,0 +1,162 @@
+def NaiveBayes(data=None, response_column=None, numeric_inputs=None,
+               categorical_inputs=None, attribute_name_column=None,
+               attribute_value_column=None, attribute_type=None,
+               numeric_attributes=None, categorical_attributes=None,
+               **generic_arguments):
+    """
+    DESCRIPTION:
+        Function generates a classification model using the Naive Bayes
+        algorithm.
+        The Naive Bayes classification algorithm uses a training dataset with known discrete outcomes
+        and either discrete or continuous numeric input variables, along with categorical variables, to generate a model.
+        This model can then be used to predict the outcomes of future observations based on their input variable values.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        response_column:
+            Required Argument.
+            Specifies the name of the column in "data" containing response values.
+            Types: str
+
+        numeric_inputs:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing numeric attribute values.
+            Types: str OR list of Strings (str)
+
+        categorical_inputs:
+            Optional Argument.
+            Specifies the names of the columns in "data" containing categorical attribute values.
+            Types: str OR list of Strings (str)
+
+        attribute_name_column:
+            Optional Argument.
+            Specifies the name of the column in "data" containing attribute names.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the name of the column in "data" containing attribute values.
+            Types: str
+
+        attribute_type:
+            Optional Argument. Required if "data" is in sparse format and
+            neither "numeric_attributes" nor "categorical_attributes" is provided.
+            Specifies the attribute type.
+            Permitted Values:
+                * ALLNUMERIC - if all the attributes are of numeric type.
+                * ALLCATEGORICAL - if all the attributes are of categorical type.
+            Types: str
+
+        numeric_attributes:
+            Optional Argument.
+            Specifies the numeric attribute names.
+            Types: str OR list of Strings (str)
+
+        categorical_attributes:
+            Optional Argument.
+            Specifies the categorical attribute names.
+            Types: str OR list of Strings (str)
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts a teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports them, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of NaiveBayes.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as NaiveBayesObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage before importing the
+        #        function in user space.
+        #     2. User can import the function if it is available on the
+        #        Vantage system the user is connected to.
+        #     3. To check the list of analytic functions available on the
+        #        Vantage system the user is connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("decisionforestpredict", ["housing_train", "housing_test"])
+
+        # Create teradataml DataFrame objects.
+        housing_train = DataFrame.from_table("housing_train")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function NaiveBayes.
+        from teradataml import NaiveBayes
+
+        # Example 1: NaiveBayes function to generate a classification model using dense input.
+        NaiveBayes_out = NaiveBayes(data=housing_train, response_column='homestyle',
+                                    numeric_inputs=['price','lotsize','bedrooms','bathrms','stories','garagepl'],
+                                    categorical_inputs=['driveway','recroom','fullbase','gashw','airco','prefarea'])
+
+        # Print the result DataFrame.
+        print(NaiveBayes_out.result)
+
+        # Example 2: NaiveBayes function to generate a classification model using sparse input.
+
+        # Unpivot the data for sparse input to NaiveBayes.
+        upvt_data = Unpivoting(data=housing_train, id_column='sn',
+                               target_columns=['price','lotsize','bedrooms','bathrms','stories','garagepl','driveway',
+                                               'recroom','fullbase','gashw','airco','prefarea'],
+                               attribute_column="AttributeName", value_column="AttributeValue",
+                               accumulate='homestyle')
+
+        NaiveBayes_out = NaiveBayes(data=upvt_data.result,
+                                    response_column='homestyle',
+                                    attribute_name_column='AttributeName',
+                                    attribute_value_column='AttributeValue',
+                                    numeric_attributes=['price','lotsize','bedrooms','bathrms','stories','garagepl'],
+                                    categorical_attributes=['driveway','recroom','fullbase','gashw','airco','prefarea'])
+
+        # Print the result DataFrame.
+        print(NaiveBayes_out.result)
+    """
@@ -16,6 +16,8 @@ def OutlierFilterFit(data=None, target_columns=None, group_columns=None, lower_p
         * For information about PTCs, see Teradata Vantage™ - Analytics
           Database International Character Set Support.
         * This function does not support KanjiSJIS or Graphic data types.
+        * This function does not support "data_partition_column" and "data_order_column"
+          if the corresponding Vantage version is greater than or equal to 17.20.03.20.


     PARAMETERS:
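
Because the new OutlierFilterFit restriction is gated on the Vantage build, callers that pass "data_partition_column"/"data_order_column" may want to branch on the engine version first. A sketch, assuming an active teradataml connection; the DBC.DBCInfoV query is standard Teradata, while the helper around it is illustrative only:

    # Illustrative version gate for the note above: only pass the partition and
    # order columns when the Vantage version is below 17.20.03.20.
    from teradataml import execute_sql

    info = execute_sql(
        "SELECT InfoData FROM DBC.DBCInfoV WHERE InfoKey = 'VERSION'").fetchone()
    vantage_version = tuple(int(part) for part in info[0].split("."))

    extra_args = {}
    if vantage_version < (17, 20, 3, 20):
        extra_args["data_partition_column"] = "..."  # placeholder column names
        extra_args["data_order_column"] = "..."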
@@ -0,0 +1,279 @@
+def Pivoting(data=None, partition_columns=None, target_columns=None,
+             accumulate=None, rows_per_partition=None, pivot_column=None,
+             pivot_keys=None, pivot_keys_alias=None, default_pivot_values=None,
+             aggregation=None, delimiters=None, combined_column_sizes=None,
+             truncate_columns=None, output_column_names=None,
+             **generic_arguments):
+
+
+    """
+    DESCRIPTION:
+        Function pivots the data, that is, changes the data from
+        sparse format to dense format.
+        Notes:
+            * 'data_partition_column' is a required argument for partitioning the input data.
+            * Provide either the 'rows_per_partition', 'pivot_column', or 'aggregation' argument
+              along with the required arguments.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame to be pivoted.
+            Types: teradataml DataFrame
+
+        partition_columns:
+            Required Argument.
+            Specifies the name of the column(s) in "data" on which to partition the
+            input.
+            Types: str OR list of Strings (str)
+
+        target_columns:
+            Required Argument.
+            Specifies the name of the column(s) in "data" which contains the data for
+            pivoting.
+            Types: str OR list of Strings (str)
+
+        accumulate:
+            Optional Argument.
+            Specifies the name of the column(s) in "data" to copy to the output.
+            By default, the function copies no input table columns to the output.
+            Types: str OR list of Strings (str)
+
+        rows_per_partition:
+            Optional Argument.
+            Specifies the maximum number of rows in the partition.
+            Types: int
+
+        pivot_column:
+            Optional Argument.
+            Specifies the name of the column in "data" that contains the pivot keys.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str
+
+        pivot_keys:
+            Optional Argument.
+            Specifies the names of the pivot keys, if "pivot_column" is specified.
+            Notes:
+                * This argument is not needed when 'rows_per_partition' is provided.
+                * 'pivot_keys' are required when 'pivot_column' is specified.
+            Types: str OR list of Strings (str)
+
+        pivot_keys_alias:
+            Optional Argument.
+            Specifies the alias names of the pivot keys, if 'pivot_column' is specified.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str OR list of Strings (str)
+
+        default_pivot_values:
+            Optional Argument.
+            Specifies one default value for each pivot key. The nth
+            default pivot value applies to the nth pivot key.
+            Note:
+                * This argument is not needed when 'rows_per_partition' is provided.
+            Types: str OR list of Strings (str)
+
+        aggregation:
+            Optional Argument.
+            Specifies the aggregation for the target columns.
+            Provide a single value {CONCAT | UNIQUE_CONCAT | SUM |
+            MIN | MAX | AVG} which applies to all target columns, or
+            specify multiple values for multiple target columns in the
+            following format: ['ColumnName:{CONCAT|UNIQUE_CONCAT|SUM|MIN|MAX|AVG}', ...].
+            Types: str OR list of Strings (str)
+
+        delimiters:
+            Optional Argument.
+            Specifies the delimiter to be used for concatenating the values of a target column.
+            Provide a single delimiter value applicable to all target columns, or
+            specify multiple delimiter values for multiple target columns
+            in the following format: ['ColumnName:single_char', ...].
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: str OR list of Strings (str)
+
+        combined_column_sizes:
+            Optional Argument.
+            Specifies the maximum size of the concatenated string.
+            Provide a single integer value that applies to all target columns, or
+            specify multiple size values for multiple target columns
+            in the following format: ['ColumnName:size_value', ...].
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: int OR str OR list of Strings (str)
+
+        truncate_columns:
+            Optional Argument.
+            Specifies the columns from the target columns for which
+            to truncate the concatenated string if it exceeds the specified size.
+            Note:
+                * This argument is not needed when 'aggregation' is not specified.
+            Types: str OR list of Strings (str)
+
+        output_column_names:
+            Optional Argument.
+            Specifies the column names to be used for the output columns. The nth
+            column name value applies to the nth output column.
+            Types: str OR list of Strings (str)
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts a teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports them, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Pivoting.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as PivotingObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            result
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage before importing the
+        #        function in user space.
+        #     2. User can import the function if it is available on the
+        #        Vantage system the user is connected to.
+        #     3. To check the list of analytic functions available on the
+        #        Vantage system the user is connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data('unpivot', 'titanic_dataset_unpivoted')
+        load_example_data('unpivot', 'star_pivot')
+
+        # Create teradataml DataFrame objects.
+        titanic_unpvt = DataFrame.from_table('titanic_dataset_unpivoted')
+        star = DataFrame.from_table('star_pivot')
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Pivoting.
+        from teradataml import Pivoting
+
+        # Example 1: Pivot the input data using 'rows_per_partition'.
+        pvt1 = Pivoting(data=titanic_unpvt,
+                        partition_columns='passenger',
+                        target_columns='AttributeValue',
+                        accumulate='survived',
+                        rows_per_partition=2,
+                        data_partition_column='passenger',
+                        data_order_column='AttributeName')
+
+        # Print the result DataFrame.
+        print(pvt1.result)
+
+        # Example 2: Pivot the input data using 'pivot_column' and 'pivot_keys'.
+        pvt2 = Pivoting(data=titanic_unpvt,
+                        partition_columns='passenger',
+                        target_columns='AttributeValue',
+                        accumulate='survived',
+                        pivot_column='AttributeName',
+                        pivot_keys=['pclass','gender'],
+                        data_partition_column='passenger')
+
+        # Print the result DataFrame.
+        print(pvt2.result)
+
+        # Example 3: Pivot the input data with multiple target columns and
+        #            multiple aggregation functions.
+        pvt3 = Pivoting(data=star,
+                        partition_columns=['country', 'state'],
+                        target_columns=['sales', 'cogs', 'rating'],
+                        accumulate='yr',
+                        pivot_column='qtr',
+                        pivot_keys=['Q1','Q2','Q3'],
+                        aggregation=['sales:SUM','cogs:AVG','rating:CONCAT'],
+                        delimiters='|',
+                        combined_column_sizes=64001,
+                        data_partition_column=['country', 'state'],
+                        data_order_column=['qtr'])
+
+        # Print the result DataFrame.
+        print(pvt3.result)
+
+        # Example 4: Pivot the input data with multiple aggregation functions
+        #            and per-column combined column sizes.
+        pvt4 = Pivoting(data=star,
+                        partition_columns='country',
+                        target_columns=['sales', 'cogs', 'state','rating'],
+                        accumulate='yr',
+                        aggregation=['sales:SUM','cogs:AVG','state:UNIQUE_CONCAT','rating:CONCAT'],
+                        delimiters='|',
+                        combined_column_sizes=['state:5', 'rating:10'],
+                        data_partition_column='country',
+                        data_order_column='state')
+
+        # Print the result DataFrame.
+        print(pvt4.result)
+
+        # Example 5: Pivot the input data with truncate columns.
+        pvt5 = Pivoting(data=star,
+                        partition_columns=['state'],
+                        target_columns=['country', 'rating'],
+                        accumulate='yr',
+                        pivot_column='qtr',
+                        pivot_keys=['Q1','Q2','Q3'],
+                        aggregation='CONCAT',
+                        combined_column_sizes=10,
+                        truncate_columns='country',
+                        data_partition_column='qtr',
+                        data_order_column='state')
+
+        # Print the result DataFrame.
+        print(pvt5.result)
+
+        # Example 6: Pivot the input data with output column names.
+        pvt6 = Pivoting(data=star,
+                        partition_columns=['country','state'],
+                        target_columns=['sales', 'cogs', 'rating'],
+                        accumulate='yr',
+                        rows_per_partition=3,
+                        output_column_names=['sales_q1','sales_q2','sales_q3','cogs_q1','cogs_q2',
+                                             'cogs_q3','rating_q1','rating_q2','rating_q3'],
+                        data_partition_column='qtr',
+                        data_order_column=['country','state'])
+
+        # Print the result DataFrame.
+        print(pvt6.result)
+    """
@@ -0,0 +1,197 @@
+def Shap(data=None, object=None, training_function="TD_GLM",
+         model_type="Regression", input_columns=None, detailed=False,
+         accumulate=None, num_parallel_trees=1000, num_boost_rounds=10,
+         **generic_arguments):
+
+    """
+    DESCRIPTION:
+        Function to get an explanation for individual predictions
+        (feature contributions) in a machine learning model, based on the
+        cooperative game theory optimal Shapley values.
+
+    PARAMETERS:
+        data:
+            Required Argument.
+            Specifies the input teradataml DataFrame.
+            Types: teradataml DataFrame
+
+        object:
+            Required Argument.
+            Specifies the teradataml DataFrame containing the model data.
+            Types: teradataml DataFrame
+
+        training_function:
+            Required Argument.
+            Specifies the model type name.
+            Default Value: "TD_GLM"
+            Permitted Values: TD_GLM, TD_DECISIONFOREST, TD_XGBOOST
+            Types: str
+
+        model_type:
+            Required Argument.
+            Specifies the operation to be performed on the input data.
+            Default Value: "Regression"
+            Permitted Values: Regression, Classification
+            Types: str
+
+        input_columns:
+            Required Argument.
+            Specifies the names of the columns in "data" used for
+            training the model (predictors, features or independent variables).
+            Types: str OR list of Strings (str)
+
+        detailed:
+            Optional Argument.
+            Specifies whether to output detailed SHAP information about the
+            forest trees.
+            Default Value: False
+            Types: bool
+
+        accumulate:
+            Optional Argument.
+            Specifies the names of the input columns to copy to the output teradataml DataFrame.
+            Types: str OR list of Strings (str)
+
+        num_parallel_trees:
+            Optional Argument.
+            Specifies the number of parallel boosted trees. Each boosted tree
+            operates on a sample of data that fits in an AMP's memory.
+            Note:
+                * By default, "num_parallel_trees" is chosen equal to the number of AMPs with
+                  data.
+            Default Value: 1000
+            Types: int
+
+        num_boost_rounds:
+            Optional Argument.
+            Specifies the number of iterations to boost the weak classifiers. The
+            number of iterations must be an int in the range [1, 100000].
+            Default Value: 10
+            Types: int
+
+        **generic_arguments:
+            Specifies the generic keyword arguments SQLE functions accept. Below
+            are the generic keyword arguments:
+            persist:
+                Optional Argument.
+                Specifies whether to persist the results of the
+                function in a table or not. When set to True,
+                results are persisted in a table; otherwise,
+                results are garbage collected at the end of the
+                session.
+                Default Value: False
+                Types: bool
+
+            volatile:
+                Optional Argument.
+                Specifies whether to put the results of the
+                function in a volatile table or not. When set to
+                True, results are stored in a volatile table,
+                otherwise not.
+                Default Value: False
+                Types: bool
+
+            Function allows the user to partition, hash, order or local
+            order the input data. These generic arguments are available
+            for each argument that accepts a teradataml DataFrame as
+            input and can be accessed as:
+                * "<input_data_arg_name>_partition_column" accepts str or
+                  list of str (Strings)
+                * "<input_data_arg_name>_hash_column" accepts str or list
+                  of str (Strings)
+                * "<input_data_arg_name>_order_column" accepts str or list
+                  of str (Strings)
+                * "local_order_<input_data_arg_name>" accepts boolean
+            Note:
+                These generic arguments are supported by teradataml if
+                the underlying SQL Engine function supports them, else an
+                exception is raised.
+
+    RETURNS:
+        Instance of Shap.
+        Output teradataml DataFrames can be accessed using attribute
+        references, such as ShapObj.<attribute_name>.
+        Output teradataml DataFrame attribute name is:
+            1. output
+
+
+    RAISES:
+        TeradataMlException, TypeError, ValueError
+
+
+    EXAMPLES:
+        # Notes:
+        #     1. Get the connection to Vantage before importing the
+        #        function in user space.
+        #     2. User can import the function if it is available on the
+        #        Vantage system the user is connected to.
+        #     3. To check the list of analytic functions available on the
+        #        Vantage system the user is connected to, use
+        #        "display_analytic_functions()".
+
+        # Load the example data.
+        load_example_data("byom", "iris_input")
+        load_example_data("teradataml", ["cal_housing_ex_raw"])
+
+        # Create teradataml DataFrame objects.
+        iris_input = DataFrame("iris_input")
+        data_input = DataFrame.from_table("cal_housing_ex_raw")
+
+        # Check the list of available analytic functions.
+        display_analytic_functions()
+
+        # Import function Shap.
+        from teradataml import Shap, XGBoost, DecisionForest, SVM
+
+        # Example 1: Shap for a classification model.
+        XGBoost_out = XGBoost(data=iris_input,
+                              input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                              response_column='species',
+                              model_type='Classification',
+                              iter_num=25)
+
+        Shap_out = Shap(data=iris_input,
+                        object=XGBoost_out.result,
+                        id_column='id',
+                        training_function="TD_XGBOOST",
+                        model_type="Classification",
+                        input_columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'],
+                        detailed=True)
+        # Print the result DataFrame.
+        print(Shap_out.output_data)
+
+        # Example 2: Shap for a regression model.
+
+        from teradataml import ScaleFit, ScaleTransform
+
+        # Scale "target_columns" with respect to the 'STD' value of the column.
+        fit_obj = ScaleFit(data=data_input,
+                           target_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                           'AveBedrms', 'Population', 'AveOccup',
+                                           'Latitude', 'Longitude'],
+                           scale_method="STD")
+
+        # Transform the data.
+        transform_obj = ScaleTransform(data=data_input,
+                                       object=fit_obj.output,
+                                       accumulate=["id", "MedHouseVal"])
+
+        decision_forest_out = DecisionForest(data=transform_obj.result,
+                                             input_columns=['MedInc', 'HouseAge', 'AveRooms',
+                                                            'AveBedrms', 'Population', 'AveOccup',
+                                                            'Latitude', 'Longitude'],
+                                             response_column="MedHouseVal",
+                                             model_type="Regression",
+                                             max_depth=10)

+        Shap_out2 = Shap(data=transform_obj.result,
+                         object=decision_forest_out.result,
+                         id_column='id',
+                         training_function="TD_DECISIONFOREST",
+                         model_type="Regression",
+                         input_columns=['MedInc', 'HouseAge', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'Latitude', 'Longitude'],
+                         detailed=True)
+
+        # Print the result DataFrame.
+        print(Shap_out2.output_data)
+    """