teradataml 20.0.0.0__py3-none-any.whl → 20.0.0.1__py3-none-any.whl
This diff compares publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of teradataml has been flagged as potentially problematic.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +71 -0
- teradataml/_version.py +2 -2
- teradataml/analytics/analytic_function_executor.py +51 -24
- teradataml/analytics/json_parser/utils.py +11 -17
- teradataml/automl/__init__.py +103 -48
- teradataml/automl/data_preparation.py +55 -37
- teradataml/automl/data_transformation.py +131 -69
- teradataml/automl/feature_engineering.py +117 -185
- teradataml/automl/feature_exploration.py +9 -2
- teradataml/automl/model_evaluation.py +13 -25
- teradataml/automl/model_training.py +214 -75
- teradataml/catalog/model_cataloging_utils.py +1 -1
- teradataml/clients/auth_client.py +133 -0
- teradataml/common/aed_utils.py +3 -2
- teradataml/common/constants.py +11 -6
- teradataml/common/garbagecollector.py +5 -0
- teradataml/common/messagecodes.py +3 -1
- teradataml/common/messages.py +2 -1
- teradataml/common/utils.py +6 -0
- teradataml/context/context.py +49 -29
- teradataml/data/advertising.csv +201 -0
- teradataml/data/bank_marketing.csv +11163 -0
- teradataml/data/bike_sharing.csv +732 -0
- teradataml/data/boston2cols.csv +721 -0
- teradataml/data/breast_cancer.csv +570 -0
- teradataml/data/customer_segmentation_test.csv +2628 -0
- teradataml/data/customer_segmentation_train.csv +8069 -0
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +3 -1
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +5 -1
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +61 -1
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/FTest.py +105 -26
- teradataml/data/docs/sqle/docs_17_20/GLM.py +162 -1
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +119 -1
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +93 -1
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +163 -1
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +12 -4
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +7 -1
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +72 -7
- teradataml/data/glm_example.json +28 -1
- teradataml/data/housing_train_segment.csv +201 -0
- teradataml/data/insect2Cols.csv +61 -0
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +99 -27
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +166 -83
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +90 -14
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +48 -5
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +5 -3
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +31 -11
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +3 -2
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +2 -1
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +16 -16
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +19 -1
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +168 -15
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +50 -1
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +25 -7
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +17 -4
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +157 -80
- teradataml/data/kmeans_example.json +5 -0
- teradataml/data/kmeans_table.csv +10 -0
- teradataml/data/onehot_encoder_train.csv +4 -0
- teradataml/data/openml_example.json +29 -0
- teradataml/data/scale_attributes.csv +3 -0
- teradataml/data/scale_example.json +52 -1
- teradataml/data/scale_input_part_sparse.csv +31 -0
- teradataml/data/scale_input_partitioned.csv +16 -0
- teradataml/data/scale_input_sparse.csv +11 -0
- teradataml/data/scale_parameters.csv +3 -0
- teradataml/data/scripts/deploy_script.py +20 -1
- teradataml/data/scripts/sklearn/sklearn_fit.py +23 -27
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +20 -28
- teradataml/data/scripts/sklearn/sklearn_function.template +13 -18
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +23 -33
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +18 -27
- teradataml/data/scripts/sklearn/sklearn_score.py +20 -29
- teradataml/data/scripts/sklearn/sklearn_transform.py +30 -38
- teradataml/data/teradataml_example.json +77 -0
- teradataml/data/ztest_example.json +16 -0
- teradataml/dataframe/copy_to.py +8 -3
- teradataml/dataframe/data_transfer.py +120 -61
- teradataml/dataframe/dataframe.py +102 -17
- teradataml/dataframe/dataframe_utils.py +47 -9
- teradataml/dataframe/fastload.py +272 -89
- teradataml/dataframe/sql.py +84 -0
- teradataml/dbutils/dbutils.py +2 -2
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +102 -55
- teradataml/options/__init__.py +13 -4
- teradataml/options/configure.py +27 -6
- teradataml/scriptmgmt/UserEnv.py +19 -16
- teradataml/scriptmgmt/lls_utils.py +117 -14
- teradataml/table_operators/Script.py +2 -3
- teradataml/table_operators/TableOperator.py +58 -10
- teradataml/utils/validators.py +40 -2
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.1.dist-info}/METADATA +78 -6
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.1.dist-info}/RECORD +108 -90
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.1.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.1.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.1.dist-info}/zip-safe +0 -0
teradataml/data/docs/sqle/docs_17_20/ROC.py

@@ -1,4 +1,4 @@
-def ROC(data=None, probability_column=None, observation_column=None, model_id_column=None,
+def ROC(data=None, probability_column=None, observation_column=None, model_id_column=None, positive_class='1',
         num_thresholds=50, auc=True, gini=True, **generic_arguments):
     """
     DESCRIPTION:
@@ -48,8 +48,9 @@ def ROC(data=None, probability_column=None, observation_column=None, model_id_co
             Types: str

         positive_class:
-
+            Optional Argument.
             Specifies the label of the positive class.
+            Default Value: '1'
             Types: str

         num_thresholds:
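
The hunks above show only the changed docstring lines. For orientation, here is a minimal sketch of a ROC() call that exercises the new "positive_class" argument; the table and column names are illustrative assumptions, not values taken from the diff.

    from teradataml import DataFrame, ROC

    # Hypothetical scored table holding a predicted probability and an
    # observed label per row (names are assumptions for illustration).
    predictions = DataFrame.from_table("model_predictions")

    roc_out = ROC(data=predictions,
                  probability_column="prob",    # assumed column name
                  observation_column="label",   # assumed column name
                  positive_class="1",           # new in 20.0.0.1; defaults to '1'
                  num_thresholds=50,
                  auc=True,
                  gini=True)

    # Print the result DataFrame.
    print(roc_out.result)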

teradataml/data/docs/sqle/docs_17_20/SVMPredict.py

@@ -1,5 +1,6 @@
 def SVMPredict(object=None, newdata=None, id_column=None, accumulate=None,
-               output_prob=False, output_responses=None, **generic_arguments):
+               output_prob=False, output_responses=None, model_type='Classification',
+               **generic_arguments):
     """
     DESCRIPTION:
         The SVMPredict() function uses the model generated by the function SVM() to
@@ -57,6 +58,15 @@ def SVMPredict(object=None, newdata=None, id_column=None, accumulate=None,
             Note:
                 Only applicable when "output_prob" is 'True'.
             Types: str OR list of strs
+
+        model_type:
+            Optional Argument.
+            Specifies the type of the analysis.
+            Note:
+                * Required for Regression problem.
+            Permitted Values: 'Classification', 'Regression'
+            Default Value: 'Classification'
+            Types: str

         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
@@ -155,7 +165,8 @@ def SVMPredict(object=None, newdata=None, id_column=None, accumulate=None,
         SVMPredict_out1 = SVMPredict(newdata=transform_obj.result,
                                      object=svm_obj1.result,
                                      id_column="id",
-                                     accumulate="MedHouseVal"
+                                     accumulate="MedHouseVal",
+                                     model_type="Regression"
                                      )

         # Print the result DataFrame.
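
Since the example hunk shows only the changed call, here is a consolidated sketch of the regression-mode prediction it builds toward. The objects svm_obj1 and transform_obj come from earlier, unshown lines of that docstring example (an SVM() fit and a ScaleTransform() of the housing data), so treat this as an assumption-laden outline rather than a verbatim excerpt.

    from teradataml import SVMPredict

    # Regression scoring: "model_type" must be set explicitly because the
    # default is 'Classification' (see the new docstring text above).
    SVMPredict_out1 = SVMPredict(newdata=transform_obj.result,  # from earlier steps
                                 object=svm_obj1.result,        # from earlier steps
                                 id_column="id",
                                 accumulate="MedHouseVal",
                                 model_type="Regression")

    print(SVMPredict_out1.result)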

teradataml/data/docs/sqle/docs_17_20/ScaleFit.py

@@ -1,5 +1,9 @@
 def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP",
-             global_scale=False, multiplier='1', intercept='0', **generic_arguments):
+             global_scale=False, multiplier='1', intercept='0',
+             parameter_data=None, attribute_data=None, partition_columns=None,
+             ignoreinvalid_locationscale=False, unused_attributes="UNSCALED",
+             attribute_name_column=None, attribute_value_column=None, target_attributes=None,
+             **generic_arguments):
     """
     DESCRIPTION:
         ScaleFit() function outputs statistics to input to ScaleTransform() function,
@@ -15,6 +19,9 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
             Required Argument.
             Specifies the input teradataml DataFrame column(s) for which to output statistics.
             The columns must contain numeric data in the range (-1e³⁰⁸, 1e³⁰⁸).
+            Note:
+                * This argument cannot be used with "target_attributes", "attribute_name_column",
+                  "attribute_value_column".
             Types: str OR list of Strings (str)

         scale_method:
@@ -124,6 +131,60 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
             Default Value: "0"
             Types: str OR list of String (str)

+        parameter_data:
+            Optional Argument.
+            Specifies the input teradataml DataFrame containing the parameters.
+            Note:
+                * This is valid when "data_partition_column" is used.
+            Types: teradataml DataFrame
+
+        attribute_data:
+            Optional Argument.
+            Specifies the input teradataml DataFrame containing the attributes.
+            Note:
+                * This is valid when "data_partition_column" is used.
+            Types: teradataml DataFrame
+
+        partition_columns:
+            Optional Argument.
+            Specifies the column name in the "data" to partition the input.
+            Types: str OR list of Strings (str)
+
+        ignoreinvalid_locationscale:
+            Optional Argument.
+            Specifies whether to ignore invalid values of location and scale parameters.
+            Default Value: False
+            Types: bool
+
+        unused_attributes:
+            Optional Argument.
+            Specifies whether to emit out unused attributes of different partitions
+            as unscaled values or NULLs (for dense input).
+            Permitted Values: 'NULLIFY', 'UNSCALED'
+            Default Value: 'UNSCALED'
+            Types: str
+
+        attribute_name_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute names.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute values.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        target_attributes:
+            Optional Argument.
+            Specifies the attributes for which scaling should be performed.
+            Note:
+                * This is required for sparse input.
+            Types: str OR list of Strings (str)
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept.
             Below are the generic keyword arguments:
@@ -174,9 +235,16 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP

         # Load the example data.
         load_example_data("teradataml", ["scale_housing"])
+        load_example_data('scale', ["scale_attributes", "scale_parameters",
+                                    "scale_input_partitioned", "scale_input_sparse","scale_input_part_sparse"])

         # Create teradataml DataFrame.
         scaling_house = DataFrame.from_table("scale_housing")
+        scale_attribute = DataFrame.from_table("scale_attributes")
+        scale_parameter = DataFrame.from_table("scale_parameters")
+        scale_inp_part = DataFrame.from_table("scale_input_partitioned")
+        scale_inp_sparse = DataFrame.from_table("scale_input_sparse")
+        scale_inp_part_sparse = DataFrame.from_table("scale_input_part_sparse")

         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -194,4 +262,54 @@ def ScaleFit(data=None, target_columns=None, scale_method=None, miss_value="KEEP
         # Print the result DataFrame.
         print(fit_obj.output)
         print(fit_obj.output_data)
+
+        # Example 2: Create statistics to scale "fare" and "age" columns
+        # with respect to maximum absolute value with partition column
+        # for dense input.
+        fit_obj = ScaleFit(data=scale_inp_part,
+                           attribute_data=scale_attribute,
+                           parameter_data=scale_parameter,
+                           target_columns=['fare', 'age'],
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
+
+        # Example 3: Create statistics to scale "fare" column with respect to
+        # range for sparse input.
+        fit_obj = ScaleFit(data=scale_inp_sparse,
+                           target_attribute=['fare'],
+                           scale_method="range",
+                           miss_value="keep",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
+
+        # Example 4: Create statistics to scale "fare" column with respect to
+        # maximum absolute value for sparse input with partition column.
+        fit_obj = ScaleFit(data=scale_inp_part_sparse,
+                           parameter_data=scale_parameter,
+                           attribute_data=scale_attribute,
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value',
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(fit_obj.output)
+        print(fit_obj.output_data)
     """

teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py

@@ -1,4 +1,5 @@
-def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments):
+def ScaleTransform(data=None, object=None, accumulate=None, attribute_name_column=None,
+                   attribute_value_column=None, **generic_arguments):
     """
     DESCRIPTION:
         ScaleTransform() function scales specified columns in input data, using ScaleFit() function output.
@@ -21,6 +22,20 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)
             Specifies the names of input teradataml DataFrame columns to copy to the output.
             Types: str OR list of Strings (str)

+        attribute_name_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute names.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
+        attribute_value_column:
+            Optional Argument.
+            Specifies the column name in the "attribute_data" which contains attribute values.
+            Note:
+                * This is required for sparse input.
+            Types: str
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept.
             Below are the generic keyword arguments:
@@ -70,9 +85,16 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)

         # Load the example data.
         load_example_data("teradataml", ["scale_housing"])
+        load_example_data('scale', ["scale_attributes", "scale_parameters",
+                                    "scale_input_partitioned", "scale_input_sparse","scale_input_part_sparse"])

         # Create teradataml DataFrame.
         scaling_house = DataFrame.from_table("scale_housing")
+        scale_attribute = DataFrame.from_table("scale_attributes")
+        scale_parameter = DataFrame.from_table("scale_parameters")
+        scale_inp_part = DataFrame.from_table("scale_input_partitioned")
+        scale_inp_sparse = DataFrame.from_table("scale_input_sparse")
+        scale_inp_part_sparse = DataFrame.from_table("scale_input_part_sparse")

         # Check the list of available analytic functions.
         display_analytic_functions()
@@ -107,4 +129,74 @@ def ScaleTransform(data=None, object=None, accumulate=None, **generic_arguments)

         # Print the result DataFrame.
         print(obj1.result)
+
+        # Example 3: Create statistics to scale "fare" and "age" columns with respect to
+        # maximum absolute value for partitioned input.
+        fit_obj = ScaleFit(data=scale_inp_part,
+                           attribute_data=scale_attribute,
+                           parameter_data=scale_parameter,
+                           target_columns=['fare', 'age'],
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        obj = ScaleTransform(data=scale_inp_part,
+                             object=fit_obj.output,
+                             accumulate=['pid','passenger'],
+                             data_partition_column='pid',
+                             object_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
+
+        # Example 4: Create statistics to scale "fare" column with respect to
+        # range for sparse input.
+        fit_obj = ScaleFit(data=scale_inp_sparse,
+                           target_attribute=['fare'],
+                           scale_method="range",
+                           miss_value="keep",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value')
+
+        obj = ScaleTransform(data=scale_inp_sparse,
+                             object=fit_obj.output,
+                             accumulate=['passenger'],
+                             attribute_name_column='attribute_column',
+                             attribute_value_column='attribute_value'
+                             )
+
+        # Print the result DataFrame.
+        print(obj.result)
+
+
+        # Example 5: Create statistics to scale "fare" column with respect to
+        # maximum absolute value for sparse input with partition column.
+        fit_obj = ScaleFit(data=scale_inp_part_sparse,
+                           parameter_data=scale_parameter,
+                           attribute_data=scale_attribute,
+                           scale_method="maxabs",
+                           miss_value="zero",
+                           global_scale=False,
+                           attribute_name_column='attribute_column',
+                           attribute_value_column='attribute_value',
+                           data_partition_column='pid',
+                           attribute_data_partition_column='pid',
+                           parameter_data_partition_column='pid')
+
+        obj = ScaleTransform(data=scale_inp_part_sparse,
+                             object=fit_obj.output,
+                             accumulate=["passenger",'pid'],
+                             attribute_name_column='attribute_column',
+                             attribute_value_column='attribute_value',
+                             object_partition_column='pid',
+                             data_partition_column='pid')
+
+        # Print the result DataFrame.
+        print(obj.result)
+
     """

teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py

@@ -1,5 +1,6 @@
 def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, output_prob=False,
-                 output_responses=None, **generic_arguments):
+                 output_responses=None, partition_column=None, family="GAUSSIAN",
+                 **generic_arguments):
     """
     DESCRIPTION:
         The TDGLMPredict() function predicts target values (regression) and class labels
@@ -57,6 +58,18 @@ def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, out
             Note:
                 Only applicable if "output_prob" is True.
             Types: str OR list of strs
+
+        partition_column:
+            Optional Argument.
+            Specifies the column names of "data" on which to partition the input.
+            Types: str OR list of Strings (str)
+
+        family:
+            Optional Argument.
+            Specifies the distribution exponential family.
+            Permitted Values: 'GAUSSIAN', 'BINOMIAL'
+            Default Value: 'GAUSSIAN'
+            Types: str

         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
@@ -168,4 +181,153 @@ def TDGLMPredict(object=None, newdata=None, id_column=None, accumulate=None, out

         # Print the result DataFrame.
         print(TDGLMPredict_out1.result)
+
+        # Example 3 : TDGLMPredict() predicts the 'medv' using generated regression model by GLM
+        # using stepwise regression algorithm.
+        # This example uses the boston dataset and scales the data.
+        # Scaled data is used as input data to generate the GLM model and predict the target values.
+
+        # loading the example data
+        load_example_data("decisionforest", ["boston"])
+        load_example_data('glm', ['housing_train_segment', 'housing_train_parameter', 'housing_train_attribute'])
+
+        # Create teradataml DataFrame objects.
+        boston_df = DataFrame('boston')
+        housing_seg = DataFrame('housing_train_segment')
+        housing_parameter = DataFrame('housing_train_parameter')
+        housing_attribute = DataFrame('housing_train_attribute')
+
+        # scaling the data
+        # Scale "target_columns" with respect to 'STD' value of the column.
+        fit_obj = ScaleFit(data=boston_df,
+                           target_columns=['crim','zn','indus','chas','nox','rm','age','dis','rad','tax','ptratio','black','lstat',],
+                           scale_method="STD")
+
+        # Scale values specified in the input data using the fit data generated by the ScaleFit() function above.
+        obj = ScaleTransform(object=fit_obj.output,
+                             data=boston_df,
+                             accumulate=["id","medv"])
+
+        boston = obj.result
+
+        # Generate generalized linear model(GLM) using stepwise regression algorithm.
+        glm_1 = GLM(data=boston,
+                    input_columns=['indus','chas','nox','rm'],
+                    response_column='medv',
+                    family='GAUSSIAN',
+                    lambda1=0.02,
+                    alpha=0.33,
+                    batch_size=10,
+                    learning_rate='optimal',
+                    iter_max=36,
+                    iter_num_no_change=100,
+                    tolerance=0.0001,
+                    initial_eta=0.02,
+                    stepwise_direction='backward',
+                    max_steps_num=10)
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="id",
+                           newdata=boston,
+                           object=glm_1,
+                           accumulate='medv')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 4 : TDGLMPredict() predicts the 'medv' using generated regression model by GLM
+        # stepwise regression algorithm with initial_stepwise_columns.
+        glm_2 = GLM(data=boston,
+                    input_columns=['crim','zn','indus','chas','nox','rm','age','dis','rad','tax','ptratio','black','lstat'],
+                    response_column='medv',
+                    family='GAUSSIAN',
+                    lambda1=0.02,
+                    alpha=0.33,
+                    batch_size=10,
+                    learning_rate='optimal',
+                    iter_max=36,
+                    iter_num_no_change=100,
+                    tolerance=0.0001,
+                    initial_eta=0.02,
+                    stepwise_direction='bidirectional',
+                    max_steps_num=10,
+                    initial_stepwise_columns=['rad','tax']
+                    )
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="id",
+                           newdata=boston,
+                           object=glm_2,
+                           accumulate='medv')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 5 : TDGLMPredict() predicts the 'price' using generated regression model by GLM
+        # using partition by key.
+        glm_3 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom', 'fullbase', 'gashw', 'airco'],
+                    response_column='price',
+                    family='GAUSSIAN',
+                    batch_size=10,
+                    iter_max=1000,
+                    data_partition_column='partition_id'
+                    )
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_3,
+                           accumulate='price',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 6 : TDGLMPredict() predicts the 'price' using generated regression model by GLM
+        # using partition by key with attribute data.
+        glm_4 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom', 'fullbase', 'gashw', 'airco'],
+                    response_column='price',
+                    family='GAUSSIAN',
+                    batch_size=10,
+                    iter_max=1000,
+                    data_partition_column='partition_id',
+                    attribute_data = housing_attribute,
+                    attribute_data_partition_column = 'partition_id'
+                    )
+
+        # Predict target values using generated regression model by GLM and newdata.
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_4,
+                           accumulate='price',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
+
+        # Example 7 : TDGLMPredict() predicts the 'homestyle' using generated generalized linear model by GLM
+        # using partition by key with parameter data.
+        glm_5 = GLM(data=housing_seg,
+                    input_columns=['bedrooms', 'bathrms', 'stories', 'driveway', 'recroom', 'fullbase', 'gashw', 'airco'],
+                    response_column='homestyle',
+                    family='binomial',
+                    iter_max=1000,
+                    data_partition_column='partition_id',
+                    parameter_data = housing_parameter,
+                    parameter_data_partition_column = 'partition_id'
+                    )
+
+        res = TDGLMPredict(id_column="sn",
+                           newdata=housing_seg,
+                           object=glm_5,
+                           accumulate='homestyle',
+                           newdata_partition_column='partition_id',
+                           object_partition_column='partition_id')
+
+        # Print the result DataFrame.
+        print(res.result)
     """
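
Notably, none of the new docstring examples actually passes the new "family" or "partition_column" arguments to TDGLMPredict() itself. A hedged sketch of how "family" might be combined with Example 7's binomial model follows; this pairing is an assumption based on the argument description, not a call shown in the diff.

    from teradataml import TDGLMPredict

    # glm_5 and housing_seg come from Example 7 above; 'BINOMIAL' matches the
    # family the model was trained with, overriding the default 'GAUSSIAN'.
    res = TDGLMPredict(id_column="sn",
                       newdata=housing_seg,
                       object=glm_5,
                       family="BINOMIAL",
                       accumulate="homestyle",
                       newdata_partition_column="partition_id",
                       object_partition_column="partition_id")

    # Print the result DataFrame.
    print(res.result)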

teradataml/data/docs/sqle/docs_17_20/XGBoost.py

@@ -1,7 +1,7 @@
 def XGBoost(formula=None, data=None, input_columns=None, response_column=None, max_depth=5,
             num_boosted_trees=-1, min_node_size=1, seed=1, model_type='REGRESSION',
-            coverage_factor=1.0, min_impurity=0.0, lambda1=
-
+            coverage_factor=1.0, min_impurity=0.0, lambda1=1, shrinkage_factor=0.5,
+            column_sampling=1.0, iter_num=10, tree_size=-1, base_score=0.0,
             **generic_arguments):
     """
     DESCRIPTION:
@@ -174,7 +174,7 @@ def XGBoost(formula=None, data=None, input_columns=None, response_column=None, m
             Notes:
                 * The "lambda1" must be in the range [0, 100000].
                 * The value 0 specifies no regularization.
-            Default Value:
+            Default Value: 1
             Types: float OR int

         shrinkage_factor:
@@ -185,7 +185,7 @@ def XGBoost(formula=None, data=None, input_columns=None, response_column=None, m
             Notes:
                 * The "shrinkage_factor" is a DOUBLE PRECISION value in the range (0, 1].
                 * The value 1 specifies no shrinkage.
-            Default Value: 0.
+            Default Value: 0.5
             Types: float

         column_sampling:
@@ -217,6 +217,14 @@ def XGBoost(formula=None, data=None, input_columns=None, response_column=None, m
             Default Value: -1
             Types: int

+        base_score:
+            Optional Argument.
+            Specifies the initial prediction value for all data points.
+            Note:
+                * The "base_score" must be in the range [-1e50, 1e50].
+            Default Value: 0.0
+            Types: float
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
             are the generic keyword arguments:
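
For orientation, a minimal sketch of an XGBoost() call that states the newly documented defaults explicitly and uses the new "base_score" argument. The table and column names are illustrative assumptions, not values taken from the diff.

    from teradataml import DataFrame, XGBoost

    # Hypothetical training table (name and columns are assumptions).
    train = DataFrame.from_table("housing_train")

    model = XGBoost(data=train,
                    input_columns=["bedrooms", "bathrms", "stories"],  # assumed columns
                    response_column="price",                           # assumed column
                    model_type="REGRESSION",
                    lambda1=1,             # documented default, now stated in the docstring
                    shrinkage_factor=0.5,  # documented default
                    base_score=0.0)        # new in 20.0.0.1; must lie in [-1e50, 1e50]

    print(model.result)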

teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py

@@ -1,6 +1,6 @@
 def XGBoostPredict(newdata=None, object=None, id_column=None, num_boosted_tree=1000,
                    iter_num=3, accumulate=None, output_prob=False, model_type="REGRESSION",
-                   output_responses=None, **generic_arguments):
+                   output_responses=None, detailed=False, **generic_arguments):
     """
     DESCRIPTION:
         The XGBoostPredict() function runs the predictive algorithm based on the model generated
@@ -123,6 +123,12 @@ def XGBoostPredict(newdata=None, object=None, id_column=None, num_boosted_tree=1
                 'Classification'.
             Types: str OR list of str(s)

+        detailed:
+            Optional Argument.
+            Specifies whether to output detailed information of each prediction.
+            Default Value: False
+            Types: bool
+
         **generic_arguments:
             Specifies the generic keyword arguments SQLE functions accept. Below
             are the generic keyword arguments: