teradataml 20.0.0.0__py3-none-any.whl → 20.0.0.2__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of teradataml might be problematic.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +183 -0
- teradataml/__init__.py +6 -3
- teradataml/_version.py +2 -2
- teradataml/analytics/__init__.py +3 -2
- teradataml/analytics/analytic_function_executor.py +275 -40
- teradataml/analytics/analytic_query_generator.py +92 -0
- teradataml/analytics/byom/__init__.py +3 -2
- teradataml/analytics/json_parser/metadata.py +1 -0
- teradataml/analytics/json_parser/utils.py +17 -21
- teradataml/analytics/meta_class.py +40 -1
- teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
- teradataml/analytics/sqle/__init__.py +10 -2
- teradataml/analytics/table_operator/__init__.py +3 -2
- teradataml/analytics/uaf/__init__.py +21 -2
- teradataml/analytics/utils.py +62 -1
- teradataml/analytics/valib.py +1 -1
- teradataml/automl/__init__.py +1553 -319
- teradataml/automl/custom_json_utils.py +139 -61
- teradataml/automl/data_preparation.py +276 -319
- teradataml/automl/data_transformation.py +163 -81
- teradataml/automl/feature_engineering.py +402 -239
- teradataml/automl/feature_exploration.py +9 -2
- teradataml/automl/model_evaluation.py +48 -51
- teradataml/automl/model_training.py +291 -189
- teradataml/catalog/byom.py +8 -8
- teradataml/catalog/model_cataloging_utils.py +1 -1
- teradataml/clients/auth_client.py +133 -0
- teradataml/clients/pkce_client.py +1 -1
- teradataml/common/aed_utils.py +3 -2
- teradataml/common/constants.py +48 -6
- teradataml/common/deprecations.py +13 -7
- teradataml/common/garbagecollector.py +156 -120
- teradataml/common/messagecodes.py +6 -1
- teradataml/common/messages.py +3 -1
- teradataml/common/sqlbundle.py +1 -1
- teradataml/common/utils.py +103 -11
- teradataml/common/wrapper_utils.py +1 -1
- teradataml/context/context.py +121 -31
- teradataml/data/advertising.csv +201 -0
- teradataml/data/bank_marketing.csv +11163 -0
- teradataml/data/bike_sharing.csv +732 -0
- teradataml/data/boston2cols.csv +721 -0
- teradataml/data/breast_cancer.csv +570 -0
- teradataml/data/complaints_test_tokenized.csv +353 -0
- teradataml/data/complaints_tokens_model.csv +348 -0
- teradataml/data/covid_confirm_sd.csv +83 -0
- teradataml/data/customer_segmentation_test.csv +2628 -0
- teradataml/data/customer_segmentation_train.csv +8069 -0
- teradataml/data/dataframe_example.json +10 -0
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +3 -1
- teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +5 -1
- teradataml/data/docs/sqle/docs_17_20/ANOVA.py +61 -1
- teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
- teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/FTest.py +105 -26
- teradataml/data/docs/sqle/docs_17_20/GLM.py +162 -1
- teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
- teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
- teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
- teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
- teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
- teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
- teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
- teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +119 -1
- teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +93 -1
- teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
- teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +163 -1
- teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
- teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
- teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
- teradataml/data/docs/sqle/docs_17_20/XGBoost.py +12 -4
- teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +7 -1
- teradataml/data/docs/sqle/docs_17_20/ZTest.py +72 -7
- teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
- teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
- teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
- teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
- teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
- teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
- teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
- teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
- teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
- teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
- teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
- teradataml/data/dwt2d_dataTable.csv +65 -0
- teradataml/data/dwt_dataTable.csv +8 -0
- teradataml/data/dwt_filterTable.csv +3 -0
- teradataml/data/finance_data4.csv +13 -0
- teradataml/data/glm_example.json +28 -1
- teradataml/data/grocery_transaction.csv +19 -0
- teradataml/data/housing_train_segment.csv +201 -0
- teradataml/data/idwt2d_dataTable.csv +5 -0
- teradataml/data/idwt_dataTable.csv +8 -0
- teradataml/data/idwt_filterTable.csv +3 -0
- teradataml/data/insect2Cols.csv +61 -0
- teradataml/data/interval_data.csv +5 -0
- teradataml/data/jsons/paired_functions.json +14 -0
- teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +99 -27
- teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
- teradataml/data/jsons/sqle/17.20/TD_FTest.json +166 -83
- teradataml/data/jsons/sqle/17.20/TD_GLM.json +90 -14
- teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +48 -5
- teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +5 -3
- teradataml/data/jsons/sqle/17.20/TD_KMeans.json +31 -11
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
- teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +3 -2
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
- teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
- teradataml/data/jsons/sqle/17.20/TD_ROC.json +2 -1
- teradataml/data/jsons/sqle/17.20/TD_SVM.json +16 -16
- teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +19 -1
- teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +168 -15
- teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +50 -1
- teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
- teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
- teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
- teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +25 -7
- teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +17 -4
- teradataml/data/jsons/sqle/17.20/TD_ZTest.json +157 -80
- teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
- teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
- teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
- teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
- teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
- teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
- teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
- teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
- teradataml/data/kmeans_example.json +5 -0
- teradataml/data/kmeans_table.csv +10 -0
- teradataml/data/load_example_data.py +8 -2
- teradataml/data/naivebayestextclassifier_example.json +1 -1
- teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
- teradataml/data/onehot_encoder_train.csv +4 -0
- teradataml/data/openml_example.json +29 -0
- teradataml/data/peppers.png +0 -0
- teradataml/data/real_values.csv +14 -0
- teradataml/data/sax_example.json +8 -0
- teradataml/data/scale_attributes.csv +3 -0
- teradataml/data/scale_example.json +52 -1
- teradataml/data/scale_input_part_sparse.csv +31 -0
- teradataml/data/scale_input_partitioned.csv +16 -0
- teradataml/data/scale_input_sparse.csv +11 -0
- teradataml/data/scale_parameters.csv +3 -0
- teradataml/data/scripts/deploy_script.py +21 -2
- teradataml/data/scripts/sklearn/sklearn_fit.py +40 -37
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +22 -30
- teradataml/data/scripts/sklearn/sklearn_function.template +42 -24
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +23 -33
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +19 -28
- teradataml/data/scripts/sklearn/sklearn_score.py +32 -32
- teradataml/data/scripts/sklearn/sklearn_transform.py +85 -42
- teradataml/data/star_pivot.csv +8 -0
- teradataml/data/templates/open_source_ml.json +2 -1
- teradataml/data/teradataml_example.json +97 -1
- teradataml/data/timestamp_data.csv +4 -0
- teradataml/data/titanic_dataset_unpivoted.csv +19 -0
- teradataml/data/uaf_example.json +55 -1
- teradataml/data/unpivot_example.json +15 -0
- teradataml/data/url_data.csv +9 -0
- teradataml/data/windowdfft.csv +16 -0
- teradataml/data/ztest_example.json +16 -0
- teradataml/dataframe/copy_to.py +9 -4
- teradataml/dataframe/data_transfer.py +125 -64
- teradataml/dataframe/dataframe.py +575 -57
- teradataml/dataframe/dataframe_utils.py +47 -9
- teradataml/dataframe/fastload.py +273 -90
- teradataml/dataframe/functions.py +339 -0
- teradataml/dataframe/row.py +160 -0
- teradataml/dataframe/setop.py +2 -2
- teradataml/dataframe/sql.py +740 -18
- teradataml/dataframe/window.py +1 -1
- teradataml/dbutils/dbutils.py +324 -18
- teradataml/geospatial/geodataframe.py +1 -1
- teradataml/geospatial/geodataframecolumn.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +13 -13
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/sklearn/_sklearn_wrapper.py +254 -122
- teradataml/options/__init__.py +16 -5
- teradataml/options/configure.py +39 -6
- teradataml/options/display.py +2 -2
- teradataml/plot/axis.py +4 -4
- teradataml/scriptmgmt/UserEnv.py +26 -19
- teradataml/scriptmgmt/lls_utils.py +120 -16
- teradataml/table_operators/Script.py +4 -5
- teradataml/table_operators/TableOperator.py +160 -26
- teradataml/table_operators/table_operator_util.py +88 -41
- teradataml/table_operators/templates/dataframe_udf.template +63 -0
- teradataml/telemetry_utils/__init__.py +0 -0
- teradataml/telemetry_utils/queryband.py +52 -0
- teradataml/utils/validators.py +41 -3
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +191 -6
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +263 -185
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
teradataml/data/jsons/sqle/17.20/TD_CFilter.json (new file)

@@ -0,0 +1,118 @@
+{
+    "json_schema_major_version": "1",
+    "json_schema_minor_version": "1",
+    "json_content_version": "1",
+    "function_name": "TD_CFilter",
+    "function_version": "1.0",
+    "function_alias_name": "TD_CFilter",
+    "function_type": "fastpath",
+    "function_category": "Association Analysis",
+    "function_r_name": "aa.td.cfilter",
+    "short_description": "This function calculates several statistical measures of how likely each pair of items is to be purchased together.",
+    "long_description": "This function calculates several statistical measures of how likely each pair of items is to be purchased together.",
+    "input_tables": [
+        {
+            "requiredInputKind": [
+                "PartitionByAny"
+            ],
+            "isOrdered": false,
+            "partitionByOne": false,
+            "name": "InputTable",
+            "alternateNames": [],
+            "isRequired": true,
+            "rDescription": "Specifies the table containing the input data to filter.",
+            "description": "Specifies the table containing the input data to filter.",
+            "datatype": "TABLE_ALIAS",
+            "allowsLists": false,
+            "rName": "data",
+            "useInR": true,
+            "rOrderNum": 1
+        }
+    ],
+    "argument_clauses": [
+        {
+            "targetTable": [
+                "InputTable"
+            ],
+            "checkDuplicate": true,
+            "allowedTypes": [],
+            "allowedTypeGroups": [
+                "STRING"
+            ],
+            "matchLengthOfArgument": "",
+            "allowPadding": false,
+            "name": "TargetColumn",
+            "alternateNames": [],
+            "isRequired": true,
+            "rDescription": "Specify the column from the input table which contains the data for filter.",
+            "description": "Specify the column from the input table which contains the data for filter.",
+            "datatype": "COLUMNS",
+            "allowsLists": false,
+            "rName": "target.column",
+            "useInR": true,
+            "rOrderNum": 2
+        },
+        {
+            "targetTable": [
+                "InputTable"
+            ],
+            "checkDuplicate": true,
+            "allowedTypes": [],
+            "allowedTypeGroups": [
+                "ALL"
+            ],
+            "matchLengthOfArgument": "",
+            "allowPadding": false,
+            "name": "TransactionIDColumns",
+            "alternateNames": [],
+            "isRequired": true,
+            "rDescription": "Specifies the transactionID column to define groups of items listed in the input columns that are purchased together.",
+            "description": "Specifies the transactionID column to define groups of items listed in the input columns that are purchased together.",
+            "datatype": "COLUMNS",
+            "allowsLists": true,
+            "rName": "transaction.id.columns",
+            "useInR": true,
+            "rOrderNum": 3
+        },
+        {
+            "targetTable": [
+                "InputTable"
+            ],
+            "checkDuplicate": true,
+            "allowedTypes": [],
+            "allowedTypeGroups": [
+                "ALL"
+            ],
+            "matchLengthOfArgument": "",
+            "allowPadding": false,
+            "name": "PartitionColumns",
+            "alternateNames": [],
+            "isRequired": false,
+            "rDescription": "Specify the name of the input table columns on which to partition the input.",
+            "description": "Specify the name of the input table columns on which to partition the input.",
+            "datatype": "COLUMNS",
+            "allowsLists": true,
+            "rName": "partition.columns",
+            "useInR": true,
+            "rOrderNum": 4
+        },
+        {
+            "defaultValue": 100,
+            "lowerBound": 0,
+            "upperBound": 2147483647,
+            "lowerBoundType": "INCLUSIVE",
+            "upperBoundType": "INCLUSIVE",
+            "allowNaN": false,
+            "name": "MaxDistinctItems",
+            "alternateNames": [],
+            "isRequired": false,
+            "rDescription": "Specifies the maximum size of the item set. The default value is 100.",
+            "description": "Specifies the maximum size of the item set. The default value is 100.",
+            "datatype": "INTEGER",
+            "allowsLists": false,
+            "rName": "max.distinct.items",
+            "useInR": true,
+            "rOrderNum": 5
+        }
+    ]
+}
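The new TD_CFilter spec above is what teradataml's JSON parser (teradataml/analytics/json_parser) turns into a Python wrapper: the required TargetColumn and TransactionIDColumns clauses become mandatory keyword arguments, while PartitionColumns and MaxDistinctItems (default 100, non-negative integer) stay optional. A minimal sketch of how such a generated wrapper might be driven; the snake_case argument names and the table/column names are assumptions derived from the JSON and the example data listed above, not the documented signature:

```python
# Illustrative sketch only. Argument names are assumed snake_case forms of the
# argument_clauses above; check help(CFilter) in an installed teradataml 20.0.0.2.
from teradataml import create_context, DataFrame, CFilter

create_context(host="tdhost", username="user", password="pass")  # placeholder credentials

# One row per (transaction, item); "grocery_transaction" mirrors the
# grocery_transaction.csv example data added in this release (table name assumed).
transactions = DataFrame("grocery_transaction")

result = CFilter(
    data=transactions,                    # InputTable (PartitionByAny)
    target_column="item",                 # TargetColumn: STRING column holding the items
    transaction_id_columns="tran_no",     # TransactionIDColumns: groups items bought together
    max_distinct_items=100,               # MaxDistinctItems: optional, default 100
)
print(result.result)  # pairwise co-purchase measures
```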
teradataml/data/jsons/sqle/17.20/TD_FTest.json (some removed lines are truncated in the source rendering)

@@ -1,26 +1,27 @@
 {
     "json_schema_major_version": "1",
-    "json_schema_minor_version": "
-    "json_content_version": "
+    "json_schema_minor_version": "2",
+    "json_content_version": "2",
     "function_name": "TD_FTest",
-    "
+    "function_alias_name" : "TD_FTest",
+    "function_version": "2.0",
+    "commence_db_version" : "17.20.00",
+    "change_db_version" : "17.20.03.XX",
     "function_type": "fastpath",
     "function_category": "Hypothesis Testing",
-    "function_alias_name": "TD_FTest",
     "function_r_name": "aa.td_ftest",
     "short_description": "hypothesis test function to perform ftest analysis on a data set.",
     "long_description": "fastpath function to perform ftest analysis on a data set.",
     "input_tables": [
         {
-            "isOrdered": false,
-            "partitionByOne": false,
             "name": "InputTable",
             "alternateNames": [],
             "isRequired": true,
-            "rDescription": "The input table for ftest analysis",
-            "description": "The input table for ftest analysis.",
             "datatype": "TABLE_ALIAS",
-            "
+            "partitionByOne": false,
+            "isOrdered": false,
+            "description": "The input table for ftest analysis.",
+            "rDescription": "The input table for ftest analysis",
             "rName": "data",
             "useInR": true,
             "rOrderNum": 1
@@ -28,27 +29,30 @@
     ],
     "argument_clauses": [
         {
-            "
-            "lowerBound": 0,
-            "upperBound": 1,
-            "lowerBoundType": "INCLUSIVE",
-            "upperBoundType": "INCLUSIVE",
-            "allowNaN": false,
-            "isOutputColumn": false,
-            "matchLengthOfArgument": "",
-            "allowPadding": false,
-            "name": "Alpha",
+            "name": "FirstSampleColumn",
             "alternateNames": [],
             "isRequired": false,
-            "
-
-
+            "targetTable": [
+                "InputTable"
+            ],
+            "checkDuplicate": true,
+            "allowedTypes": [],
+            "allowedTypeGroups": [
+                "NUMERIC"
+            ],
+            "rOrderNum": 2,
+            "description": "Specifies the first sample column in f test",
+            "rDescription": "Specifies the first sample column in f test",
+            "datatype": "COLUMNS",
             "allowsLists": false,
-            "rName": "
+            "rName": "first.sample.column",
             "useInR": true,
-            "
+            "rFormulaUsage" : false
         },
         {
+            "name": "SecondSampleColumn",
+            "alternateNames": [],
+            "isRequired": false,
             "targetTable": [
                 "InputTable"
             ],
@@ -57,22 +61,39 @@
             "allowedTypeGroups": [
                 "NUMERIC"
             ],
-            "
-            "
-            "
-            "
-            "
+            "rOrderNum": 3,
+            "description": "Specifies the second sample column in f test",
+            "rDescription": "Specifies the second sample column in f test",
+            "datatype": "COLUMNS",
+            "allowsLists": false,
+            "rName": "second.sample.column",
+            "useInR": true,
+            "rFormulaUsage" : false
+        },
+        {
+            "name": "SampleNameColumn",
             "alternateNames": [],
             "isRequired": false,
-            "
-
-
+            "targetTable": [
+                "InputTable"
+            ],
+            "checkDuplicate": true,
+            "allowedTypes": [],
+            "allowedTypeGroups": [
+                "STRING"
+            ],
+            "rOrderNum": 4,
+            "description": "Specifies the input table column containing the names of the samples included in the f test. This argument is used when Input is in sample-value format.",
+            "rDescription": "Specifies the input table column containing the names of the samples included in the f test. This argument is used when Input is in sample-value format.",
+            "datatype": "COLUMNS",
             "allowsLists": false,
-            "rName": "
-            "useInR": true
-            "rOrderNum": 3
+            "rName": "sample.name.column",
+            "useInR": true
         },
         {
+            "name": "SampleValueColumn",
+            "alternateNames": [],
+            "isRequired": false,
             "targetTable": [
                 "InputTable"
             ],
@@ -81,106 +102,168 @@
             "allowedTypeGroups": [
                 "NUMERIC"
             ],
-            "
-            "
-            "
+            "rOrderNum": 5,
+            "description": "Specifies the input table column containing the values for each sample member. This argument is used when Input is in sample-value format.",
+            "rDescription": "Specifies the input table column containing the values for each sample member. This argument is used when Input is in sample-value format.",
+            "datatype": "COLUMNS",
+            "allowsLists": false,
+            "rName": "sample.value.column",
+            "useInR": true
+        },
+        {
+            "name": "FirstSampleName",
+            "alternateNames": [],
+            "isRequired": false,
+            "defaultValue" : "",
+            "rOrderNum": 6,
+            "description": "Specifies the name of the first sample included in the f test. This argument is used when Input is in sample-value format.",
+            "rDescription": "Specifies the name of the first sample included in the f test. This argument is used when Input is in sample-value format.",
+            "datatype": "STRING",
+            "allowsLists": false,
             "allowPadding": false,
-            "
+            "rName": "first.sample.name",
+            "useInR": true,
+            "rDefaultValue" : ""
+        },
+        {
+            "name": "SecondSampleName",
             "alternateNames": [],
             "isRequired": false,
-            "
-            "
-            "
+            "defaultValue" : "",
+            "rOrderNum": 7,
+            "description": "Specifies the name of the second sample included in the f test. This argument is used when Input is in sample-value format.",
+            "rDescription": "Specifies the name of the second sample included in the f test. This argument is used when Input is in sample-value format.",
+            "datatype": "STRING",
             "allowsLists": false,
-            "
+            "allowPadding": false,
+            "rName": "second.sample.name",
             "useInR": true,
-            "
+            "rDefaultValue" : ""
         },
         {
-
+            "name": "AlternativeHypothesis",
+            "alternateNames": [],
+            "isRequired": false,
+            "defaultValue": "two-tailed",
+            "rOrderNum": 8,
+            "permittedValues": [
                 "lower-tailed",
                 "two-tailed",
                 "upper-tailed"
             ],
-            "defaultValue": "two-tailed",
-            "allowNaN": false,
-            "isOutputColumn": false,
             "matchLengthOfArgument": "",
-            "
-            "
-            "alternateNames": [],
-            "isRequired": false,
-            "rDescription": "Specifies the alternate hypothesis",
-            "description": "Specifies the alternate hypothesis",
+            "description": "Specifies the alternative hypothesis",
+            "rDescription": "Specifies the alternative hypothesis",
             "datatype": "STRING",
             "allowsLists": false,
+            "allowPadding": false,
             "rName": "alternate.hypothesis",
             "useInR": true,
-            "
+            "rDefaultValue" : ""
         },
         {
-            "allowNaN": false,
-            "isOutputColumn": false,
-            "matchLengthOfArgument": "",
-            "allowPadding": false,
             "name": "FirstSampleVariance",
             "alternateNames": [],
             "isRequired": false,
-            "
+            "rOrderNum": 9,
+            "lowerBound" : 0,
+            "lowerBoundType" : "EXCLUSIVE",
+            "upperBound" : 1e10,
+            "upperBoundType" : "EXCLUSIVE",
+            "allowNaN": false,
+            "isOutputColumn": false,
+            "matchLengthOfArgument": "",
             "description": "Specifies the first sample variance",
-            "
+            "rDescription": "Specifies the first sample variance",
+            "datatype": "DOUBLE",
             "allowsLists": false,
+            "allowPadding": false,
             "rName": "first.sample.variance",
             "useInR": true,
-            "
+            "rDefaultValue" : ""
         },
         {
-            "allowNaN": false,
-            "isOutputColumn": false,
-            "matchLengthOfArgument": "",
-            "allowPadding": false,
             "name": "SecondSampleVariance",
             "alternateNames": [],
             "isRequired": false,
-            "
+            "rOrderNum": 10,
+            "lowerBound" : 0,
+            "lowerBoundType" : "EXCLUSIVE",
+            "upperBound" : 1e10,
+            "upperBoundType" : "EXCLUSIVE",
+            "allowNaN": false,
+            "isOutputColumn": false,
+            "matchLengthOfArgument": "",
             "description": "Specifies the second sample variance",
-            "
+            "rDescription": "Specifies the second sample variance",
+            "datatype": "DOUBLE",
             "allowsLists": false,
+            "allowPadding": false,
             "rName": "second.sample.variance",
             "useInR": true,
-            "
+            "rDefaultValue" : ""
         },
         {
-            "allowNaN": false,
-            "isOutputColumn": false,
-            "matchLengthOfArgument": "",
-            "allowPadding": false,
             "name": "df1",
             "alternateNames": [],
             "isRequired": false,
-            "
+            "rOrderNum": 11,
+            "lowerBound" : 1,
+            "lowerBoundType" : "INCLUSIVE",
+            "upperBound" : 999999999999999,
+            "upperBoundType" : "EXCLUSIVE",
+            "allowNaN": false,
+            "isOutputColumn": false,
+            "matchLengthOfArgument": "",
             "description": "Specifies the df of the first sample",
-            "
+            "rDescription": "Specifies the df of the first sample",
+            "datatype": "LONG",
             "allowsLists": false,
+            "allowPadding": false,
             "rName": "df1",
             "useInR": true,
-            "
+            "rDefaultValue" : ""
         },
         {
+            "name": "df2",
+            "alternateNames": [],
+            "isRequired": false,
+            "rOrderNum": 12,
+            "lowerBound" : 1,
+            "lowerBoundType" : "INCLUSIVE",
+            "upperBound" : 999999999999999,
+            "upperBoundType" : "EXCLUSIVE",
             "allowNaN": false,
             "isOutputColumn": false,
             "matchLengthOfArgument": "",
+            "description": "Specifies the df of the second sample",
+            "rDescription": "Specifies the df of the second sample",
+            "datatype": "LONG",
+            "allowsLists": false,
             "allowPadding": false,
-            "
+            "rName": "df2",
+            "useInR": true,
+            "rDefaultValue" : ""
+        },
+        {
+            "name": "Alpha",
             "alternateNames": [],
             "isRequired": false,
-            "
-            "
-            "
+            "defaultValue": 0.05,
+            "rOrderNum": 13,
+            "lowerBound": 0,
+            "lowerBoundType": "INCLUSIVE",
+            "upperBound": 1,
+            "upperBoundType": "INCLUSIVE",
+            "allowNaN": false,
+            "description": "Specifies the value of alpha in hypothesis function",
+            "rDescription": "Specifies the value of alpha in hypothesis function",
+            "datatype": "DOUBLE",
             "allowsLists": false,
-            "
+            "allowPadding": false,
+            "rName": "alpha",
             "useInR": true,
-            "
+            "rDefaultValue" : ""
         }
     ]
-}
+}
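The revised TD_FTest spec above reorders the original variance/df/alpha clauses and adds a second way of supplying input (SampleNameColumn, SampleValueColumn, FirstSampleName, SecondSampleName for sample-value format), with explicit defaults, bounds, and permitted values on each clause. Those fields are exactly what the wrapper generator and argument validators consume. A small self-contained sketch of that consumption using only the standard json module; the file path is an assumption based on the package layout in the listing above:

```python
# Build an {argument: constraints} map from a function JSON the way a wrapper
# generator or validator might. The spec path is assumed from the package layout.
import json

SPEC_PATH = "teradataml/data/jsons/sqle/17.20/TD_FTest.json"

with open(SPEC_PATH) as fh:
    spec = json.load(fh)

constraints = {
    clause["name"]: {
        "required": clause.get("isRequired", False),
        "datatype": clause.get("datatype"),
        "default": clause.get("defaultValue"),
        "bounds": (clause.get("lowerBound"), clause.get("upperBound")),
        "permitted": clause.get("permittedValues"),
    }
    for clause in spec["argument_clauses"]
}

# With the spec above: constraints["Alpha"] is optional, DOUBLE, default 0.05,
# bounded to [0, 1]; constraints["AlternativeHypothesis"] permits
# lower-tailed / two-tailed / upper-tailed with default "two-tailed".
for name, info in constraints.items():
    print(name, info)
```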
teradataml/data/jsons/sqle/17.20/TD_GLM.json (some removed lines are truncated in the source rendering)

@@ -8,12 +8,14 @@
     "function_category": "Model Training",
     "function_alias_name": "TD_GLM",
     "function_r_name": "aa.td_glm",
-    "
-    "
+    "ref_function_r_name": "aa.td_glm",
+    "short_description": "This function generates one or more generalized linear models.",
+    "long_description": "This function generates one or more generalized linear models.",
     "input_tables": [
         {
             "requiredInputKind": [
-                "PartitionByAny"
+                "PartitionByAny",
+                "PartitionByKey"
             ],
             "isOrdered": false,
             "partitionByOne": false,
@@ -27,6 +29,40 @@
             "rName": "data",
             "useInR": true,
             "rOrderNum": 1
+        },
+        {
+            "requiredInputKind": [
+                "PartitionByKey"
+            ],
+            "isOrdered": false,
+            "partitionByOne": false,
+            "name": "AttributeTable",
+            "alternateNames": [],
+            "isRequired": false,
+            "rDescription": "Specifies the name of the attribute table. This is valid when InputTable is partition by key. ",
+            "description": "Specifies the name of the attribute table. This is valid when InputTable is partition by key. ",
+            "datatype": "TABLE_ALIAS",
+            "allowsLists": false,
+            "rName": "attribute.table",
+            "useInR": true,
+            "rOrderNum": 2
+        },
+        {
+            "requiredInputKind": [
+                "PartitionByKey"
+            ],
+            "isOrdered": false,
+            "partitionByOne": false,
+            "name": "ParameterTable",
+            "alternateNames": [],
+            "isRequired": false,
+            "rDescription": "Specifies the name of the parameter table. This is valid when InputTable is partition by key. ",
+            "description": "Specifies the name of the parameter table. This is valid when InputTable is partition by key. ",
+            "datatype": "TABLE_ALIAS",
+            "allowsLists": false,
+            "rName": "parameter.table",
+            "useInR": true,
+            "rOrderNum": 3
         }
     ],
     "output_tables": [
@@ -36,8 +72,8 @@
             "name": "MetaInformationTable",
             "alternateNames": [],
             "isRequired": false,
-            "rDescription": "Specifies the name of the table in which the training progress information per iteration is stored.",
-            "description": "Specifies the name of the table in which the training progress information per iteration is stored.",
+            "rDescription": "Specifies the name of the table in which the training progress information per iteration is stored. This is valid when InputTable is partition by any. ",
+            "description": "Specifies the name of the table in which the training progress information per iteration is stored. This is valid when InputTable is partition by any. ",
             "datatype": "TABLE_NAME",
             "allowsLists": false,
             "rName": "output.table",
@@ -359,8 +395,8 @@
             "name": "LocalSGDIterations",
             "alternateNames": [],
             "isRequired": false,
-            "rDescription": "Specify the number of local iterations to be used for Local SGD algorithm. Must be a positive integer value. A value of 0 implies Local SGD is disabled. A value higher than 0 enables Local SGD and that many local iterations are performed before updating the weights for the global model. With Local SGD algorithm, recommended values for arguments are as follows: LocalSGDIterations: 10, MaxIterNum: 100, BatchSize: 50, IterNumNoChange: 5.",
-            "description": "Specify the number of local iterations to be used for Local SGD algorithm. Must be a positive integer value. A value of 0 implies Local SGD is disabled. A value higher than 0 enables Local SGD and that many local iterations are performed before updating the weights for the global model. With Local SGD algorithm, recommended values for arguments are as follows: LocalSGDIterations: 10, MaxIterNum: 100, BatchSize: 50, IterNumNoChange: 5.",
+            "rDescription": "Specify the number of local iterations to be used for Local SGD algorithm. Must be a positive integer value. A value of 0 implies Local SGD is disabled. A value higher than 0 enables Local SGD and that many local iterations are performed before updating the weights for the global model. With Local SGD algorithm, recommended values for arguments are as follows: LocalSGDIterations: 10, MaxIterNum: 100, BatchSize: 50, IterNumNoChange: 5. This is valid when InputTable is partition by any. ",
+            "description": "Specify the number of local iterations to be used for Local SGD algorithm. Must be a positive integer value. A value of 0 implies Local SGD is disabled. A value higher than 0 enables Local SGD and that many local iterations are performed before updating the weights for the global model. With Local SGD algorithm, recommended values for arguments are as follows: LocalSGDIterations: 10, MaxIterNum: 100, BatchSize: 50, IterNumNoChange: 5. This is valid when InputTable is partition by any. ",
             "datatype": "INTEGER",
             "allowsLists": false,
             "rName": "local.sgd.iterations",
@@ -378,8 +414,8 @@
             "name": "StepwiseDirection",
             "alternateNames": [],
             "isRequired": false,
-            "rDescription": "Specify the type of algorithm to be used. Acceptable values are 'forward', 'backward', 'both', and 'bidirectional'.",
-            "description": "Specify the type of algorithm to be used. Acceptable values are 'forward', 'backward', 'both', and 'bidirectional'.",
+            "rDescription": "Specify the type of algorithm to be used. Acceptable values are 'forward', 'backward', 'both', and 'bidirectional'. This is valid when InputTable is partition by any. ",
+            "description": "Specify the type of algorithm to be used. Acceptable values are 'forward', 'backward', 'both', and 'bidirectional'. This is valid when InputTable is partition by any. ",
             "datatype": "STRING",
             "allowsLists": false,
             "rName": "stepwise.direction",
@@ -396,8 +432,8 @@
             "name": "MaxStepsNum",
             "alternateNames": [],
             "isRequired": false,
-            "rDescription": "Specify the maximum number of steps to be used for the Stepwise Algorithm.",
-            "description": "Specify the maximum number of steps to be used for the Stepwise Algorithm.",
+            "rDescription": "Specify the maximum number of steps to be used for the Stepwise Algorithm. This is valid when InputTable is partition by any. ",
+            "description": "Specify the maximum number of steps to be used for the Stepwise Algorithm. This is valid when InputTable is partition by any. ",
             "datatype": "INTEGER",
             "allowsLists": false,
             "rName": "max.steps.num",
@@ -418,14 +454,54 @@
             "name": "InitialStepwiseColumns",
             "alternateNames": [],
             "isRequired": false,
-            "rDescription": "Specify the names of the initial state model columns that need to be used as starting point for Stepwise Regression algorithm (predictors, features or independent variables).",
-            "description": "Specify the names of the initial state model columns that need to be used as starting point for Stepwise Regression algorithm (predictors, features or independent variables).",
+            "rDescription": "Specify the names of the initial state model columns that need to be used as starting point for Stepwise Regression algorithm (predictors, features or independent variables). This is valid when InputTable is partition by any. ",
+            "description": "Specify the names of the initial state model columns that need to be used as starting point for Stepwise Regression algorithm (predictors, features or independent variables). This is valid when InputTable is partition by any. ",
             "datatype": "COLUMNS",
             "allowsLists": true,
             "rName": "initial.stepwise.columns",
             "useInR": true,
             "rFormulaUsage": true,
             "rOrderNum": 21
+        },
+        {
+            "permittedValues": [
+                "BATCH",
+                "EPOCH"
+            ],
+            "defaultValue": "BATCH",
+            "isOutputColumn": false,
+            "name": "IterationMode",
+            "alternateNames": [],
+            "isRequired": false,
+            "rDescription": "Specify the iteration mode. Acceptable values are Batch, Epoch, where Batch is one iteration per batch and Epoch is one iteration per epoch. This is valid when InputTable is partition by key. ",
+            "description": "Specify the iteration mode. Acceptable values are Batch, Epoch, where Batch is one iteration per batch and Epoch is one iteration per epoch. This is valid when InputTable is partition by key. ",
+            "datatype": "STRING",
+            "allowsLists": false,
+            "rName": "iteration.mode",
+            "useInR": true,
+            "rOrderNum": 22
+        },
+        {
+            "targetTable": [
+                "InputTable"
+            ],
+            "checkDuplicate": false,
+            "allowedTypes": [],
+            "allowedTypeGroups": [
+                "STRING"
+            ],
+            "matchLengthOfArgument": "",
+            "allowPadding": false,
+            "name": "PartitionColumn",
+            "alternateNames": [],
+            "isRequired": false,
+            "rDescription": "Specify the name of the InputTable columns on which to partition the input. The name should be consistent with the partition_by_column in the ON clause. If the partition_by_column is unicode with foreign language characters, it is necessary to specify PartitionColumn argument. Note: Column range is not supported for PartitionColumn argument. This is valid when InputTable is partition by key. ",
+            "description": "Specify the name of the InputTable columns on which to partition the input. The name should be consistent with the partition_by_column in the ON clause. If the partition_by_column is unicode with foreign language characters, it is necessary to specify PartitionColumn argument. Note: Column range is not supported for PartitionColumn argument. This is valid when InputTable is partition by key. ",
+            "datatype": "COLUMNS",
+            "allowsLists": false,
+            "rName": "partition.column",
+            "useInR": true,
+            "rOrderNum": 23
         }
     ]
-}
+}
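The TD_GLM changes above add PartitionByKey as an accepted input kind alongside PartitionByAny, two optional PartitionByKey inputs (AttributeTable, ParameterTable), and new IterationMode and PartitionColumn clauses, which is what lets a single call fit one model per data partition. A minimal sketch of what that per-partition usage might look like from the Python side; apart from data, input_columns, and response_column, the keyword names and the table/column names are assumptions derived from the JSON and the example data listed above rather than the documented signature:

```python
# Illustrative sketch only: a per-partition ("partition by key") GLM fit.
from teradataml import DataFrame, GLM

# Mirrors the housing_train_segment.csv example data added in this release
# (table and column names assumed).
housing = DataFrame("housing_train_segment")

fit = GLM(
    data=housing,
    data_partition_column="partition_id",   # drives the PARTITION BY key / PartitionColumn clause
    input_columns=["lotsize", "bedrooms", "bathrms"],
    response_column="price",
    family="GAUSSIAN",
    iteration_mode="BATCH",                 # new IterationMode clause, default BATCH
)
print(fit.result)  # one fitted model per partition_id
```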