teradataml 20.0.0.2__py3-none-any.whl → 20.0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (126)
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/README.md +315 -2
  3. teradataml/__init__.py +4 -0
  4. teradataml/_version.py +1 -1
  5. teradataml/analytics/analytic_function_executor.py +95 -8
  6. teradataml/analytics/byom/__init__.py +1 -1
  7. teradataml/analytics/json_parser/metadata.py +12 -3
  8. teradataml/analytics/json_parser/utils.py +7 -2
  9. teradataml/analytics/sqle/__init__.py +5 -1
  10. teradataml/analytics/table_operator/__init__.py +1 -1
  11. teradataml/analytics/uaf/__init__.py +1 -1
  12. teradataml/analytics/utils.py +4 -0
  13. teradataml/analytics/valib.py +18 -4
  14. teradataml/automl/__init__.py +51 -6
  15. teradataml/automl/data_preparation.py +59 -35
  16. teradataml/automl/data_transformation.py +58 -33
  17. teradataml/automl/feature_engineering.py +27 -12
  18. teradataml/automl/model_training.py +73 -46
  19. teradataml/common/constants.py +88 -29
  20. teradataml/common/garbagecollector.py +2 -1
  21. teradataml/common/messagecodes.py +19 -3
  22. teradataml/common/messages.py +6 -1
  23. teradataml/common/sqlbundle.py +64 -12
  24. teradataml/common/utils.py +246 -47
  25. teradataml/common/warnings.py +11 -0
  26. teradataml/context/context.py +161 -27
  27. teradataml/data/amazon_reviews_25.csv +26 -0
  28. teradataml/data/byom_example.json +11 -0
  29. teradataml/data/dataframe_example.json +18 -2
  30. teradataml/data/docs/byom/docs/DataRobotPredict.py +2 -2
  31. teradataml/data/docs/byom/docs/DataikuPredict.py +40 -1
  32. teradataml/data/docs/byom/docs/H2OPredict.py +2 -2
  33. teradataml/data/docs/byom/docs/ONNXEmbeddings.py +242 -0
  34. teradataml/data/docs/byom/docs/ONNXPredict.py +2 -2
  35. teradataml/data/docs/byom/docs/PMMLPredict.py +2 -2
  36. teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +1 -1
  37. teradataml/data/docs/sqle/docs_17_20/Shap.py +34 -6
  38. teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +4 -4
  39. teradataml/data/docs/sqle/docs_17_20/TextParser.py +3 -3
  40. teradataml/data/docs/tableoperator/docs_17_20/Image2Matrix.py +118 -0
  41. teradataml/data/docs/uaf/docs_17_20/CopyArt.py +145 -0
  42. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +4 -1
  43. teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +18 -21
  44. teradataml/data/hnsw_alter_data.csv +5 -0
  45. teradataml/data/hnsw_data.csv +10 -0
  46. teradataml/data/jsons/byom/h2opredict.json +1 -1
  47. teradataml/data/jsons/byom/onnxembeddings.json +266 -0
  48. teradataml/data/jsons/sqle/17.20/TD_Shap.json +0 -1
  49. teradataml/data/jsons/sqle/17.20/TD_TextParser.json +1 -1
  50. teradataml/data/jsons/sqle/20.00/TD_HNSW.json +296 -0
  51. teradataml/data/jsons/sqle/20.00/TD_HNSWPredict.json +206 -0
  52. teradataml/data/jsons/sqle/20.00/TD_HNSWSummary.json +32 -0
  53. teradataml/data/jsons/sqle/20.00/TD_KMeans.json +250 -0
  54. teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +266 -0
  55. teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +278 -0
  56. teradataml/data/jsons/storedprocedure/17.20/TD_COPYART.json +71 -0
  57. teradataml/data/jsons/tableoperator/17.20/IMAGE2MATRIX.json +53 -0
  58. teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +10 -19
  59. teradataml/data/jsons/uaf/17.20/TD_SAX.json +3 -1
  60. teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +15 -5
  61. teradataml/data/medical_readings.csv +101 -0
  62. teradataml/data/patient_profile.csv +101 -0
  63. teradataml/data/scripts/lightgbm/dataset.template +157 -0
  64. teradataml/data/scripts/lightgbm/lightgbm_class_functions.template +247 -0
  65. teradataml/data/scripts/lightgbm/lightgbm_function.template +216 -0
  66. teradataml/data/scripts/lightgbm/lightgbm_sklearn.template +159 -0
  67. teradataml/data/scripts/sklearn/sklearn_fit.py +194 -167
  68. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +136 -115
  69. teradataml/data/scripts/sklearn/sklearn_function.template +14 -19
  70. teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +155 -137
  71. teradataml/data/scripts/sklearn/sklearn_transform.py +129 -42
  72. teradataml/data/target_udt_data.csv +8 -0
  73. teradataml/data/templates/open_source_ml.json +3 -2
  74. teradataml/data/teradataml_example.json +8 -0
  75. teradataml/data/vectordistance_example.json +4 -0
  76. teradataml/dataframe/copy_to.py +8 -3
  77. teradataml/dataframe/data_transfer.py +11 -1
  78. teradataml/dataframe/dataframe.py +1049 -285
  79. teradataml/dataframe/dataframe_utils.py +152 -20
  80. teradataml/dataframe/functions.py +578 -35
  81. teradataml/dataframe/setop.py +11 -6
  82. teradataml/dataframe/sql.py +185 -16
  83. teradataml/dbutils/dbutils.py +1049 -115
  84. teradataml/dbutils/filemgr.py +48 -1
  85. teradataml/hyperparameter_tuner/optimizer.py +12 -1
  86. teradataml/lib/aed_0_1.dll +0 -0
  87. teradataml/opensource/__init__.py +1 -1
  88. teradataml/opensource/_base.py +1466 -0
  89. teradataml/opensource/_class.py +464 -0
  90. teradataml/opensource/{sklearn/constants.py → _constants.py} +21 -14
  91. teradataml/opensource/_lightgbm.py +949 -0
  92. teradataml/opensource/_sklearn.py +1008 -0
  93. teradataml/opensource/{sklearn/_wrapper_utils.py → _wrapper_utils.py} +5 -6
  94. teradataml/options/__init__.py +54 -38
  95. teradataml/options/configure.py +131 -27
  96. teradataml/options/display.py +13 -2
  97. teradataml/plot/axis.py +47 -8
  98. teradataml/plot/figure.py +33 -0
  99. teradataml/plot/plot.py +63 -13
  100. teradataml/scriptmgmt/UserEnv.py +5 -5
  101. teradataml/scriptmgmt/lls_utils.py +130 -40
  102. teradataml/store/__init__.py +12 -0
  103. teradataml/store/feature_store/__init__.py +0 -0
  104. teradataml/store/feature_store/constants.py +291 -0
  105. teradataml/store/feature_store/feature_store.py +2318 -0
  106. teradataml/store/feature_store/models.py +1505 -0
  107. teradataml/table_operators/Apply.py +32 -18
  108. teradataml/table_operators/Script.py +3 -1
  109. teradataml/table_operators/TableOperator.py +3 -1
  110. teradataml/table_operators/query_generator.py +3 -0
  111. teradataml/table_operators/table_operator_query_generator.py +3 -1
  112. teradataml/table_operators/table_operator_util.py +37 -38
  113. teradataml/table_operators/templates/dataframe_register.template +69 -0
  114. teradataml/utils/dtypes.py +51 -2
  115. teradataml/utils/internal_buffer.py +18 -0
  116. teradataml/utils/validators.py +99 -8
  117. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/METADATA +321 -5
  118. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/RECORD +121 -94
  119. teradataml/libaed_0_1.dylib +0 -0
  120. teradataml/libaed_0_1.so +0 -0
  121. teradataml/opensource/sklearn/__init__.py +0 -1
  122. teradataml/opensource/sklearn/_class.py +0 -255
  123. teradataml/opensource/sklearn/_sklearn_wrapper.py +0 -1800
  124. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/WHEEL +0 -0
  125. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/top_level.txt +0 -0
  126. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/zip-safe +0 -0
teradataml/data/docs/uaf/docs_17_20/DWT2D.py
@@ -202,7 +202,10 @@ def DWT2D(data1=None, data1_filter_expr=None, data2=None,
                     data2_filter_expr=data2.id==1,
                     input_fmt_input_mode="MANY2ONE")
 
-    # Example 1: Perform discrete wavelet transform (DWT) for two-dimensional data
+    # Print the result DataFrame.
+    print(uaf_out.result)
+
+    # Example 2: Perform discrete wavelet transform (DWT) for two-dimensional data
     # using only one matrix as input and wavelet as 'haar'.
     uaf_out = DWT2D(data1=data1_matrix_df,
                     wavelet='haar')
teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py
@@ -1,5 +1,5 @@
 def DickeyFuller(data=None, data_filter_expr=None, algorithm=None,
-                 max_lags=None, drift_trend_formula=None,
+                 max_lags=0,
                  **generic_arguments):
     """
     DESCRIPTION:
@@ -10,18 +10,23 @@ def DickeyFuller(data=None, data_filter_expr=None, algorithm=None,
         other factors.
 
         The following procedure is an example of how to use DickeyFuller() function:
-            * Run regression tests.
-            * Determine the algorithm for Dickey Fuller statistic data.
-            * Run DickeyFuller() function using the algorithm.
-            * (Result shows series contains unit roots) Use DIFF() and
-              SeasonalNormalize() functions to remove unit roots.
+            * Run DickeyFuller() on the time series being modeled.
+            * Retrieve the results of the DickeyFuller() test to determine if the
+              time series contains any unit roots.
+            * If unit roots are present, use a technique such as differencing such as Diff()
+              or seasonal normalization, such as SeasonalNormalize(), to create a new series,
+              then rerun the DickeyFuller() test to verify that the differenced or
+              seasonally-normalized series unit root are removed.
+            * If the result shows unit roots, use Diff() and SeasonalNormalize()
+              to remove unit roots.
 
 
     PARAMETERS:
         data:
             Required Argument.
-            Speciifes a single logical-runtime series as an input.
-            Types: TDSeries
+            Specifies a single logical-runtime series as an input or TDAnalyticResult which
+            contains ARTFITRESIDUALS layer.
+            Types: TDSeries, TDAnalyticResult
 
         data_filter_expr:
             Optional Argument.
@@ -34,26 +39,18 @@
             Permitted Values:
                 * NONE: Random walk
                 * DRIFT: Random walk with drift
-                * TREND: Random walk with linear trend
                 * DRIFTNTREND: Random walk with drift and trend
-                * FORMULA: Random walk with selected drift, trend and
-                  auxiliary lags
+                * SQUARED: Random walk with drift, trend, and
+                  quadratic trend.
             Types: str
 
         max_lags:
             Optional Argument.
             Specifies the maximum number of lags to use with the regression
-            equation.
+            equation. Range is [0, 100]
+            DefaultValue: 0
             Types: int
 
-        drift_trend_formula:
-            Optional Argument.
-            Specifies the formula used to represent the drift and trend portions
-            of the regression.
-            Note:
-                * Valid only when "algorithm" is set to 'formula'.
-            Types: str
-
        **generic_arguments:
            Specifies the generic keyword arguments of UAF functions.
            Below are the generic keyword arguments:
@@ -136,7 +133,7 @@
            # for the presence of the unit roots using random walk with
            # linear trend for regression.
            uaf_out = DickeyFuller(data=data_series_df,
-                                  algorithm='TREND')
+                                  algorithm='DRIFT')
 
            # Print the result DataFrame.
            print(uaf_out.result)
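
A short, hedged sketch of the unit-root workflow the rewritten docstring describes, assuming a TDSeries named data_series already exists; the Diff() argument values and the column names used to rebuild the TDSeries are illustrative, not taken from the package:

from teradataml import DickeyFuller, Diff, TDSeries

# Step 1: test the original series for unit roots (random walk with drift).
test = DickeyFuller(data=data_series, algorithm='DRIFT')
print(test.result)

# Step 2: if unit roots are present, difference the series once ...
diff_out = Diff(data=data_series, lag=1, differences=1, seasonal_multiplier=0)

# Step 3: ... wrap the differenced output in a new TDSeries (column names
# here are hypothetical) and rerun the test to confirm the roots are gone.
diff_series = TDSeries(data=diff_out.result,
                       id="series_id",
                       row_index="row_i",
                       row_index_style="SEQUENCE",
                       payload_field="out_value",
                       payload_content="REAL")
retest = DickeyFuller(data=diff_series, algorithm='DRIFT')
print(retest.result)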
teradataml/data/hnsw_alter_data.csv (new file)
@@ -0,0 +1,5 @@
+id,array_col
+10,"1,1"
+11,"2,2"
+12,"3,3"
+13,"4,4"
teradataml/data/hnsw_data.csv (new file)
@@ -0,0 +1,10 @@
+id,array_col
+1,"18,18"
+2,"19,19"
+3,"20,20"
+4,"55,55"
+5,"56,56"
+6,"57,57"
+7,"88,88"
+8,"89,89"
+9,"90,90"
teradataml/data/jsons/byom/h2opredict.json
@@ -142,7 +142,7 @@
       "rDescription": " Specifies the model type as 'DAI' or 'OpenSource' for H2O model prediction. ",
       "description": " Specifies the model type as 'DAI' or 'OpenSource' for H2O model prediction. ",
       "datatype": "STRING",
-      "allowsLists": true,
+      "allowsLists": false,
       "rName": "model.type",
       "useInR": true,
       "rOrderNum": 6
teradataml/data/jsons/byom/onnxembeddings.json (new file)
@@ -0,0 +1,266 @@
+{
+  "json_schema_major_version": "1",
+  "json_schema_minor_version": "2",
+  "json_content_version": "1",
+  "function_name": "ONNXEmbeddings",
+  "function_version": "1.0",
+  "function_type": "byom",
+  "function_r_name": "aa.onnx.embeddings",
+  "function_alias_name": "ONNXEmbeddings",
+  "short_description": "This Function generates embeddings values using an ONNX model in Vantage",
+  "long_description": "This function is used to calculate embeddings values in Vantage with a HuggingFace model that has been created outside Vantage and exported to vantage using ONNX format",
+  "input_tables": [
+    {
+      "requiredInputKind": [
+        "PartitionByAny",
+        "PartitionByKey"
+      ],
+      "isOrdered": false,
+      "partitionByOne": false,
+      "partitionByOneInclusive": false,
+      "name": "InputTable",
+      "alternateNames": [],
+      "isRequired": true,
+      "rDescription": "The input table that contains the text from which we generate embedding values ",
+      "description": "The input table that contains the text from which we generate embedding values ",
+      "datatype": "TABLE_ALIAS",
+      "allowsLists": false,
+      "rName": "newdata",
+      "useInR": true,
+      "rOrderNum": 1
+    },
+    {
+      "requiredInputKind": [
+        "Dimension"
+      ],
+      "isOrdered": false,
+      "partitionByOne": false,
+      "partitionByOneInclusive": false,
+      "name": "ModelTable",
+      "alternateNames": [],
+      "isRequired": true,
+      "rDescription": "The model table to be used for calculating embedding values ",
+      "description": "The model table to be used for calculating embedding values ",
+      "datatype": "TABLE_ALIAS",
+      "allowsLists": false,
+      "rName": "modeldata",
+      "useInR": true,
+      "rOrderNum": 2
+    },
+    {
+      "requiredInputKind": [
+        "Dimension"
+      ],
+      "isOrdered": false,
+      "partitionByOne": false,
+      "partitionByOneInclusive": false,
+      "name": "TokenizerTable",
+      "alternateNames": [],
+      "isRequired": true,
+      "rDescription": "The tokenizer table which contains the tokenizer json file ",
+      "description": "The tokenizer table which contains the tokenizer json file ",
+      "datatype": "TABLE_ALIAS",
+      "allowsLists": false,
+      "rName": "tokenizerdata",
+      "useInR": true,
+      "rOrderNum": 3
+    }
+  ],
+  "argument_clauses": [
+    {
+      "targetTable": [
+        "InputTable"
+      ],
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "Accumulate",
+      "alternateNames": [],
+      "isRequired": true,
+      "rDescription": "Specifies the names of input_table columns to copy to the output table.",
+      "description": "Specify the names of the input columns to copy to the output table. ",
+      "datatype": "COLUMNS",
+      "allowsLists": true,
+      "rName": "accumulate",
+      "useInR": true,
+      "rOrderNum": 4
+    },
+    {
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "ModelOutputTensor",
+      "alternateNames": [],
+      "isRequired": true,
+      "rDescription": "Specifies the column of the model's possible output fields that the user wants to calculate and output ",
+      "description": "Specifies the column of the model's possible output fields that the user wants to calculate and output ",
+      "datatype": "STRING",
+      "allowsLists": false,
+      "rName": "model.output.tensor",
+      "useInR": true,
+      "rOrderNum": 5
+    },
+    {
+      "defaultValue": 512,
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "EncodeMaxLength",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": "Specifies the maximum length of the tokenizer output token encodings(only applies for models with symbolic dimensions) ",
+      "description": "Specifies the maximum length of the tokenizer output token encodings(only applies for models with symbolic dimensions) ",
+      "datatype": "INTEGER",
+      "allowsLists": false,
+      "rName": "encode.max.length",
+      "useInR": true,
+      "rOrderNum": 6
+    },
+    {
+      "defaultValue": false,
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "ShowModelProperties",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": " Show default or expanded ModelInputFieldsMap based on input model for defaults or ModelInputFieldsMap for expansion. ",
+      "description": " Show default or expanded ModelInputFieldsMap based on input model for defaults or ModelInputFieldsMap for expansion. ",
+      "datatype": "BOOLEAN",
+      "allowsLists": false,
+      "rName": "show.model.properties",
+      "useInR": true,
+      "rOrderNum": 7
+    },
+    {
+      "defaultValue": "emb_",
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "OutputColumnPrefix",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": "Specifies the column prefix for each of the output columns when using FLOAT32 OutputFormat ",
+      "description": "Specifies the column prefix for each of the output columns when using FLOAT32 OutputFormat ",
+      "datatype": "STRING",
+      "allowsLists": false,
+      "rName": "output.column.prefix",
+      "useInR": true,
+      "rOrderNum": 8
+    },
+    {
+      "defaultValue": "VARBYTE(3072)",
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "OutputFormat",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": "Specifies the output format for the model embeddings output ",
+      "description": "Specifies the output format for the model embeddings output ",
+      "datatype": "STRING",
+      "allowsLists": false,
+      "rName": "output.format",
+      "useInR": true,
+      "rOrderNum": 9
+    },
+    {
+      "permittedValues": [
+        "true",
+        "t",
+        "yes",
+        "y",
+        "1",
+        "false",
+        "f",
+        "no",
+        "n",
+        "0",
+        "*",
+        "current_cached_model"
+      ],
+      "defaultValue": "false",
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "OverwriteCachedModel",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": "Specifies the model name that needs to be removed from the cache. * can also be used to remove the models ",
+      "description": " Specifies the model name that needs to be removed from the cache. * can also be used to remove the models ",
+      "datatype": "STRING",
+      "allowsLists": false,
+      "rName": "overwrite.cached.models",
+      "useInR": true,
+      "rOrderNum": 10
+    },
+    {
+      "defaultValue": false,
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "IsDebug",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": "Print additional information in trace table regarding execution of ONNXPredict ",
+      "description": " Print additional information in trace table regarding execution of ONNXPredict ",
+      "datatype": "BOOLEAN",
+      "allowsLists": false,
+      "rName": "is.debug",
+      "useInR": true,
+      "rOrderNum": 11
+    },
+    {
+      "defaultValue": true,
+      "checkDuplicate": true,
+      "allowedTypes": [],
+      "allowedTypeGroups": [
+        "ALL"
+      ],
+      "matchLengthOfArgument": "",
+      "allowPadding": true,
+      "name": "EnableMemoryCheck",
+      "alternateNames": [],
+      "isRequired": false,
+      "rDescription": "If true, verifies if there is enough native memory for large models ",
+      "description": "If true, verifies if there is enough native memory for large models ",
+      "datatype": "BOOLEAN",
+      "allowsLists": false,
+      "rName": "enable.memory.check",
+      "useInR": true,
+      "rOrderNum": 12
+    }
+  ]
+}
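
Judging by the rName fields above, the Python keyword arguments presumably map to newdata/modeldata/tokenizerdata, accumulate, model_output_tensor, and so on (dots becoming underscores); that mapping, and every table and model name below, is an assumption. A minimal sketch:

from teradataml import DataFrame, ONNXEmbeddings, retrieve_byom

# The model and tokenizer are assumed to have been saved with save_byom() first;
# amazon_reviews_25 mirrors the sample CSV added in this release.
reviews = DataFrame("amazon_reviews_25")
model = retrieve_byom("minilm_onnx", table_name="embedding_models")
tokenizer = retrieve_byom("minilm_tokenizer", table_name="embedding_tokenizers")

emb = ONNXEmbeddings(newdata=reviews,
                     modeldata=model,
                     tokenizerdata=tokenizer,
                     accumulate="rev_id",                       # hypothetical id column
                     model_output_tensor="sentence_embedding",  # a field the model exposes
                     output_format="VARBYTE(3072)")             # default per the JSON
print(emb.result)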
teradataml/data/jsons/sqle/17.20/TD_Shap.json
@@ -96,7 +96,6 @@
       "name": "TrainingFunction",
       "isRequired": true,
       "datatype": "STRING",
-      "defaultValue": "TD_GLM",
       "isOutputColumn": false,
       "alternateNames": [],
       "rDescription": "Specifies the model type name.",
teradataml/data/jsons/sqle/17.20/TD_TextParser.json
@@ -80,7 +80,7 @@
       "description": "Specifies whether to convert input text to lowercase.",
       "datatype": "BOOLEAN",
       "allowsLists": false,
-      "rName": "covert.to.lowercase",
+      "rName": "convert.to.lowercase",
       "useInR": true,
       "rOrderNum": 4
     },