teradataml 20.0.0.6__py3-none-any.whl → 20.0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of teradataml might be problematic. Click here for more details.

Files changed (96) hide show
  1. teradataml/README.md +210 -0
  2. teradataml/__init__.py +1 -1
  3. teradataml/_version.py +1 -1
  4. teradataml/analytics/analytic_function_executor.py +162 -76
  5. teradataml/analytics/byom/__init__.py +1 -1
  6. teradataml/analytics/json_parser/__init__.py +2 -0
  7. teradataml/analytics/json_parser/analytic_functions_argument.py +95 -2
  8. teradataml/analytics/json_parser/metadata.py +22 -4
  9. teradataml/analytics/sqle/DecisionTreePredict.py +3 -2
  10. teradataml/analytics/sqle/NaiveBayesPredict.py +3 -2
  11. teradataml/analytics/sqle/__init__.py +3 -0
  12. teradataml/analytics/utils.py +4 -1
  13. teradataml/automl/__init__.py +2369 -464
  14. teradataml/automl/autodataprep/__init__.py +15 -0
  15. teradataml/automl/custom_json_utils.py +184 -112
  16. teradataml/automl/data_preparation.py +113 -58
  17. teradataml/automl/data_transformation.py +154 -53
  18. teradataml/automl/feature_engineering.py +113 -53
  19. teradataml/automl/feature_exploration.py +548 -25
  20. teradataml/automl/model_evaluation.py +260 -32
  21. teradataml/automl/model_training.py +399 -206
  22. teradataml/clients/auth_client.py +2 -2
  23. teradataml/common/aed_utils.py +11 -2
  24. teradataml/common/bulk_exposed_utils.py +4 -2
  25. teradataml/common/constants.py +62 -2
  26. teradataml/common/garbagecollector.py +50 -21
  27. teradataml/common/messagecodes.py +47 -2
  28. teradataml/common/messages.py +19 -1
  29. teradataml/common/sqlbundle.py +23 -6
  30. teradataml/common/utils.py +116 -10
  31. teradataml/context/aed_context.py +16 -10
  32. teradataml/data/Employee.csv +5 -0
  33. teradataml/data/Employee_Address.csv +4 -0
  34. teradataml/data/Employee_roles.csv +5 -0
  35. teradataml/data/JulesBelvezeDummyData.csv +100 -0
  36. teradataml/data/byom_example.json +5 -0
  37. teradataml/data/creditcard_data.csv +284618 -0
  38. teradataml/data/docs/byom/docs/ONNXSeq2Seq.py +255 -0
  39. teradataml/data/docs/sqle/docs_17_10/NGramSplitter.py +1 -1
  40. teradataml/data/docs/sqle/docs_17_20/NGramSplitter.py +1 -1
  41. teradataml/data/docs/sqle/docs_17_20/TextParser.py +1 -1
  42. teradataml/data/jsons/byom/ONNXSeq2Seq.json +287 -0
  43. teradataml/data/jsons/sqle/20.00/AI_AnalyzeSentiment.json +3 -7
  44. teradataml/data/jsons/sqle/20.00/AI_AskLLM.json +3 -7
  45. teradataml/data/jsons/sqle/20.00/AI_DetectLanguage.json +3 -7
  46. teradataml/data/jsons/sqle/20.00/AI_ExtractKeyPhrases.json +3 -7
  47. teradataml/data/jsons/sqle/20.00/AI_MaskPII.json +3 -7
  48. teradataml/data/jsons/sqle/20.00/AI_RecognizeEntities.json +3 -7
  49. teradataml/data/jsons/sqle/20.00/AI_RecognizePIIEntities.json +3 -7
  50. teradataml/data/jsons/sqle/20.00/AI_TextClassifier.json +3 -7
  51. teradataml/data/jsons/sqle/20.00/AI_TextEmbeddings.json +3 -7
  52. teradataml/data/jsons/sqle/20.00/AI_TextSummarize.json +3 -7
  53. teradataml/data/jsons/sqle/20.00/AI_TextTranslate.json +3 -7
  54. teradataml/data/jsons/sqle/20.00/TD_API_AzureML.json +151 -0
  55. teradataml/data/jsons/sqle/20.00/TD_API_Sagemaker.json +182 -0
  56. teradataml/data/jsons/sqle/20.00/TD_API_VertexAI.json +183 -0
  57. teradataml/data/load_example_data.py +29 -11
  58. teradataml/data/payment_fraud_dataset.csv +10001 -0
  59. teradataml/data/teradataml_example.json +67 -0
  60. teradataml/dataframe/copy_to.py +714 -54
  61. teradataml/dataframe/dataframe.py +1153 -33
  62. teradataml/dataframe/dataframe_utils.py +8 -3
  63. teradataml/dataframe/functions.py +168 -1
  64. teradataml/dataframe/setop.py +4 -1
  65. teradataml/dataframe/sql.py +141 -9
  66. teradataml/dbutils/dbutils.py +470 -35
  67. teradataml/dbutils/filemgr.py +1 -1
  68. teradataml/hyperparameter_tuner/optimizer.py +456 -142
  69. teradataml/lib/aed_0_1.dll +0 -0
  70. teradataml/lib/libaed_0_1.dylib +0 -0
  71. teradataml/lib/libaed_0_1.so +0 -0
  72. teradataml/lib/libaed_0_1_aarch64.so +0 -0
  73. teradataml/scriptmgmt/UserEnv.py +234 -34
  74. teradataml/scriptmgmt/lls_utils.py +43 -17
  75. teradataml/sdk/_json_parser.py +1 -1
  76. teradataml/sdk/api_client.py +9 -6
  77. teradataml/sdk/modelops/_client.py +3 -0
  78. teradataml/series/series.py +12 -7
  79. teradataml/store/feature_store/constants.py +601 -234
  80. teradataml/store/feature_store/feature_store.py +2886 -616
  81. teradataml/store/feature_store/mind_map.py +639 -0
  82. teradataml/store/feature_store/models.py +5831 -214
  83. teradataml/store/feature_store/utils.py +390 -0
  84. teradataml/table_operators/table_operator_util.py +1 -1
  85. teradataml/table_operators/templates/dataframe_register.template +6 -2
  86. teradataml/table_operators/templates/dataframe_udf.template +6 -2
  87. teradataml/utils/docstring.py +527 -0
  88. teradataml/utils/dtypes.py +93 -0
  89. teradataml/utils/internal_buffer.py +2 -2
  90. teradataml/utils/utils.py +41 -2
  91. teradataml/utils/validators.py +694 -17
  92. {teradataml-20.0.0.6.dist-info → teradataml-20.0.0.7.dist-info}/METADATA +213 -2
  93. {teradataml-20.0.0.6.dist-info → teradataml-20.0.0.7.dist-info}/RECORD +96 -81
  94. {teradataml-20.0.0.6.dist-info → teradataml-20.0.0.7.dist-info}/WHEEL +0 -0
  95. {teradataml-20.0.0.6.dist-info → teradataml-20.0.0.7.dist-info}/top_level.txt +0 -0
  96. {teradataml-20.0.0.6.dist-info → teradataml-20.0.0.7.dist-info}/zip-safe +0 -0
@@ -0,0 +1,255 @@
1
+ def ONNXSeq2Seq(newdata=None, modeldata=None, tokenizerdata=None, accumulate=None, model_output_tensor=None,
2
+ encode_max_length=512, show_model_properties=False, output_length=1000,
3
+ overwrite_cached_models="false", is_debug=False, skip_special_tokens=True,
4
+ enable_memory_check=False, const_=None, **generic_arguments):
5
+ """
6
+ DESCRIPTION:
7
+ The ONNXSeq2Seq() function applies a sequence-to-sequence model that has
8
+ been created outside Vantage and exported to Vantage in ONNX format
9
+ to input data within Teradata Vantage.
10
+
11
+ PARAMETERS:
12
+ newdata:
13
+ Required Argument.
14
+ Specifies the input teradataml DataFrame that contains
15
+ the data to be scored.
16
+ Types: teradataml DataFrame
17
+
18
+ modeldata:
19
+ Required Argument.
20
+ Specifies the model teradataml DataFrame to be used for
21
+ scoring.
22
+ Note:
23
+ * Use `retrieve_byom()` to get the teradataml DataFrame that contains the model.
24
+ Types: teradataml DataFrame
25
+
26
+ tokenizerdata:
27
+ Required Argument.
28
+ Specifies the tokenizer teradataml DataFrame
29
+ which contains the tokenizer json file.
30
+ Types: teradataml DataFrame
31
+
32
+ accumulate:
33
+ Required Argument.
34
+ Specifies the name(s) of input teradataml DataFrame column(s) to
35
+ copy to the output. By default, the function copies all input
36
+ teradataml DataFrame columns to the output.
37
+ Types: str OR list of Strings (str) OR Feature OR list of Features
38
+
39
+ model_output_tensor:
40
+ Required Argument.
41
+ Specifies the column of the model's possible output fields
42
+ that the user wants to calculate and output.
43
+ Types: str
44
+
45
+ encode_max_length:
46
+ Optional Argument.
47
+ Specifies the maximum length of the tokenizer output token
48
+             encodings (only applies for models with symbolic dimensions).
49
+ Default Value: 512
50
+ Types: int
51
+
52
+ show_model_properties:
53
+ Optional Argument.
54
+ Specifies the default or expanded "model_input_fields_map" based on
55
+ input model for defaults or "model_input_fields_map" for expansion.
56
+ Default Value: False
57
+ Types: bool
58
+
59
+ output_length:
60
+ Optional Argument.
61
+ Specifies the output length for the model sequence output, in number of
62
+ characters for the VARCHAR output. If the value exceeds the maximum Unicode
63
+ VARCHAR size of 32000, a CLOB is created automatically.
64
+ Default Value: 1000
65
+ Types: int
66
+
67
+ overwrite_cached_models:
68
+ Optional Argument.
69
+ Specifies the model name that needs to be removed from the cache.
70
+ When a model loaded into the memory of the node fits in the cache,
71
+ it stays in the cache until being evicted to make space for another
72
+ model that needs to be loaded. Therefore, a model can remain in the
73
+ cache even after the completion of function execution. Other functions
74
+ that use the same model can use it, saving the cost of reloading it
75
+ into memory. User should overwrite a cached model only when it is updated,
76
+ to make sure that the Predict function uses the updated model instead
77
+ of the cached model.
78
+ Note:
79
+ Do not use the "overwrite_cached_models" argument except when user
80
+ is trying to replace a previously cached model. Using the argument
81
+ in other cases, including in concurrent queries or multiple times
82
+             within a short period of time, may lead to an OOM error.
83
+ Default Value: "false"
84
+ Permitted Values: true, t, yes, y, 1, false, f, no, n, 0, *,
85
+ current_cached_model
86
+ Types: str
87
+
88
+ is_debug:
89
+ Optional Argument.
90
+ Specifies whether debug statements are added to a trace table or not.
91
+ When set to True, debug statements are added to a trace table that must
92
+ be created beforehand.
93
+ Notes:
94
+ * Only available with BYOM version 3.00.00.02 and later.
95
+ * To save logs for debugging, user can create an error log by using
96
+ the is_debug=True parameter in the predict functions.
97
+ A database trace table is used to collect this information which
98
+ does impact performance of the function, so using small data input
99
+ sizes is recommended.
100
+ * To generate this log, user must do the following:
101
+ 1. Create a global trace table with columns vproc_ID BYTE(2),
102
+ Sequence INTEGER, Trace_Output VARCHAR(31000)
103
+ 2. Turn on session function tracing:
104
+ SET SESSION FUNCTION TRACE USING '' FOR TABLE <trace_table_name_created_in_step_1>;
105
+ 3. Execute function with "is_debug" set to True.
106
+ 4. Debug information is logged to the table created in step 1.
107
+ 5. To turn off the logging, either disconnect from the session or
108
+ run following SQL:
109
+ SET SESSION FUNCTION TRACE OFF;
110
+ The trace table is temporary and the information is deleted if user
111
+ logs off from the session. If long term persistence is necessary,
112
+ user can copy the table to a permanent table before leaving the
113
+ session.
114
+ Default Value: False
115
+ Types: bool
116
+
117
+ enable_memory_check:
118
+ Optional Argument.
119
+ Specifies whether there is enough native memory for large models.
120
+             Default Value: False
121
+ Types: bool
122
+
123
+ skip_special_tokens:
124
+ Optional Argument.
125
+ Specifies whether to skip special tokens in the output.
126
+ Default Value: True
127
+ Types: bool
128
+
129
+ const_*:
130
+ Optional Argument.
131
+ Specifies the constant value to be used as a model input by using
132
+ the argument name const_<field_name>. Providing constant values as
133
+ parameters reduces system overhead, since the value will not be
134
+ added to every row being scored.
135
+ Types: str
136
+
137
+ **generic_arguments:
138
+ Specifies the generic keyword arguments SQLE functions accept. Below
139
+ are the generic keyword arguments:
140
+ persist:
141
+ Optional Argument.
142
+ Specifies whether to persist the results of the
143
+ function in a table or not. When set to True,
144
+ results are persisted in a table; otherwise,
145
+ results are garbage collected at the end of the
146
+ session.
147
+ Default Value: False
148
+ Types: bool
149
+
150
+ volatile:
151
+ Optional Argument.
152
+ Specifies whether to put the results of the
153
+ function in a volatile table or not. When set to
154
+ True, results are stored in a volatile table,
155
+ otherwise not.
156
+ Default Value: False
157
+ Types: bool
158
+
159
+ Function allows the user to partition, hash, order or local
160
+ order the input data. These generic arguments are available
161
+ for each argument that accepts teradataml DataFrame as
162
+ input and can be accessed as:
163
+ * "<input_data_arg_name>_partition_column" accepts str or
164
+ list of str (Strings) or PartitionKind
165
+ * "<input_data_arg_name>_hash_column" accepts str or list
166
+ of str (Strings)
167
+ * "<input_data_arg_name>_order_column" accepts str or list
168
+ of str (Strings)
169
+ * "local_order_<input_data_arg_name>" accepts boolean
170
+ Note:
171
+ These generic arguments are supported by teradataml if
172
+ the underlying SQL Engine function supports, else an
173
+ exception is raised.
174
+
175
+ RETURNS:
176
+ Instance of ONNXSeq2Seq.
177
+ Output teradataml DataFrame can be accessed using attribute
178
+ references, such as ONNXSeq2Seq.<attribute_name>.
179
+ Output teradataml DataFrame attribute name is:
180
+ result
181
+
182
+
183
+ RAISES:
184
+ TeradataMlException, TypeError, ValueError
185
+
186
+
187
+ EXAMPLES:
188
+ # Notes:
189
+ # 1. Get the connection to Vantage to execute the function.
190
+ # 2. One must import the required functions mentioned in
191
+ # the example from teradataml.
192
+ # 3. Function will raise error if not supported on the Vantage
193
+ # user is connected to.
194
+ # 4. To execute BYOM functions, set 'configure.byom_install_location' to the
195
+ # database name where BYOM functions are installed.
196
+
197
+ # Import required libraries / functions.
198
+ import os, teradataml
199
+ from teradataml import get_connection, DataFrame
200
+ from teradataml import save_byom, retrieve_byom, load_example_data
201
+ from teradataml import configure, display_analytic_functions, execute_sql
202
+
203
+ # Load example data.
204
+ load_example_data("byom", "JulesBelvezeDummyData")
205
+
206
+ # Create teradataml DataFrame objects.
207
+ jules_data = DataFrame.from_table("JulesBelvezeDummyData")
208
+
209
+ # Assigning txt column name to rev_txt column.
210
+ jules_data = jules_data[jules_data.id <= 5]
211
+ jules_data = jules_data.assign(txt=jules_data.content)
212
+
213
+ # Set install location of BYOM functions.
214
+ configure.byom_install_location = "td_mldb"
215
+
216
+ # Check the list of available analytic functions.
217
+ display_analytic_functions(type="BYOM")
218
+
219
+ # Note: User must have the model and tokenizer data already loaded in the database.
220
+ # Retrieve model.
221
+ modeldata = retrieve_byom("t5-small-headline-generator_fixed", table_name="seq_models")
222
+ tokenizerdata = retrieve_byom("t5-small-headline-generator_fixed", table_name="seq_tokenizers")
223
+
224
+ # Assigning tokenizer_id, tokenizer to model_id, model in Seq2Seq_tokenizers.
225
+ tokenizerdata_a1 = tokenizerdata.assign(tokenizer_id=tokenizerdata.model_id)
226
+ tokenizerdata_a2 = tokenizerdata_a1.assign(tokenizer=tokenizerdata_a1.model)
227
+
228
+ # Example 1: Showcasing the model properties of t5-small-headline-generator_fixed model
229
+ # that has been created outside the Vantage.
230
+ ONNXSeq2Seq_out = ONNXSeq2Seq(modeldata = modeldata,
231
+ tokenizerdata=tokenizerdata_a2.select(['tokenizer_id', 'tokenizer']),
232
+ newdata=jules_data.select(["id", "txt"]),
233
+ accumulate='id',
234
+ model_output_tensor= 'sequences',
235
+ show_model_properties=True)
236
+
237
+ # Print the results.
238
+ print(ONNXSeq2Seq_out.result)
239
+
240
+ # Example 2: Using ONNXSeq2Seq with constant values.
241
+ ONNXSeq2Seq_out = ONNXSeq2Seq(modeldata = modeldata,
242
+ tokenizerdata=tokenizerdata_a2.select(['tokenizer_id', 'tokenizer']),
243
+ newdata=jules_data.select(["id", "txt"]),
244
+ accumulate='id',
245
+ model_output_tensor= 'sequences',
246
+ const_min_length=10,
247
+ const_max_length=84,
248
+ const_num_beams=4,
249
+ const_repetition_penalty=1.2,
250
+ const_length_penalty=2.0,
251
+ const_num_return_sequences=1)
252
+
253
+ # Print the results.
254
+ print(ONNXSeq2Seq_out.result)
255
+ """
@@ -37,7 +37,7 @@ def NGramSplitter(data=None, text_column=None, delimiter=" ", grams=None, overla
37
37
  default value is the set of all whitespace characters which includes
38
38
  the characters for space, tab, newline, carriage return and some
39
39
  others.
40
- Default Value: "[\s]+"
40
+ Default Value: "[\\s]+"
41
41
  Types: str
42
42
 
43
43
  grams:
@@ -37,7 +37,7 @@ def NGramSplitter(data=None, text_column=None, delimiter=" ", grams=None, overla
37
37
  default value is the set of all whitespace characters which includes
38
38
  the characters for space, tab, newline, carriage return and some
39
39
  others.
40
- Default Value: "[\s]+"
40
+ Default Value: "[\\s]+"
41
41
  Types: str
42
42
 
43
43
  grams:
@@ -1,7 +1,7 @@
1
1
  def TextParser(data=None, object=None, text_column=None, enforce_token_limit=False,
2
2
  convert_to_lowercase=True, stem_tokens=False, remove_stopwords=False,
3
3
  accumulate=None, delimiter=" \t\n\f\r", delimiter_regex=None,
4
- punctuation="!#$%&()*+,-./:;?@\^_`{|}~", token_col_name=None,
4
+ punctuation=r"!#$%&()*+,-./:;?@\^_`{|}~", token_col_name=None,
5
5
  doc_id_column=None, list_positions=False, token_frequency=False,
6
6
  output_by_word=True, **generic_arguments):
7
7
  """
@@ -0,0 +1,287 @@
1
+ {
2
+ "json_schema_major_version": "1",
3
+ "json_schema_minor_version": "2",
4
+ "json_content_version": "1",
5
+ "function_name": "ONNXSeq2Seq",
6
+ "function_version": "1.0",
7
+ "function_type": "byom",
8
+ "function_r_name": "aa.onnx.seq2seq",
9
+ "function_alias_name": "ONNXSeq2Seq",
10
+   "short_description": "This function generates sequence values using an ONNX model in Vantage",
11
+   "long_description": "This function is used to calculate sequence values in Vantage with a HuggingFace model that has been created outside Vantage and exported to Vantage using ONNX format",
12
+ "input_tables": [
13
+ {
14
+ "requiredInputKind": [
15
+ "PartitionByAny",
16
+ "PartitionByKey"
17
+ ],
18
+ "isOrdered": false,
19
+ "partitionByOne": false,
20
+ "partitionByOneInclusive": false,
21
+ "name": "InputTable",
22
+ "alternateNames": [],
23
+ "isRequired": true,
24
+ "rDescription": "The input table that contains the text from which we generate sequence values ",
25
+ "description": "The input table that contains the text from which we generate sequence values ",
26
+ "datatype": "TABLE_ALIAS",
27
+ "allowsLists": false,
28
+ "rName": "newdata",
29
+ "useInR": true,
30
+ "rOrderNum": 1
31
+ },
32
+ {
33
+ "requiredInputKind": [
34
+ "Dimension"
35
+ ],
36
+ "isOrdered": false,
37
+ "partitionByOne": false,
38
+ "partitionByOneInclusive": false,
39
+ "name": "ModelTable",
40
+ "alternateNames": [],
41
+ "isRequired": true,
42
+ "rDescription": "The model table to be used for calculating sequence values ",
43
+ "description": "The model table to be used for calculating sequence values ",
44
+ "datatype": "TABLE_ALIAS",
45
+ "allowsLists": false,
46
+ "rName": "modeldata",
47
+ "useInR": true,
48
+ "rOrderNum": 2
49
+ },
50
+ {
51
+ "requiredInputKind": [
52
+ "Dimension"
53
+ ],
54
+ "isOrdered": false,
55
+ "partitionByOne": false,
56
+ "partitionByOneInclusive": false,
57
+ "name": "TokenizerTable",
58
+ "alternateNames": [],
59
+ "isRequired": true,
60
+ "rDescription": "The tokenizer table which contains the tokenizer json file ",
61
+ "description": "The tokenizer table which contains the tokenizer json file ",
62
+ "datatype": "TABLE_ALIAS",
63
+ "allowsLists": false,
64
+ "rName": "tokenizerdata",
65
+ "useInR": true,
66
+ "rOrderNum": 3
67
+ }
68
+ ],
69
+ "argument_clauses": [
70
+ {
71
+ "targetTable": [
72
+ "InputTable"
73
+ ],
74
+ "checkDuplicate": true,
75
+ "allowedTypes": [],
76
+ "allowedTypeGroups": [
77
+ "ALL"
78
+ ],
79
+ "matchLengthOfArgument": "",
80
+ "allowPadding": true,
81
+ "name": "Accumulate",
82
+ "alternateNames": [],
83
+ "isRequired": true,
84
+ "rDescription": "Specifies the names of input_table columns to copy to the output table.",
85
+ "description": "Specify the names of the input columns to copy to the output table. ",
86
+ "datatype": "COLUMNS",
87
+ "allowsLists": true,
88
+ "rName": "accumulate",
89
+ "useInR": true,
90
+ "rOrderNum": 4
91
+ },
92
+ {
93
+ "checkDuplicate": true,
94
+ "allowedTypes": [],
95
+ "allowedTypeGroups": [
96
+ "ALL"
97
+ ],
98
+ "matchLengthOfArgument": "",
99
+ "allowPadding": true,
100
+ "name": "ModelOutputTensor",
101
+ "alternateNames": [],
102
+ "isRequired": true,
103
+ "rDescription": "Specifies the column of the model's possible output fields that the user wants to calculate and output ",
104
+ "description": "Specifies the column of the model's possible output fields that the user wants to calculate and output ",
105
+ "datatype": "STRING",
106
+ "allowsLists": false,
107
+ "rName": "model.output.tensor",
108
+ "useInR": true,
109
+ "rOrderNum": 5
110
+ },
111
+ {
112
+ "defaultValue": 512,
113
+ "checkDuplicate": true,
114
+ "allowedTypes": [],
115
+ "allowedTypeGroups": [
116
+ "ALL"
117
+ ],
118
+ "matchLengthOfArgument": "",
119
+ "allowPadding": true,
120
+ "name": "EncodeMaxLength",
121
+ "alternateNames": [],
122
+ "isRequired": false,
123
+       "rDescription": "Specifies the maximum length of the tokenizer output token encodings (only applies for models with symbolic dimensions) ",
124
+       "description": "Specifies the maximum length of the tokenizer output token encodings (only applies for models with symbolic dimensions) ",
125
+ "datatype": "INTEGER",
126
+ "allowsLists": false,
127
+ "rName": "encode.max.length",
128
+ "useInR": true,
129
+ "rOrderNum": 6
130
+ },
131
+ {
132
+ "defaultValue": false,
133
+ "checkDuplicate": true,
134
+ "allowedTypes": [],
135
+ "allowedTypeGroups": [
136
+ "ALL"
137
+ ],
138
+ "matchLengthOfArgument": "",
139
+ "allowPadding": true,
140
+ "name": "ShowModelProperties",
141
+ "alternateNames": [],
142
+ "isRequired": false,
143
+ "rDescription": "Show input and output tensors for the ONNX model.",
144
+ "description": "Show input and output tensors for the ONNX model.",
145
+ "datatype": "BOOLEAN",
146
+ "allowsLists": false,
147
+ "rName": "show.model.properties",
148
+ "useInR": true,
149
+ "rOrderNum": 7
150
+ },
151
+ {
152
+ "defaultValue": 1000,
153
+ "checkDuplicate": true,
154
+ "allowedTypes": [],
155
+ "allowedTypeGroups": [
156
+ "ALL"
157
+ ],
158
+ "matchLengthOfArgument": "",
159
+ "allowPadding": true,
160
+ "name": "OutputLength",
161
+ "alternateNames": [],
162
+ "isRequired": false,
163
+       "rDescription": "Specifies the output length for the model sequence output ",
164
+       "description": "Specifies the output length for the model sequence output ",
165
+ "datatype": "INTEGER",
166
+ "allowsLists": false,
167
+ "rName": "output.length",
168
+ "useInR": true,
169
+ "rOrderNum": 8
170
+ },
171
+ {
172
+ "permittedValues": [
173
+ "true",
174
+ "t",
175
+ "yes",
176
+ "y",
177
+ "1",
178
+ "false",
179
+ "f",
180
+ "no",
181
+ "n",
182
+ "0",
183
+ "*",
184
+ "current_cached_model"
185
+ ],
186
+ "defaultValue": "false",
187
+ "checkDuplicate": true,
188
+ "allowedTypes": [],
189
+ "allowedTypeGroups": [
190
+ "ALL"
191
+ ],
192
+ "matchLengthOfArgument": "",
193
+ "allowPadding": true,
194
+ "name": "OverwriteCachedModel",
195
+ "alternateNames": [],
196
+ "isRequired": false,
197
+ "rDescription": "Specifies the model name that needs to be removed from the cache. * can also be used to remove the models ",
198
+ "description": " Specifies the model name that needs to be removed from the cache. * can also be used to remove the models ",
199
+ "datatype": "STRING",
200
+ "allowsLists": false,
201
+ "rName": "overwrite.cached.models",
202
+ "useInR": true,
203
+ "rOrderNum": 9
204
+ },
205
+ {
206
+ "defaultValue": false,
207
+ "checkDuplicate": true,
208
+ "allowedTypes": [],
209
+ "allowedTypeGroups": [
210
+ "ALL"
211
+ ],
212
+ "matchLengthOfArgument": "",
213
+ "allowPadding": true,
214
+ "name": "IsDebug",
215
+ "alternateNames": [],
216
+ "isRequired": false,
217
+ "rDescription": "Print additional information in trace table regarding execution of ONNXPredict ",
218
+ "description": " Print additional information in trace table regarding execution of ONNXPredict ",
219
+ "datatype": "BOOLEAN",
220
+ "allowsLists": false,
221
+ "rName": "is.debug",
222
+ "useInR": true,
223
+ "rOrderNum": 10
224
+ },
225
+ {
226
+ "defaultValue": true,
227
+ "checkDuplicate": true,
228
+ "allowedTypes": [],
229
+ "allowedTypeGroups": [
230
+ "ALL"
231
+ ],
232
+ "matchLengthOfArgument": "",
233
+ "allowPadding": true,
234
+ "name": "EnableMemoryCheck",
235
+ "alternateNames": [],
236
+ "isRequired": false,
237
+ "rDescription": "If true, verifies if there is enough native memory for large models ",
238
+ "description": "If true, verifies if there is enough native memory for large models ",
239
+ "datatype": "BOOLEAN",
240
+ "allowsLists": false,
241
+ "rName": "enable.memory.check",
242
+ "useInR": true,
243
+ "rOrderNum": 11
244
+ },
245
+ {
246
+ "defaultValue": true,
247
+ "checkDuplicate": true,
248
+ "allowedTypes": [],
249
+ "allowedTypeGroups": [
250
+ "ALL"
251
+ ],
252
+ "matchLengthOfArgument": "",
253
+ "allowPadding": true,
254
+ "name": "SkipSpecialTokens",
255
+ "alternateNames": [],
256
+ "isRequired": false,
257
+ "rDescription": "If true, skips special tokens on output ",
258
+ "description": "If true, skips special tokens on output ",
259
+ "datatype": "BOOLEAN",
260
+ "allowsLists": false,
261
+ "rName": "skip.special.tokens",
262
+ "useInR": true,
263
+ "rOrderNum": 12
264
+ },
265
+ {
266
+ "checkDuplicate": true,
267
+ "allowedTypes": ["NUMERIC","STRING"],
268
+ "allowedTypeGroups": [
269
+ "ALL"
270
+ ],
271
+ "matchLengthOfArgument": "",
272
+ "allowPadding": true,
273
+ "name": "Const_",
274
+ "regexMatch": true,
275
+ "matchName": "const_",
276
+ "alternateNames": [],
277
+ "isRequired": false,
278
+ "rDescription": "Argument starts with prefix Const_ and then the name of a constant and its value. Can be a string or a number. ",
279
+ "description": "Argument starts with prefix Const_ and then the name of a constant and its value. Can be a string or a number.",
280
+ "datatype": ["STRING","NUMERIC"],
281
+ "allowsLists": false,
282
+ "rName": "const_*",
283
+ "useInR": true,
284
+ "rOrderNum": 13
285
+ }
286
+ ]
287
+ }
@@ -268,20 +268,16 @@
268
268
  },
269
269
 
270
270
  {
271
- "defaultValue": true,
272
- "permittedValues": [
273
- "true",
274
- "false"
275
- ],
271
+ "permittedValues": ["TRUE", "FALSE"],
276
272
  "isOutputColumn": false,
277
273
  "name": "isDebug",
278
274
  "alternateNames": [],
279
275
  "isRequired": false,
280
276
  "rDescription": "Specify whether error logging is required.",
281
277
  "description": "Specify whether error logging is required.",
282
- "datatype": "BOOLEAN",
278
+ "datatype": "STRING",
283
279
  "allowsLists": false,
284
- "rName": "isDebug",
280
+ "rName": "is.debug",
285
281
  "useInR": true,
286
282
  "rOrderNum": 17
287
283
  },
@@ -291,20 +291,16 @@
291
291
  },
292
292
 
293
293
  {
294
- "defaultValue": true,
295
- "permittedValues": [
296
- "true",
297
- "false"
298
- ],
294
+ "permittedValues": ["TRUE", "FALSE"],
299
295
  "isOutputColumn": false,
300
296
  "name": "isDebug",
301
297
  "alternateNames": [],
302
298
  "isRequired": false,
303
299
  "rDescription": "Specify whether error logging is required.",
304
300
  "description": "Specify whether error logging is required.",
305
- "datatype": "BOOLEAN",
301
+ "datatype": "STRING",
306
302
  "allowsLists": false,
307
- "rName": "isDebug",
303
+ "rName": "is.debug",
308
304
  "useInR": true,
309
305
  "rOrderNum": 17
310
306
  },
@@ -268,20 +268,16 @@
268
268
  },
269
269
 
270
270
  {
271
- "defaultValue": true,
272
- "permittedValues": [
273
- "true",
274
- "false"
275
- ],
271
+ "permittedValues": ["TRUE", "FALSE"],
276
272
  "isOutputColumn": false,
277
273
  "name": "isDebug",
278
274
  "alternateNames": [],
279
275
  "isRequired": false,
280
276
  "rDescription": "Specify whether error logging is required.",
281
277
  "description": "Specify whether error logging is required.",
282
- "datatype": "BOOLEAN",
278
+ "datatype": "STRING",
283
279
  "allowsLists": false,
284
- "rName": "isDebug",
280
+ "rName": "is.debug",
285
281
  "useInR": true,
286
282
  "rOrderNum": 17
287
283
  },
@@ -268,20 +268,16 @@
268
268
  },
269
269
 
270
270
  {
271
- "defaultValue": true,
272
- "permittedValues": [
273
- "true",
274
- "false"
275
- ],
271
+ "permittedValues": ["TRUE", "FALSE"],
276
272
  "isOutputColumn": false,
277
273
  "name": "isDebug",
278
274
  "alternateNames": [],
279
275
  "isRequired": false,
280
276
  "rDescription": "Specify whether error logging is required.",
281
277
  "description": "Specify whether error logging is required.",
282
- "datatype": "BOOLEAN",
278
+ "datatype": "STRING",
283
279
  "allowsLists": false,
284
- "rName": "isDebug",
280
+ "rName": "is.debug",
285
281
  "useInR": true,
286
282
  "rOrderNum": 17
287
283
  },