teradataml 20.0.0.0__py3-none-any.whl → 20.0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of teradataml might be problematic. Click here for more details.

Files changed (263) hide show
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/LICENSE.pdf +0 -0
  3. teradataml/README.md +183 -0
  4. teradataml/__init__.py +6 -3
  5. teradataml/_version.py +2 -2
  6. teradataml/analytics/__init__.py +3 -2
  7. teradataml/analytics/analytic_function_executor.py +275 -40
  8. teradataml/analytics/analytic_query_generator.py +92 -0
  9. teradataml/analytics/byom/__init__.py +3 -2
  10. teradataml/analytics/json_parser/metadata.py +1 -0
  11. teradataml/analytics/json_parser/utils.py +17 -21
  12. teradataml/analytics/meta_class.py +40 -1
  13. teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
  14. teradataml/analytics/sqle/__init__.py +10 -2
  15. teradataml/analytics/table_operator/__init__.py +3 -2
  16. teradataml/analytics/uaf/__init__.py +21 -2
  17. teradataml/analytics/utils.py +62 -1
  18. teradataml/analytics/valib.py +1 -1
  19. teradataml/automl/__init__.py +1553 -319
  20. teradataml/automl/custom_json_utils.py +139 -61
  21. teradataml/automl/data_preparation.py +276 -319
  22. teradataml/automl/data_transformation.py +163 -81
  23. teradataml/automl/feature_engineering.py +402 -239
  24. teradataml/automl/feature_exploration.py +9 -2
  25. teradataml/automl/model_evaluation.py +48 -51
  26. teradataml/automl/model_training.py +291 -189
  27. teradataml/catalog/byom.py +8 -8
  28. teradataml/catalog/model_cataloging_utils.py +1 -1
  29. teradataml/clients/auth_client.py +133 -0
  30. teradataml/clients/pkce_client.py +1 -1
  31. teradataml/common/aed_utils.py +3 -2
  32. teradataml/common/constants.py +48 -6
  33. teradataml/common/deprecations.py +13 -7
  34. teradataml/common/garbagecollector.py +156 -120
  35. teradataml/common/messagecodes.py +6 -1
  36. teradataml/common/messages.py +3 -1
  37. teradataml/common/sqlbundle.py +1 -1
  38. teradataml/common/utils.py +103 -11
  39. teradataml/common/wrapper_utils.py +1 -1
  40. teradataml/context/context.py +121 -31
  41. teradataml/data/advertising.csv +201 -0
  42. teradataml/data/bank_marketing.csv +11163 -0
  43. teradataml/data/bike_sharing.csv +732 -0
  44. teradataml/data/boston2cols.csv +721 -0
  45. teradataml/data/breast_cancer.csv +570 -0
  46. teradataml/data/complaints_test_tokenized.csv +353 -0
  47. teradataml/data/complaints_tokens_model.csv +348 -0
  48. teradataml/data/covid_confirm_sd.csv +83 -0
  49. teradataml/data/customer_segmentation_test.csv +2628 -0
  50. teradataml/data/customer_segmentation_train.csv +8069 -0
  51. teradataml/data/dataframe_example.json +10 -0
  52. teradataml/data/docs/sqle/docs_17_10/OneHotEncodingFit.py +3 -1
  53. teradataml/data/docs/sqle/docs_17_10/OneHotEncodingTransform.py +6 -0
  54. teradataml/data/docs/sqle/docs_17_10/OutlierFilterTransform.py +5 -1
  55. teradataml/data/docs/sqle/docs_17_20/ANOVA.py +61 -1
  56. teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
  57. teradataml/data/docs/sqle/docs_17_20/ColumnTransformer.py +2 -0
  58. teradataml/data/docs/sqle/docs_17_20/FTest.py +105 -26
  59. teradataml/data/docs/sqle/docs_17_20/GLM.py +162 -1
  60. teradataml/data/docs/sqle/docs_17_20/GetFutileColumns.py +5 -3
  61. teradataml/data/docs/sqle/docs_17_20/KMeans.py +48 -1
  62. teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
  63. teradataml/data/docs/sqle/docs_17_20/NonLinearCombineFit.py +3 -2
  64. teradataml/data/docs/sqle/docs_17_20/OneHotEncodingFit.py +5 -0
  65. teradataml/data/docs/sqle/docs_17_20/OneHotEncodingTransform.py +6 -0
  66. teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
  67. teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
  68. teradataml/data/docs/sqle/docs_17_20/ROC.py +3 -2
  69. teradataml/data/docs/sqle/docs_17_20/SVMPredict.py +13 -2
  70. teradataml/data/docs/sqle/docs_17_20/ScaleFit.py +119 -1
  71. teradataml/data/docs/sqle/docs_17_20/ScaleTransform.py +93 -1
  72. teradataml/data/docs/sqle/docs_17_20/Shap.py +197 -0
  73. teradataml/data/docs/sqle/docs_17_20/TDGLMPredict.py +163 -1
  74. teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
  75. teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
  76. teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
  77. teradataml/data/docs/sqle/docs_17_20/XGBoost.py +12 -4
  78. teradataml/data/docs/sqle/docs_17_20/XGBoostPredict.py +7 -1
  79. teradataml/data/docs/sqle/docs_17_20/ZTest.py +72 -7
  80. teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
  81. teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
  82. teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
  83. teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
  84. teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
  85. teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
  86. teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
  87. teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
  88. teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
  89. teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
  90. teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
  91. teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
  92. teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
  93. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
  94. teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
  95. teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
  96. teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
  97. teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
  98. teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
  99. teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
  100. teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
  101. teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
  102. teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
  103. teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
  104. teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
  105. teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
  106. teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
  107. teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
  108. teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
  109. teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
  110. teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
  111. teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
  112. teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
  113. teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
  114. teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
  115. teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
  116. teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
  117. teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
  118. teradataml/data/dwt2d_dataTable.csv +65 -0
  119. teradataml/data/dwt_dataTable.csv +8 -0
  120. teradataml/data/dwt_filterTable.csv +3 -0
  121. teradataml/data/finance_data4.csv +13 -0
  122. teradataml/data/glm_example.json +28 -1
  123. teradataml/data/grocery_transaction.csv +19 -0
  124. teradataml/data/housing_train_segment.csv +201 -0
  125. teradataml/data/idwt2d_dataTable.csv +5 -0
  126. teradataml/data/idwt_dataTable.csv +8 -0
  127. teradataml/data/idwt_filterTable.csv +3 -0
  128. teradataml/data/insect2Cols.csv +61 -0
  129. teradataml/data/interval_data.csv +5 -0
  130. teradataml/data/jsons/paired_functions.json +14 -0
  131. teradataml/data/jsons/sqle/17.20/TD_ANOVA.json +99 -27
  132. teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
  133. teradataml/data/jsons/sqle/17.20/TD_FTest.json +166 -83
  134. teradataml/data/jsons/sqle/17.20/TD_GLM.json +90 -14
  135. teradataml/data/jsons/sqle/17.20/TD_GLMPREDICT.json +48 -5
  136. teradataml/data/jsons/sqle/17.20/TD_GetFutileColumns.json +5 -3
  137. teradataml/data/jsons/sqle/17.20/TD_KMeans.json +31 -11
  138. teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
  139. teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
  140. teradataml/data/jsons/sqle/17.20/TD_NonLinearCombineFit.json +3 -2
  141. teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
  142. teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
  143. teradataml/data/jsons/sqle/17.20/TD_ROC.json +2 -1
  144. teradataml/data/jsons/sqle/17.20/TD_SVM.json +16 -16
  145. teradataml/data/jsons/sqle/17.20/TD_SVMPredict.json +19 -1
  146. teradataml/data/jsons/sqle/17.20/TD_ScaleFit.json +168 -15
  147. teradataml/data/jsons/sqle/17.20/TD_ScaleTransform.json +50 -1
  148. teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
  149. teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
  150. teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
  151. teradataml/data/jsons/sqle/17.20/TD_XGBoost.json +25 -7
  152. teradataml/data/jsons/sqle/17.20/TD_XGBoostPredict.json +17 -4
  153. teradataml/data/jsons/sqle/17.20/TD_ZTest.json +157 -80
  154. teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
  155. teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
  156. teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
  157. teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
  158. teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
  159. teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
  160. teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
  161. teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
  162. teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
  163. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
  164. teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
  165. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
  166. teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
  167. teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
  168. teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
  169. teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
  170. teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
  171. teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
  172. teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
  173. teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
  174. teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
  175. teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
  176. teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
  177. teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
  178. teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
  179. teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
  180. teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
  181. teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
  182. teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
  183. teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
  184. teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
  185. teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
  186. teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
  187. teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
  188. teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
  189. teradataml/data/jsons/uaf/17.20/TD_SAX.json +208 -0
  190. teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
  191. teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
  192. teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
  193. teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
  194. teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
  195. teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +400 -0
  196. teradataml/data/kmeans_example.json +5 -0
  197. teradataml/data/kmeans_table.csv +10 -0
  198. teradataml/data/load_example_data.py +8 -2
  199. teradataml/data/naivebayestextclassifier_example.json +1 -1
  200. teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
  201. teradataml/data/onehot_encoder_train.csv +4 -0
  202. teradataml/data/openml_example.json +29 -0
  203. teradataml/data/peppers.png +0 -0
  204. teradataml/data/real_values.csv +14 -0
  205. teradataml/data/sax_example.json +8 -0
  206. teradataml/data/scale_attributes.csv +3 -0
  207. teradataml/data/scale_example.json +52 -1
  208. teradataml/data/scale_input_part_sparse.csv +31 -0
  209. teradataml/data/scale_input_partitioned.csv +16 -0
  210. teradataml/data/scale_input_sparse.csv +11 -0
  211. teradataml/data/scale_parameters.csv +3 -0
  212. teradataml/data/scripts/deploy_script.py +21 -2
  213. teradataml/data/scripts/sklearn/sklearn_fit.py +40 -37
  214. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +22 -30
  215. teradataml/data/scripts/sklearn/sklearn_function.template +42 -24
  216. teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +23 -33
  217. teradataml/data/scripts/sklearn/sklearn_neighbors.py +19 -28
  218. teradataml/data/scripts/sklearn/sklearn_score.py +32 -32
  219. teradataml/data/scripts/sklearn/sklearn_transform.py +85 -42
  220. teradataml/data/star_pivot.csv +8 -0
  221. teradataml/data/templates/open_source_ml.json +2 -1
  222. teradataml/data/teradataml_example.json +97 -1
  223. teradataml/data/timestamp_data.csv +4 -0
  224. teradataml/data/titanic_dataset_unpivoted.csv +19 -0
  225. teradataml/data/uaf_example.json +55 -1
  226. teradataml/data/unpivot_example.json +15 -0
  227. teradataml/data/url_data.csv +9 -0
  228. teradataml/data/windowdfft.csv +16 -0
  229. teradataml/data/ztest_example.json +16 -0
  230. teradataml/dataframe/copy_to.py +9 -4
  231. teradataml/dataframe/data_transfer.py +125 -64
  232. teradataml/dataframe/dataframe.py +575 -57
  233. teradataml/dataframe/dataframe_utils.py +47 -9
  234. teradataml/dataframe/fastload.py +273 -90
  235. teradataml/dataframe/functions.py +339 -0
  236. teradataml/dataframe/row.py +160 -0
  237. teradataml/dataframe/setop.py +2 -2
  238. teradataml/dataframe/sql.py +740 -18
  239. teradataml/dataframe/window.py +1 -1
  240. teradataml/dbutils/dbutils.py +324 -18
  241. teradataml/geospatial/geodataframe.py +1 -1
  242. teradataml/geospatial/geodataframecolumn.py +1 -1
  243. teradataml/hyperparameter_tuner/optimizer.py +13 -13
  244. teradataml/lib/aed_0_1.dll +0 -0
  245. teradataml/opensource/sklearn/_sklearn_wrapper.py +254 -122
  246. teradataml/options/__init__.py +16 -5
  247. teradataml/options/configure.py +39 -6
  248. teradataml/options/display.py +2 -2
  249. teradataml/plot/axis.py +4 -4
  250. teradataml/scriptmgmt/UserEnv.py +26 -19
  251. teradataml/scriptmgmt/lls_utils.py +120 -16
  252. teradataml/table_operators/Script.py +4 -5
  253. teradataml/table_operators/TableOperator.py +160 -26
  254. teradataml/table_operators/table_operator_util.py +88 -41
  255. teradataml/table_operators/templates/dataframe_udf.template +63 -0
  256. teradataml/telemetry_utils/__init__.py +0 -0
  257. teradataml/telemetry_utils/queryband.py +52 -0
  258. teradataml/utils/validators.py +41 -3
  259. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/METADATA +191 -6
  260. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/RECORD +263 -185
  261. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/WHEEL +0 -0
  262. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/top_level.txt +0 -0
  263. {teradataml-20.0.0.0.dist-info → teradataml-20.0.0.2.dist-info}/zip-safe +0 -0
@@ -25,13 +25,11 @@
25
25
  "PrimaryLayer": true,
26
26
  "LayerName": "ARTPRIMARY",
27
27
  "ResultTableColumnTypes": [
28
-
29
28
  "integer",
30
29
  "float",
31
30
  "float"
32
31
  ],
33
32
  "Description": [
34
-
35
33
  "The result series is always a multivariate series containing Fourier coefficients, whose elements are either complex numbers with real and imaginary component",
36
34
  "The TD_DFFT function returns only a primary result set. There are no secondary or tertiary result sets produced by this function. ",
37
35
  "The primary result set can be accessed either via a subsequent SELECT * FROM <art table> statement or by issuing a TD_EXTRACT_RESULTS(…,LAYER(ARTPRIMARY),…) .",
@@ -44,7 +42,6 @@
44
42
  ],
45
43
  "IsPlottable": true,
46
44
  "Params": [
47
-
48
45
  {
49
46
  "Name": "ZERO_PADDING_OK",
50
47
  "Type": "integer",
@@ -78,7 +75,7 @@
78
75
  "Type": "float",
79
76
  "Optional": true,
80
77
  "LowerBound": 0,
81
- "LowerBoundType": "INCLUSIVE",
78
+ "LowerBoundType": "EXCLUSIVE",
82
79
  "AllowNaN": false,
83
80
  "Description": [
84
81
  "Optional Parameter. Only valid with a FREQ_STYLE(K_HERTZ). A Floating point constant representing the sample rate, in hertz. A value of 10000.0 means that the sample points were obtained by sampling at a rate of 10,000 hertz."
@@ -25,23 +25,19 @@
25
25
  "PrimaryLayer": true,
26
26
  "LayerName": "ARTPRIMARY",
27
27
  "ResultTableColumnTypes": [
28
-
29
28
  "integer",
30
29
  "integer",
31
30
  "float",
32
31
  "float"
33
32
  ],
34
33
  "Description": [
35
-
36
34
  "The TD_DFFT2 returns ONLY a Primary Result set. There is no Secondary or Tertiary Result set returned by this function. The Primary Result set can be retrieved by issuing a SELECT statement against the analytical result table containing the results."
37
-
38
35
  ],
39
36
  "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field"
40
37
  }
41
38
  ],
42
39
  "IsPlottable": true,
43
40
  "Params": [
44
-
45
41
  {
46
42
  "Name": "ZERO_PADDING_OK",
47
43
  "Type": "integer",
@@ -75,7 +71,7 @@
75
71
  "Type": "float",
76
72
  "Optional": true,
77
73
  "LowerBound": 0,
78
- "LowerBoundType": "INCLUSIVE",
74
+ "LowerBoundType": "EXCLUSIVE",
79
75
  "AllowNaN": false,
80
76
  "Description": [
81
77
  "Optional Parameter. Only valid with a FREQ_STYLE(K_HERTZ). A Floating point constant representing the sample rate, in hertz. A value of 10000.0 means that the sample points were obtained by sampling at a rate of 10,000 hertz. This hertz interpretation will be applied to both the ROW_I and COLUMN_I indexes."
@@ -118,7 +114,7 @@
118
114
  "AMPL_PHASE_RADIANS",
119
115
  "AMPL_PHASE_DEGREES",
120
116
  "AMPL_PHASE",
121
- "MULTIVAR_COMPLEX",
117
+ "MULTIVAR_COMPLEX",
122
118
  "MULTIVAR_AMPL_PHASE_RADIANS",
123
119
  "MULTIVAR_AMPL_PHASE_DEGREES",
124
120
  "MULTIVAR_AMPL_PHASE"
@@ -144,6 +140,5 @@
144
140
  ],
145
141
  "LangName": "output_fmt_row_major"
146
142
  }
147
-
148
143
  ]
149
144
  }
@@ -43,7 +43,6 @@
43
43
  ],
44
44
  "IsPlottable": true,
45
45
  "Params": [
46
-
47
46
  {
48
47
  "Name": "CONV",
49
48
  "Type": "string",
@@ -76,7 +75,7 @@
76
75
  "Type": "float",
77
76
  "Optional": true,
78
77
  "LowerBound": 0,
79
- "LowerBoundType": "INCLUSIVE",
78
+ "LowerBoundType": "EXCLUSIVE",
80
79
  "Description": [
81
80
  "Optional Parameter. Only valid with a FREQ_STYLE(K_HERTZ). A Floating point constant representing the sample rate, in hertz. A value of 10000.0 means that the sample points were obtained by sampling at a rate of 10,000 hertz."
82
81
  ]
@@ -33,7 +33,6 @@
33
33
  "float"
34
34
  ],
35
35
  "Description": [
36
-
37
36
  "The TD_DFFTCONV function returns only a primary result set. There are no secondary or tertiary result sets produced by this function. The primary result set can be retrieved by issuing a SELECT against the analytical table containing the results.",
38
37
  "The data scientist has the choice of outputting the result set with the produced Fourier Coefficients being realized in the form of: complex numbers - OUTPUT_FMT(CONTENT(COMPLEX)); or, in the form of amplitude and phase number pairs - OUTPUT_FMT(CONTENT(AMPL_PHASE)). This will, of course, affect the composition of the produced output columns. Multivar content types such as MULTIVAR_COMPLEX are also supported and have the same result table format.",
39
38
  "The two float values are AMPLITUDE and PHASE if OUTPUT_FMT(CONTENT(AMPL_PHASE_DEGREES) | CONTENT(AMPL_PHASE_RADIANS) | CONTENT(AMPL_PHASE) | CONTENT(MULTIVAR_AMPL_PHASE_DEGREES) | CONTENT(MULTIVAR_AMPL_PHASE_RADIANS) | CONTENT(MULTIVAR_AMPL_PHASE)). Fourier Amplitude coefficient corresponding to ROW_I index",
@@ -44,7 +43,6 @@
44
43
  ],
45
44
  "IsPlottable": true,
46
45
  "Params": [
47
-
48
46
  {
49
47
  "Name": "CONV",
50
48
  "Type": "string",
@@ -35,7 +35,6 @@
35
35
  "PrimaryLayer": true,
36
36
  "LayerName": "ARTPRIMARY",
37
37
  "ResultTableColumnTypes": [
38
-
39
38
  "integer",
40
39
  "float",
41
40
  "integer",
@@ -44,7 +43,6 @@
44
43
  "<varies>"
45
44
  ],
46
45
  "Description": [
47
-
48
46
  "Last two columns have types that are the same as series 1 and 2 row axis type. "
49
47
  ],
50
48
  "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field"
@@ -52,7 +50,6 @@
52
50
  ],
53
51
  "IsPlottable": true,
54
52
  "Params": [
55
-
56
53
  {
57
54
  "Name": "RADIUS",
58
55
  "Type": "integer",
@@ -71,9 +68,9 @@
71
68
  "Type": "string",
72
69
  "Optional": true,
73
70
  "PermittedValues": [
74
- "EUCLIDEAN",
75
- "MANHATTAN",
76
- "BINARY"
71
+ "euclidean",
72
+ "manhattan",
73
+ "binary"
77
74
  ],
78
75
  "Description": [
79
76
  "The distance function to be used. The currently supported names are: 'euclidean' - for Euclidean distance function; 'manhattan' - for Manhattan distance function; 'binary' - For binary distance function. The match is case in-sensitive."
@@ -0,0 +1,173 @@
1
+ {
2
+ "FuncName": "TD_DWT",
3
+ "FuncDescriptionShort": "TD_DWT is a function that performs discrete wavelet transform (DWT).",
4
+ "FuncDescriptionLong": [
5
+ "TD_DWT is a function that performs discrete wavelet transform (DWT)."
6
+ ],
7
+ "FunctionVersion": "...",
8
+ "FunctionCategory": "Digital Signal Processing",
9
+ "JSONVersion": "1",
10
+ "FuncRName": "td_DWT",
11
+ "MaxInputFiles": 2,
12
+ "Input": [
13
+ {
14
+ "Type": "SERIES",
15
+ "Description": [
16
+ "Specify the SERIES_SPEC of the series.",
17
+ "Multiple payloads are supported, and each payload column is transformed independently.",
18
+ "Only REAL or MULTIVAR_REAL payload content types are supported."
19
+ ],
20
+ "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field",
21
+ "Optional": false
22
+ },
23
+ {
24
+ "Type": "SERIES",
25
+ "Description": [
26
+ "[Optional] Specify the SERIES_SPEC of the series. The series specifies the filter.",
27
+ "It should have two payload columns corresponding to low and high pass filters.",
28
+ "Only MULTIVAR_REAL payload content type is supported."
29
+ ],
30
+ "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field",
31
+ "Optional": true
32
+ }
33
+ ],
34
+ "Output": [
35
+ {
36
+ "Type": "ART",
37
+ "PrimaryLayer": true,
38
+ "LayerName": "ARTPRIMARY",
39
+ "ResultTableColumnTypes": [
40
+ "big_integer",
41
+ "float",
42
+ "<varies>"
43
+ ],
44
+ "Description": [
45
+ "The TD_DWT function returns only a primary result set."
46
+ ],
47
+ "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field"
48
+ }
49
+ ],
50
+ "IsPlottable": true,
51
+ "Params": [
52
+ {
53
+ "Name": "WAVELET",
54
+ "Type": "string",
55
+ "Optional": true,
56
+ "Description": [
57
+ "Name of the wavelet.",
58
+ "If this parameter is specified, then do not include a second input series for the function.",
59
+ "If this parameter is not specified, then include a second input series to provide the filter.",
60
+ "Data type is case-sensitive.",
61
+ "Option families and names are:",
62
+ "Daubechies: 'db1' or 'haar', 'db2', 'db3', .... ,'db38'",
63
+ "Coiflets: 'coif1', 'coif2', ... , 'coif17'",
64
+ "Symlets: 'sym2', 'sym3', ... ,' sym20'",
65
+ "Discrete Meyer: 'dmey'",
66
+ "Biorthogonal: 'bior1.1', 'bior1.3', 'bior1.5', 'bior2.2', 'bior2.4', 'bior2.6', 'bior2.8', 'bior3.1', 'bior3.3', 'bior3.5', 'bior3.7', 'bior3.9', 'bior4.4', 'bior5.5', 'bior6.8'",
67
+ "Reverse Biorthogonal: 'rbio1.1', 'rbio1.3', 'rbio1.5' 'rbio2.2', 'rbio2.4', 'rbio2.6', 'rbio2.8', 'rbio3.1', 'rbio3.3', 'rbio3.5', 'rbio3.7','rbio3.9', 'rbio4.4', 'rbio5.5', 'rbio6.8'"
68
+ ]
69
+ },
70
+ {
71
+ "Name": "MODE",
72
+ "Type": "string",
73
+ "Optional": true,
74
+ "PermittedValues": [
75
+ "zero",
76
+ "symmetric",
77
+ "constant",
78
+ "smooth",
79
+ "periodic",
80
+ "periodization",
81
+ "reflect",
82
+ "antisymmetric",
83
+ "antireflect",
84
+ "sym",
85
+ "symh",
86
+ "symw",
87
+ "spd",
88
+ "sp1",
89
+ "sp0",
90
+ "zpd",
91
+ "ppd",
92
+ "per",
93
+ "asym",
94
+ "asymh",
95
+ "asymw"
96
+ ],
97
+ "DefaultValue": "symmetric",
98
+ "Description": [
99
+ "Signal extension mode.",
100
+ "Data type is case-insensitive.",
101
+ "Options are:",
102
+ "symmetric, sym, symh",
103
+ "reflect, symw",
104
+ "smooth, spd, sp1",
105
+ "constant, sp0",
106
+ "zero, zpd",
107
+ "periodic, ppd",
108
+ "periodization, per",
109
+ "antisymmetric, asym, asymh",
110
+ "antireflect, asymw"
111
+ ]
112
+ },
113
+ {
114
+ "Name": "LEVEL",
115
+ "Type": "integer",
116
+ "Optional": true,
117
+ "LowerBound": 1,
118
+ "UpperBound": 15,
119
+ "LowerBoundType": "INCLUSIVE",
120
+ "UpperBoundType": "INCLUSIVE",
121
+ "DefaultValue": 1,
122
+ "Description": "Level of decomposition. Valid values are [1,15]. Default is 1."
123
+ },
124
+ {
125
+ "Name": "PART",
126
+ "Type": "string",
127
+ "Optional": true,
128
+ "PermittedValues": [
129
+ "a",
130
+ "d"
131
+ ],
132
+ "Description": [
133
+ "Indicator that partial decomposition result is needed. Valid values are 'a' or 'd', corresponding to the approximation or the detail of the decomposition result.",
134
+ "Data type is case-insensitive."
135
+ ]
136
+ }
137
+ ],
138
+ "InputFmt": [
139
+ {
140
+ "Name": "INPUT_MODE",
141
+ "Type": "string",
142
+ "Optional": true,
143
+ "PermittedValues": [
144
+ "MANY2ONE",
145
+ "ONE2ONE",
146
+ "MATCH"
147
+ ],
148
+ "Description": [
149
+ "When there are two input series, then the INPUT_FMT specification is mandatory.",
150
+ "[Optional] The INPUT_MODE parameter has the following options:",
151
+ "ONE2ONE: Both the primary and secondary series specifications contain a series name which identifies the two series in the function.",
152
+ "MANY2ONE: The MANY specification is the primary series declaration. The secondary series specification contains a series name that identifies the single secondary series.",
153
+ "MATCH: Both series are defined by their respective SERIES_SPEC(INSTANCE_NAME()) declarations."
154
+ ],
155
+ "LangName": "input_fmt_input_mode"
156
+ }
157
+ ],
158
+ "OutputFmt": [
159
+ {
160
+ "Name": "INDEX_STYLE",
161
+ "Type": "string",
162
+ "Optional": true,
163
+ "DefaultValue": "NUMERICAL_SEQUENCE",
164
+ "PermittedValues": [
165
+ "NUMERICAL_SEQUENCE"
166
+ ],
167
+ "Description": [
168
+ "[Optional] The INDEX_STYLE of the output format is NUMERICAL_SEQUENCE."
169
+ ],
170
+ "LangName": "output_fmt_index_style"
171
+ }
172
+ ]
173
+ }
@@ -0,0 +1,160 @@
1
+ {
2
+ "FuncName": "TD_DWT2D",
3
+ "FuncDescriptionShort": "TD_DWT2D performs discrete wavelet transform (DWT) for two-dimensional data.",
4
+ "FuncDescriptionLong": [
5
+ "TD_DWT2D performs discrete wavelet transform (DWT) for two-dimensional data. The algorithm is applied first vertically by column axis, then horizontally by row axis."
6
+ ],
7
+ "FunctionVersion": "...",
8
+ "FunctionCategory": "Digital Signal Processing",
9
+ "JSONVersion": "1",
10
+ "FuncRName": "td_DWT2D",
11
+ "MaxInputFiles": 2,
12
+ "Input": [
13
+ {
14
+ "Type": "MATRIX",
15
+ "Description": [
16
+ "Specify the MATRIX_SPEC of the matrix.",
17
+ "Multiple payloads are supported, and each payload column is transformed independently.",
18
+ "Only REAL or MULTIVAR_REAL payload content types are supported."
19
+ ],
20
+ "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field",
21
+ "Optional": false
22
+ },
23
+ {
24
+ "Type": "SERIES",
25
+ "Description": [
26
+ "[Optional] Specify the SERIES_SPEC of the series. The series specifies the filter.",
27
+ "It should have two payload columns corresponding to low and high pass filters.",
28
+ "Only MULTIVAR_REAL payload content type is supported."
29
+ ],
30
+ "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field",
31
+ "Optional": true
32
+ }
33
+ ],
34
+ "Output": [
35
+ {
36
+ "Type": "ART",
37
+ "PrimaryLayer": true,
38
+ "LayerName": "ARTPRIMARY",
39
+ "ResultTableColumnTypes": [
40
+ "big_integer",
41
+ "float",
42
+ "<varies>"
43
+ ],
44
+ "Description": [
45
+ "The TD_DWT2D function returns only a primary result set."
46
+ ],
47
+ "LangName": "data or object or newdata or ... --> Langauges team can work with UAF team to come up with this field"
48
+ }
49
+ ],
50
+ "IsPlottable": true,
51
+ "Params": [
52
+ {
53
+ "Name": "WAVELET",
54
+ "Type": "string",
55
+ "Optional": true,
56
+ "Description": [
57
+ "Name of the wavelet.",
58
+ "If this parameter is specified, then do not include a second input series for the function.",
59
+ "If this parameter is not specified, then include a second input series to provide the filter.",
60
+ "Data type is case-sensitive.",
61
+ "Option families and names are:",
62
+ "Daubechies: 'db1' or 'haar', 'db2', 'db3', .... ,'db38'",
63
+ "Coiflets: 'coif1', 'coif2', ... , 'coif17'",
64
+ "Symlets: 'sym2', 'sym3', ... ,' sym20'",
65
+ "Discrete Meyer: 'dmey'",
66
+ "Biorthogonal: 'bior1.1', 'bior1.3', 'bior1.5', 'bior2.2', 'bior2.4', 'bior2.6', 'bior2.8', 'bior3.1', 'bior3.3', 'bior3.5', 'bior3.7', 'bior3.9', 'bior4.4', 'bior5.5', 'bior6.8'",
67
+ "Reverse Biorthogonal: 'rbio1.1', 'rbio1.3', 'rbio1.5' 'rbio2.2', 'rbio2.4', 'rbio2.6', 'rbio2.8', 'rbio3.1', 'rbio3.3', 'rbio3.5', 'rbio3.7','rbio3.9', 'rbio4.4', 'rbio5.5', 'rbio6.8'"
68
+ ]
69
+ },
70
+ {
71
+ "Name": "MODE",
72
+ "Type": "string",
73
+ "Optional": true,
74
+ "PermittedValues": [
75
+ "zero",
76
+ "symmetric",
77
+ "constant",
78
+ "smooth",
79
+ "periodic",
80
+ "periodization",
81
+ "reflect",
82
+ "antisymmetric",
83
+ "antireflect",
84
+ "sym",
85
+ "symh",
86
+ "symw",
87
+ "spd",
88
+ "sp1",
89
+ "sp0",
90
+ "zpd",
91
+ "ppd",
92
+ "per",
93
+ "asym",
94
+ "asymh",
95
+ "asymw"
96
+ ],
97
+ "DefaultValue": "symmetric",
98
+ "Description": [
99
+ "Signal extension mode.",
100
+ "Data type is case-insensitive.",
101
+ "Options are:",
102
+ "symmetric, sym, symh",
103
+ "reflect, symw",
104
+ "smooth, spd, sp1",
105
+ "constant, sp0",
106
+ "zero, zpd",
107
+ "periodic, ppd",
108
+ "periodization, per",
109
+ "antisymmetric, asym, asymh",
110
+ "antireflect, asymw"
111
+ ]
112
+ },
113
+ {
114
+ "Name": "LEVEL",
115
+ "Type": "integer",
116
+ "Optional": true,
117
+ "LowerBound": 1,
118
+ "UpperBound": 15,
119
+ "LowerBoundType": "INCLUSIVE",
120
+ "UpperBoundType": "INCLUSIVE",
121
+ "DefaultValue": 1,
122
+ "Description": "Level of decomposition. Valid values are [1,15]. Default is 1."
123
+ }
124
+ ],
125
+ "InputFmt": [
126
+ {
127
+ "Name": "INPUT_MODE",
128
+ "Type": "string",
129
+ "Optional": true,
130
+ "PermittedValues": [
131
+ "MANY2ONE",
132
+ "ONE2ONE",
133
+ "MATCH"
134
+ ],
135
+ "Description": [
136
+ "When there are two input series, then the INPUT_FMT specification is mandatory.",
137
+ "[Optional] The INPUT_MODE parameter has the following options:",
138
+ "ONE2ONE: Both the primary and secondary series specifications contain a series name which identifies the two series in the function.",
139
+ "MANY2ONE: The MANY specification is the primary series declaration. The secondary series specification contains a series name that identifies the single secondary series.",
140
+ "MATCH: Both series are defined by their respective SERIES_SPEC(INSTANCE_NAME()) declarations."
141
+ ],
142
+ "LangName": "input_fmt_input_mode"
143
+ }
144
+ ],
145
+ "OutputFmt": [
146
+ {
147
+ "Name": "INDEX_STYLE",
148
+ "Type": "string",
149
+ "Optional": true,
150
+ "DefaultValue": "NUMERICAL_SEQUENCE",
151
+ "PermittedValues": [
152
+ "NUMERICAL_SEQUENCE"
153
+ ],
154
+ "Description": [
155
+ "[Optional] The INDEX_STYLE of the output format is NUMERICAL_SEQUENCE."
156
+ ],
157
+ "LangName": "output_fmt_index_style"
158
+ }
159
+ ]
160
+ }
@@ -13,7 +13,7 @@
13
13
  {
14
14
  "Type": [
15
15
  "SERIES",
16
- "ART"
16
+ "ART"
17
17
  ],
18
18
  "Description": [
19
19
  "This section outlines the syntax associated with invoking the TD_FIT_METRICS function. This function can accept either a single multivariate series as an input, or alternatively can accept an ART table containing the residual results from a previously run regression operation. ",
@@ -50,49 +50,31 @@
50
50
  "IsPlottable": true,
51
51
  "Params": [
52
52
  {
53
- "Name": "ORIG_REGR_PARAMCNT",
54
- "Type": "integer",
55
- "Optional": false,
56
- "LowerBound": 1,
57
- "LowerBoundType": "INCLUSIVE",
58
- "AllowNaN": false,
59
- "Description": [
60
- "Positive Integer value > 0. A parameter indicating how many response and explanatory variables were present in the original regression, which is being analyzed by this test."
61
- ]
62
- },
63
- {
64
- "Name": "WEIGHTS",
53
+ "Name": "CONST_TERM",
65
54
  "Type": "integer",
66
55
  "Optional": true,
67
- "DefaultValue": 0,
56
+ "DefaultValue": 1,
68
57
  "PermittedValues": [
69
58
  0,
70
59
  1
71
60
  ],
72
61
  "Description": [
73
- "Optional Parameter. But when used must be accompanied by both a FORMULA and an ALGORITHM. ",
74
- "FLAG having either a '0' or '1' value. A value of '1' means the last series found in the payload is to be interpreted as a series of weights that can be used to perform a weighted least squares regression solution."
62
+ "Optional indicator of whether the regression performed should use a Y-intercept coefficient.",
63
+ "A value of 1 means the regression is performed on 'Y=C+aX1+bX2+…'.",
64
+ "A value of 0 means the regression is performed on 'Y=aX1+bX2+…'.",
65
+ "The default is 1."
75
66
  ]
76
67
  },
77
- {
78
- "Name": "FORMULA",
79
- "Type": "<td_formula>",
80
- "Optional": false,
81
- "Description": [
82
- "Specifies the formula used in the regression operation. The name of the response variable must always be Y, and the name of the explanatory variable must always be X1. For example, 'Y = B0 + B1 * X1'.\n",
83
- "Notes:\n",
84
- "* The formula argument must be specified along with the 'algorithm' argument.\n",
85
- "* Use the following link to refer the formula rules:\n",
86
- "https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-Unbounded-Array-Framework-Time-Series-Reference-17.20/Mathematic-Operators-and-Functions/Formula-Rules"
87
- ]
88
- },
89
68
  {
90
69
  "Name": "ALGORITHM",
91
70
  "Type": "string",
92
71
  "Optional": false,
72
+ "PermittedValues": [
73
+ "QR",
74
+ "PSI"
75
+ ],
93
76
  "Description": [
94
- "Specifies the algorithm used for the regression.\n",
95
- "Permitted Values:\n 1. QR: QR decomposition is used for the regression.\n 2. PSI: pseudo-inverse based on singular value decomposition (SVD) is used to solve the regression."
77
+ "Algorithm used for the regression. A value of QR means that QR decomposition is used for the regression. A value of PSI means that pseudo-inverse based on SVD is used to solve the regression."
96
78
  ],
97
79
  "LangName": "..."
98
80
  },
@@ -143,7 +125,11 @@
143
125
  "TWOSIDED"
144
126
  ],
145
127
  "Description": [
146
- "Optional parameter. Has a value of: 'GREATER' or 'LESS' or 'TWOSIDED' ; DEFAULT (if not present) is 'GREATER'. Test value influences how GQ Test Statistic is computed and how Hypothesis is evaluated. See test description above to understand meaning of choosing a TEST value."
128
+ "Optional parameter. Has a value of: 'GREATER' or 'LESS' or 'TWOSIDED' ; DEFAULT (if not present) is 'GREATER'. Test value influences how GQ Test Statistic is computed and how Hypothesis is evaluated.",
129
+ "Options are as follows:",
130
+ "GREATER: If the Goldfeld-Quandt test-statistic is less than the higher critical value, the null hypothesis is accepted, and there is no evidence of heteroscedastic variance. If the Goldfeld-Quandt test statistic is greater than or equal to the critical value, then the null hypothesis is rejected, and there is evidence of heteroscedastic variance.",
131
+ "LESS: If the Goldfeld-Quandt test-statistic is greater than the lower critical value, the null hypothesis is accepted, and there is no evidence of heteroscedastic variance. If the Goldfeld-Quandt test statistic is less than or equal to the critical value, then the null hypothesis is rejected, and there is evidence of heteroscedastic variance.",
132
+ "TWOSIDED: If the Goldfeld-Quandt test-statistic is greater than the lower tail critical value and less than the higher tail critical value, the null hypothesis is accepted, and there is no evidence of heteroscedastic variance. If the Goldfeld-Quandt test statistic is less than or equal to the lower tail critical value or greater than or equal to the high tail critical value, then the null hypothesis is rejected, and there is evidence of heteroscedastic variance."
147
133
  ],
148
134
  "LangName": "..."
149
135
  }
@@ -162,7 +162,6 @@
162
162
  "Name": "SEASONAL_PERIODS",
163
163
  "Type": "integer",
164
164
  "Optional": true,
165
- "DefaultValue": 1,
166
165
  "LowerBound": 1,
167
166
  "LowerBoundType": "INCLUSIVE",
168
167
  "AllowNaN": false,
@@ -243,13 +242,13 @@
243
242
  "Name": "PREDICTION_INTERVALS",
244
243
  "Type": "string",
245
244
  "Optional": true,
245
+ "DefaultValue": "BOTH",
246
246
  "PermittedValues": [
247
247
  "NONE",
248
248
  "80",
249
249
  "95",
250
250
  "BOTH"
251
251
  ],
252
- "DefaultValue": "BOTH",
253
252
  "Description": [
254
253
  "Set to either of: 'NONE' or '80' or '95' or 'BOTH'; default is 'BOTH'"
255
254
  ]
@@ -65,7 +65,7 @@
65
65
  "AMPL_PHASE_RADIANS",
66
66
  "AMPL_PHASE_DEGREES",
67
67
  "AMPL_PHASE",
68
- "MULTIVAR_COMPLEX",
68
+ "MULTIVAR_COMPLEX",
69
69
  "MULTIVAR_AMPL_PHASE_RADIANS",
70
70
  "MULTIVAR_AMPL_PHASE_DEGREES",
71
71
  "MULTIVAR_AMPL_PHASE"
@@ -76,20 +76,6 @@
76
76
  "The default value is dependent on the datatype of the input series; a single var input will generate COMPLEX output CONTENT by default; a multi var input will generate MULTIVAR_COMPLEX output CONTENT by default."
77
77
  ],
78
78
  "LangName": "output_fmt_content"
79
- },
80
- {
81
- "Name": "ROW_MAJOR",
82
- "Type": "integer",
83
- "Optional": true,
84
- "DefaultValue": 1,
85
- "PermittedValues": [
86
- 0,
87
- 1
88
- ],
89
- "Description": [
90
- "The data scientist may explicity declare whether they want the matrix output in a row-major-centric or column-major-centric manner, via the OUTPUT_FMT(ROW_MAJOR()) construct."
91
- ],
92
- "LangName": "output_fmt_row_major"
93
79
  }
94
80
  ]
95
81
  }