teradataml 20.0.0.4__py3-none-any.whl → 20.0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of teradataml may be problematic.

Files changed (131):
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/README.md +182 -13
  3. teradataml/__init__.py +2 -1
  4. teradataml/_version.py +2 -2
  5. teradataml/analytics/analytic_function_executor.py +8 -13
  6. teradataml/analytics/json_parser/analytic_functions_argument.py +4 -0
  7. teradataml/analytics/sqle/__init__.py +16 -1
  8. teradataml/analytics/utils.py +60 -1
  9. teradataml/automl/__init__.py +290 -106
  10. teradataml/automl/autodataprep/__init__.py +471 -0
  11. teradataml/automl/data_preparation.py +29 -10
  12. teradataml/automl/data_transformation.py +11 -0
  13. teradataml/automl/feature_engineering.py +64 -4
  14. teradataml/automl/feature_exploration.py +639 -25
  15. teradataml/automl/model_training.py +1 -1
  16. teradataml/clients/auth_client.py +12 -8
  17. teradataml/clients/keycloak_client.py +165 -0
  18. teradataml/common/constants.py +71 -26
  19. teradataml/common/exceptions.py +32 -0
  20. teradataml/common/messagecodes.py +28 -0
  21. teradataml/common/messages.py +13 -4
  22. teradataml/common/sqlbundle.py +3 -2
  23. teradataml/common/utils.py +345 -45
  24. teradataml/context/context.py +259 -93
  25. teradataml/data/apriori_example.json +22 -0
  26. teradataml/data/docs/sqle/docs_17_20/Apriori.py +138 -0
  27. teradataml/data/docs/sqle/docs_17_20/NERExtractor.py +121 -0
  28. teradataml/data/docs/sqle/docs_17_20/NGramSplitter.py +3 -3
  29. teradataml/data/docs/sqle/docs_17_20/SMOTE.py +212 -0
  30. teradataml/data/docs/sqle/docs_17_20/TextMorph.py +119 -0
  31. teradataml/data/docs/sqle/docs_17_20/TextParser.py +54 -3
  32. teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -1
  33. teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +2 -2
  34. teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +2 -2
  35. teradataml/data/docs/uaf/docs_17_20/DFFT.py +1 -1
  36. teradataml/data/docs/uaf/docs_17_20/DFFT2.py +1 -1
  37. teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +1 -1
  38. teradataml/data/docs/uaf/docs_17_20/DFFTConv.py +1 -1
  39. teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +4 -4
  40. teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +2 -2
  41. teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +2 -2
  42. teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +6 -6
  43. teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
  44. teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +1 -1
  45. teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +4 -4
  46. teradataml/data/docs/uaf/docs_17_20/MultivarRegr.py +1 -1
  47. teradataml/data/docs/uaf/docs_17_20/PACF.py +1 -1
  48. teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
  49. teradataml/data/docs/uaf/docs_17_20/PowerTransform.py +3 -3
  50. teradataml/data/docs/uaf/docs_17_20/Resample.py +5 -5
  51. teradataml/data/docs/uaf/docs_17_20/SAX.py +3 -3
  52. teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
  53. teradataml/data/docs/uaf/docs_17_20/SimpleExp.py +1 -1
  54. teradataml/data/docs/uaf/docs_17_20/Smoothma.py +3 -3
  55. teradataml/data/docs/uaf/docs_17_20/UNDIFF.py +1 -1
  56. teradataml/data/jsons/byom/onnxembeddings.json +1 -0
  57. teradataml/data/jsons/sqle/17.20/NGramSplitter.json +6 -6
  58. teradataml/data/jsons/sqle/17.20/TD_Apriori.json +181 -0
  59. teradataml/data/jsons/sqle/17.20/TD_NERExtractor.json +145 -0
  60. teradataml/data/jsons/sqle/17.20/TD_SMOTE.json +267 -0
  61. teradataml/data/jsons/sqle/17.20/TD_TextMorph.json +134 -0
  62. teradataml/data/jsons/sqle/17.20/TD_TextParser.json +114 -9
  63. teradataml/data/jsons/sqle/20.00/AI_AnalyzeSentiment.json +328 -0
  64. teradataml/data/jsons/sqle/20.00/AI_AskLLM.json +420 -0
  65. teradataml/data/jsons/sqle/20.00/AI_DetectLanguage.json +343 -0
  66. teradataml/data/jsons/sqle/20.00/AI_ExtractKeyPhrases.json +328 -0
  67. teradataml/data/jsons/sqle/20.00/AI_MaskPII.json +328 -0
  68. teradataml/data/jsons/sqle/20.00/AI_RecognizeEntities.json +328 -0
  69. teradataml/data/jsons/sqle/20.00/AI_RecognizePIIEntities.json +328 -0
  70. teradataml/data/jsons/sqle/20.00/AI_TextClassifier.json +359 -0
  71. teradataml/data/jsons/sqle/20.00/AI_TextEmbeddings.json +360 -0
  72. teradataml/data/jsons/sqle/20.00/AI_TextSummarize.json +343 -0
  73. teradataml/data/jsons/sqle/20.00/AI_TextTranslate.json +343 -0
  74. teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +2 -2
  75. teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +1 -1
  76. teradataml/data/ner_dict.csv +8 -0
  77. teradataml/data/ner_input_eng.csv +7 -0
  78. teradataml/data/ner_rule.csv +5 -0
  79. teradataml/data/pattern_matching_data.csv +11 -0
  80. teradataml/data/pos_input.csv +40 -0
  81. teradataml/data/sdk/modelops/modelops_spec.json +101737 -0
  82. teradataml/data/tdnerextractor_example.json +14 -0
  83. teradataml/data/teradataml_example.json +21 -1
  84. teradataml/data/textmorph_example.json +5 -0
  85. teradataml/data/to_num_data.csv +4 -0
  86. teradataml/data/tochar_data.csv +5 -0
  87. teradataml/data/trans_dense.csv +16 -0
  88. teradataml/data/trans_sparse.csv +55 -0
  89. teradataml/data/url_data.csv +10 -9
  90. teradataml/dataframe/copy_to.py +38 -27
  91. teradataml/dataframe/data_transfer.py +61 -45
  92. teradataml/dataframe/dataframe.py +1110 -132
  93. teradataml/dataframe/dataframe_utils.py +73 -27
  94. teradataml/dataframe/functions.py +1070 -9
  95. teradataml/dataframe/sql.py +750 -959
  96. teradataml/dbutils/dbutils.py +33 -13
  97. teradataml/dbutils/filemgr.py +14 -10
  98. teradataml/hyperparameter_tuner/utils.py +4 -2
  99. teradataml/lib/aed_0_1.dll +0 -0
  100. teradataml/opensource/_base.py +12 -157
  101. teradataml/options/configure.py +24 -9
  102. teradataml/scriptmgmt/UserEnv.py +317 -39
  103. teradataml/scriptmgmt/lls_utils.py +456 -135
  104. teradataml/sdk/README.md +79 -0
  105. teradataml/sdk/__init__.py +4 -0
  106. teradataml/sdk/_auth_modes.py +422 -0
  107. teradataml/sdk/_func_params.py +487 -0
  108. teradataml/sdk/_json_parser.py +453 -0
  109. teradataml/sdk/_openapi_spec_constants.py +249 -0
  110. teradataml/sdk/_utils.py +236 -0
  111. teradataml/sdk/api_client.py +897 -0
  112. teradataml/sdk/constants.py +62 -0
  113. teradataml/sdk/modelops/__init__.py +98 -0
  114. teradataml/sdk/modelops/_client.py +406 -0
  115. teradataml/sdk/modelops/_constants.py +304 -0
  116. teradataml/sdk/modelops/models.py +2308 -0
  117. teradataml/sdk/spinner.py +107 -0
  118. teradataml/store/__init__.py +1 -1
  119. teradataml/table_operators/Apply.py +16 -1
  120. teradataml/table_operators/Script.py +20 -1
  121. teradataml/table_operators/query_generator.py +4 -21
  122. teradataml/table_operators/table_operator_util.py +58 -9
  123. teradataml/utils/dtypes.py +4 -2
  124. teradataml/utils/internal_buffer.py +22 -2
  125. teradataml/utils/utils.py +0 -1
  126. teradataml/utils/validators.py +318 -58
  127. {teradataml-20.0.0.4.dist-info → teradataml-20.0.0.6.dist-info}/METADATA +188 -14
  128. {teradataml-20.0.0.4.dist-info → teradataml-20.0.0.6.dist-info}/RECORD +131 -84
  129. {teradataml-20.0.0.4.dist-info → teradataml-20.0.0.6.dist-info}/WHEEL +0 -0
  130. {teradataml-20.0.0.4.dist-info → teradataml-20.0.0.6.dist-info}/top_level.txt +0 -0
  131. {teradataml-20.0.0.4.dist-info → teradataml-20.0.0.6.dist-info}/zip-safe +0 -0

teradataml/data/tdnerextractor_example.json
@@ -0,0 +1,14 @@
+ {
+ "ner_input_eng": {
+ "id": "integer",
+ "txt": "varchar(500)"
+ },
+ "ner_rule": {
+ "type_ner": "varchar(500)",
+ "regex": "varchar(500)"
+ },
+ "ner_dict": {
+ "type_ner": "varchar(500)",
+ "dict": "varchar(500)"
+ }
+ }
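
These *_example.json files are the schema registry behind teradataml's load_example_data() loader: each entry maps an example table name to the column types used when creating it from the matching CSV. A minimal sketch of loading the new NER tables; the "tdnerextractor" group name is inferred from the JSON file name and is an assumption, not confirmed by this diff:

    >>> from teradataml import create_context, load_example_data, DataFrame
    >>> con = create_context(host="<host>", username="<user>", password="<password>")
    # Creates and populates ner_input_eng, ner_rule and ner_dict with the
    # column types declared above.
    >>> load_example_data("tdnerextractor", ["ner_input_eng", "ner_rule", "ner_dict"])
    >>> DataFrame("ner_input_eng")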

teradataml/data/teradataml_example.json
@@ -1356,6 +1356,10 @@
  "format_col": "VARCHAR(50)",
  "timezone_col": "VARCHAR(50)"
  },
+ "to_num_data":{
+ "price": "VARCHAR(20)",
+ "col_format": "VARCHAR(20)"
+ },
  "interval_data":{
  "id": "INTEGER",
  "int_col": "BIGINT",
@@ -1367,7 +1371,8 @@
  "url_data": {
  "id": "INTEGER",
  "urls": "VARCHAR(60)",
- "part": "VARCHAR(20)"
+ "part": "VARCHAR(20)",
+ "query_key": "VARCHAR(20)"
  },
  "hnsw_data": {
  "id": "INTEGER",
@@ -1376,5 +1381,20 @@
  "hnsw_alter_data": {
  "id": "INTEGER",
  "array_col": "Vector"
+ },
+ "tochar_data": {
+ "id": "INTEGER",
+ "int_col": "INTEGER",
+ "float_col": "FLOAT",
+ "date_col": "DATE",
+ "int_format": "VARCHAR(20)",
+ "float_format": "VARCHAR(20)",
+ "date_format": "VARCHAR(20)"
+ },
+ "pattern_matching_data":{
+ "id": "INTEGER",
+ "data": "VARCHAR(20)",
+ "pattern": "VARCHAR(20)",
+ "level": "VARCHAR(20)"
  }
  }

teradataml/data/textmorph_example.json
@@ -2,5 +2,10 @@
  "words_input": {
  "id": "integer",
  "word": "varchar(30)"
+ },
+ "pos_input": {
+ "id": "integer",
+ "word": "varchar(100)",
+ "pos_tag": "varchar(100)"
  }
  }

teradataml/data/to_num_data.csv
@@ -0,0 +1,4 @@
+ price,col_format
+ "78.12","99.99"
+ "USD123","C999"
+ "$1234","$9999"

teradataml/data/tochar_data.csv
@@ -0,0 +1,5 @@
+ id,int_col,float_col,date_col,int_format,float_format,date_format
+ 0,1234,234.56,03/09/17,"9,999","999D9","MM-DD"
+ 1,456,234.56,03/09/17,"$999","9.9EEEE","CCAD"
+ 2,789,123.46,03/09/17,"0999","9999.9","DAY"
+ 3,1314,123.46,03/09/17,"XXXX","TM9","DY"
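
tochar_data mirrors to_num_data for the TO_CHAR direction: numeric masks ('9,999', '0999'), a decimal-separator mask ('999D9'), scientific notation ('9.9EEEE'), and date masks ('MM-DD', 'DAY', 'DY'). A hedged sketch of the underlying SQL, again assuming the loaded table exists:

    >>> from teradataml import execute_sql
    # Format each column with the mask stored alongside it.
    >>> execute_sql("SELECT id, TO_CHAR(int_col, int_format), "
    ...             "TO_CHAR(float_col, float_format), "
    ...             "TO_CHAR(date_col, date_format) FROM tochar_data;").fetchall()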

teradataml/data/trans_dense.csv
@@ -0,0 +1,16 @@
+ "location","tranid","period","storeid","region","item","sku","category"
+ "MiddleEast",999,"20100715",1,"west","milk,butter,eggs,flour,spinach",1,"dairy"
+ "MiddleEast",1000,"20100715",1,"west","milk,eggs,flour,spinach",1,"dairy"
+ "MiddleEast",1001,"20100715",1,"west","milk,butter,eggs",1,"dairy"
+ "MiddleEast",1002,"20100715",1,"west","milk,butter,spinach",1,"dairy"
+ "MiddleEast",1500,"20100715",3,"west","butter,eggs,flour",2,"dairy"
+ "AsiaPacific",999,"20100715",1,"west","milk,butter,eggs,flour,spinach",1,"dairy"
+ "AsiaPacific",1000,"20100715",1,"west","milk,eggs,flour,spinach",1,"dairy"
+ "AsiaPacific",1001,"20100715",1,"west","milk,butter,eggs",1,"dairy"
+ "AsiaPacific",1002,"20100715",1,"west","milk,butter,spinach",1,"dairy"
+ "AsiaPacific",1500,"20100715",3,"west","butter,eggs,flour",2,"dairy"
+ "LatinAmerica",999,"20100715",1,"west","milk,butter,eggs,flour,spinach",1,"dairy"
+ "LatinAmerica",1000,"20100715",1,"west","milk,eggs,flour,spinach",1,"dairy"
+ "LatinAmerica",1001,"20100715",1,"west","milk,butter,eggs",1,"dairy"
+ "LatinAmerica",1002,"20100715",1,"west","milk,butter,spinach",1,"dairy"
+ "LatinAmerica",1500,"20100715",3,"west","butter,eggs,flour",2,"dairy"

teradataml/data/trans_sparse.csv
@@ -0,0 +1,55 @@
+ "location","tranid","period","storeid","region","item","sku","category"
+ "MiddleEast",999,"20100715",1,"west","milk",1,"dairy"
+ "MiddleEast",999,"20100715",1,"west","butter",2,"dairy"
+ "MiddleEast",999,"20100715",1,"west","eggs",3,"dairy"
+ "MiddleEast",999,"19990715",1,"west","flour",4,"baking"
+ "MiddleEast",999,"19990715",1,"west","spinach",4,"produce"
+ "MiddleEast",1000,"20100715",1,"west","milk",1,"dairy"
+ "MiddleEast",1000,"20100715",1,"west","eggs",3,"dairy"
+ "MiddleEast",1000,"19990715",1,"west","flour",4,"baking"
+ "MiddleEast",1000,"19990715",1,"west","spinach",2,"produce"
+ "MiddleEast",1001,"20100715",1,"west","milk",1,"dairy"
+ "MiddleEast",1001,"20100715",1,"west","butter",2,"dairy"
+ "MiddleEast",1001,"20100715",1,"west","eggs",3,"dairy"
+ "MiddleEast",1002,"20100715",1,"west","milk",1,"dairy"
+ "MiddleEast",1002,"20100715",1,"west","butter",2,"dairy"
+ "MiddleEast",1002,"20100715",1,"west","spinach",3,"produce"
+ "MiddleEast",1500,"20100715",3,"west","butter",2,"dairy"
+ "MiddleEast",1500,"20100715",3,"west","eggs",3,"dairy"
+ "MiddleEast",1500,"20100715",3,"west","flour",4,"baking"
+ "AsiaPacific",999,"20100715",1,"west","milk",1,"dairy"
+ "AsiaPacific",999,"20100715",1,"west","butter",2,"dairy"
+ "AsiaPacific",999,"20100715",1,"west","eggs",3,"dairy"
+ "AsiaPacific",999,"19990715",1,"west","flour",4,"baking"
+ "AsiaPacific",999,"19990715",1,"west","spinach",4,"produce"
+ "AsiaPacific",1000,"20100715",1,"west","milk",1,"dairy"
+ "AsiaPacific",1000,"20100715",1,"west","eggs",3,"dairy"
+ "AsiaPacific",1000,"19990715",1,"west","flour",4,"baking"
+ "AsiaPacific",1000,"19990715",1,"west","spinach",2,"produce"
+ "AsiaPacific",1001,"20100715",1,"west","milk",1,"dairy"
+ "AsiaPacific",1001,"20100715",1,"west","butter",2,"dairy"
+ "AsiaPacific",1001,"20100715",1,"west","eggs",3,"dairy"
+ "AsiaPacific",1002,"20100715",1,"west","milk",1,"dairy"
+ "AsiaPacific",1002,"20100715",1,"west","butter",2,"dairy"
+ "AsiaPacific",1002,"20100715",1,"west","spinach",3,"produce"
+ "AsiaPacific",1500,"20100715",3,"west","butter",2,"dairy"
+ "AsiaPacific",1500,"20100715",3,"west","eggs",3,"dairy"
+ "AsiaPacific",1500,"20100715",3,"west","flour",4,"baking"
+ "LatinAmerica",999,"20100715",1,"west","milk",1,"dairy"
+ "LatinAmerica",999,"20100715",1,"west","butter",2,"dairy"
+ "LatinAmerica",999,"20100715",1,"west","eggs",3,"dairy"
+ "LatinAmerica",999,"19990715",1,"west","flour",4,"baking"
+ "LatinAmerica",999,"19990715",1,"west","spinach",4,"produce"
+ "LatinAmerica",1000,"20100715",1,"west","milk",1,"dairy"
+ "LatinAmerica",1000,"20100715",1,"west","eggs",3,"dairy"
+ "LatinAmerica",1000,"19990715",1,"west","flour",4,"baking"
+ "LatinAmerica",1000,"19990715",1,"west","spinach",2,"produce"
+ "LatinAmerica",1001,"20100715",1,"west","milk",1,"dairy"
+ "LatinAmerica",1001,"20100715",1,"west","butter",2,"dairy"
+ "LatinAmerica",1001,"20100715",1,"west","eggs",3,"dairy"
+ "LatinAmerica",1002,"20100715",1,"west","milk",1,"dairy"
+ "LatinAmerica",1002,"20100715",1,"west","butter",2,"dairy"
+ "LatinAmerica",1002,"20100715",1,"west","spinach",3,"produce"
+ "LatinAmerica",1500,"20100715",3,"west","butter",2,"dairy"
+ "LatinAmerica",1500,"20100715",3,"west","eggs",3,"dairy"
+ "LatinAmerica",1500,"20100715",3,"west","flour",4,"baking"

teradataml/data/url_data.csv
@@ -1,9 +1,10 @@
- "id","urls","part"
- 0,"http://example.com:8080/path","FILE"
- 1,"ftp://example.net:21/path","PATH"
- 2,"https://example.net/path4/path5/path6?query4=value4#fragment3","REF"
- 3,"https://www.facebook.com","HOST"
- 4,"https://teracloud-pod-services-pod-account-service.dummyvalue.production.pods.teracloud.ninja/v1/accounts/acc-dummyvalue/user-environment-service/api/v1/","QUERY"
- 5,"http://pg.example.ml/path150#fragment90","AUTHORITY"
- 6,"smtp://user:password@smtp.example.com:21/file.txt","USERINFO"
- 7,"https://www.google.com","PROTOCOL"
+ "id","urls","part","query_key"
+ 0,"http://example.com:8080/path","FILE","path"
+ 1,"ftp://example.net:21/path","PATH","path"
+ 2,"https://example.net/path4/path5/path6?query4=value4#fragment3","REF","fragment3"
+ 3,"https://www.facebook.com","HOST","facebook.com"
+ 4,"https://teracloud-pod-services-pod-account-service.dummyvalue.production.pods.teracloud.ninja/v1/accounts/acc-dummyvalue/user-environment-service/api/v1/","QUERY",None
+ 5,"http://pg.example.ml/path150#fragment90","AUTHORITY","fragment90"
+ 6,"smtp://user:password@smtp.example.com:21/file.txt","USERINFO","password"
+ 7,"https://www.google.com","PROTOCOL","google.com"
+ 8,"http://example.com/api?query1=value1&query2=value2","QUERY","query1"

teradataml/dataframe/copy_to.py
@@ -131,7 +131,7 @@ def copy_to_sql(df, table_name,
 
  types
  Optional Argument.
- Specifies required data-types for requested columns to be saved in Vantage.
+ Specifies required data types for requested columns to be saved in Teradata Vantage.
  Types: Python dictionary ({column_name1: type_value1, ... column_nameN: type_valueN})
  Default: None
 
@@ -297,10 +297,10 @@ def copy_to_sql(df, table_name,
  >>> from teradatasqlalchemy.types import *
 
  >>> df = {'emp_name': ['A1', 'A2', 'A3', 'A4'],
- 'emp_sage': [100, 200, 300, 400],
- 'emp_id': [133, 144, 155, 177],
- 'marks': [99.99, 97.32, 94.67, 91.00]
- }
+ ... 'emp_sage': [100, 200, 300, 400],
+ ... 'emp_id': [133, 144, 155, 177],
+ ... 'marks': [99.99, 97.32, 94.67, 91.00]
+ ... }
 
  >>> pandas_df = pd.DataFrame(df)
 
@@ -313,24 +313,35 @@ def copy_to_sql(df, table_name,
 
  c) Save a Pandas DataFrame by specifying additional parameters:
  >>> copy_to_sql(df = pandas_df, table_name = 'my_table_2', schema_name = 'alice',
- index = True, index_label = 'my_index_label', temporary = False,
- primary_index = ['emp_id'], if_exists = 'append',
- types = {'emp_name': VARCHAR, 'emp_sage':INTEGER,
- 'emp_id': BIGINT, 'marks': DECIMAL})
+ ... index = True, index_label = 'my_index_label', temporary = False,
+ ... primary_index = ['emp_id'], if_exists = 'append',
+ ... types = {'emp_name': VARCHAR, 'emp_sage':INTEGER,
+ ... 'emp_id': BIGINT, 'marks': DECIMAL})
 
  d) Saving with additional parameters as a SET table
  >>> copy_to_sql(df = pandas_df, table_name = 'my_table_3', schema_name = 'alice',
- index = True, index_label = 'my_index_label', temporary = False,
- primary_index = ['emp_id'], if_exists = 'append',
- types = {'emp_name': VARCHAR, 'emp_sage':INTEGER,
- 'emp_id': BIGINT, 'marks': DECIMAL},
- set_table=True)
+ ... index = True, index_label = 'my_index_label', temporary = False,
+ ... primary_index = ['emp_id'], if_exists = 'append',
+ ... types = {'emp_name': VARCHAR, 'emp_sage':INTEGER,
+ ... 'emp_id': BIGINT, 'marks': DECIMAL},
+ ... set_table=True)
 
  e) Saving levels in index of type MultiIndex
  >>> pandas_df = pandas_df.set_index(['emp_id', 'emp_name'])
  >>> copy_to_sql(df = pandas_df, table_name = 'my_table_4', schema_name = 'alice',
- index = True, index_label = ['index1', 'index2'], temporary = False,
- primary_index = ['index1'], if_exists = 'replace')
+ ... index = True, index_label = ['index1', 'index2'], temporary = False,
+ ... primary_index = ['index1'], if_exists = 'replace')
+
+ f) Save a Pandas DataFrame with VECTOR datatype:
+ >>> import pandas as pd
+ >>> VECTOR_data = {
+ ... 'id': [10, 11, 12, 13],
+ ... 'array_col': ['1,1', '2,2', '3,3', '4,4']
+ ... }
+ >>> df = pd.DataFrame(VECTOR_data)
+
+ >>> from teradatasqlalchemy import VECTOR
+ >>> copy_to_sql(df=df, table_name='my_vector_table', types={'array_col': VECTOR})
 
  2. Saving a teradataml DataFrame:
 
@@ -358,13 +369,13 @@ def copy_to_sql(df, table_name,
 
  d) Save a teradataml DataFrame by using copy_to_sql with additional parameters:
  >>> copy_to_sql(df = df2, table_name = 'my_tdml_table_3', schema_name = 'alice',
- temporary = False, primary_index = None, if_exists = 'append',
- types = {'masters': VARCHAR, 'gpa':INTEGER})
+ ... temporary = False, primary_index = None, if_exists = 'append',
+ ... types = {'masters': VARCHAR, 'gpa':INTEGER})
 
  e) Saving as a SET table
  >>> copy_to_sql(df = df2, table_name = 'my_tdml_set_table', schema_name = 'alice',
- temporary = False, primary_index = ['gpa'], if_exists = 'append',
- types = {'masters': VARCHAR, 'gpa':INTEGER}, set_table = True)
+ ... temporary = False, primary_index = ['gpa'], if_exists = 'append',
+ ... types = {'masters': VARCHAR, 'gpa':INTEGER}, set_table = True)
 
  3. Saving a teradataml DataFrame as a PTI table:
 
@@ -377,19 +388,19 @@ def copy_to_sql(df, table_name,
 
  a) Using copy_to_sql
  >>> copy_to_sql(df3, "test_copyto_pti",
- timecode_column='clicktime',
- columns_list='event')
+ ... timecode_column='clicktime',
+ ... columns_list='event')
 
  b) Alternatively, using DataFrame.to_sql
  >>> df3.to_sql(table_name = "test_copyto_pti_1",
- timecode_column='clicktime',
- columns_list='event')
+ ... timecode_column='clicktime',
+ ... columns_list='event')
 
  c) Saving as a SET table
  >>> copy_to_sql(df3, "test_copyto_pti_2",
- timecode_column='clicktime',
- columns_list='event',
- set_table=True)
+ ... timecode_column='clicktime',
+ ... columns_list='event',
+ ... set_table=True)
 
  """
  # Deriving global connection using get_connection().

teradataml/dataframe/data_transfer.py
@@ -690,77 +690,77 @@ def read_csv(filepath,
  # while doing so catch all errors and warnings as well as store those in the table.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table1', types=types,
- save_errors=True, catch_errors_warnings=True)
+ ... table_name='my_first_table1', types=types,
+ ... save_errors=True, catch_errors_warnings=True)
 
  # Example 3: Load the data from CSV file into a table using fastload CSV protocol.
  # If table exists, then replace the same. Catch all errors and warnings as well as
  # store those in the table.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- types=types, if_exists='replace',
- save_errors=True, catch_errors_warnings=True)
+ ... table_name='my_first_table',
+ ... types=types, if_exists='replace',
+ ... save_errors=True, catch_errors_warnings=True)
 
  # Example 4: Load the data from CSV file into a table using fastload CSV protocol.
  # If table exists in specified schema, then append the same. Catch all
  # errors and warnings as well as store those in the table.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- types=types, if_exists='fail',
- save_errors=True, catch_errors_warnings=True)
+ ... table_name='my_first_table',
+ ... types=types, if_exists='fail',
+ ... save_errors=True, catch_errors_warnings=True)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- if_exists='append',
- save_errors=True, catch_errors_warnings=True)
+ ... table_name='my_first_table',
+ ... if_exists='append',
+ ... save_errors=True, catch_errors_warnings=True)
 
  # Example 5: Load the data from CSV file into a SET table using fastload CSV protocol.
  # Catch all errors and warnings as well as store those in the table.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- types=types, if_exists='replace',
- set_table=True, primary_index='id',
- save_errors=True, catch_errors_warnings=True)
+ ... table_name='my_first_table',
+ ... types=types, if_exists='replace',
+ ... set_table=True, primary_index='id',
+ ... save_errors=True, catch_errors_warnings=True)
 
  # Example 6: Load the data from CSV file into a temporary table without fastloadCSV protocol.
  # If table exists, then append to the same.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- types=types, if_exists='replace',
- temporary=True)
+ ... table_name='my_first_table',
+ ... types=types, if_exists='replace',
+ ... temporary=True)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- if_exists='append',
- temporary=True)
+ ... table_name='my_first_table',
+ ... if_exists='append',
+ ... temporary=True)
 
  # Example 7: Load the data from CSV file with DATE and TIMESTAMP columns into
  # a table without Fastload protocol. If table exists in specified
  # schema, then append to the table.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT,
- admission_date=DATE, admission_time=TIMESTAMP)
+ ... admission_date=DATE, admission_time=TIMESTAMP)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- types=types, if_exists='fail',
- use_fastload=False)
+ ... table_name='my_first_table',
+ ... types=types, if_exists='fail',
+ ... use_fastload=False)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_table',
- if_exists='append',
- use_fastload=False)
+ ... table_name='my_first_table',
+ ... if_exists='append',
+ ... use_fastload=False)
 
  # Example 8: Load the data from CSV file with TIMESTAMP columns into
  # a PTI table. If specified table exists then append to the table,
  # otherwise creates new table.
  >>> types = OrderedDict(partition_id=INTEGER, adid=INTEGER, productid=INTEGER,
- event=VARCHAR, clicktime=TIMESTAMP)
+ ... event=VARCHAR, clicktime=TIMESTAMP)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_read_csv_pti_table',
- types=types, if_exists='append',
- timecode_column='clicktime',
- columns_list='event',
- use_fastload=False)
+ ... table_name='my_first_read_csv_pti_table',
+ ... types=types, if_exists='append',
+ ... timecode_column='clicktime',
+ ... columns_list='event',
+ ... use_fastload=False)
 
  # Example 9: Load the data from CSV file with TIMESTAMP columns into
  # a SET PTI table. If specified table exists then append to the table,
@@ -768,11 +768,11 @@ def read_csv(filepath,
  >>> types = OrderedDict(partition_id=INTEGER, adid=INTEGER, productid=INTEGER,
  event=VARCHAR, clicktime=TIMESTAMP)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_read_csv_pti_table',
- types=types, if_exists='append',
- timecode_column='clicktime',
- columns_list='event',
- set_table=True)
+ ... table_name='my_first_read_csv_pti_table',
+ ... types=types, if_exists='append',
+ ... timecode_column='clicktime',
+ ... columns_list='event',
+ ... set_table=True)
 
  # Example 10: Load the data from CSV file with TIMESTAMP columns into
  # a temporary PTI table. If specified table exists then append to the table,
@@ -780,11 +780,11 @@ def read_csv(filepath,
  >>> types = OrderedDict(partition_id=INTEGER, adid=INTEGER, productid=INTEGER,
  event=VARCHAR, clicktime=TIMESTAMP)
  >>> read_csv(filepath='test_file.csv',
- table_name='my_first_read_csv_pti_table',
- types=types, if_exists='append',
- timecode_column='clicktime',
- columns_list='event',
- temporary=True)
+ ... table_name='my_first_read_csv_pti_table',
+ ... types=types, if_exists='append',
+ ... timecode_column='clicktime',
+ ... columns_list='event',
+ ... temporary=True)
 
  # Example 11: Load the data from CSV file into Vantage table by opening specified
  # number of Teradata data transfer sessions.
@@ -796,8 +796,24 @@ def read_csv(filepath,
  # through primary_index argument.
  >>> types = OrderedDict(id=BIGINT, fname=VARCHAR, lname=VARCHAR, marks=FLOAT)
  >>> read_csv(filepath='test_file.csv', table_name='my_first_table_with_primary_index',
- types=types, primary_index = ['fname'])
+ ... types=types, primary_index = ['fname'])
+
+ # Example 13: Load the data from CSV file into VECTOR datatype in Vantage table.
+ >>> from teradatasqlalchemy import VECTOR
+ >>> from pathlib import Path
+ >>> types = OrderedDict(id=BIGINT, array_col=VECTOR)
+
+ # Get the absolute path of the teradataml module
+ >>> import teradataml
+ >>> base_path = Path(teradataml.__path__[0])
+
+ # Append the relative path to the CSV file
+ >>> csv_path = base_path / "data" / "hnsw_alter_data.csv"
 
+ >>> read_csv(filepath=csv_path,
+ ... table_name='my_first_table_with_vector',
+ ... types=types,
+ ... use_fastload=False)
  """
  # Deriving global connection using context.get_context()
  con = get_context()