teradataml 20.0.0.3__py3-none-any.whl → 20.0.0.4__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (84)
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/README.md +119 -0
  3. teradataml/_version.py +1 -1
  4. teradataml/analytics/analytic_function_executor.py +18 -6
  5. teradataml/analytics/byom/__init__.py +1 -1
  6. teradataml/analytics/sqle/__init__.py +4 -1
  7. teradataml/analytics/valib.py +18 -4
  8. teradataml/automl/__init__.py +51 -6
  9. teradataml/automl/data_preparation.py +56 -33
  10. teradataml/automl/data_transformation.py +58 -33
  11. teradataml/automl/feature_engineering.py +12 -5
  12. teradataml/automl/model_training.py +34 -13
  13. teradataml/common/__init__.py +1 -2
  14. teradataml/common/constants.py +64 -40
  15. teradataml/common/messagecodes.py +13 -3
  16. teradataml/common/messages.py +4 -1
  17. teradataml/common/sqlbundle.py +40 -10
  18. teradataml/common/utils.py +113 -39
  19. teradataml/common/warnings.py +11 -0
  20. teradataml/context/context.py +141 -17
  21. teradataml/data/amazon_reviews_25.csv +26 -0
  22. teradataml/data/byom_example.json +11 -0
  23. teradataml/data/docs/byom/docs/DataRobotPredict.py +2 -2
  24. teradataml/data/docs/byom/docs/DataikuPredict.py +40 -1
  25. teradataml/data/docs/byom/docs/H2OPredict.py +2 -2
  26. teradataml/data/docs/byom/docs/ONNXEmbeddings.py +242 -0
  27. teradataml/data/docs/byom/docs/ONNXPredict.py +2 -2
  28. teradataml/data/docs/byom/docs/PMMLPredict.py +2 -2
  29. teradataml/data/docs/sqle/docs_17_20/Shap.py +28 -6
  30. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +4 -1
  31. teradataml/data/hnsw_alter_data.csv +5 -0
  32. teradataml/data/hnsw_data.csv +10 -0
  33. teradataml/data/jsons/byom/h2opredict.json +1 -1
  34. teradataml/data/jsons/byom/onnxembeddings.json +266 -0
  35. teradataml/data/jsons/sqle/17.20/TD_Shap.json +0 -1
  36. teradataml/data/jsons/sqle/20.00/TD_HNSW.json +296 -0
  37. teradataml/data/jsons/sqle/20.00/TD_HNSWPredict.json +206 -0
  38. teradataml/data/jsons/sqle/20.00/TD_HNSWSummary.json +32 -0
  39. teradataml/data/jsons/sqle/20.00/TD_KMeans.json +2 -2
  40. teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +1 -1
  41. teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +5 -5
  42. teradataml/data/teradataml_example.json +8 -0
  43. teradataml/data/vectordistance_example.json +1 -1
  44. teradataml/dataframe/copy_to.py +8 -3
  45. teradataml/dataframe/data_transfer.py +11 -1
  46. teradataml/dataframe/dataframe.py +517 -121
  47. teradataml/dataframe/dataframe_utils.py +152 -20
  48. teradataml/dataframe/functions.py +26 -11
  49. teradataml/dataframe/setop.py +11 -6
  50. teradataml/dataframe/sql.py +2 -2
  51. teradataml/dbutils/dbutils.py +525 -129
  52. teradataml/hyperparameter_tuner/optimizer.py +12 -1
  53. teradataml/opensource/{sklearn/_sklearn_wrapper.py → _base.py} +317 -1011
  54. teradataml/opensource/_class.py +141 -17
  55. teradataml/opensource/{constants.py → _constants.py} +7 -3
  56. teradataml/opensource/_lightgbm.py +52 -53
  57. teradataml/opensource/_sklearn.py +1008 -0
  58. teradataml/opensource/_wrapper_utils.py +5 -5
  59. teradataml/options/__init__.py +47 -15
  60. teradataml/options/configure.py +103 -25
  61. teradataml/options/display.py +13 -2
  62. teradataml/plot/axis.py +47 -8
  63. teradataml/plot/figure.py +33 -0
  64. teradataml/plot/plot.py +63 -13
  65. teradataml/scriptmgmt/UserEnv.py +2 -2
  66. teradataml/scriptmgmt/lls_utils.py +63 -26
  67. teradataml/store/__init__.py +1 -2
  68. teradataml/store/feature_store/feature_store.py +102 -7
  69. teradataml/table_operators/Apply.py +32 -18
  70. teradataml/table_operators/Script.py +3 -1
  71. teradataml/table_operators/TableOperator.py +3 -1
  72. teradataml/utils/dtypes.py +47 -0
  73. teradataml/utils/internal_buffer.py +18 -0
  74. teradataml/utils/validators.py +68 -9
  75. {teradataml-20.0.0.3.dist-info → teradataml-20.0.0.4.dist-info}/METADATA +123 -2
  76. {teradataml-20.0.0.3.dist-info → teradataml-20.0.0.4.dist-info}/RECORD +79 -75
  77. teradataml/data/SQL_Fundamentals.pdf +0 -0
  78. teradataml/libaed_0_1.dylib +0 -0
  79. teradataml/libaed_0_1.so +0 -0
  80. teradataml/opensource/sklearn/__init__.py +0 -0
  81. teradataml/store/vector_store/__init__.py +0 -1586
  82. {teradataml-20.0.0.3.dist-info → teradataml-20.0.0.4.dist-info}/WHEEL +0 -0
  83. {teradataml-20.0.0.3.dist-info → teradataml-20.0.0.4.dist-info}/top_level.txt +0 -0
  84. {teradataml-20.0.0.3.dist-info → teradataml-20.0.0.4.dist-info}/zip-safe +0 -0
teradataml/dataframe/dataframe_utils.py

```diff
@@ -12,13 +12,14 @@ This file implements util functions of data frame.
 """
 
 import numbers
+import re
 import pandas as pd
 from collections import OrderedDict
 
 from teradataml.common.utils import UtilFuncs
 from teradataml.common.aed_utils import AedUtils
 from teradataml.common.constants import AEDConstants, PTITableConstants, \
-    SQLPattern, PythonTypes
+    SQLPattern, PythonTypes, TeradataConstants, SQLConstants
 from teradataml.common.sqlbundle import SQLBundle
 from teradataml.common.exceptions import TeradataMlException
 from teradataml.common.messages import Messages
```
```diff
@@ -30,6 +31,7 @@ from teradataml.dbutils.dbutils import _execute_query_and_generate_pandas_df
 
 from teradataml.options.display import display
 from teradataml.options.configure import configure
+from teradataml.utils.dtypes import _DtypesMappers
 from teradataml.utils.utils import execute_sql
 
 from teradatasqlalchemy.types import FLOAT, NUMBER, DECIMAL, PERIOD_TIMESTAMP
```
```diff
@@ -77,7 +79,10 @@ class DataFrameUtils():
             is_persist = True
 
         try:
-            if node_query_types[index] == AEDConstants.AED_QUERY_NODE_TYPE_ML_QUERY_MULTI_OUTPUT.value or\
+            if configure.temp_object_type == TeradataConstants.TERADATA_VOLATILE_TABLE:
+                UtilFuncs._create_table(view_names[index], queries[index], volatile=True)
+
+            elif node_query_types[index] == AEDConstants.AED_QUERY_NODE_TYPE_ML_QUERY_MULTI_OUTPUT.value or\
                ("OUT TABLE " in queries[index] and SQLPattern.SQLMR.value.match(queries[index])) or \
                is_persist:
                 # TODO:: OR condition in above needs to be removed once AED support is added.
```
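The first branch is new: when the `temp_object_type` option selects volatile tables, intermediate results are materialized as volatile tables instead of views. A standalone sketch of that dispatch follows; the stub helpers and the `"VT"` option value are assumed stand-ins (the real code uses `UtilFuncs._create_table` and `TeradataConstants.TERADATA_VOLATILE_TABLE`):

```python
# Standalone sketch of the dispatch added above (stub helpers; illustrative only).

TERADATA_VOLATILE_TABLE = "VT"  # assumed stand-in for the enum value

def _create_table(name, query, volatile=False):
    kind = "VOLATILE TABLE" if volatile else "TABLE"
    print(f"CREATE {kind} {name} AS ({query}) WITH DATA")

def _create_view(name, query):
    print(f"CREATE VIEW {name} AS {query}")

def materialize(name, query, temp_object_type=None):
    # New in 20.0.0.4: intermediate objects become volatile tables when the
    # temp_object_type option selects them; otherwise views are created as before.
    if temp_object_type == TERADATA_VOLATILE_TABLE:
        _create_table(name, query, volatile=True)
    else:
        _create_view(name, query)

materialize("ml__tmp_1", "SELECT * FROM sales", temp_object_type="VT")
```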
```diff
@@ -1291,43 +1296,32 @@ class DataFrameUtils():
 
         aggregate_expr = ", ".join(select_columns)
         return aggregate_expr, new_column_names, new_column_types
-
+
     @staticmethod
-    def _invalid_describe_column(df, columns, metaexpr, groupby_column_list):
+    def _validate_describe_columns(columns, metaexpr, groupby_column_list):
         """
-        Internal function to validate columns provided to describe() is correct or not,
+        Internal function to validate columns provided to describe() are correct or not,
         when DataFrame is output of groupby and groupby_time.
-
         PARAMETERS:
-            df:
-                Required Argument.
-                Specifies teradataml DataFrame we are collecting statistics for.
-                Types: str
-
             columns:
                 Optional Argument.
                 Specifies the name(s) of columns we are collecting statistics for.
                 Types: str ot List of strings (str)
-
             metaexpr:
                 Required Argument.
                 Specifies the meta expression for the dataframe.
                 Types: _MetaExpression
-
             groupby_column_list:
                 Optional Argument.
                 Specifies the group by columns for the dataframe.
                 Default Values: None.
                 Types: str ot List of strings (str)
-
         Returns:
             None
-
         Raises:
             TeradataMLException
         """
-        invalid_columns = [_column for _column in groupby_column_list if columns is not None
-                           and _column in columns]
+        invalid_columns = [_column for _column in groupby_column_list if _column in columns]
         if len(invalid_columns) > 0:
             all_columns = [col.name for col in metaexpr.c]
             valid_columns = [item for item in all_columns if item not in groupby_column_list]
```
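The helper is renamed from `_invalid_describe_column` to `_validate_describe_columns`, drops the unused `df` argument, and removes the `columns is not None` guard, so callers are now expected to pass a concrete column list. A plain-Python sketch of the check (function name and message text are illustrative):

```python
# Sketch of the tightened validation: a GROUP BY column may not also be
# requested in describe().

def validate_describe_columns(columns, all_columns, groupby_column_list):
    invalid_columns = [c for c in groupby_column_list if c in columns]
    if invalid_columns:
        valid_columns = [c for c in all_columns if c not in groupby_column_list]
        raise ValueError("Invalid column(s) {}; valid columns are {}."
                         .format(invalid_columns, valid_columns))

validate_describe_columns(["gpa"], ["id", "gpa", "masters"], ["id"])    # passes
try:
    validate_describe_columns(["id"], ["id", "gpa", "masters"], ["id"])
except ValueError as err:
    print(err)  # Invalid column(s) ['id']; valid columns are ['gpa', 'masters'].
```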
```diff
@@ -1849,7 +1843,10 @@ class DataFrameUtils():
             db_schema = UtilFuncs._extract_db_name(tab_name_first)
             db_table_name = UtilFuncs._extract_table_name(tab_name_first)
 
-            return DataFrame(in_schema(db_schema, db_table_name))
+            if db_schema:
+                return DataFrame(in_schema(db_schema, db_table_name))
+
+            return DataFrame(db_table_name)
 
         pids_first = None
         parent_df = None
```
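The new guard only schema-qualifies the DataFrame when a schema was actually parsed from the table reference; otherwise the table resolves against the current default database. A runnable sketch, with simplified stand-ins for `UtilFuncs._extract_db_name` / `_extract_table_name` and the constructed call rendered as a string:

```python
# Runnable sketch of the fallback (helpers are simplified stand-ins).

def extract_db_name(ref):
    return ref.split(".")[0] if "." in ref else None

def extract_table_name(ref):
    return ref.split(".")[-1]

def to_dataframe_call(ref):
    db_schema = extract_db_name(ref)
    db_table_name = extract_table_name(ref)
    if db_schema:
        return 'DataFrame(in_schema("{}", "{}"))'.format(db_schema, db_table_name)
    return 'DataFrame("{}")'.format(db_table_name)

print(to_dataframe_call("mydb.sales"))  # schema-qualified table reference
print(to_dataframe_call("sales"))       # resolved in the default database
```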
```diff
@@ -1865,11 +1862,146 @@ class DataFrameUtils():
                 db_schema = UtilFuncs._extract_db_name(tab_name_first)
                 db_table_name = UtilFuncs._extract_table_name(tab_name_first)
 
-                parent_df = DataFrame(in_schema(db_schema, db_table_name))
+                if db_schema:
+                    parent_df = DataFrame(in_schema(db_schema, db_table_name))
+                else:
+                    parent_df = DataFrame(db_table_name)
                 pids_first = pids
             else:
                 if pids_first != pids:
                     raise TeradataMlException(Messages.get_message(MessageCodes.DFS_NO_COMMON_PARENT),
                                               MessageCodes.DFS_NO_COMMON_PARENT)
 
-        return parent_df
+        return parent_df
+
+    @staticmethod
+    def _get_sqlalchemy_type_from_str(td_type):
+        """
+        Function to get teradatasqlalchemy type from string representation of that type.
+
+        PARAMETERS:
+            td_type:
+                Required Argument.
+                Specifies string representation of teradatasqlalchemy type.
+                Types: str
+
+        RAISES:
+            ValueError
+
+        EXAMPLES:
+            >>> dt = DataFrameUtils._get_sqlalchemy_type_from_str("DECIMAL(4,4)")
+            >>> dt
+            DECIMAL(precision=4, scale=4)
+            >>> type(dt)
+            teradatasqlalchemy.types.DECIMAL
+
+            >>> dt = DataFrameUtils._get_sqlalchemy_type_from_str("VARCHAR(32000) CHARACTER SET UNICODE")
+            >>> dt
+            VARCHAR(length=32000, charset='UNICODE')
+            >>> type(dt)
+            teradatasqlalchemy.types.VARCHAR
+        """
+        # 4 groups of pattern:
+        #   1. Type name
+        #   2. Comma separated parameters enclosed in parentheses
+        #   3. Comma separated parameters without parenthesis
+        #   4. Remaining string
+        pattern = "([A-Z0-9_]+)(\((.*)\))?(.*)"
+
+        m = re.match(pattern, td_type)
+        td_str_type = m.group(1)
+        td_str_params = m.group(3)
+        td_str_remain = m.group(4)
+
+        if m is None or td_str_type not in _DtypesMappers.DATALAKE_STR_to_TDSQLALCHEMY_DATATYPE_MAPPER.keys():
+            raise ValueError("Invalid Teradata type: {} from datalake".format(td_type))
+
+        if td_str_type in ["VARCHAR", "CHAR"]:
+            # If VARCHAR or CHAR, extract, length and charset from string.
+            length = int(td_str_params.split(",")[0])
+            charset = td_str_remain.strip().split(" ")[2]
+            return _DtypesMappers.DATALAKE_STR_to_TDSQLALCHEMY_DATATYPE_MAPPER[td_str_type]\
+                (length=length, charset=charset)
+
+        if td_str_type in ["BLOB"]:
+            # Ignoring the charset as BLOB does not have it.
+            # If BLOB, extract length from string.
+            length = int(td_str_params.split(",")[0])
+            return _DtypesMappers.DATALAKE_STR_to_TDSQLALCHEMY_DATATYPE_MAPPER[td_str_type]\
+                (length=length)
+
+        if td_str_type in ["DECIMAL"]:
+            # If DECIMAL, extract precision and scale from string.
+            args = td_str_params.split(",")
+            return _DtypesMappers.DATALAKE_STR_to_TDSQLALCHEMY_DATATYPE_MAPPER[td_str_type]\
+                (precision=int(args[0]), scale=int(args[1]))
+
+        # TODO: Test for other data types once OTF team finalize all data types.
+        return _DtypesMappers.DATALAKE_STR_to_TDSQLALCHEMY_DATATYPE_MAPPER[td_str_type]()
+
+    @staticmethod
+    def _get_datalake_table_columns_info(schema, table_name, datalake):
+        """
+        Function to get column names and corresponding teradatasqlalchemy types
+        of a datalake table using results of 'help table <datalake>.<db_name>.<table_name>'
+        SQL query.
+
+        PARAMETERS:
+            schema:
+                Required Argument.
+                Specifies name of schema.
+                Types: str
+
+            table_name:
+                Required Argument.
+                Specifies name of table.
+                Types: str
+
+            datalake:
+                Required Argument.
+                Specifies name of datalake.
+                Types: str
+
+        RAISES:
+            TeradataMlException
+
+        EXAMPLES:
+            >>> DataFrameUtils._get_datalake_table_columns_info(table_name = 'sales',
+            ...                                                 schema='otftestdb',
+            ...                                                 datalake='datalake_iceberg_glue')
+            (['id', 'masters', 'gpa', 'stats', 'programming', 'admitted'],
+             [INTEGER(),
+              VARCHAR(length=2000, charset='UNICODE'),
+              FLOAT(),
+              VARCHAR(length=2000, charset='UNICODE'),
+              VARCHAR(length=2000, charset='UNICODE'),
+              INTEGER()])
+        """
+        # Get the column information from the strings type.
+        prepared = preparer(td_dialect())
+        sqlbundle = SQLBundle()
+        full_tbl_name = '{}.{}.{}'.format(prepared.quote(datalake),
+                                          prepared.quote(schema),
+                                          prepared.quote(table_name))
+        help_table_sql = sqlbundle._get_sql_query(SQLConstants.SQL_HELP_TABLE).format(full_tbl_name)
+
+        cur = execute_sql(help_table_sql)
+        td_types_col_index = -1
+        for i, col_metadata in enumerate(cur.description):
+            # Help Table returns column names and
+            # corresponding IcebergType, TeradataInternalType,
+            # TeradataType. We need to extract column index for
+            # 'TeradataType' column.
+            if col_metadata[0].lower() == 'teradatatype':
+                td_types_col_index = i
+
+        col_names = []
+        col_types = []
+        if td_types_col_index > -1:
+            for col_info in cur.fetchall():
+                col_names.append(col_info[0])
+                col_types.append(DataFrameUtils._get_sqlalchemy_type_from_str(col_info[td_types_col_index]))
+        else:
+            raise TeradataMlException(Messages.get_message(MessageCodes.TDMLDF_CREATE_FAIL),
+                                      MessageCodes.TDMLDF_CREATE_FAIL)
+        return col_names, col_types
```
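The `TeradataType` strings returned by HELP TABLE are parsed with a single regular expression. The snippet below exercises that pattern exactly as it appears in `_get_sqlalchemy_type_from_str`, including the charset extraction used for VARCHAR/CHAR (the sample type strings are taken from the docstring examples):

```python
import re

# The pattern from the new method: type name, optional parenthesized
# parameters, then the remainder (e.g. CHARACTER SET for VARCHAR/CHAR).
pattern = r"([A-Z0-9_]+)(\((.*)\))?(.*)"

for td_type in ("DECIMAL(4,4)", "VARCHAR(32000) CHARACTER SET UNICODE", "INTEGER"):
    m = re.match(pattern, td_type)
    name, params, remain = m.group(1), m.group(3), m.group(4)
    print(name, params, remain.strip())
# DECIMAL 4,4
# VARCHAR 32000 CHARACTER SET UNICODE
# INTEGER None

# For VARCHAR/CHAR the charset is the third token of the remainder:
print("CHARACTER SET UNICODE".split(" ")[2])  # UNICODE
```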
teradataml/dataframe/functions.py

```diff
@@ -23,6 +23,15 @@ def udf(user_function=None, returns=VARCHAR(1024), env_name = None, delimiter=',
     """
     DESCRIPTION:
         Creates a user defined function (UDF).
+
+        Notes:
+            1. Date and time data types must be formatted to supported formats.
+               (See Prerequisite Input and Output Structures in Open Analytics Framework for more details.)
+            2. Packages required to run the user defined function must be installed in remote user
+               environment using install_lib method of UserEnv class. Import statements of these
+               packages should be inside the user defined function itself.
+            3. Do not call a regular function defined outside the udf() from the user defined function.
+               The function definition and call must be inside the udf(). Look at Example 9 to understand more.
 
     PARAMETERS:
         user_function:
```
```diff
@@ -31,7 +40,7 @@ def udf(user_function=None, returns=VARCHAR(1024), env_name = None, delimiter=',
             teradataml DataFrame.
             Types: function
             Note:
-                1. Lambda Function are not supported.
+                Lambda functions are not supported. Re-write the lambda function as regular Python function to use with UDF.
 
         returns:
             Optional Argument.
```
```diff
@@ -82,15 +91,6 @@ def udf(user_function=None, returns=VARCHAR(1024), env_name = None, delimiter=',
     RAISES:
         TeradataMLException
 
-    NOTES:
-        1. While working on date and time data types one must format these to supported formats.
-           (See Requisite Input and Output Structures in Open Analytics Framework for more details.)
-        2. Required packages to run the user defined function must be installed in remote user
-           environment using install_lib function Of UserEnv class. Import statements of these
-           packages should be inside the user defined function itself.
-        3. One can't call a regular function defined outside the udf from the user defined function.
-           The function definition and call must be inside the udf. Look at Example 9 to understand more.
-
     EXAMPLES:
         # Load the data to run the example.
         >>> load_example_data("dataframe", "sales")
```
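Taken together, the relocated notes say: install dependencies with `install_lib`, and keep both imports and helper definitions inside the UDF body. A hedged sketch that follows those rules (assumes an active Vantage connection and the "sales" example data; the string-column argument style follows the library's documented examples):

```python
from teradataml import DataFrame, load_example_data
from teradataml.dataframe.functions import udf
from teradatasqlalchemy.types import VARCHAR

@udf(returns=VARCHAR(64))
def first_token(accounts):
    import re                         # note 2: import lives inside the UDF body

    def head(value):                  # note 3: helper defined inside the UDF
        return re.split(r"\s+", value)[0]

    return head(accounts) if accounts is not None else None

load_example_data("dataframe", "sales")
df = DataFrame("sales")
df.assign(first_token=first_token("accounts"))
```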
```diff
@@ -340,6 +340,12 @@ def register(name, user_function, returns=VARCHAR(1024)):
     DESCRIPTION:
         Registers a user defined function (UDF).
 
+        Notes:
+            1. Date and time data types must be formatted to supported formats.
+               (See Requisite Input and Output Structures in Open Analytics Framework for more details.)
+            2. On VantageCloud Lake, user defined function is registered by default in the 'openml_env' environment.
+               User can register it in their own user environment, using the 'openml_user_env' configuration option.
+
     PARAMETERS:
         name:
             Required Argument.
```
```diff
@@ -351,6 +357,8 @@ def register(name, user_function, returns=VARCHAR(1024)):
             Specifies the user defined function to create a column for
             teradataml DataFrame.
             Types: function, udf
+            Note:
+                Lambda functions are not supported. Re-write the lambda function as regular Python function to use with UDF.
 
         returns:
             Optional Argument.
```
```diff
@@ -459,10 +467,17 @@ def call_udf(udf_name, func_args = () , **kwargs):
     DESCRIPTION:
         Call a registered user defined function (UDF).
 
+        Notes:
+            1. Packages required to run the registered user defined function must be installed in remote user
+               environment using install_lib method of UserEnv class. Import statements of these
+               packages should be inside the user defined function itself.
+            2. On VantageCloud Lake, user defined function runs by default in the 'openml_env' environment.
+               User can use their own user environment, using the 'openml_user_env' configuration option.
+
     PARAMETERS:
         udf_name:
             Required Argument.
-            Specifies the name of the registered user defined.
+            Specifies the name of the registered user defined function.
             Types: str
 
         func_args:
```
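A hedged sketch of the `register()`/`call_udf()` round trip described above (assumes an active connection; on VantageCloud Lake the function runs in 'openml_env' unless the 'openml_user_env' configuration option points at your own environment):

```python
from teradataml import DataFrame
from teradataml.dataframe.functions import register, call_udf
from teradatasqlalchemy.types import VARCHAR

# A regular Python function (not a lambda), registered under a name.
def to_upper(value):
    return value.upper() if value is not None else None

register("to_upper", to_upper, returns=VARCHAR(1024))

# Later, possibly in another session: call the registered UDF by name.
df = DataFrame("sales")
df.assign(upper_accounts=call_udf("to_upper", ("accounts",)))
```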
teradataml/dataframe/setop.py

```diff
@@ -149,7 +149,7 @@ def __check_concat_compatibility(df_list, join, sort, ignore_index):
     # Iterate on all DFs to be applied for set operation.
     for df in dfs_to_operate_on:
         # Process each column in the DF of the iteration.
-        for c in df._metaexpr.t.c:
+        for c in df._metaexpr.c:
             col_name = c.name
             # Process the column name if it is not already processed.
             # Processing of set operation is column name based so if the DF in the nth iteration had column 'xyz',
```
```diff
@@ -193,6 +193,8 @@ def __check_concat_compatibility(df_list, join, sort, ignore_index):
             col_dict[col_name]['col_present'] = col_present_in_dfs
             # The type to be used for the column is the one of the first DF it is present in.
             col_dict[col_name]['col_type'] = col_types_in_dfs[0]
+            # Column name stored with quotes if required.
+            col_dict[col_name]['name'] = c.compile()
 
         # If the type of the column in all DFs is not the same, then the operation is not lazy.
         if not all(ctype == col_dict[col_name]['col_type']
```
```diff
@@ -217,6 +219,8 @@ def __check_concat_compatibility(df_list, join, sort, ignore_index):
             col_dict[col_name]['col_present'] = col_present_in_dfs
             # The type to be used for the column is the one of the first DF it is present in.
             col_dict[col_name]['col_type'] = non_none_type_to_add
+            # Column name stored with quotes if required.
+            col_dict[col_name]['name'] = c.compile()
 
         # If the type of the column in all DFs is not the same, then the operation is not lazy.
         if not all(True if ctype is None else ctype == non_none_type_to_add
```
```diff
@@ -667,15 +671,16 @@ def concat(df_list, join='OUTER', allow_duplicates=True, sort=False, ignore_inde
 
     # Now create the list of columns for each DataFrame to concatenate
     type_compiler = td_type_compiler(td_dialect)
+
     for col_name, value in master_columns_dict.items():
         for i in range(len(col_list)):
+            # Quoting is already done for column names if column name starts with number or it is reserved keywords.
+            # Here checking again if it is teradata keyword or not for quotes.
+            column_name = UtilFuncs._process_for_teradata_keyword(value['name'])
             if not value['col_present'][i]:
-                col_list[i].append('CAST(NULL as {}) as {}'.format(type_compiler.process(value['col_type']),
-                                                                   UtilFuncs._teradata_quote_arg(col_name, "\"",
-                                                                                                 False)))
+                col_list[i].append('CAST(NULL as {}) as {}'.format(type_compiler.process(value['col_type']), column_name))
             else:
-                col_name = UtilFuncs._process_for_teradata_keyword(col_name)
-                col_list[i].append(col_name)
+                col_list[i].append(column_name)
 
     input_table_columns = []
     for i in range(len(col_list)):
```
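The effect of the rewritten loop: each DataFrame's select list carries either the shared (pre-quoted) column name or a typed NULL placeholder for columns it lacks. A self-contained sketch, with plain type strings standing in for compiled teradatasqlalchemy types and `'name'` holding the pre-quoted name now captured via `c.compile()`:

```python
# Illustrative data for two DataFrames that each miss one column.
master_columns_dict = {
    "accounts": {"col_present": [True, False], "col_type": "VARCHAR(20)", "name": '"accounts"'},
    "sales":    {"col_present": [False, True], "col_type": "INTEGER",     "name": '"sales"'},
}

col_list = [[], []]
for col_name, value in master_columns_dict.items():
    for i in range(len(col_list)):
        column_name = value["name"]  # already quoted where required
        if not value["col_present"][i]:
            col_list[i].append("CAST(NULL as {}) as {}".format(value["col_type"], column_name))
        else:
            col_list[i].append(column_name)

print(col_list[0])  # ['"accounts"', 'CAST(NULL as INTEGER) as "sales"']
print(col_list[1])  # ['CAST(NULL as VARCHAR(20)) as "accounts"', '"sales"']
```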
teradataml/dataframe/sql.py

```diff
@@ -265,7 +265,7 @@ class _PandasTableExpression(TableExpression):
 
         existing = [(c.name, c) for c in self.c]
         new = [(label, expression) for label, expression in kw.items() if label not in current]
-        new = sorted(new, key = lambda x: x[0])
+        new = sorted(new, key=lambda x: x[0])
 
         for alias, expression in existing + new:
             if drop_columns and alias not in kw:
```
```diff
@@ -10978,4 +10978,4 @@ class _SQLColumnExpression(_LogicalColumnExpression,
         >>>
 
         """
-        return _SQLColumnExpression(literal_column(f"TD_ISFINITE({self.compile()})"), type=INTEGER)
+        return _SQLColumnExpression(literal_column(f"TD_ISFINITE({self.compile()})"), type=INTEGER)
```