teradataml 20.0.0.1__py3-none-any.whl → 20.0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of teradataml might be problematic.
- teradataml/LICENSE-3RD-PARTY.pdf +0 -0
- teradataml/LICENSE.pdf +0 -0
- teradataml/README.md +306 -0
- teradataml/__init__.py +10 -3
- teradataml/_version.py +1 -1
- teradataml/analytics/__init__.py +3 -2
- teradataml/analytics/analytic_function_executor.py +299 -16
- teradataml/analytics/analytic_query_generator.py +92 -0
- teradataml/analytics/byom/__init__.py +3 -2
- teradataml/analytics/json_parser/metadata.py +13 -3
- teradataml/analytics/json_parser/utils.py +13 -6
- teradataml/analytics/meta_class.py +40 -1
- teradataml/analytics/sqle/DecisionTreePredict.py +1 -1
- teradataml/analytics/sqle/__init__.py +11 -2
- teradataml/analytics/table_operator/__init__.py +4 -3
- teradataml/analytics/uaf/__init__.py +21 -2
- teradataml/analytics/utils.py +66 -1
- teradataml/analytics/valib.py +1 -1
- teradataml/automl/__init__.py +1502 -323
- teradataml/automl/custom_json_utils.py +139 -61
- teradataml/automl/data_preparation.py +247 -307
- teradataml/automl/data_transformation.py +32 -12
- teradataml/automl/feature_engineering.py +325 -86
- teradataml/automl/model_evaluation.py +44 -35
- teradataml/automl/model_training.py +122 -153
- teradataml/catalog/byom.py +8 -8
- teradataml/clients/pkce_client.py +1 -1
- teradataml/common/__init__.py +2 -1
- teradataml/common/constants.py +72 -0
- teradataml/common/deprecations.py +13 -7
- teradataml/common/garbagecollector.py +152 -120
- teradataml/common/messagecodes.py +11 -2
- teradataml/common/messages.py +4 -1
- teradataml/common/sqlbundle.py +26 -4
- teradataml/common/utils.py +225 -14
- teradataml/common/wrapper_utils.py +1 -1
- teradataml/context/context.py +82 -2
- teradataml/data/SQL_Fundamentals.pdf +0 -0
- teradataml/data/complaints_test_tokenized.csv +353 -0
- teradataml/data/complaints_tokens_model.csv +348 -0
- teradataml/data/covid_confirm_sd.csv +83 -0
- teradataml/data/dataframe_example.json +27 -1
- teradataml/data/docs/sqle/docs_17_20/CFilter.py +132 -0
- teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +162 -0
- teradataml/data/docs/sqle/docs_17_20/OutlierFilterFit.py +2 -0
- teradataml/data/docs/sqle/docs_17_20/Pivoting.py +279 -0
- teradataml/data/docs/sqle/docs_17_20/Shap.py +203 -0
- teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +189 -0
- teradataml/data/docs/sqle/docs_17_20/TFIDF.py +142 -0
- teradataml/data/docs/sqle/docs_17_20/TextParser.py +3 -3
- teradataml/data/docs/sqle/docs_17_20/Unpivoting.py +216 -0
- teradataml/data/docs/tableoperator/docs_17_20/Image2Matrix.py +118 -0
- teradataml/data/docs/uaf/docs_17_20/ACF.py +1 -10
- teradataml/data/docs/uaf/docs_17_20/ArimaEstimate.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaForecast.py +35 -5
- teradataml/data/docs/uaf/docs_17_20/ArimaValidate.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/ArimaXEstimate.py +293 -0
- teradataml/data/docs/uaf/docs_17_20/AutoArima.py +354 -0
- teradataml/data/docs/uaf/docs_17_20/BreuschGodfrey.py +3 -2
- teradataml/data/docs/uaf/docs_17_20/BreuschPaganGodfrey.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/Convolve.py +13 -10
- teradataml/data/docs/uaf/docs_17_20/Convolve2.py +4 -1
- teradataml/data/docs/uaf/docs_17_20/CopyArt.py +145 -0
- teradataml/data/docs/uaf/docs_17_20/CumulPeriodogram.py +5 -4
- teradataml/data/docs/uaf/docs_17_20/DFFT2Conv.py +4 -4
- teradataml/data/docs/uaf/docs_17_20/DWT.py +235 -0
- teradataml/data/docs/uaf/docs_17_20/DWT2D.py +214 -0
- teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +18 -21
- teradataml/data/docs/uaf/docs_17_20/DurbinWatson.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/ExtractResults.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/FilterFactory1d.py +160 -0
- teradataml/data/docs/uaf/docs_17_20/GenseriesSinusoids.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/GoldfeldQuandt.py +9 -31
- teradataml/data/docs/uaf/docs_17_20/HoltWintersForecaster.py +4 -2
- teradataml/data/docs/uaf/docs_17_20/IDFFT2.py +1 -8
- teradataml/data/docs/uaf/docs_17_20/IDWT.py +236 -0
- teradataml/data/docs/uaf/docs_17_20/IDWT2D.py +226 -0
- teradataml/data/docs/uaf/docs_17_20/IQR.py +134 -0
- teradataml/data/docs/uaf/docs_17_20/LineSpec.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/LinearRegr.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/MAMean.py +3 -3
- teradataml/data/docs/uaf/docs_17_20/Matrix2Image.py +297 -0
- teradataml/data/docs/uaf/docs_17_20/MatrixMultiply.py +15 -6
- teradataml/data/docs/uaf/docs_17_20/PACF.py +0 -1
- teradataml/data/docs/uaf/docs_17_20/Portman.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/PowerSpec.py +2 -2
- teradataml/data/docs/uaf/docs_17_20/Resample.py +9 -1
- teradataml/data/docs/uaf/docs_17_20/SAX.py +246 -0
- teradataml/data/docs/uaf/docs_17_20/SeasonalNormalize.py +17 -10
- teradataml/data/docs/uaf/docs_17_20/SignifPeriodicities.py +1 -1
- teradataml/data/docs/uaf/docs_17_20/WhitesGeneral.py +3 -1
- teradataml/data/docs/uaf/docs_17_20/WindowDFFT.py +368 -0
- teradataml/data/dwt2d_dataTable.csv +65 -0
- teradataml/data/dwt_dataTable.csv +8 -0
- teradataml/data/dwt_filterTable.csv +3 -0
- teradataml/data/finance_data4.csv +13 -0
- teradataml/data/grocery_transaction.csv +19 -0
- teradataml/data/idwt2d_dataTable.csv +5 -0
- teradataml/data/idwt_dataTable.csv +8 -0
- teradataml/data/idwt_filterTable.csv +3 -0
- teradataml/data/interval_data.csv +5 -0
- teradataml/data/jsons/paired_functions.json +14 -0
- teradataml/data/jsons/sqle/17.20/TD_CFilter.json +118 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayes.json +193 -0
- teradataml/data/jsons/sqle/17.20/TD_NaiveBayesPredict.json +212 -0
- teradataml/data/jsons/sqle/17.20/TD_OneClassSVM.json +9 -9
- teradataml/data/jsons/sqle/17.20/TD_Pivoting.json +280 -0
- teradataml/data/jsons/sqle/17.20/TD_Shap.json +222 -0
- teradataml/data/jsons/sqle/17.20/TD_TFIDF.json +162 -0
- teradataml/data/jsons/sqle/17.20/TD_TextParser.json +1 -1
- teradataml/data/jsons/sqle/17.20/TD_Unpivoting.json +235 -0
- teradataml/data/jsons/sqle/20.00/TD_KMeans.json +250 -0
- teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +266 -0
- teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +278 -0
- teradataml/data/jsons/storedprocedure/17.20/TD_COPYART.json +71 -0
- teradataml/data/jsons/storedprocedure/17.20/TD_FILTERFACTORY1D.json +150 -0
- teradataml/data/jsons/tableoperator/17.20/IMAGE2MATRIX.json +53 -0
- teradataml/data/jsons/uaf/17.20/TD_ACF.json +1 -18
- teradataml/data/jsons/uaf/17.20/TD_ARIMAESTIMATE.json +3 -16
- teradataml/data/jsons/uaf/17.20/TD_ARIMAFORECAST.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAVALIDATE.json +5 -3
- teradataml/data/jsons/uaf/17.20/TD_ARIMAXESTIMATE.json +362 -0
- teradataml/data/jsons/uaf/17.20/TD_AUTOARIMA.json +469 -0
- teradataml/data/jsons/uaf/17.20/TD_BINARYMATRIXOP.json +0 -3
- teradataml/data/jsons/uaf/17.20/TD_BINARYSERIESOP.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_GODFREY.json +2 -1
- teradataml/data/jsons/uaf/17.20/TD_BREUSCH_PAGAN_GODFREY.json +2 -5
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_CONVOLVE2.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_CUMUL_PERIODOGRAM.json +0 -5
- teradataml/data/jsons/uaf/17.20/TD_DFFT.json +1 -4
- teradataml/data/jsons/uaf/17.20/TD_DFFT2.json +2 -7
- teradataml/data/jsons/uaf/17.20/TD_DFFT2CONV.json +1 -2
- teradataml/data/jsons/uaf/17.20/TD_DFFTCONV.json +0 -2
- teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +10 -19
- teradataml/data/jsons/uaf/17.20/TD_DTW.json +3 -6
- teradataml/data/jsons/uaf/17.20/TD_DWT.json +173 -0
- teradataml/data/jsons/uaf/17.20/TD_DWT2D.json +160 -0
- teradataml/data/jsons/uaf/17.20/TD_FITMETRICS.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_GOLDFELD_QUANDT.json +16 -30
- teradataml/data/jsons/uaf/17.20/{TD_HOLT_WINTERS_FORECAST.json → TD_HOLT_WINTERS_FORECASTER.json} +1 -2
- teradataml/data/jsons/uaf/17.20/TD_IDFFT2.json +1 -15
- teradataml/data/jsons/uaf/17.20/TD_IDWT.json +162 -0
- teradataml/data/jsons/uaf/17.20/TD_IDWT2D.json +149 -0
- teradataml/data/jsons/uaf/17.20/TD_IQR.json +117 -0
- teradataml/data/jsons/uaf/17.20/TD_LINEAR_REGR.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_LINESPEC.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_MAMEAN.json +1 -3
- teradataml/data/jsons/uaf/17.20/TD_MATRIX2IMAGE.json +209 -0
- teradataml/data/jsons/uaf/17.20/TD_PACF.json +2 -2
- teradataml/data/jsons/uaf/17.20/TD_POWERSPEC.json +5 -5
- teradataml/data/jsons/uaf/17.20/TD_RESAMPLE.json +48 -28
- teradataml/data/jsons/uaf/17.20/TD_SAX.json +210 -0
- teradataml/data/jsons/uaf/17.20/TD_SEASONALNORMALIZE.json +12 -6
- teradataml/data/jsons/uaf/17.20/TD_SIMPLEEXP.json +0 -1
- teradataml/data/jsons/uaf/17.20/TD_TRACKINGOP.json +8 -8
- teradataml/data/jsons/uaf/17.20/TD_UNDIFF.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_UNNORMALIZE.json +1 -1
- teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +410 -0
- teradataml/data/load_example_data.py +8 -2
- teradataml/data/medical_readings.csv +101 -0
- teradataml/data/naivebayestextclassifier_example.json +1 -1
- teradataml/data/naivebayestextclassifierpredict_example.json +11 -0
- teradataml/data/patient_profile.csv +101 -0
- teradataml/data/peppers.png +0 -0
- teradataml/data/real_values.csv +14 -0
- teradataml/data/sax_example.json +8 -0
- teradataml/data/scripts/deploy_script.py +1 -1
- teradataml/data/scripts/lightgbm/dataset.template +157 -0
- teradataml/data/scripts/lightgbm/lightgbm_class_functions.template +247 -0
- teradataml/data/scripts/lightgbm/lightgbm_function.template +216 -0
- teradataml/data/scripts/lightgbm/lightgbm_sklearn.template +159 -0
- teradataml/data/scripts/sklearn/sklearn_fit.py +194 -160
- teradataml/data/scripts/sklearn/sklearn_fit_predict.py +136 -115
- teradataml/data/scripts/sklearn/sklearn_function.template +34 -16
- teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +155 -137
- teradataml/data/scripts/sklearn/sklearn_neighbors.py +1 -1
- teradataml/data/scripts/sklearn/sklearn_score.py +12 -3
- teradataml/data/scripts/sklearn/sklearn_transform.py +162 -24
- teradataml/data/star_pivot.csv +8 -0
- teradataml/data/target_udt_data.csv +8 -0
- teradataml/data/templates/open_source_ml.json +3 -1
- teradataml/data/teradataml_example.json +20 -1
- teradataml/data/timestamp_data.csv +4 -0
- teradataml/data/titanic_dataset_unpivoted.csv +19 -0
- teradataml/data/uaf_example.json +55 -1
- teradataml/data/unpivot_example.json +15 -0
- teradataml/data/url_data.csv +9 -0
- teradataml/data/vectordistance_example.json +4 -0
- teradataml/data/windowdfft.csv +16 -0
- teradataml/dataframe/copy_to.py +1 -1
- teradataml/dataframe/data_transfer.py +5 -3
- teradataml/dataframe/dataframe.py +1002 -201
- teradataml/dataframe/fastload.py +3 -3
- teradataml/dataframe/functions.py +867 -0
- teradataml/dataframe/row.py +160 -0
- teradataml/dataframe/setop.py +2 -2
- teradataml/dataframe/sql.py +840 -33
- teradataml/dataframe/window.py +1 -1
- teradataml/dbutils/dbutils.py +878 -34
- teradataml/dbutils/filemgr.py +48 -1
- teradataml/geospatial/geodataframe.py +1 -1
- teradataml/geospatial/geodataframecolumn.py +1 -1
- teradataml/hyperparameter_tuner/optimizer.py +13 -13
- teradataml/lib/aed_0_1.dll +0 -0
- teradataml/opensource/__init__.py +1 -1
- teradataml/opensource/{sklearn/_class.py → _class.py} +102 -17
- teradataml/opensource/_lightgbm.py +950 -0
- teradataml/opensource/{sklearn/_wrapper_utils.py → _wrapper_utils.py} +1 -2
- teradataml/opensource/{sklearn/constants.py → constants.py} +13 -10
- teradataml/opensource/sklearn/__init__.py +0 -1
- teradataml/opensource/sklearn/_sklearn_wrapper.py +1019 -574
- teradataml/options/__init__.py +9 -23
- teradataml/options/configure.py +42 -4
- teradataml/options/display.py +2 -2
- teradataml/plot/axis.py +4 -4
- teradataml/scriptmgmt/UserEnv.py +13 -9
- teradataml/scriptmgmt/lls_utils.py +77 -23
- teradataml/store/__init__.py +13 -0
- teradataml/store/feature_store/__init__.py +0 -0
- teradataml/store/feature_store/constants.py +291 -0
- teradataml/store/feature_store/feature_store.py +2223 -0
- teradataml/store/feature_store/models.py +1505 -0
- teradataml/store/vector_store/__init__.py +1586 -0
- teradataml/table_operators/Script.py +2 -2
- teradataml/table_operators/TableOperator.py +106 -20
- teradataml/table_operators/query_generator.py +3 -0
- teradataml/table_operators/table_operator_query_generator.py +3 -1
- teradataml/table_operators/table_operator_util.py +102 -56
- teradataml/table_operators/templates/dataframe_register.template +69 -0
- teradataml/table_operators/templates/dataframe_udf.template +63 -0
- teradataml/telemetry_utils/__init__.py +0 -0
- teradataml/telemetry_utils/queryband.py +52 -0
- teradataml/utils/dtypes.py +4 -2
- teradataml/utils/validators.py +34 -2
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/METADATA +311 -3
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/RECORD +240 -157
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/WHEEL +0 -0
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/top_level.txt +0 -0
- {teradataml-20.0.0.1.dist-info → teradataml-20.0.0.3.dist-info}/zip-safe +0 -0
teradataml/data/scripts/lightgbm/lightgbm_function.template
@@ -0,0 +1,216 @@
+import sys, json, io
+import pickle, base64, importlib, numpy as np
+from collections import OrderedDict
+
+func_name = "<func_name>"
+module_name = "<module_name>"
+is_lake_system = <is_lake_system>
+params = json.loads('<params>')
+data_partition_column_indices = <partition_cols_indices>
+data_partition_column_types = <partition_cols_types>
+model_file_prefix = "<model_file_prefix>"  # Needed on lake systems for writing the model to /tmp.
+
+DELIMITER = '\t'
+
+def convert_to_type(val, typee):
+    if typee == 'int':
+        return int(val) if val != "" else np.nan
+    if typee == 'float':
+        if isinstance(val, str):
+            val = val.replace(' ', '')
+        return float(val) if val != "" else np.nan
+    if typee == 'bool':
+        # Uses eval() to turn the strings 'True'/'False' into booleans.
+        return eval(val) if val != "" else None
+    return str(val) if val != "" else None
+
+if not is_lake_system:
+    db = sys.argv[0].split("/")[1]
+
+data_present = False
+data_partition_column_values = []
+
+while 1:
+    try:
+        line = input()
+        if line == '':  # Exit if the user provides a blank line.
+            break
+        else:
+            data_present = True
+            values = line.split(DELIMITER)
+            if not data_partition_column_values:
+                # Partition column values are the same for all rows. Hence, read them only once.
+                for i, val in enumerate(data_partition_column_indices):  # Only partition columns are read here.
+                    data_partition_column_values.append(
+                        convert_to_type(values[val], typee=data_partition_column_types[i])
+                    )
+
+                # Prepare the corresponding model file name and extract the model.
+                partition_join = "_".join([str(x) for x in data_partition_column_values])
+                # Replace '-' with '_' because partition column values can be negative and contain '-'.
+                partition_join = partition_join.replace("-", "_")
+
+                train_set = params.get("train_set")  # Gets the file name prefix.
+                model_file_path = f"{train_set}_{partition_join}" \
+                    if is_lake_system else \
+                    f"./{db}/{train_set}_{partition_join}"
+
+                with open(model_file_path, "rb") as fp:
+                    params["train_set"] = pickle.loads(fp.read())
+
+                valid_sets = params.get("valid_sets", None)  # Gets the file name prefixes.
+                if valid_sets:
+                    params["valid_sets"] = []
+                    for valid_set in valid_sets:
+                        model_file_path = f"{valid_set}_{partition_join}" \
+                            if is_lake_system else \
+                            f"./{db}/{valid_set}_{partition_join}"
+                        with open(model_file_path, "rb") as fp:
+                            params["valid_sets"].append(pickle.load(fp))
+
+    except EOFError:  # Exit if reached EOF or CTRL-D.
+        break
+
+if not data_present:
+    sys.exit(0)
+
+# Handle callbacks.
+rec_eval = None
+if "callbacks" in params and params["callbacks"] is not None:
+    callbacks = params["callbacks"]
+    callbacks = [callbacks] if not isinstance(callbacks, list) else callbacks
+    for i, callback in enumerate(callbacks):
+        c_module_name = callback["module"]
+        c_func_name = callback["func_name"]
+        c_kwargs = callback["kwargs"]
+        c_module = importlib.import_module(c_module_name)
+        if c_func_name == "record_evaluation":
+            # The record_evaluation function takes an empty dict. If the argument already has
+            # elements in the dict, they will be deleted, as described in the lightgbm
+            # documentation:
+            # eval_result (dict) -
+            # Dictionary used to store all evaluation results of all validation sets. This should
+            # be initialized outside of your call to record_evaluation() and should be empty. Any
+            # initial contents of the dictionary will be deleted.
+            rec_eval = {}
+            callbacks[i] = getattr(c_module, c_func_name)(rec_eval)
+        else:
+            callbacks[i] = getattr(c_module, c_func_name)(**c_kwargs)
+
+    params["callbacks"] = callbacks
+
+module_ = importlib.import_module(module_name)
+
+### LightGBM training emits meaningful console output like the sample below;
+### hence, capture it to show to the user.
+
+# [LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.000190 seconds.
+# You can set `force_row_wise=true` to remove the overhead.
+# And if memory is not enough, you can set `force_col_wise=true`.
+# [LightGBM] [Info] Total Bins 136
+# [LightGBM] [Info] Number of data points in the train set: 97, number of used features: 4
+# [LightGBM] [Info] Start training from score 0.556701
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [1] valid_0's l2: 0.219637 valid_1's l2: 0.219637
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [2] valid_0's l2: 0.196525 valid_1's l2: 0.196525
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [3] valid_0's l2: 0.178462 valid_1's l2: 0.178462
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [4] valid_0's l2: 0.162887 valid_1's l2: 0.162887
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [5] valid_0's l2: 0.150271 valid_1's l2: 0.150271
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [6] valid_0's l2: 0.140219 valid_1's l2: 0.140219
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [7] valid_0's l2: 0.131697 valid_1's l2: 0.131697
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [8] valid_0's l2: 0.124056 valid_1's l2: 0.124056
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [9] valid_0's l2: 0.117944 valid_1's l2: 0.117944
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [10] valid_0's l2: 0.11263 valid_1's l2: 0.11263
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [11] valid_0's l2: 0.105228 valid_1's l2: 0.105228
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [12] valid_0's l2: 0.0981571 valid_1's l2: 0.0981571
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [13] valid_0's l2: 0.0924294 valid_1's l2: 0.0924294
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [14] valid_0's l2: 0.0877899 valid_1's l2: 0.0877899
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [15] valid_0's l2: 0.084032 valid_1's l2: 0.084032
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [16] valid_0's l2: 0.080988 valid_1's l2: 0.080988
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [17] valid_0's l2: 0.0785224 valid_1's l2: 0.0785224
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [18] valid_0's l2: 0.0765253 valid_1's l2: 0.0765253
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [19] valid_0's l2: 0.0750803 valid_1's l2: 0.0750803
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [20] valid_0's l2: 0.0738915 valid_1's l2: 0.0738915
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [21] valid_0's l2: 0.07288 valid_1's l2: 0.07288
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [22] valid_0's l2: 0.0718676 valid_1's l2: 0.0718676
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [23] valid_0's l2: 0.0706037 valid_1's l2: 0.0706037
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [24] valid_0's l2: 0.0695799 valid_1's l2: 0.0695799
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [25] valid_0's l2: 0.0687507 valid_1's l2: 0.0687507
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [26] valid_0's l2: 0.0680819 valid_1's l2: 0.0680819
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [27] valid_0's l2: 0.0674077 valid_1's l2: 0.0674077
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [28] valid_0's l2: 0.0665111 valid_1's l2: 0.0665111
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [29] valid_0's l2: 0.0659656 valid_1's l2: 0.0659656
+# [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+# [30] valid_0's l2: 0.0652665 valid_1's l2: 0.0652665
+result = ""
+stdout = None
+try:
+    stdout = sys.stdout
+    new_stdout = io.StringIO()
+    sys.stdout = new_stdout
+    trained_model = getattr(module_, func_name)(**params)
+    result = new_stdout.getvalue()
+except Exception:
+    raise
+finally:
+    sys.stdout = stdout
+
+model_str = pickle.dumps(trained_model)
+console_output_str = result.encode()
+
+if is_lake_system:
+    model_file_path = f"/tmp/{model_file_prefix}_{partition_join}.pickle"
+    model_console_output_path = f"/tmp/{model_file_prefix}_{partition_join}_console_output.pickle"
+
+    # Write to file in Vantage, to be used in predict/scoring.
+    with open(model_file_path, "wb") as fp:
+        fp.write(model_str)
+
+    with open(model_console_output_path, "wb") as fpc:
+        fpc.write(console_output_str)
+
+model_data = model_file_path if is_lake_system else base64.b64encode(model_str)
+console_output = model_console_output_path if is_lake_system else base64.b64encode(console_output_str)
+
+output_data = [model_data, console_output]
+
+if rec_eval is not None:
+    rec_eval = pickle.dumps(rec_eval)
+    if is_lake_system:
+        rec_eval_file_path = f"/tmp/{model_file_prefix}_{partition_join}_rec_eval.pickle"
+
+        with open(rec_eval_file_path, "wb") as fp:
+            fp.write(rec_eval)
+
+    rec_eval = rec_eval_file_path if is_lake_system else base64.b64encode(rec_eval)
+
+    output_data.append(rec_eval)
+
+print(*(data_partition_column_values + output_data), sep=DELIMITER)
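The training template above emits one row per data partition: the partition key values, then the model and the captured console output, each either a base64-encoded pickle (enterprise systems) or a /tmp file path (lake systems). The sketch below shows how such a cell could be decoded on the client side; it is illustrative only, assumes the non-lake path, and the helper name decode_model_cell is hypothetical (the real decoding lives in teradataml's lightgbm wrapper, which this diff does not show).

import base64, pickle

def decode_model_cell(cell):
    """Decode one model cell printed by the training template (non-lake path).

    The template prints base64.b64encode(model_str), i.e. the str() of a bytes
    object such as "b'gASV...'", so the bytes-literal wrapper must be stripped
    before decoding. Hypothetical helper, for illustration only.
    """
    if cell.startswith("b'") and cell.endswith("'"):
        cell = cell[2:-1]  # Strip the b'...' wrapper.
    return pickle.loads(base64.b64decode(cell))

# Usage with a synthetic row: partition value, model cell, console-output cell.
row = "\t".join(["7",
                 str(base64.b64encode(pickle.dumps({"demo": "model"}))),
                 str(base64.b64encode(b"[LightGBM] [Info] Total Bins 136"))])
partition_value, model_cell, console_cell = row.split("\t")
model = decode_model_cell(model_cell)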
teradataml/data/scripts/lightgbm/lightgbm_sklearn.template
@@ -0,0 +1,159 @@
+import sys, json
+import pickle, base64, importlib, numpy as np
+from collections import OrderedDict
+
+DELIMITER = '\t'
+
+func_name = <func_name>
+params = json.loads('<params>')
+is_lake_system = <is_lake_system>
+model_file_prefix = <model_file_prefix>
+
+def convert_to_type(val, typee):
+    if typee == 'int':
+        return int(val) if val != "" else np.nan
+    if typee == 'float':
+        if isinstance(val, str):
+            val = val.replace(' ', '')
+        return float(val) if val != "" else np.nan
+    if typee == 'bool':
+        # Uses eval() to turn the strings 'True'/'False' into booleans.
+        return eval(val) if val != "" else None
+    return str(val) if val != "" else None
+
+def splitter(strr, delim=",", convert_to="str"):
+    """
+    Split the string based on the delimiter and convert each element to the specified type.
+    """
+    if strr == "None":
+        return []
+    return [convert_to_type(i, convert_to) for i in strr.split(delim)]
+
+
+if not is_lake_system:
+    db = sys.argv[0].split("/")[1]
+
+data_partition_column_indices = <partition_cols_indices>
+data_column_types = <types_of_data_cols>
+
+data_partition_column_types = [data_column_types[idx] for idx in data_partition_column_indices]
+
+# Data related arguments' information of indices and types.
+data_args_indices_types = OrderedDict()
+
+# Data related arguments' values - prepare the dictionary here and populate the data later.
+data_args_values = {}
+
+data_args_info_str = <data_args_info_str>
+for data_arg in data_args_info_str.split("--"):
+    arg_name, indices, types = data_arg.split("-")
+    indices = splitter(indices, convert_to="int")
+    types = splitter(types)
+
+    data_args_indices_types[arg_name] = {"indices": indices, "types": types}
+    data_args_values[arg_name] = []  # Keep an empty list for each data arg name; populated later.
+
+data_partition_column_values = []
+data_present = False
+
+model = None
+
+# Read data - column information is passed as a command line argument and stored in the
+# data_args_indices_types dictionary.
+while 1:
+    try:
+        line = input()
+        if line == '':  # Exit if the user provides a blank line.
+            break
+        else:
+            data_present = True
+            values = line.split(DELIMITER)
+            if not data_partition_column_values:
+                # Partition column values are the same for all rows. Hence, read them only once.
+                for i, val in enumerate(data_partition_column_indices):
+                    data_partition_column_values.append(
+                        convert_to_type(values[val], typee=data_partition_column_types[i])
+                    )
+
+                # Prepare the corresponding model file name and extract the model.
+                partition_join = "_".join([str(x) for x in data_partition_column_values])
+                # Replace '-' with '_' because partition column values can be negative and contain '-'.
+                partition_join = partition_join.replace("-", "_")
+
+                model_file_path = f"{model_file_prefix}_{partition_join}" \
+                    if is_lake_system else \
+                    f"./{db}/{model_file_prefix}_{partition_join}"
+
+                with open(model_file_path, "rb") as fp:
+                    model = pickle.loads(fp.read())
+
+            if model is None:
+                sys.exit("Model file is not installed in Vantage.")
+
+            # Prepare data dictionary containing only arguments related to data.
+            for arg_name in data_args_values:
+                data_indices = data_args_indices_types[arg_name]["indices"]
+                types = data_args_indices_types[arg_name]["types"]
+                cur_row = []
+                for idx, data_idx in enumerate(data_indices):
+                    cur_row.append(convert_to_type(values[data_idx], types[idx]))
+                data_args_values[arg_name].append(cur_row)
+    except EOFError:  # Exit if reached EOF or CTRL-D.
+        break
+
+if not data_present:
+    sys.exit(0)
+
+# Handle callbacks.
+rec_eval = None
+if "callbacks" in params and params["callbacks"] is not None:
+    callbacks = params["callbacks"]
+    callbacks = [callbacks] if not isinstance(callbacks, list) else callbacks
+    for i, callback in enumerate(callbacks):
+        c_module_name = callback["module"]
+        c_func_name = callback["func_name"]
+        c_kwargs = callback["kwargs"]
+        c_module = importlib.import_module(c_module_name)
+        if c_func_name == "record_evaluation":
+            # The record_evaluation function takes an empty dict. If the argument already has
+            # elements in the dict, they will be deleted, as described in the lightgbm
+            # documentation:
+            # eval_result (dict) -
+            # Dictionary used to store all evaluation results of all validation sets. This should
+            # be initialized outside of your call to record_evaluation() and should be empty. Any
+            # initial contents of the dictionary will be deleted.
+            rec_eval = {}
+            callbacks[i] = getattr(c_module, c_func_name)(rec_eval)
+        else:
+            callbacks[i] = getattr(c_module, c_func_name)(**c_kwargs)
+
+    params["callbacks"] = callbacks
+
+# Update data as numpy arrays.
+for arg_name in data_args_values:
+    np_values = np.array(data_args_values[arg_name])
+    data_args_values[arg_name] = np_values
+    if arg_name == "sample_weight":
+        data_args_values[arg_name] = np_values.ravel()
+
+# Combine all arguments.
+all_args = {**data_args_values, **params}
+
+trained_model = getattr(model, func_name)(**all_args)
+
+model_data = 0
+if func_name == "fit":
+    model_str = pickle.dumps(trained_model)
+
+    if is_lake_system:
+        model_file_path = f"/tmp/{model_file_prefix}_{partition_join}.pickle"
+
+        # Write to file in Vantage, to be used in predict/scoring.
+        with open(model_file_path, "wb") as fp:
+            fp.write(model_str)
+
+    model_data = model_file_path if is_lake_system else base64.b64encode(model_str)
+
+elif func_name == "score":
+    model_data = trained_model
+
+print(*(data_partition_column_values + [model_data]), sep=DELIMITER)
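For context, the angle-bracket tokens in both templates (<func_name>, <params>, <is_lake_system>, and so on) are placeholders that teradataml fills in before the script is shipped to Vantage. The substitution mechanism itself lives in teradataml/opensource/_lightgbm.py and is not shown in this diff; the sketch below assumes plain string replacement, with made-up values chosen only to show how each placeholder lines up with the quoting in the templates (<params> sits inside single quotes, while <is_lake_system> is substituted as a bare Python literal).

import json

# Hypothetical rendering step, assuming simple string replacement; the actual
# teradataml implementation may differ.
def render_template(template_text, replacements):
    for placeholder, value in replacements.items():
        template_text = template_text.replace(placeholder, value)
    return template_text

template_text = open("lightgbm_sklearn.template").read()
script = render_template(template_text, {
    "<func_name>": '"fit"',                       # becomes: func_name = "fit"
    "<params>": json.dumps({"callbacks": None}),  # raw JSON; the template wraps it in quotes
    "<is_lake_system>": "False",                  # bare Python literal
    "<model_file_prefix>": '"lgbm_model"',
    "<partition_cols_indices>": "[0]",
    "<types_of_data_cols>": "['int', 'float', 'float']",
    "<data_args_info_str>": '"X-1,2-float,float--y-0-int"',
})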