teradataml 20.0.0.2__py3-none-any.whl → 20.0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of teradataml might be problematic.

Files changed (126)
  1. teradataml/LICENSE-3RD-PARTY.pdf +0 -0
  2. teradataml/README.md +315 -2
  3. teradataml/__init__.py +4 -0
  4. teradataml/_version.py +1 -1
  5. teradataml/analytics/analytic_function_executor.py +95 -8
  6. teradataml/analytics/byom/__init__.py +1 -1
  7. teradataml/analytics/json_parser/metadata.py +12 -3
  8. teradataml/analytics/json_parser/utils.py +7 -2
  9. teradataml/analytics/sqle/__init__.py +5 -1
  10. teradataml/analytics/table_operator/__init__.py +1 -1
  11. teradataml/analytics/uaf/__init__.py +1 -1
  12. teradataml/analytics/utils.py +4 -0
  13. teradataml/analytics/valib.py +18 -4
  14. teradataml/automl/__init__.py +51 -6
  15. teradataml/automl/data_preparation.py +59 -35
  16. teradataml/automl/data_transformation.py +58 -33
  17. teradataml/automl/feature_engineering.py +27 -12
  18. teradataml/automl/model_training.py +73 -46
  19. teradataml/common/constants.py +88 -29
  20. teradataml/common/garbagecollector.py +2 -1
  21. teradataml/common/messagecodes.py +19 -3
  22. teradataml/common/messages.py +6 -1
  23. teradataml/common/sqlbundle.py +64 -12
  24. teradataml/common/utils.py +246 -47
  25. teradataml/common/warnings.py +11 -0
  26. teradataml/context/context.py +161 -27
  27. teradataml/data/amazon_reviews_25.csv +26 -0
  28. teradataml/data/byom_example.json +11 -0
  29. teradataml/data/dataframe_example.json +18 -2
  30. teradataml/data/docs/byom/docs/DataRobotPredict.py +2 -2
  31. teradataml/data/docs/byom/docs/DataikuPredict.py +40 -1
  32. teradataml/data/docs/byom/docs/H2OPredict.py +2 -2
  33. teradataml/data/docs/byom/docs/ONNXEmbeddings.py +242 -0
  34. teradataml/data/docs/byom/docs/ONNXPredict.py +2 -2
  35. teradataml/data/docs/byom/docs/PMMLPredict.py +2 -2
  36. teradataml/data/docs/sqle/docs_17_20/NaiveBayes.py +1 -1
  37. teradataml/data/docs/sqle/docs_17_20/Shap.py +34 -6
  38. teradataml/data/docs/sqle/docs_17_20/TDNaiveBayesPredict.py +4 -4
  39. teradataml/data/docs/sqle/docs_17_20/TextParser.py +3 -3
  40. teradataml/data/docs/tableoperator/docs_17_20/Image2Matrix.py +118 -0
  41. teradataml/data/docs/uaf/docs_17_20/CopyArt.py +145 -0
  42. teradataml/data/docs/uaf/docs_17_20/DWT2D.py +4 -1
  43. teradataml/data/docs/uaf/docs_17_20/DickeyFuller.py +18 -21
  44. teradataml/data/hnsw_alter_data.csv +5 -0
  45. teradataml/data/hnsw_data.csv +10 -0
  46. teradataml/data/jsons/byom/h2opredict.json +1 -1
  47. teradataml/data/jsons/byom/onnxembeddings.json +266 -0
  48. teradataml/data/jsons/sqle/17.20/TD_Shap.json +0 -1
  49. teradataml/data/jsons/sqle/17.20/TD_TextParser.json +1 -1
  50. teradataml/data/jsons/sqle/20.00/TD_HNSW.json +296 -0
  51. teradataml/data/jsons/sqle/20.00/TD_HNSWPredict.json +206 -0
  52. teradataml/data/jsons/sqle/20.00/TD_HNSWSummary.json +32 -0
  53. teradataml/data/jsons/sqle/20.00/TD_KMeans.json +250 -0
  54. teradataml/data/jsons/sqle/20.00/TD_SMOTE.json +266 -0
  55. teradataml/data/jsons/sqle/20.00/TD_VectorDistance.json +278 -0
  56. teradataml/data/jsons/storedprocedure/17.20/TD_COPYART.json +71 -0
  57. teradataml/data/jsons/tableoperator/17.20/IMAGE2MATRIX.json +53 -0
  58. teradataml/data/jsons/uaf/17.20/TD_DICKEY_FULLER.json +10 -19
  59. teradataml/data/jsons/uaf/17.20/TD_SAX.json +3 -1
  60. teradataml/data/jsons/uaf/17.20/TD_WINDOWDFFT.json +15 -5
  61. teradataml/data/medical_readings.csv +101 -0
  62. teradataml/data/patient_profile.csv +101 -0
  63. teradataml/data/scripts/lightgbm/dataset.template +157 -0
  64. teradataml/data/scripts/lightgbm/lightgbm_class_functions.template +247 -0
  65. teradataml/data/scripts/lightgbm/lightgbm_function.template +216 -0
  66. teradataml/data/scripts/lightgbm/lightgbm_sklearn.template +159 -0
  67. teradataml/data/scripts/sklearn/sklearn_fit.py +194 -167
  68. teradataml/data/scripts/sklearn/sklearn_fit_predict.py +136 -115
  69. teradataml/data/scripts/sklearn/sklearn_function.template +14 -19
  70. teradataml/data/scripts/sklearn/sklearn_model_selection_split.py +155 -137
  71. teradataml/data/scripts/sklearn/sklearn_transform.py +129 -42
  72. teradataml/data/target_udt_data.csv +8 -0
  73. teradataml/data/templates/open_source_ml.json +3 -2
  74. teradataml/data/teradataml_example.json +8 -0
  75. teradataml/data/vectordistance_example.json +4 -0
  76. teradataml/dataframe/copy_to.py +8 -3
  77. teradataml/dataframe/data_transfer.py +11 -1
  78. teradataml/dataframe/dataframe.py +1049 -285
  79. teradataml/dataframe/dataframe_utils.py +152 -20
  80. teradataml/dataframe/functions.py +578 -35
  81. teradataml/dataframe/setop.py +11 -6
  82. teradataml/dataframe/sql.py +185 -16
  83. teradataml/dbutils/dbutils.py +1049 -115
  84. teradataml/dbutils/filemgr.py +48 -1
  85. teradataml/hyperparameter_tuner/optimizer.py +12 -1
  86. teradataml/lib/aed_0_1.dll +0 -0
  87. teradataml/opensource/__init__.py +1 -1
  88. teradataml/opensource/_base.py +1466 -0
  89. teradataml/opensource/_class.py +464 -0
  90. teradataml/opensource/{sklearn/constants.py → _constants.py} +21 -14
  91. teradataml/opensource/_lightgbm.py +949 -0
  92. teradataml/opensource/_sklearn.py +1008 -0
  93. teradataml/opensource/{sklearn/_wrapper_utils.py → _wrapper_utils.py} +5 -6
  94. teradataml/options/__init__.py +54 -38
  95. teradataml/options/configure.py +131 -27
  96. teradataml/options/display.py +13 -2
  97. teradataml/plot/axis.py +47 -8
  98. teradataml/plot/figure.py +33 -0
  99. teradataml/plot/plot.py +63 -13
  100. teradataml/scriptmgmt/UserEnv.py +5 -5
  101. teradataml/scriptmgmt/lls_utils.py +130 -40
  102. teradataml/store/__init__.py +12 -0
  103. teradataml/store/feature_store/__init__.py +0 -0
  104. teradataml/store/feature_store/constants.py +291 -0
  105. teradataml/store/feature_store/feature_store.py +2318 -0
  106. teradataml/store/feature_store/models.py +1505 -0
  107. teradataml/table_operators/Apply.py +32 -18
  108. teradataml/table_operators/Script.py +3 -1
  109. teradataml/table_operators/TableOperator.py +3 -1
  110. teradataml/table_operators/query_generator.py +3 -0
  111. teradataml/table_operators/table_operator_query_generator.py +3 -1
  112. teradataml/table_operators/table_operator_util.py +37 -38
  113. teradataml/table_operators/templates/dataframe_register.template +69 -0
  114. teradataml/utils/dtypes.py +51 -2
  115. teradataml/utils/internal_buffer.py +18 -0
  116. teradataml/utils/validators.py +99 -8
  117. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/METADATA +321 -5
  118. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/RECORD +121 -94
  119. teradataml/libaed_0_1.dylib +0 -0
  120. teradataml/libaed_0_1.so +0 -0
  121. teradataml/opensource/sklearn/__init__.py +0 -1
  122. teradataml/opensource/sklearn/_class.py +0 -255
  123. teradataml/opensource/sklearn/_sklearn_wrapper.py +0 -1800
  124. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/WHEEL +0 -0
  125. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/top_level.txt +0 -0
  126. {teradataml-20.0.0.2.dist-info → teradataml-20.0.0.4.dist-info}/zip-safe +0 -0
teradataml/data/scripts/lightgbm/lightgbm_function.template
@@ -0,0 +1,216 @@
+ import sys, json, io
+ import pickle, base64, importlib, numpy as np
+ from collections import OrderedDict
+
+ func_name = "<func_name>"
+ module_name = "<module_name>"
+ is_lake_system = <is_lake_system>
+ params = json.loads('<params>')
+ data_partition_column_indices = <partition_cols_indices>
+ data_partition_column_types = <partition_cols_types>
+ model_file_prefix = "<model_file_prefix>"  # Needed in case of lake system for writing model to /tmp.
+
+ DELIMITER = '\t'
+
+ def convert_to_type(val, typee):
+     if typee == 'int':
+         return int(val) if val != "" else np.nan
+     if typee == 'float':
+         if isinstance(val, str):
+             val = val.replace(' ', '')
+         return float(val) if val != "" else np.nan
+     if typee == 'bool':
+         return eval(val) if val != "" else None
+     return str(val) if val != "" else None
+
+ if not is_lake_system:
+     db = sys.argv[0].split("/")[1]
+
+ data_present = False
+ data_partition_column_values = []
+
+ while 1:
+     try:
+         line = input()
+         if line == '':  # Exit if user provides blank line.
+             break
+         else:
+             data_present = True
+             values = line.split(DELIMITER)
+             if not data_partition_column_values:
+                 # Partition column values are the same for all rows. Hence, read them only once.
+                 for i, val in enumerate(data_partition_column_indices):  # Only partition columns are read.
+                     data_partition_column_values.append(
+                         convert_to_type(values[val], typee=data_partition_column_types[i])
+                     )
+
+                 # Prepare the corresponding model file name and extract the model.
+                 partition_join = "_".join([str(x) for x in data_partition_column_values])
+                 # Replace '-' with '_' because partition column values can be negative, containing '-'.
+                 partition_join = partition_join.replace("-", "_")
+
+                 train_set = params.get("train_set")  # Gets file name prefix.
+                 model_file_path = f"{train_set}_{partition_join}" \
+                     if is_lake_system else \
+                     f"./{db}/{train_set}_{partition_join}"
+
+                 with open(model_file_path, "rb") as fp:
+                     params["train_set"] = pickle.loads(fp.read())
+
+                 valid_sets = params.get("valid_sets", None)  # Gets file name prefixes.
+                 if valid_sets:
+                     params["valid_sets"] = []
+                     for valid_set in valid_sets:
+                         model_file_path = f"{valid_set}_{partition_join}" \
+                             if is_lake_system else \
+                             f"./{db}/{valid_set}_{partition_join}"
+                         with open(model_file_path, "rb") as fp:
+                             params["valid_sets"].append(pickle.load(fp))
+
+     except EOFError:  # Exit if reached EOF or CTRL-D.
+         break
+
+ if not data_present:
+     sys.exit(0)
+
+ # Handle callbacks.
+ rec_eval = None
+ if "callbacks" in params and params["callbacks"] is not None:
+     callbacks = params["callbacks"]
+     callbacks = [callbacks] if not isinstance(callbacks, list) else callbacks
+     for i, callback in enumerate(callbacks):
+         c_module_name = callback["module"]
+         c_func_name = callback["func_name"]
+         c_kwargs = callback["kwargs"]
+         c_module = importlib.import_module(c_module_name)
+         if c_func_name == "record_evaluation":
+             # record_evaluation takes an empty dict. If the argument has elements in the
+             # dict, they will be deleted, as per the documentation from lightgbm quoted below:
+             # eval_result (dict) -
+             #     Dictionary used to store all evaluation results of all validation sets. This should
+             #     be initialized outside of your call to record_evaluation() and should be empty. Any
+             #     initial contents of the dictionary will be deleted.
+             rec_eval = {}
+             callbacks[i] = getattr(c_module, c_func_name)(rec_eval)
+         else:
+             callbacks[i] = getattr(c_module, c_func_name)(**c_kwargs)
+
+     params["callbacks"] = callbacks
+
+ module_ = importlib.import_module(module_name)
+
+ ### LightGBM training emits meaningful console output like the sample below.
+ ### Hence, capture it to show to the user.
+
+ # [LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.000190 seconds.
+ # You can set `force_row_wise=true` to remove the overhead.
+ # And if memory is not enough, you can set `force_col_wise=true`.
+ # [LightGBM] [Info] Total Bins 136
+ # [LightGBM] [Info] Number of data points in the train set: 97, number of used features: 4
+ # [LightGBM] [Info] Start training from score 0.556701
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [1] valid_0's l2: 0.219637 valid_1's l2: 0.219637
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [2] valid_0's l2: 0.196525 valid_1's l2: 0.196525
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [3] valid_0's l2: 0.178462 valid_1's l2: 0.178462
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [4] valid_0's l2: 0.162887 valid_1's l2: 0.162887
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [5] valid_0's l2: 0.150271 valid_1's l2: 0.150271
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [6] valid_0's l2: 0.140219 valid_1's l2: 0.140219
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [7] valid_0's l2: 0.131697 valid_1's l2: 0.131697
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [8] valid_0's l2: 0.124056 valid_1's l2: 0.124056
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [9] valid_0's l2: 0.117944 valid_1's l2: 0.117944
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [10] valid_0's l2: 0.11263 valid_1's l2: 0.11263
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [11] valid_0's l2: 0.105228 valid_1's l2: 0.105228
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [12] valid_0's l2: 0.0981571 valid_1's l2: 0.0981571
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [13] valid_0's l2: 0.0924294 valid_1's l2: 0.0924294
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [14] valid_0's l2: 0.0877899 valid_1's l2: 0.0877899
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [15] valid_0's l2: 0.084032 valid_1's l2: 0.084032
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [16] valid_0's l2: 0.080988 valid_1's l2: 0.080988
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [17] valid_0's l2: 0.0785224 valid_1's l2: 0.0785224
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [18] valid_0's l2: 0.0765253 valid_1's l2: 0.0765253
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [19] valid_0's l2: 0.0750803 valid_1's l2: 0.0750803
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [20] valid_0's l2: 0.0738915 valid_1's l2: 0.0738915
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [21] valid_0's l2: 0.07288 valid_1's l2: 0.07288
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [22] valid_0's l2: 0.0718676 valid_1's l2: 0.0718676
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [23] valid_0's l2: 0.0706037 valid_1's l2: 0.0706037
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [24] valid_0's l2: 0.0695799 valid_1's l2: 0.0695799
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [25] valid_0's l2: 0.0687507 valid_1's l2: 0.0687507
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [26] valid_0's l2: 0.0680819 valid_1's l2: 0.0680819
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [27] valid_0's l2: 0.0674077 valid_1's l2: 0.0674077
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [28] valid_0's l2: 0.0665111 valid_1's l2: 0.0665111
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [29] valid_0's l2: 0.0659656 valid_1's l2: 0.0659656
+ # [LightGBM] [Warning] No further splits with positive gain, best gain: -inf
+ # [30] valid_0's l2: 0.0652665 valid_1's l2: 0.0652665
+ result = ""
+ stdout = None
+ try:
+     stdout = sys.stdout
+     new_stdout = io.StringIO()
+     sys.stdout = new_stdout
+     trained_model = getattr(module_, func_name)(**params)
+     result = new_stdout.getvalue()
+ except Exception:
+     raise
+ finally:
+     sys.stdout = stdout
+
+ model_str = pickle.dumps(trained_model)
+ console_output_str = result.encode()
+
+ if is_lake_system:
+     model_file_path = f"/tmp/{model_file_prefix}_{partition_join}.pickle"
+     model_console_output_path = f"/tmp/{model_file_prefix}_{partition_join}_console_output.pickle"
+
+     # Write to file in Vantage, to be used in predict/scoring.
+     with open(model_file_path, "wb") as fp:
+         fp.write(model_str)
+
+     with open(model_console_output_path, "wb") as fpc:
+         fpc.write(console_output_str)
+
+
+ model_data = model_file_path if is_lake_system else base64.b64encode(model_str)
+ console_output = model_console_output_path if is_lake_system else base64.b64encode(console_output_str)
+
+ output_data = [model_data, console_output]
+
+ if rec_eval is not None:
+     rec_eval = pickle.dumps(rec_eval)
+     if is_lake_system:
+         rec_eval_file_path = f"/tmp/{model_file_prefix}_{partition_join}_rec_eval.pickle"
+
+         with open(rec_eval_file_path, "wb") as fp:
+             fp.write(rec_eval)
+
+     rec_eval = rec_eval_file_path if is_lake_system else base64.b64encode(rec_eval)
+
+     output_data.append(rec_eval)
+
+ print(*(data_partition_column_values + output_data), sep=DELIMITER)
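
A note on the callbacks handling in the template above: record_evaluation is special-cased because its eval_result dict must be created empty and read back after training, which is why the template keeps a separate rec_eval reference and pickles it alongside the model. A minimal standalone sketch of that round trip, with random illustrative data (it assumes only that the lightgbm package is installed):

    import lightgbm as lgb
    import numpy as np

    # Random toy data; values are illustrative only.
    rng = np.random.default_rng(0)
    X, y = rng.normal(size=(100, 4)), rng.normal(size=100)
    train = lgb.Dataset(X[:80], label=y[:80])
    valid = lgb.Dataset(X[80:], label=y[80:], reference=train)

    rec_eval = {}  # must start empty; record_evaluation clears any initial contents
    booster = lgb.train(
        {"objective": "regression", "verbosity": -1},
        train,
        valid_sets=[valid],
        callbacks=[lgb.record_evaluation(rec_eval)],
    )
    # rec_eval now maps validation-set name -> metric -> per-iteration values,
    # which is what the template pickles and returns alongside the model.
    print(rec_eval["valid_0"]["l2"][:3])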
teradataml/data/scripts/lightgbm/lightgbm_sklearn.template
@@ -0,0 +1,159 @@
+ import sys, json
+ import pickle, base64, importlib, numpy as np
+ from collections import OrderedDict
+
+ DELIMITER = '\t'
+
+ func_name = <func_name>
+ params = json.loads('<params>')
+ is_lake_system = <is_lake_system>
+ model_file_prefix = <model_file_prefix>
+
+ def convert_to_type(val, typee):
+     if typee == 'int':
+         return int(val) if val != "" else np.nan
+     if typee == 'float':
+         if isinstance(val, str):
+             val = val.replace(' ', '')
+         return float(val) if val != "" else np.nan
+     if typee == 'bool':
+         return eval(val) if val != "" else None
+     return str(val) if val != "" else None
+
+ def splitter(strr, delim=",", convert_to="str"):
+     """
+     Split the string based on the delimiter and convert to the type specified.
+     """
+     if strr == "None":
+         return []
+     return [convert_to_type(i, convert_to) for i in strr.split(delim)]
+
+
+ if not is_lake_system:
+     db = sys.argv[0].split("/")[1]
+
+ data_partition_column_indices = <partition_cols_indices>
+ data_column_types = <types_of_data_cols>
+
+ data_partition_column_types = [data_column_types[idx] for idx in data_partition_column_indices]
+
+ # Data related arguments information of indices and types.
+ data_args_indices_types = OrderedDict()
+
+ # Data related arguments values - prepare dictionary and populate data later.
+ data_args_values = {}
+
+ data_args_info_str = <data_args_info_str>
+ for data_arg in data_args_info_str.split("--"):
+     arg_name, indices, types = data_arg.split("-")
+     indices = splitter(indices, convert_to="int")
+     types = splitter(types)
+
+     data_args_indices_types[arg_name] = {"indices": indices, "types": types}
+     data_args_values[arg_name] = []  # Keep empty for each data arg name; populate data later.
+
+ data_partition_column_values = []
+ data_present = False
+
+ model = None
+
+ # Read data - column information is passed as a command line argument and stored in
+ # the data_args_indices_types dictionary.
+ while 1:
+     try:
+         line = input()
+         if line == '':  # Exit if user provides blank line.
+             break
+         else:
+             data_present = True
+             values = line.split(DELIMITER)
+             if not data_partition_column_values:
+                 # Partition column values are the same for all rows. Hence, read them only once.
+                 for i, val in enumerate(data_partition_column_indices):
+                     data_partition_column_values.append(
+                         convert_to_type(values[val], typee=data_partition_column_types[i])
+                     )
+
+                 # Prepare the corresponding model file name and extract the model.
+                 partition_join = "_".join([str(x) for x in data_partition_column_values])
+                 # Replace '-' with '_' because partition column values can be negative, containing '-'.
+                 partition_join = partition_join.replace("-", "_")
+
+
+                 model_file_path = f"{model_file_prefix}_{partition_join}" \
+                     if is_lake_system else \
+                     f"./{db}/{model_file_prefix}_{partition_join}"
+
+                 with open(model_file_path, "rb") as fp:
+                     model = pickle.loads(fp.read())
+
+                 if model is None:
+                     sys.exit("Model file is not installed in Vantage.")
+
+             # Prepare data dictionary containing only arguments related to data.
+             for arg_name in data_args_values:
+                 data_indices = data_args_indices_types[arg_name]["indices"]
+                 types = data_args_indices_types[arg_name]["types"]
+                 cur_row = []
+                 for idx, data_idx in enumerate(data_indices):
+                     cur_row.append(convert_to_type(values[data_idx], types[idx]))
+                 data_args_values[arg_name].append(cur_row)
+     except EOFError:  # Exit if reached EOF or CTRL-D.
+         break
+
+ if not data_present:
+     sys.exit(0)
+
+ # Handle callbacks.
+ rec_eval = None
+ if "callbacks" in params and params["callbacks"] is not None:
+     callbacks = params["callbacks"]
+     callbacks = [callbacks] if not isinstance(callbacks, list) else callbacks
+     for i, callback in enumerate(callbacks):
+         c_module_name = callback["module"]
+         c_func_name = callback["func_name"]
+         c_kwargs = callback["kwargs"]
+         c_module = importlib.import_module(c_module_name)
+         if c_func_name == "record_evaluation":
+             # record_evaluation takes an empty dict. If the argument has elements in the
+             # dict, they will be deleted, as per the documentation from lightgbm quoted below:
+             # eval_result (dict) -
+             #     Dictionary used to store all evaluation results of all validation sets. This should
+             #     be initialized outside of your call to record_evaluation() and should be empty. Any
+             #     initial contents of the dictionary will be deleted.
+             rec_eval = {}
+             callbacks[i] = getattr(c_module, c_func_name)(rec_eval)
+         else:
+             callbacks[i] = getattr(c_module, c_func_name)(**c_kwargs)
+
+     params["callbacks"] = callbacks
+
+ # Update data as numpy arrays.
+ for arg_name in data_args_values:
+     np_values = np.array(data_args_values[arg_name])
+     data_args_values[arg_name] = np_values
+     if arg_name == "sample_weight":
+         data_args_values[arg_name] = np_values.ravel()
+
+ # Combine all arguments.
+ all_args = {**data_args_values, **params}
+
+ trained_model = getattr(model, func_name)(**all_args)
+
+ model_data = 0
+ if func_name == "fit":
+     model_str = pickle.dumps(trained_model)
+
+     if is_lake_system:
+         model_file_path = f"/tmp/{model_file_prefix}_{partition_join}.pickle"
+
+         # Write to file in Vantage, to be used in predict/scoring.
+         with open(model_file_path, "wb") as fp:
+             fp.write(model_str)
+
+     model_data = model_file_path if is_lake_system else base64.b64encode(model_str)
+
+ elif func_name == "score":
+     model_data = trained_model
+
+ print(*(data_partition_column_values + [model_data]), sep=DELIMITER)
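
For reference, the <data_args_info_str> placeholder used above encodes, for each data argument, the input column indices that feed it and their types, with '--' separating arguments and '-' separating the fields of a single argument. A small standalone check of the two helper functions against an invented encoding:

    import numpy as np

    def convert_to_type(val, typee):
        # Same parsing rules as the template above.
        if typee == 'int':
            return int(val) if val != "" else np.nan
        if typee == 'float':
            if isinstance(val, str):
                val = val.replace(' ', '')
            return float(val) if val != "" else np.nan
        if typee == 'bool':
            return eval(val) if val != "" else None
        return str(val) if val != "" else None

    def splitter(strr, delim=",", convert_to="str"):
        if strr == "None":
            return []
        return [convert_to_type(i, convert_to) for i in strr.split(delim)]

    # Invented encoding: argument X reads columns 0, 1, 2 as floats; y reads column 3 as int.
    sample = "X-0,1,2-float,float,float--y-3-int"
    for data_arg in sample.split("--"):
        arg_name, indices, types = data_arg.split("-")
        print(arg_name, splitter(indices, convert_to="int"), splitter(types))
    # X [0, 1, 2] ['float', 'float', 'float']
    # y [3] ['int']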