workbench 0.8.174-py3-none-any.whl → 0.8.227-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of workbench has been flagged as potentially problematic.
- workbench/__init__.py +1 -0
- workbench/algorithms/dataframe/__init__.py +1 -2
- workbench/algorithms/dataframe/compound_dataset_overlap.py +321 -0
- workbench/algorithms/dataframe/feature_space_proximity.py +168 -75
- workbench/algorithms/dataframe/fingerprint_proximity.py +422 -86
- workbench/algorithms/dataframe/projection_2d.py +44 -21
- workbench/algorithms/dataframe/proximity.py +259 -305
- workbench/algorithms/graph/light/proximity_graph.py +12 -11
- workbench/algorithms/models/cleanlab_model.py +382 -0
- workbench/algorithms/models/noise_model.py +388 -0
- workbench/algorithms/sql/column_stats.py +0 -1
- workbench/algorithms/sql/correlations.py +0 -1
- workbench/algorithms/sql/descriptive_stats.py +0 -1
- workbench/algorithms/sql/outliers.py +3 -3
- workbench/api/__init__.py +5 -1
- workbench/api/df_store.py +17 -108
- workbench/api/endpoint.py +14 -12
- workbench/api/feature_set.py +117 -11
- workbench/api/meta.py +0 -1
- workbench/api/meta_model.py +289 -0
- workbench/api/model.py +52 -21
- workbench/api/parameter_store.py +3 -52
- workbench/cached/cached_meta.py +0 -1
- workbench/cached/cached_model.py +49 -11
- workbench/core/artifacts/__init__.py +11 -2
- workbench/core/artifacts/artifact.py +7 -7
- workbench/core/artifacts/data_capture_core.py +8 -1
- workbench/core/artifacts/df_store_core.py +114 -0
- workbench/core/artifacts/endpoint_core.py +323 -205
- workbench/core/artifacts/feature_set_core.py +249 -45
- workbench/core/artifacts/model_core.py +133 -101
- workbench/core/artifacts/parameter_store_core.py +98 -0
- workbench/core/cloud_platform/aws/aws_account_clamp.py +48 -2
- workbench/core/cloud_platform/cloud_meta.py +0 -1
- workbench/core/pipelines/pipeline_executor.py +1 -1
- workbench/core/transforms/features_to_model/features_to_model.py +60 -44
- workbench/core/transforms/model_to_endpoint/model_to_endpoint.py +43 -10
- workbench/core/transforms/pandas_transforms/pandas_to_features.py +38 -2
- workbench/core/views/training_view.py +113 -42
- workbench/core/views/view.py +53 -3
- workbench/core/views/view_utils.py +4 -4
- workbench/model_script_utils/model_script_utils.py +339 -0
- workbench/model_script_utils/pytorch_utils.py +405 -0
- workbench/model_script_utils/uq_harness.py +277 -0
- workbench/model_scripts/chemprop/chemprop.template +774 -0
- workbench/model_scripts/chemprop/generated_model_script.py +774 -0
- workbench/model_scripts/chemprop/model_script_utils.py +339 -0
- workbench/model_scripts/chemprop/requirements.txt +3 -0
- workbench/model_scripts/custom_models/chem_info/fingerprints.py +175 -0
- workbench/model_scripts/custom_models/chem_info/mol_descriptors.py +18 -7
- workbench/model_scripts/custom_models/chem_info/mol_standardize.py +80 -58
- workbench/model_scripts/custom_models/chem_info/molecular_descriptors.py +0 -1
- workbench/model_scripts/custom_models/chem_info/morgan_fingerprints.py +1 -2
- workbench/model_scripts/custom_models/proximity/feature_space_proximity.py +194 -0
- workbench/model_scripts/custom_models/proximity/feature_space_proximity.template +8 -10
- workbench/model_scripts/custom_models/uq_models/bayesian_ridge.template +7 -8
- workbench/model_scripts/custom_models/uq_models/ensemble_xgb.template +20 -21
- workbench/model_scripts/custom_models/uq_models/feature_space_proximity.py +194 -0
- workbench/model_scripts/custom_models/uq_models/gaussian_process.template +5 -11
- workbench/model_scripts/custom_models/uq_models/ngboost.template +15 -16
- workbench/model_scripts/ensemble_xgb/ensemble_xgb.template +15 -17
- workbench/model_scripts/meta_model/generated_model_script.py +209 -0
- workbench/model_scripts/meta_model/meta_model.template +209 -0
- workbench/model_scripts/pytorch_model/generated_model_script.py +443 -499
- workbench/model_scripts/pytorch_model/model_script_utils.py +339 -0
- workbench/model_scripts/pytorch_model/pytorch.template +440 -496
- workbench/model_scripts/pytorch_model/pytorch_utils.py +405 -0
- workbench/model_scripts/pytorch_model/requirements.txt +1 -1
- workbench/model_scripts/pytorch_model/uq_harness.py +277 -0
- workbench/model_scripts/scikit_learn/generated_model_script.py +7 -12
- workbench/model_scripts/scikit_learn/scikit_learn.template +4 -9
- workbench/model_scripts/script_generation.py +15 -12
- workbench/model_scripts/uq_models/generated_model_script.py +248 -0
- workbench/model_scripts/xgb_model/generated_model_script.py +371 -403
- workbench/model_scripts/xgb_model/model_script_utils.py +339 -0
- workbench/model_scripts/xgb_model/uq_harness.py +277 -0
- workbench/model_scripts/xgb_model/xgb_model.template +367 -399
- workbench/repl/workbench_shell.py +18 -14
- workbench/resources/open_source_api.key +1 -1
- workbench/scripts/endpoint_test.py +162 -0
- workbench/scripts/lambda_test.py +73 -0
- workbench/scripts/meta_model_sim.py +35 -0
- workbench/scripts/ml_pipeline_sqs.py +122 -6
- workbench/scripts/training_test.py +85 -0
- workbench/themes/dark/custom.css +59 -0
- workbench/themes/dark/plotly.json +5 -5
- workbench/themes/light/custom.css +153 -40
- workbench/themes/light/plotly.json +9 -9
- workbench/themes/midnight_blue/custom.css +59 -0
- workbench/utils/aws_utils.py +0 -1
- workbench/utils/chem_utils/fingerprints.py +87 -46
- workbench/utils/chem_utils/mol_descriptors.py +18 -7
- workbench/utils/chem_utils/mol_standardize.py +80 -58
- workbench/utils/chem_utils/projections.py +16 -6
- workbench/utils/chem_utils/vis.py +25 -27
- workbench/utils/chemprop_utils.py +141 -0
- workbench/utils/config_manager.py +2 -6
- workbench/utils/endpoint_utils.py +5 -7
- workbench/utils/license_manager.py +2 -6
- workbench/utils/markdown_utils.py +57 -0
- workbench/utils/meta_model_simulator.py +499 -0
- workbench/utils/metrics_utils.py +256 -0
- workbench/utils/model_utils.py +274 -87
- workbench/utils/pipeline_utils.py +0 -1
- workbench/utils/plot_utils.py +159 -34
- workbench/utils/pytorch_utils.py +87 -0
- workbench/utils/shap_utils.py +11 -57
- workbench/utils/theme_manager.py +95 -30
- workbench/utils/xgboost_local_crossfold.py +267 -0
- workbench/utils/xgboost_model_utils.py +127 -220
- workbench/web_interface/components/experiments/outlier_plot.py +0 -1
- workbench/web_interface/components/model_plot.py +16 -2
- workbench/web_interface/components/plugin_unit_test.py +5 -3
- workbench/web_interface/components/plugins/ag_table.py +2 -4
- workbench/web_interface/components/plugins/confusion_matrix.py +3 -6
- workbench/web_interface/components/plugins/model_details.py +48 -80
- workbench/web_interface/components/plugins/scatter_plot.py +192 -92
- workbench/web_interface/components/settings_menu.py +184 -0
- workbench/web_interface/page_views/main_page.py +0 -1
- {workbench-0.8.174.dist-info → workbench-0.8.227.dist-info}/METADATA +31 -17
- {workbench-0.8.174.dist-info → workbench-0.8.227.dist-info}/RECORD +125 -111
- {workbench-0.8.174.dist-info → workbench-0.8.227.dist-info}/entry_points.txt +4 -0
- {workbench-0.8.174.dist-info → workbench-0.8.227.dist-info}/licenses/LICENSE +1 -1
- workbench/core/cloud_platform/aws/aws_df_store.py +0 -404
- workbench/core/cloud_platform/aws/aws_parameter_store.py +0 -280
- workbench/model_scripts/custom_models/meta_endpoints/example.py +0 -53
- workbench/model_scripts/custom_models/proximity/generated_model_script.py +0 -138
- workbench/model_scripts/custom_models/proximity/proximity.py +0 -384
- workbench/model_scripts/custom_models/uq_models/generated_model_script.py +0 -393
- workbench/model_scripts/custom_models/uq_models/mapie.template +0 -502
- workbench/model_scripts/custom_models/uq_models/meta_uq.template +0 -386
- workbench/model_scripts/custom_models/uq_models/proximity.py +0 -384
- workbench/model_scripts/ensemble_xgb/generated_model_script.py +0 -279
- workbench/model_scripts/quant_regression/quant_regression.template +0 -279
- workbench/model_scripts/quant_regression/requirements.txt +0 -1
- workbench/themes/quartz/base_css.url +0 -1
- workbench/themes/quartz/custom.css +0 -117
- workbench/themes/quartz/plotly.json +0 -642
- workbench/themes/quartz_dark/base_css.url +0 -1
- workbench/themes/quartz_dark/custom.css +0 -131
- workbench/themes/quartz_dark/plotly.json +0 -642
- workbench/utils/fast_inference.py +0 -167
- workbench/utils/resource_utils.py +0 -39
- {workbench-0.8.174.dist-info → workbench-0.8.227.dist-info}/WHEEL +0 -0
- {workbench-0.8.174.dist-info → workbench-0.8.227.dist-info}/top_level.txt +0 -0
@@ -1,384 +0,0 @@
-import pandas as pd
-import numpy as np
-from sklearn.preprocessing import StandardScaler
-from sklearn.neighbors import NearestNeighbors
-from typing import List, Dict
-import logging
-import pickle
-import os
-import json
-from pathlib import Path
-from enum import Enum
-
-# Set up logging
-log = logging.getLogger("workbench")
-
-
-# Enumerated Proximity Types (distance or similarity)
-class ProximityType(Enum):
-    DISTANCE = "distance"
-    SIMILARITY = "similarity"
-
-
-class Proximity:
-    def __init__(
-        self,
-        df: pd.DataFrame,
-        id_column: str,
-        features: List[str],
-        target: str = None,
-        track_columns: List[str] = None,
-        n_neighbors: int = 10,
-    ):
-        """
-        Initialize the Proximity class.
-
-        Args:
-            df (pd.DataFrame): DataFrame containing data for neighbor computations.
-            id_column (str): Name of the column used as the identifier.
-            features (List[str]): List of feature column names to be used for neighbor computations.
-            target (str, optional): Name of the target column. Defaults to None.
-            track_columns (List[str], optional): Additional columns to track in results. Defaults to None.
-            n_neighbors (int): Number of neighbors to compute. Defaults to 10.
-        """
-        self.df = df.dropna(subset=features).copy()
-        self.id_column = id_column
-        self.n_neighbors = min(n_neighbors, len(self.df) - 1)
-        self.target = target
-        self.features = features
-        self.scaler = None
-        self.X = None
-        self.nn = None
-        self.proximity_type = None
-        self.track_columns = track_columns or []
-
-        # Right now we only support numeric features, so remove any columns that are not numeric
-        non_numeric_features = self.df[self.features].select_dtypes(exclude=["number"]).columns.tolist()
-        if non_numeric_features:
-            log.warning(f"Non-numeric features {non_numeric_features} aren't currently supported...")
-            self.features = [f for f in self.features if f not in non_numeric_features]
-
-        # Build the proximity model
-        self.build_proximity_model()
-
-    def build_proximity_model(self) -> None:
-        """Standardize features and fit Nearest Neighbors model.
-        Note: This method can be overridden in subclasses for custom behavior."""
-        self.proximity_type = ProximityType.DISTANCE
-        self.scaler = StandardScaler()
-        self.X = self.scaler.fit_transform(self.df[self.features])
-        self.nn = NearestNeighbors(n_neighbors=self.n_neighbors + 1).fit(self.X)
-
-    def all_neighbors(self) -> pd.DataFrame:
-        """
-        Compute nearest neighbors for all rows in the dataset.
-
-        Returns:
-            pd.DataFrame: A DataFrame of neighbors and their distances.
-        """
-        distances, indices = self.nn.kneighbors(self.X)
-        results = []
-
-        for i, (dists, nbrs) in enumerate(zip(distances, indices)):
-            query_id = self.df.iloc[i][self.id_column]
-
-            # Process neighbors
-            for neighbor_idx, dist in zip(nbrs, dists):
-                # Skip self (neighbor index == current row index)
-                if neighbor_idx == i:
-                    continue
-                results.append(self._build_neighbor_result(query_id=query_id, neighbor_idx=neighbor_idx, distance=dist))
-
-        return pd.DataFrame(results)
-
-    def neighbors(
-        self,
-        query_df: pd.DataFrame,
-        radius: float = None,
-        include_self: bool = True,
-    ) -> pd.DataFrame:
-        """
-        Return neighbors for rows in a query DataFrame.
-
-        Args:
-            query_df: DataFrame containing query points
-            radius: If provided, find all neighbors within this radius
-            include_self: Whether to include self in results (if present)
-
-        Returns:
-            DataFrame containing neighbors and distances
-
-        Note: The query DataFrame must include the feature columns. The id_column is optional.
-        """
-        # Check if all required features are present
-        missing = set(self.features) - set(query_df.columns)
-        if missing:
-            raise ValueError(f"Query DataFrame is missing required feature columns: {missing}")
-
-        # Check if id_column is present
-        id_column_present = self.id_column in query_df.columns
-
-        # None of the features can be NaNs, so report rows with NaNs and then drop them
-        rows_with_nan = query_df[self.features].isna().any(axis=1)
-
-        # Print the ID column for rows with NaNs
-        if rows_with_nan.any():
-            log.warning(f"Found {rows_with_nan.sum()} rows with NaNs in feature columns:")
-            log.warning(query_df.loc[rows_with_nan, self.id_column])
-
-        # Drop rows with NaNs in feature columns and reassign to query_df
-        query_df = query_df.dropna(subset=self.features)
-
-        # Transform the query features using the model's scaler
-        X_query = self.scaler.transform(query_df[self.features])
-
-        # Get neighbors using either radius or k-nearest neighbors
-        if radius is not None:
-            distances, indices = self.nn.radius_neighbors(X_query, radius=radius)
-        else:
-            distances, indices = self.nn.kneighbors(X_query)
-
-        # Build results
-        all_results = []
-        for i, (dists, nbrs) in enumerate(zip(distances, indices)):
-            # Use the ID from the query DataFrame if available, otherwise use the row index
-            query_id = query_df.iloc[i][self.id_column] if id_column_present else f"query_{i}"
-
-            for neighbor_idx, dist in zip(nbrs, dists):
-                # Skip if the neighbor is the query itself and include_self is False
-                neighbor_id = self.df.iloc[neighbor_idx][self.id_column]
-                if not include_self and neighbor_id == query_id:
-                    continue
-
-                all_results.append(
-                    self._build_neighbor_result(query_id=query_id, neighbor_idx=neighbor_idx, distance=dist)
-                )
-
-        return pd.DataFrame(all_results)
-
-    def _build_neighbor_result(self, query_id, neighbor_idx: int, distance: float) -> Dict:
-        """
-        Internal: Build a result dictionary for a single neighbor.
-
-        Args:
-            query_id: ID of the query point
-            neighbor_idx: Index of the neighbor in the original DataFrame
-            distance: Distance between query and neighbor
-
-        Returns:
-            Dictionary containing neighbor information
-        """
-        neighbor_id = self.df.iloc[neighbor_idx][self.id_column]
-
-        # Basic neighbor info
-        neighbor_info = {
-            self.id_column: query_id,
-            "neighbor_id": neighbor_id,
-            "distance": distance,
-        }
-
-        # Determine which additional columns to include
-        relevant_cols = [self.target, "prediction"] if self.target else []
-        relevant_cols += [c for c in self.df.columns if "_proba" in c or "residual" in c]
-        relevant_cols += ["outlier"]
-
-        # Add user-specified columns
-        relevant_cols += self.track_columns
-
-        # Add values for each relevant column that exists in the dataframe
-        for col in filter(lambda c: c in self.df.columns, relevant_cols):
-            neighbor_info[col] = self.df.iloc[neighbor_idx][col]
-
-        return neighbor_info
-
-    def serialize(self, directory: str) -> None:
-        """
-        Serialize the Proximity model to a directory.
-
-        Args:
-            directory: Directory path to save the model components
-        """
-        # Create directory if it doesn't exist
-        os.makedirs(directory, exist_ok=True)
-
-        # Save metadata
-        metadata = {
-            "id_column": self.id_column,
-            "features": self.features,
-            "target": self.target,
-            "track_columns": self.track_columns,
-            "n_neighbors": self.n_neighbors,
-        }
-
-        with open(os.path.join(directory, "metadata.json"), "w") as f:
-            json.dump(metadata, f)
-
-        # Save the DataFrame
-        self.df.to_pickle(os.path.join(directory, "df.pkl"))
-
-        # Save the scaler and nearest neighbors model
-        with open(os.path.join(directory, "scaler.pkl"), "wb") as f:
-            pickle.dump(self.scaler, f)
-
-        with open(os.path.join(directory, "nn_model.pkl"), "wb") as f:
-            pickle.dump(self.nn, f)
-
-        log.info(f"Proximity model serialized to {directory}")
-
-    @classmethod
-    def deserialize(cls, directory: str) -> "Proximity":
-        """
-        Deserialize a Proximity model from a directory.
-
-        Args:
-            directory: Directory path containing the serialized model components
-
-        Returns:
-            Proximity: A new Proximity instance
-        """
-        directory_path = Path(directory)
-        if not directory_path.exists() or not directory_path.is_dir():
-            raise ValueError(f"Directory {directory} does not exist or is not a directory")
-
-        # Load metadata
-        with open(os.path.join(directory, "metadata.json"), "r") as f:
-            metadata = json.load(f)
-
-        # Load DataFrame
-        df_path = os.path.join(directory, "df.pkl")
-        if not os.path.exists(df_path):
-            raise FileNotFoundError(f"DataFrame file not found at {df_path}")
-        df = pd.read_pickle(df_path)
-
-        # Create instance but skip _prepare_data
-        instance = cls.__new__(cls)
-        instance.df = df
-        instance.id_column = metadata["id_column"]
-        instance.features = metadata["features"]
-        instance.target = metadata["target"]
-        instance.track_columns = metadata["track_columns"]
-        instance.n_neighbors = metadata["n_neighbors"]
-
-        # Load scaler and nn model
-        with open(os.path.join(directory, "scaler.pkl"), "rb") as f:
-            instance.scaler = pickle.load(f)
-
-        with open(os.path.join(directory, "nn_model.pkl"), "rb") as f:
-            instance.nn = pickle.load(f)
-
-        # Load X from scaler transform
-        instance.X = instance.scaler.transform(instance.df[instance.features])
-
-        log.info(f"Proximity model deserialized from {directory}")
-        return instance
-
-
-# Testing the Proximity class
-if __name__ == "__main__":
-
-    pd.set_option("display.max_columns", None)
-    pd.set_option("display.width", 1000)
-
-    # Create a sample DataFrame
-    data = {
-        "ID": [1, 2, 3, 4, 5],
-        "Feature1": [0.1, 0.2, 0.3, 0.4, 0.5],
-        "Feature2": [0.5, 0.4, 0.3, 0.2, 0.1],
-        "Feature3": [2.5, 2.4, 2.3, 2.3, np.nan],
-    }
-    df = pd.DataFrame(data)
-
-    # Test the Proximity class
-    features = ["Feature1", "Feature2", "Feature3"]
-    prox = Proximity(df, id_column="ID", features=features, n_neighbors=3)
-    print(prox.all_neighbors())
-
-    # Test the neighbors method
-    print(prox.neighbors(query_df=df.iloc[[0]]))
-
-    # Test the neighbors method with radius
-    print(prox.neighbors(query_df=df.iloc[0:2], radius=2.0))
-
-    # Test with data that isn't in the 'train' dataframe
-    query_data = {
-        "ID": [6],
-        "Feature1": [0.31],
-        "Feature2": [0.31],
-        "Feature3": [2.31],
-    }
-    query_df = pd.DataFrame(query_data)
-    print(prox.neighbors(query_df=query_df))
-
-    # Test with Features list
-    prox = Proximity(df, id_column="ID", features=["Feature1"], n_neighbors=2)
-    print(prox.all_neighbors())
-
-    # Create a sample DataFrame
-    data = {
-        "foo_id": ["a", "b", "c", "d", "e"],  # Testing string IDs
-        "Feature1": [0.1, 0.2, 0.3, 0.4, 0.5],
-        "Feature2": [0.5, 0.4, 0.3, 0.2, 0.1],
-        "target": [1, 0, 1, 0, 5],
-    }
-    df = pd.DataFrame(data)
-
-    # Test with String Ids
-    prox = Proximity(
-        df,
-        id_column="foo_id",
-        features=["Feature1", "Feature2"],
-        target="target",
-        track_columns=["Feature1", "Feature2"],
-        n_neighbors=3,
-    )
-    print(prox.all_neighbors())
-
-    # Test the neighbors method
-    print(prox.neighbors(query_df=df.iloc[0:2]))
-
-    # Time neighbors with all IDs versus calling all_neighbors
-    import time
-
-    start_time = time.time()
-    prox_df = prox.neighbors(query_df=df, include_self=False)
-    end_time = time.time()
-    print(f"Time taken for neighbors: {end_time - start_time:.4f} seconds")
-    start_time = time.time()
-    prox_df_all = prox.all_neighbors()
-    end_time = time.time()
-    print(f"Time taken for all_neighbors: {end_time - start_time:.4f} seconds")
-
-    # Now compare the two dataframes
-    print("Neighbors DataFrame:")
-    print(prox_df)
-    print("\nAll Neighbors DataFrame:")
-    print(prox_df_all)
-    # Check for any discrepancies
-    if prox_df.equals(prox_df_all):
-        print("The two DataFrames are equal :)")
-    else:
-        print("ERROR: The two DataFrames are not equal!")
-
-    # Test querying without the id_column
-    df_no_id = df.drop(columns=["foo_id"])
-    print(prox.neighbors(query_df=df_no_id, include_self=False))
-
-    # Test duplicate IDs
-    data = {
-        "foo_id": ["a", "b", "c", "d", "d"],  # Duplicate ID (d)
-        "Feature1": [0.1, 0.2, 0.3, 0.4, 0.5],
-        "Feature2": [0.5, 0.4, 0.3, 0.2, 0.1],
-        "target": [1, 0, 1, 0, 5],
-    }
-    df = pd.DataFrame(data)
-    prox = Proximity(df, id_column="foo_id", features=["Feature1", "Feature2"], target="target", n_neighbors=3)
-    print(df.equals(prox.df))
-
-    # Test with a categorical feature
-    from workbench.api import FeatureSet, Model
-
-    fs = FeatureSet("abalone_features")
-    model = Model("abalone-regression")
-    df = fs.pull_dataframe()
-    prox = Proximity(df, id_column=fs.id_column, features=model.features(), target=model.target())
-    print(prox.neighbors(query_df=df[0:2]))
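For context on what was dropped: the class above standardizes the features, fits scikit-learn's NearestNeighbors with n_neighbors + 1 so the self-match can be discarded, and persists its fitted state as metadata.json plus pickled DataFrame/scaler/model files. A minimal usage sketch (illustrative, not part of the diff; the toy data is made up and the import path is an assumption based on the 0.8.174 file list above):

    import pandas as pd
    # Import path is an assumption for a 0.8.174 install
    from workbench.algorithms.dataframe.proximity import Proximity

    df = pd.DataFrame({
        "id": [1, 2, 3, 4],
        "f1": [0.1, 0.2, 0.3, 0.4],
        "f2": [1.0, 0.9, 0.8, 0.7],
    })

    # Fit on two numeric features; n_neighbors is capped at len(df) - 1 internally
    prox = Proximity(df, id_column="id", features=["f1", "f2"], n_neighbors=2)
    print(prox.all_neighbors())  # one row per (query, neighbor) pair with distances

    # Round-trip the fitted state through a directory, then query the restored model
    prox.serialize("/tmp/prox_model")
    restored = Proximity.deserialize("/tmp/prox_model")
    print(restored.neighbors(query_df=df.iloc[[0]], include_self=False))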
@@ -1,279 +0,0 @@
-# Template Placeholders
-TEMPLATE_PARAMS = {
-    "model_type": "ensemble_regressor",
-    "target_column": "solubility",
-    "feature_list": ['molwt', 'mollogp', 'molmr', 'heavyatomcount', 'numhacceptors', 'numhdonors', 'numheteroatoms', 'numrotatablebonds', 'numvalenceelectrons', 'numaromaticrings', 'numsaturatedrings', 'numaliphaticrings', 'ringcount', 'tpsa', 'labuteasa', 'balabanj', 'bertzct'],
-    "model_metrics_s3_path": "s3://sandbox-sageworks-artifacts/models/aqsol-ensemble/training"
-}
-
-# Imports for XGB Model
-import xgboost as xgb
-import awswrangler as wr
-import numpy as np
-
-# Model Performance Scores
-from sklearn.metrics import (
-    mean_absolute_error,
-    r2_score,
-    root_mean_squared_error
-)
-
-from io import StringIO
-import json
-import argparse
-import os
-import pandas as pd
-
-
-# Function to check if dataframe is empty
-def check_dataframe(df: pd.DataFrame, df_name: str) -> None:
-    """
-    Check if the provided dataframe is empty and raise an exception if it is.
-
-    Args:
-        df (pd.DataFrame): DataFrame to check
-        df_name (str): Name of the DataFrame
-    """
-    if df.empty:
-        msg = f"*** The training data {df_name} has 0 rows! ***STOPPING***"
-        print(msg)
-        raise ValueError(msg)
-
-def match_features_case_insensitive(df: pd.DataFrame, model_features: list) -> pd.DataFrame:
-    """
-    Matches and renames the DataFrame's column names to match the model's feature names (case-insensitive).
-    Prioritizes exact case matches first, then falls back to case-insensitive matching if no exact match exists.
-
-    Args:
-        df (pd.DataFrame): The DataFrame with the original columns.
-        model_features (list): The desired list of feature names (mixed case allowed).
-
-    Returns:
-        pd.DataFrame: The DataFrame with renamed columns to match the model's feature names.
-    """
-    # Create a mapping for exact and case-insensitive matching
-    exact_match_set = set(df.columns)
-    column_map = {}
-
-    # Build the case-insensitive map (if we have any duplicate columns, the first one wins)
-    for col in df.columns:
-        lower_col = col.lower()
-        if lower_col not in column_map:
-            column_map[lower_col] = col
-
-    # Create a dictionary for renaming
-    rename_dict = {}
-    for feature in model_features:
-        # Check for an exact match first
-        if feature in exact_match_set:
-            rename_dict[feature] = feature
-
-        # If not an exact match, fall back to case-insensitive matching
-        elif feature.lower() in column_map:
-            rename_dict[column_map[feature.lower()]] = feature
-
-    # Rename the columns in the DataFrame to match the model's feature names
-    return df.rename(columns=rename_dict)
-
-
-if __name__ == "__main__":
-    """The main function is for training the XGBoost ensemble regression models"""
-
-    # Harness Template Parameters
-    target = TEMPLATE_PARAMS["target_column"]
-    feature_list = TEMPLATE_PARAMS["feature_list"]
-    model_metrics_s3_path = TEMPLATE_PARAMS["model_metrics_s3_path"]
-    models = {}
-
-    # Script arguments for input/output directories
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--model-dir", type=str, default=os.environ.get("SM_MODEL_DIR", "/opt/ml/model"))
-    parser.add_argument("--train", type=str, default=os.environ.get("SM_CHANNEL_TRAIN", "/opt/ml/input/data/train"))
-    parser.add_argument(
-        "--output-data-dir", type=str, default=os.environ.get("SM_OUTPUT_DATA_DIR", "/opt/ml/output/data")
-    )
-    args = parser.parse_args()
-
-    # Read the training data into DataFrames
-    training_files = [
-        os.path.join(args.train, file)
-        for file in os.listdir(args.train)
-        if file.endswith(".csv")
-    ]
-    print(f"Training Files: {training_files}")
-
-    # Combine files and read them all into a single pandas dataframe
-    df = pd.concat([pd.read_csv(file, engine="python") for file in training_files])
-
-    # Check if the dataframe is empty
-    check_dataframe(df, "training_df")
-
-    # Features/Target output
-    print(f"Target: {target}")
-    print(f"Features: {str(feature_list)}")
-    print(f"Data Shape: {df.shape}")
-
-    # Grab our Features and Target with traditional X, y handles
-    y = df[target]
-    X = df[feature_list]
-
-    # Train 50 models, each on its own bootstrap sample of the data
-    for model_id in range(50):
-        # Model Name
-        model_name = f"m_{model_id:02}"
-
-        # Bootstrap sample (50% with replacement)
-        sample_size = int(0.5 * len(X))
-        bootstrap_indices = np.random.choice(len(X), size=sample_size, replace=True)
-        X_train, y_train = X.iloc[bootstrap_indices], y.iloc[bootstrap_indices]
-        print(f"Training Model {model_name} with {len(X_train)} rows")
-        model = xgb.XGBRegressor(reg_alpha=0.1, reg_lambda=1.0)
-        model.fit(X_train, y_train)
-
-        # Store the model
-        models[model_name] = model
-
-    # Run predictions for each model
-    all_predictions = {model_name: model.predict(X) for model_name, model in models.items()}
-
-    # Create a copy of the provided DataFrame and add the new columns
-    result_df = df[[target]].copy()
-
-    # Add the model predictions to the DataFrame
-    for name, preds in all_predictions.items():
-        result_df[name] = preds
-
-    # Add the main prediction to the DataFrame (mean of all models)
-    result_df["prediction"] = result_df[[name for name in result_df.columns if name.startswith("m_")]].mean(axis=1)
-
-    # Now compute residuals on the mean prediction
-    result_df["residual"] = result_df[target] - result_df["prediction"]
-    result_df["residual_abs"] = result_df["residual"].abs()
-
-
-    # Save the results dataframe to S3
-    wr.s3.to_csv(
-        result_df,
-        path=f"{model_metrics_s3_path}/validation_predictions.csv",
-        index=False,
-    )
-
-    # Report Performance Metrics
-    rmse = root_mean_squared_error(result_df[target], result_df["prediction"])
-    mae = mean_absolute_error(result_df[target], result_df["prediction"])
-    r2 = r2_score(result_df[target], result_df["prediction"])
-    print(f"RMSE: {rmse:.3f}")
-    print(f"MAE: {mae:.3f}")
-    print(f"R2: {r2:.3f}")
-    print(f"NumRows: {len(result_df)}")
-
-    # Now save the models
-    for name, model in models.items():
-        model_path = os.path.join(args.model_dir, f"{name}.json")
-        print(f"Saving model: {model_path}")
-        model.save_model(model_path)
-
-    # Also save the features (this will validate input during predictions)
-    with open(os.path.join(args.model_dir, "feature_columns.json"), "w") as fp:
-        json.dump(feature_list, fp)
-
-
-def model_fn(model_dir) -> dict:
-    """Deserialize and return all the fitted models from the model directory.
-
-    Args:
-        model_dir (str): The directory where the models are stored.
-
-    Returns:
-        dict: A dictionary of the models.
-    """
-
-    # Load ALL the models from the model directory
-    models = {}
-    for file in os.listdir(model_dir):
-        if file.startswith("m_") and file.endswith(".json"):  # The ensemble models
-            # Load the model
-            model_path = os.path.join(model_dir, file)
-            print(f"Loading model: {model_path}")
-            model = xgb.XGBRegressor()
-            model.load_model(model_path)
-
-            # Store the model
-            m_name = os.path.splitext(file)[0]
-            models[m_name] = model
-
-    # Return all the models
-    return models
-
-
-def input_fn(input_data, content_type):
-    """Parse input data and return a DataFrame."""
-    if not input_data:
-        raise ValueError("Empty input data is not supported!")
-
-    # Decode bytes to string if necessary
-    if isinstance(input_data, bytes):
-        input_data = input_data.decode("utf-8")
-
-    if "text/csv" in content_type:
-        return pd.read_csv(StringIO(input_data))
-    elif "application/json" in content_type:
-        return pd.DataFrame(json.loads(input_data))  # Assumes JSON array of records
-    else:
-        raise ValueError(f"{content_type} not supported!")
-
-
-def output_fn(output_df, accept_type):
-    """Supports both CSV and JSON output formats."""
-    if "text/csv" in accept_type:
-        csv_output = output_df.fillna("N/A").to_csv(index=False)  # CSV with N/A for missing values
-        return csv_output, "text/csv"
-    elif "application/json" in accept_type:
-        return output_df.to_json(orient="records"), "application/json"  # JSON array of records (NaNs -> null)
-    else:
-        raise RuntimeError(f"{accept_type} accept type is not supported by this script.")
-
-
-def predict_fn(df, models) -> pd.DataFrame:
-    """Make Predictions with our XGB Ensemble Regression Model
-
-    Args:
-        df (pd.DataFrame): The input DataFrame
-        models (dict): The dictionary of models to use for predictions
-
-    Returns:
-        pd.DataFrame: The DataFrame with the predictions added
-    """
-
-    # Grab our feature columns (from training)
-    model_dir = os.environ.get("SM_MODEL_DIR", "/opt/ml/model")
-    with open(os.path.join(model_dir, "feature_columns.json")) as fp:
-        model_features = json.load(fp)
-    print(f"Model Features: {model_features}")
-
-    # We're going to match features in a case-insensitive manner, accounting for all the permutations
-    # - Model has a feature list that's any case ("Id", "taCos", "cOunT", "likes_tacos")
-    # - Incoming data has columns that are mixed case ("ID", "Tacos", "Count", "Likes_Tacos")
-    matched_df = match_features_case_insensitive(df, model_features)
-
-    # Predict the features against all the models
-    for name, model in models.items():
-        df[name] = model.predict(matched_df[model_features])
-
-    # Add quantiles for consistency with other UQ models
-    df["q_025"] = df[[name for name in df.columns if name.startswith("m_")]].quantile(0.025, axis=1)
-    df["q_975"] = df[[name for name in df.columns if name.startswith("m_")]].quantile(0.975, axis=1)
-    df["q_25"] = df[[name for name in df.columns if name.startswith("m_")]].quantile(0.25, axis=1)
-    df["q_75"] = df[[name for name in df.columns if name.startswith("m_")]].quantile(0.75, axis=1)
-
-    # Compute the mean, min, max and stddev of the predictions
-    df["prediction"] = df[[name for name in df.columns if name.startswith("m_")]].mean(axis=1)
-    df["p_min"] = df[[name for name in df.columns if name.startswith("m_")]].min(axis=1)
-    df["p_max"] = df[[name for name in df.columns if name.startswith("m_")]].max(axis=1)
-    df["prediction_std"] = df[[name for name in df.columns if name.startswith("m_")]].std(axis=1)
-
-    # Reorganize the columns so they are in alphabetical order
-    df = df.reindex(sorted(df.columns), axis=1)
-
-    # All done, return the DataFrame
-    return df
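The last four functions in the deleted script follow the standard SageMaker inference-script contract (model_fn, input_fn, predict_fn, output_fn). A hypothetical local smoke test, sketching how a serving container would chain them; the one-row payload is an assumption, and a real request must supply every feature in feature_list:

    import os

    # Hypothetical smoke test of the deleted inference handlers above.
    # Assumes the trained m_*.json files and feature_columns.json sit in model_dir.
    model_dir = os.environ.get("SM_MODEL_DIR", "/opt/ml/model")
    models = model_fn(model_dir)  # load all m_* ensemble members

    # Build an illustrative one-row CSV payload covering every training feature
    feature_list = TEMPLATE_PARAMS["feature_list"]
    payload = ",".join(feature_list) + "\n" + ",".join(["1.0"] * len(feature_list)) + "\n"

    df_in = input_fn(payload, "text/csv")                # request body -> DataFrame
    df_out = predict_fn(df_in, models)                   # per-model columns, quantiles, mean/std
    body, content_type = output_fn(df_out, "text/csv")   # DataFrame -> response body
    print(content_type)
    print(body.splitlines()[0])  # header row of the CSV response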