workbench 0.8.156__py3-none-any.whl → 0.8.157__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,542 @@
+ # Imports for PyTorch Tabular Model
+ import os
+ import awswrangler as wr
+ import numpy as np
+
+ # PyTorch 2.6+ compatibility: pytorch-tabular saves complex objects, not just tensors
+ # Use legacy loading behavior for compatibility (recommended by PyTorch docs for this scenario)
+ os.environ["TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD"] = "1"
+ from pytorch_tabular import TabularModel
+ from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig
+ from pytorch_tabular.models import CategoryEmbeddingModelConfig, TabNetModelConfig
+
+ # Model Performance Scores
+ from sklearn.metrics import (
+     mean_absolute_error,
+     r2_score,
+     root_mean_squared_error,
+     precision_recall_fscore_support,
+     confusion_matrix,
+ )
+
+ # Classification Encoder
+ from sklearn.preprocessing import LabelEncoder
+
+ # Scikit Learn Imports
+ from sklearn.model_selection import train_test_split
+
+ from io import StringIO
+ import json
+ import argparse
+ import joblib
+ import pandas as pd
+ from typing import List, Tuple
+
+ # Template Parameters
+ TEMPLATE_PARAMS = {
+     "model_type": "{{model_type}}",
+     "target_column": "{{target_column}}",
+     "features": "{{feature_list}}",
+     "compressed_features": "{{compressed_features}}",
+     "model_metrics_s3_path": "{{model_metrics_s3_path}}",
+     "train_all_data": "{{train_all_data}}"
+ }
+
+
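The `{{...}}` placeholders above are filled in when Workbench renders this template into a generated model script. For illustration only, a hypothetical rendered result (all values invented) might look like the following; note that `feature_list` must render to a Python list, since the script later calls `features.copy()`:

# Hypothetical rendered values (illustrative, not from this package)
TEMPLATE_PARAMS = {
    "model_type": "regressor",
    "target_column": "solubility",
    "features": ["molwt", "logp", "fingerprint"],
    "compressed_features": ["fingerprint"],
    "model_metrics_s3_path": "s3://my-bucket/models/solubility/metrics",
    "train_all_data": False,
}
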
+ # Function to check if dataframe is empty
+ def check_dataframe(df: pd.DataFrame, df_name: str) -> None:
+     """
+     Check if the provided dataframe is empty and raise an exception if it is.
+
+     Args:
+         df (pd.DataFrame): DataFrame to check
+         df_name (str): Name of the DataFrame
+     """
+     if df.empty:
+         msg = f"*** The training data {df_name} has 0 rows! ***STOPPING***"
+         print(msg)
+         raise ValueError(msg)
+
+
+ def expand_proba_column(df: pd.DataFrame, class_labels: List[str]) -> pd.DataFrame:
+     """
+     Expands a column in a DataFrame containing a list of probabilities into separate columns.
+
+     Args:
+         df (pd.DataFrame): DataFrame containing a "pred_proba" column
+         class_labels (List[str]): List of class labels
+
+     Returns:
+         pd.DataFrame: DataFrame with the "pred_proba" expanded into separate columns
+     """
+
+     # Sanity check
+     proba_column = "pred_proba"
+     if proba_column not in df.columns:
+         raise ValueError('DataFrame does not contain a "pred_proba" column')
+
+     # Construct new column names with '_proba' suffix
+     proba_splits = [f"{label}_proba" for label in class_labels]
+
+     # Expand the proba_column into separate columns for each probability
+     proba_df = pd.DataFrame(df[proba_column].tolist(), columns=proba_splits)
+
+     # Drop any proba columns and reset the index in prep for the concat
+     df = df.drop(columns=[proba_column] + proba_splits, errors="ignore")
+     df = df.reset_index(drop=True)
+
+     # Concatenate the new columns with the original DataFrame
+     df = pd.concat([df, proba_df], axis=1)
+     print(df)
+     return df
+
+
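A minimal usage sketch for `expand_proba_column`, assuming the function above and a hypothetical three-class problem:

import pandas as pd

df = pd.DataFrame({"id": [1, 2], "pred_proba": [[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]]})
df = expand_proba_column(df, ["low", "med", "high"])
print(df.columns.tolist())  # ['id', 'low_proba', 'med_proba', 'high_proba']
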
+ def match_features_case_insensitive(df: pd.DataFrame, model_features: list) -> pd.DataFrame:
+     """
+     Matches and renames DataFrame columns to match model feature names (case-insensitive).
+     Prioritizes exact matches, then case-insensitive matches.
+
+     Raises ValueError if any model features cannot be matched.
+     """
+     df_columns_lower = {col.lower(): col for col in df.columns}
+     rename_dict = {}
+     missing = []
+
+     for feature in model_features:
+         if feature in df.columns:
+             continue  # Exact match
+         elif feature.lower() in df_columns_lower:
+             rename_dict[df_columns_lower[feature.lower()]] = feature
+         else:
+             missing.append(feature)
+
+     if missing:
+         raise ValueError(f"Features not found: {missing}")
+
+     return df.rename(columns=rename_dict)
+
+
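For example (hypothetical column names), mixed-case incoming columns get renamed to the model's feature names, while any feature with no exact or case-insensitive match raises a ValueError:

import pandas as pd

df = pd.DataFrame({"ID": [1], "Tacos": [2], "Count": [3]})
matched = match_features_case_insensitive(df, ["id", "tacos", "count"])
print(matched.columns.tolist())  # ['id', 'tacos', 'count']
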
+ def convert_categorical_types(df: pd.DataFrame, features: list, category_mappings=None) -> tuple:
+     """
+     Converts appropriate columns to categorical type with consistent mappings.
+
+     Args:
+         df (pd.DataFrame): The DataFrame to process.
+         features (list): List of feature names to consider for conversion.
+         category_mappings (dict, optional): Existing category mappings. If None, we're in
+             training mode. If populated, we're in inference mode.
+
+     Returns:
+         tuple: (processed DataFrame, category mappings dictionary)
+     """
+     # Training mode (default to None, not a mutable {} default, which would persist across calls)
+     if not category_mappings:
+         category_mappings = {}
+         for col in df.select_dtypes(include=["object", "string"]):
+             if col in features and df[col].nunique() < 20:
+                 print(f"Training mode: Converting {col} to category")
+                 df[col] = df[col].astype("category")
+                 category_mappings[col] = df[col].cat.categories.tolist()  # Store category mappings
+
+     # Inference mode
+     else:
+         for col, categories in category_mappings.items():
+             if col in df.columns:
+                 print(f"Inference mode: Applying categorical mapping for {col}")
+                 df[col] = pd.Categorical(df[col], categories=categories)  # Apply consistent categorical mapping
+
+     return df, category_mappings
+
+
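A short sketch of the two modes with hypothetical data: training mode discovers low-cardinality string columns and records their categories; inference mode re-applies the stored mapping, so values unseen at training time become NaN:

import pandas as pd

train_df = pd.DataFrame({"color": ["red", "blue", "red"], "size": [1, 2, 3]})
train_df, mappings = convert_categorical_types(train_df, ["color", "size"])
print(mappings)  # {'color': ['blue', 'red']}

infer_df = pd.DataFrame({"color": ["red", "green"], "size": [4, 5]})
infer_df, _ = convert_categorical_types(infer_df, ["color", "size"], mappings)
print(infer_df["color"].tolist())  # ['red', nan]
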
+ def decompress_features(df: pd.DataFrame, features: List[str], compressed_features: List[str]) -> Tuple[pd.DataFrame, List[str]]:
+     """Prepare features for the model
+
+     Args:
+         df (pd.DataFrame): The features DataFrame
+         features (List[str]): Full list of feature names
+         compressed_features (List[str]): List of feature names to decompress (bitstrings)
+
+     Returns:
+         pd.DataFrame: DataFrame with the decompressed features
+         List[str]: Updated list of feature names after decompression
+     """
+
+     # Check for any missing values in the required features (warn only; we don't raise here)
+     missing_counts = df[features].isna().sum()
+     if missing_counts.any():
+         missing_features = missing_counts[missing_counts > 0]
+         print(
+             f"WARNING: Found missing values in features: {missing_features.to_dict()}. "
+             "WARNING: You might want to remove/replace all NaN values before processing."
+         )
+
+     # Decompress the specified compressed features (copy so we don't mutate the caller's list)
+     decompressed_features = features.copy()
+     for feature in compressed_features:
+         if (feature not in df.columns) or (feature not in features):
+             print(f"Feature '{feature}' not in the features list, skipping decompression.")
+             continue
+
+         # Remove the feature from the list of features to avoid duplication
+         decompressed_features.remove(feature)
+
+         # Handle all compressed features as bitstrings
+         bit_matrix = np.array([list(bitstring) for bitstring in df[feature]], dtype=np.uint8)
+         prefix = feature[:3]
+
+         # Create all new columns at once - avoids fragmentation
+         new_col_names = [f"{prefix}_{i}" for i in range(bit_matrix.shape[1])]
+         new_df = pd.DataFrame(bit_matrix, columns=new_col_names, index=df.index)
+
+         # Add to features list
+         decompressed_features.extend(new_col_names)
+
+         # Drop original column and concatenate new ones
+         df = df.drop(columns=[feature])
+         df = pd.concat([df, new_df], axis=1)
+
+     return df, decompressed_features
+
+
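For illustration, a hypothetical 4-bit fingerprint column expands into four `fin_*` columns (the prefix is the first three characters of the feature name), and the feature list is updated to match:

import pandas as pd

df = pd.DataFrame({"fingerprint": ["1011", "0110"], "logp": [1.2, 3.4]})
df, feats = decompress_features(df, ["fingerprint", "logp"], ["fingerprint"])
print(feats)                 # ['logp', 'fin_0', 'fin_1', 'fin_2', 'fin_3']
print(df["fin_0"].tolist())  # [1, 0]
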
+ if __name__ == "__main__":
+     """The main function is for training the PyTorch Tabular model"""
+
+     # Harness Template Parameters
+     target = TEMPLATE_PARAMS["target_column"]
+     features = TEMPLATE_PARAMS["features"]
+     orig_features = features.copy()
+     compressed_features = TEMPLATE_PARAMS["compressed_features"]
+     model_type = TEMPLATE_PARAMS["model_type"]
+     model_metrics_s3_path = TEMPLATE_PARAMS["model_metrics_s3_path"]
+     train_all_data = TEMPLATE_PARAMS["train_all_data"]
+     validation_split = 0.2
+
+     # Script arguments for input/output directories
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model-dir", type=str, default=os.environ.get("SM_MODEL_DIR", "/opt/ml/model"))
+     parser.add_argument("--train", type=str, default=os.environ.get("SM_CHANNEL_TRAIN", "/opt/ml/input/data/train"))
+     parser.add_argument(
+         "--output-data-dir", type=str, default=os.environ.get("SM_OUTPUT_DATA_DIR", "/opt/ml/output/data")
+     )
+     args = parser.parse_args()
+
+     # Read the training data into DataFrames
+     training_files = [
+         os.path.join(args.train, file)
+         for file in os.listdir(args.train)
+         if file.endswith(".csv")
+     ]
+     print(f"Training Files: {training_files}")
+
+     # Combine files and read them all into a single pandas dataframe
+     all_df = pd.concat([pd.read_csv(file, engine="python") for file in training_files])
+
+     # Check if the dataframe is empty
+     check_dataframe(all_df, "training_df")
+
+     # Features/Target output
+     print(f"Target: {target}")
+     print(f"Features: {str(features)}")
+
+     # Convert any features that might be categorical to 'category' type
+     all_df, category_mappings = convert_categorical_types(all_df, features)
+
+     # If we have compressed features, decompress them
+     if compressed_features:
+         print(f"Decompressing features {compressed_features}...")
+         all_df, features = decompress_features(all_df, features, compressed_features)
+
+     # Do we want to train on all the data?
+     if train_all_data:
+         print("Training on ALL of the data")
+         df_train = all_df.copy()
+         df_val = all_df.copy()
+
+     # Does the dataframe have a training column?
+     elif "training" in all_df.columns:
+         print("Found training column, splitting data based on training column")
+         df_train = all_df[all_df["training"]]
+         df_val = all_df[~all_df["training"]]
+     else:
+         # Just do a random training split
+         print("WARNING: No training column found, splitting data with random state=42")
+         df_train, df_val = train_test_split(
+             all_df, test_size=validation_split, random_state=42
+         )
+     print(f"FIT/TRAIN: {df_train.shape}")
+     print(f"VALIDATION: {df_val.shape}")
+
+     # Determine categorical and continuous columns
+     categorical_cols = [col for col in features if df_train[col].dtype.name == "category"]
+     continuous_cols = [col for col in features if col not in categorical_cols]
+
+     print(f"Categorical columns: {categorical_cols}")
+     print(f"Continuous columns: {continuous_cols}")
+
+     # Set up PyTorch Tabular configuration
+     data_config = DataConfig(
+         target=[target],
+         continuous_cols=continuous_cols,
+         categorical_cols=categorical_cols,
+     )
+
+     trainer_config = TrainerConfig(
+         auto_lr_find=True,
+         batch_size=1024,
+         max_epochs=100,
+         early_stopping="valid_loss",
+         early_stopping_patience=20,
+         progress_bar="none",
+     )
+
+     optimizer_config = OptimizerConfig()
+
+     # Choose model configuration based on model type
+     if model_type == "classifier":
+         task = "classification"
+         # Use TabNet for classification
+         model_config = TabNetModelConfig(
+             task=task,
+             learning_rate=1e-3,
+         )
+
+         # Encode the target column
+         label_encoder = LabelEncoder()
+         df_train[target] = label_encoder.fit_transform(df_train[target])
+         df_val[target] = label_encoder.transform(df_val[target])
+
+     else:
+         task = "regression"
+         # Use CategoryEmbedding for regression
+         model_config = CategoryEmbeddingModelConfig(
+             task=task,
+             layers="1024-512-512",
+             activation="ReLU",
+             learning_rate=1e-3,
+         )
+         label_encoder = None  # We don't need this for regression
+
+     # Create and train the TabularModel
+     tabular_model = TabularModel(
+         data_config=data_config,
+         model_config=model_config,
+         optimizer_config=optimizer_config,
+         trainer_config=trainer_config,
+     )
+
+     # Train the model
+     tabular_model.fit(train=df_train, validation=df_val)
+
+     # Make Predictions on the Validation Set
+     print("Making Predictions on Validation Set...")
+     result = tabular_model.predict(df_val)
+
+     # For regression: pytorch-tabular returns predictions using the target column name
+     # For classification: pytorch-tabular returns predictions using the "prediction" column
+     if model_type == "classifier":
+         preds = result["prediction"].values
+     else:
+         # Regression: use the target column name
+         preds = result[target].values
+
+     if model_type == "classifier":
+         # Get probabilities for classification
+         print("Processing Probabilities...")
+         prob_cols = [col for col in result.columns if col.endswith("_probability")]
+         if prob_cols:
+             probs = result[prob_cols].values
+             df_val["pred_proba"] = [p.tolist() for p in probs]
+
+             # Expand the pred_proba column into separate columns for each class
+             print(df_val.columns)
+             df_val = expand_proba_column(df_val, label_encoder.classes_)
+             print(df_val.columns)
+
+         # Decode the target and prediction labels
+         y_validate = label_encoder.inverse_transform(df_val[target])
+         preds = label_encoder.inverse_transform(preds.astype(int))
+     else:
+         y_validate = df_val[target].values
+
+     # Save predictions to S3 (just the target, prediction, and '_proba' columns)
+     df_val["prediction"] = preds
+     output_columns = [target, "prediction"]
+     output_columns += [col for col in df_val.columns if col.endswith("_proba")]
+     wr.s3.to_csv(
+         df_val[output_columns],
+         path=f"{model_metrics_s3_path}/validation_predictions.csv",
+         index=False,
+     )
+
+     # Report Performance Metrics
+     if model_type == "classifier":
+         # Get the label names and their integer mapping
+         label_names = label_encoder.classes_
+
+         # Calculate various model performance metrics
+         scores = precision_recall_fscore_support(
+             y_validate, preds, average=None, labels=label_names
+         )
+
+         # Put the scores into a dataframe
+         score_df = pd.DataFrame(
+             {
+                 target: label_names,
+                 "precision": scores[0],
+                 "recall": scores[1],
+                 "fscore": scores[2],
+                 "support": scores[3],
+             }
+         )
+
+         # We need to get creative with the Classification Metrics
+         metrics = ["precision", "recall", "fscore", "support"]
+         for t in label_names:
+             for m in metrics:
+                 value = score_df.loc[score_df[target] == t, m].iloc[0]
+                 print(f"Metrics:{t}:{m} {value}")
+
+         # Compute and output the confusion matrix
+         conf_mtx = confusion_matrix(y_validate, preds, labels=label_names)
+         for i, row_name in enumerate(label_names):
+             for j, col_name in enumerate(label_names):
+                 value = conf_mtx[i, j]
+                 print(f"ConfusionMatrix:{row_name}:{col_name} {value}")
+
+     else:
+         # Calculate various model performance metrics (regression)
+         rmse = root_mean_squared_error(y_validate, preds)
+         mae = mean_absolute_error(y_validate, preds)
+         r2 = r2_score(y_validate, preds)
+         print(f"RMSE: {rmse:.3f}")
+         print(f"MAE: {mae:.3f}")
+         print(f"R2: {r2:.3f}")
+         print(f"NumRows: {len(df_val)}")
+
+     # Save the model to the standard place/name
+     tabular_model.save_model(os.path.join(args.model_dir, "tabular_model"))
+     if label_encoder:
+         joblib.dump(label_encoder, os.path.join(args.model_dir, "label_encoder.joblib"))
+
+     # Save the features (this will validate input during predictions)
+     with open(os.path.join(args.model_dir, "feature_columns.json"), "w") as fp:
+         json.dump(orig_features, fp)  # We save the original features, not the decompressed ones
+
+     # Save the category mappings
+     with open(os.path.join(args.model_dir, "category_mappings.json"), "w") as fp:
+         json.dump(category_mappings, fp)
+
+
+ def model_fn(model_dir):
+     """Deserialize and return fitted PyTorch Tabular model"""
+     model_path = os.path.join(model_dir, "tabular_model")
+     model = TabularModel.load_model(model_path)
+     return model
+
+
+ def input_fn(input_data, content_type):
+     """Parse input data and return a DataFrame."""
+     if not input_data:
+         raise ValueError("Empty input data is not supported!")
+
+     # Decode bytes to string if necessary
+     if isinstance(input_data, bytes):
+         input_data = input_data.decode("utf-8")
+
+     if "text/csv" in content_type:
+         return pd.read_csv(StringIO(input_data))
+     elif "application/json" in content_type:
+         return pd.DataFrame(json.loads(input_data))  # Assumes JSON array of records
+     else:
+         raise ValueError(f"{content_type} not supported!")
+
+
+ def output_fn(output_df, accept_type):
+     """Supports both CSV and JSON output formats."""
+     if "text/csv" in accept_type:
+         csv_output = output_df.fillna("N/A").to_csv(index=False)  # CSV with N/A for missing values
+         return csv_output, "text/csv"
+     elif "application/json" in accept_type:
+         return output_df.to_json(orient="records"), "application/json"  # JSON array of records (NaNs -> null)
+     else:
+         raise RuntimeError(f"{accept_type} accept type is not supported by this script.")
+
+
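A quick round-trip sketch of the two serialization handlers above, with a hypothetical CSV payload:

payload = b"id,tacos\n1,2\n"
df_in = input_fn(payload, "text/csv")      # bytes -> DataFrame
body, mime = output_fn(df_in, "text/csv")  # DataFrame -> CSV response body
print(mime)  # text/csv
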
+ def predict_fn(df, model) -> pd.DataFrame:
+     """Make Predictions with our PyTorch Tabular Model
+
+     Args:
+         df (pd.DataFrame): The input DataFrame
+         model: The TabularModel used for predictions
+
+     Returns:
+         pd.DataFrame: The DataFrame with the predictions added
+     """
+     compressed_features = TEMPLATE_PARAMS["compressed_features"]
+
+     # Grab our feature columns (from training)
+     model_dir = os.environ.get("SM_MODEL_DIR", "/opt/ml/model")
+     with open(os.path.join(model_dir, "feature_columns.json")) as fp:
+         features = json.load(fp)
+     print(f"Model Features: {features}")
+
+     # Load the category mappings (from training)
+     with open(os.path.join(model_dir, "category_mappings.json")) as fp:
+         category_mappings = json.load(fp)
+
+     # Load our Label Encoder if we have one
+     label_encoder = None
+     if os.path.exists(os.path.join(model_dir, "label_encoder.joblib")):
+         label_encoder = joblib.load(os.path.join(model_dir, "label_encoder.joblib"))
+
+     # We're going to match features in a case-insensitive manner, accounting for all the permutations
+     # - Model has a feature list that's any case ("Id", "taCos", "cOunT", "likes_tacos")
+     # - Incoming data has columns that are mixed case ("ID", "Tacos", "Count", "Likes_Tacos")
+     matched_df = match_features_case_insensitive(df, features)
+
+     # Detect categorical types in the incoming DataFrame
+     matched_df, _ = convert_categorical_types(matched_df, features, category_mappings)
+
+     # If we have compressed features, decompress them
+     if compressed_features:
+         print("Decompressing features for prediction...")
+         matched_df, features = decompress_features(matched_df, features, compressed_features)
+
+     # Make predictions using the TabularModel
+     result = model.predict(matched_df)
+
+     # Extract predictions based on model type
+     # For regression: pytorch-tabular uses the target column name
+     # For classification: pytorch-tabular uses the "prediction" column
+     if "prediction" in result.columns:
+         predictions = result["prediction"].values
+     else:
+         # For regression, find the new column (not in the original dataframe)
+         pred_cols = [col for col in result.columns if col not in matched_df.columns]
+         if pred_cols:
+             predictions = result[pred_cols[0]].values
+         else:
+             raise ValueError(f"Cannot find prediction column in: {result.columns.tolist()}")
+
+     # If we have a label encoder, decode the predictions
+     if label_encoder:
+         predictions = label_encoder.inverse_transform(predictions.astype(int))
+
+     # Set the predictions on the DataFrame
+     df["prediction"] = predictions
+
+     # For classification, get probabilities
+     if label_encoder is not None:
+         prob_cols = [col for col in result.columns if col.endswith("_probability")]
+         if prob_cols:
+             probs = result[prob_cols].values
+             df["pred_proba"] = [p.tolist() for p in probs]
+
+             # Expand the pred_proba column into separate columns for each class
+             df = expand_proba_column(df, label_encoder.classes_)
+
+     # All done, return the DataFrame with new columns for the predictions
+     return df
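Taken together, the four handlers follow the usual SageMaker inference-toolkit convention: `model_fn` is called once at container startup, and each request then flows through `input_fn` -> `predict_fn` -> `output_fn`. A rough sketch of that wiring (hypothetical columns, not how the container literally invokes it):

model = model_fn("/opt/ml/model")
df = input_fn(b"molwt,logp\n180.2,1.3\n", "text/csv")
df = predict_fn(df, model)
response, content_type = output_fn(df, "application/json")
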
@@ -0,0 +1,4 @@
+ # Note: PyTorch-Tabular is not included in the default inference image, so it must be specified here.
+ # PyTorch-Tabular also implicitly requires requests
+ requests
+ pytorch-tabular
@@ -91,6 +91,8 @@ class DashboardStatus(PluginInterface):
 
      # Key packages relevant to Workbench
      key_packages = [
+         "workbench",
+         "workbench-bridges",
          "boto3",
          "botocore",
          "pandas",
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: workbench
- Version: 0.8.156
+ Version: 0.8.157
  Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
  Author-email: SuperCowPowers LLC <support@supercowpowers.com>
  License-Expression: MIT
@@ -59,8 +59,8 @@ workbench/core/artifacts/model_core.py,sha256=WjiZyqwUVeVZF22FO8zXFoHmgh-Vd6c_KQ
  workbench/core/artifacts/monitor_core.py,sha256=BP6UuCyBI4zB2wwcIXvUw6RC0EktTcQd5Rv0x73qzio,37670
  workbench/core/cloud_platform/cloud_meta.py,sha256=J3qqMLdzikRRI9TC7UEk9Ib2-64ovU8X-lzLxhze1uY,8669
  workbench/core/cloud_platform/aws/README.md,sha256=QT5IQXoUHbIA0qQ2wO6_2P2lYjYQFVYuezc22mWY4i8,97
- workbench/core/cloud_platform/aws/aws_account_clamp.py,sha256=YRZ_DufSNfuKgW7q9oanSwqlhZecbnapNW-twBajUX8,5683
- workbench/core/cloud_platform/aws/aws_df_store.py,sha256=Tdgg8QThmjWQs5gXgjK7D34qaQKe7MsaO5s8fD133F0,15043
+ workbench/core/cloud_platform/aws/aws_account_clamp.py,sha256=OzFknZXKW7VTvnDGGX4BXKoh0i1gQ7yaEBhkLCyHFSs,6310
+ workbench/core/cloud_platform/aws/aws_df_store.py,sha256=utRIlTCPwFneHHZ8_Z3Hw3rOJSeryiFA4wBtucxULRQ,15055
  workbench/core/cloud_platform/aws/aws_graph_store.py,sha256=ytYxQTplUmeWbsPmxyZbf6mO9qyTl60ewlJG8MyfyEY,9414
  workbench/core/cloud_platform/aws/aws_meta.py,sha256=ijbzucUF948RyD-GgYYyxl-Ejvkv5Z6gqB_g497ss7M,33932
  workbench/core/cloud_platform/aws/aws_parameter_store.py,sha256=9ekuMOQFHFMIEV68UbHhS_fLB9iqG5Hvu4EV6iamEpk,10400
@@ -101,7 +101,7 @@ workbench/core/transforms/features_to_features/__init__.py,sha256=47DEQpj8HBSa-_
  workbench/core/transforms/features_to_features/heavy/emr/Readme.md,sha256=YtQgCEQeKe0CQXQkhzMTYq9xOtCsCYb5P5LW2BmRKWQ,68
  workbench/core/transforms/features_to_features/heavy/glue/Readme.md,sha256=TuyCatWfoDr99zUwvOcxf-TqMkQzaMqXlj5nmFcRzfo,48
  workbench/core/transforms/features_to_model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- workbench/core/transforms/features_to_model/features_to_model.py,sha256=qtqfiN4a35TqxP_Sxfu90zrbNMA3r-q0F13Yr0VaIbM,19162
+ workbench/core/transforms/features_to_model/features_to_model.py,sha256=xAOcbcm2f9iumCCM_T8OTdzRkl3ZLTzyGRdv9ImmnqM,19333
  workbench/core/transforms/model_to_endpoint/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  workbench/core/transforms/model_to_endpoint/model_to_endpoint.py,sha256=5IBhA56qSDSnfh4Xwvb2PP5UDM7md2R7Ur38hP4Mgyw,4624
  workbench/core/transforms/pandas_transforms/__init__.py,sha256=xL4MT8-fZ1SFqDbTLc8XyxjupHtB1YR6Ej0AC2nwd7I,894
@@ -130,6 +130,10 @@ workbench/model_scripts/custom_models/chem_info/requirements.txt,sha256=7HBUzvNi
  workbench/model_scripts/custom_models/chem_info/tautomerize.py,sha256=KAxTAqtTql4_FvnrAyYRgaJEmtAx399HXA_iw_awa08,3125
  workbench/model_scripts/custom_models/meta_endpoints/example.py,sha256=hzOAuLhIGB8vei-555ruNxpsE1GhuByHGjGB0zw8GSs,1726
  workbench/model_scripts/custom_models/network_security/Readme.md,sha256=Z2gtiu0hLHvEJ1x-_oFq3qJZcsK81sceBAGAGltpqQ8,222
+ workbench/model_scripts/custom_models/nn_models/Readme.md,sha256=x6U_gox2yV-kLspgmzE77t2xk5GFNgcpcuQq-Q78G7w,146
+ workbench/model_scripts/custom_models/nn_models/generated_model_script.py,sha256=PsowW-kosyN2wWhuDgP_41mBYscUDYKOhzzQFVUKBzc,20695
+ workbench/model_scripts/custom_models/nn_models/pytorch.template,sha256=-gd0FbDW1ilTo1WcwiOLVZPAyTQ09naVlnSXxrVxKYU,20422
+ workbench/model_scripts/custom_models/nn_models/requirements.txt,sha256=sC6v1LSBkwJFbvObn6DUD1HwPM86_rbmXEMH5Tcn2kM,184
  workbench/model_scripts/custom_models/proximity/Readme.md,sha256=RlMFAJZgAT2mCgDk-UwR_R0Y_NbCqeI5-8DUsxsbpWQ,289
  workbench/model_scripts/custom_models/proximity/feature_space_proximity.template,sha256=2c3eDu4sLP_bCTBAf_aIR1QdC7CpYDpXo8UU_2ZoLuE,4833
  workbench/model_scripts/custom_models/proximity/generated_model_script.py,sha256=RdbKbXtrSNYQJvB-oLcRHpJ6w0TM7zbmMfuocHb7GM0,7967
@@ -142,7 +146,6 @@ workbench/model_scripts/custom_models/uq_models/gaussian_process.template,sha256
  workbench/model_scripts/custom_models/uq_models/generated_model_script.py,sha256=l74VibzFnhmPeNUEiFwIIg5aNujcCs9LtRywUvc5Avo,14528
  workbench/model_scripts/custom_models/uq_models/mapie_xgb.template,sha256=ZTmerwkmXtewJwx3GGJSdLRyzJV5SJ86PvCu3dV_GHw,7330
  workbench/model_scripts/custom_models/uq_models/meta_uq.template,sha256=26FNangcpyV9nFOIufRuVZ45BQv6oPf9xlJZkVIULG4,9287
- workbench/model_scripts/custom_models/uq_models/meta_uq_experiment.template,sha256=2MT6-jzhmz69DuQCXAmMXoKRkBruto1m92LDsG8vdVI,14326
  workbench/model_scripts/custom_models/uq_models/ngboost.template,sha256=N-eWP967-X2Qbvk18VL7LPXRJMKne9SS2fb_jntwTec,7738
  workbench/model_scripts/custom_models/uq_models/proximity.py,sha256=zqmNlX70LnWXr5fdtFFQppSNTLjlOciQVrjGr-g9jRE,13716
  workbench/model_scripts/custom_models/uq_models/requirements.txt,sha256=jfwV5b1t6BFtdaRGrSz8LnuQzJm-4V5OlhhP-4CGxhs,107
@@ -252,7 +255,7 @@ workbench/web_interface/components/experiments/dashboard_metric_plots.py,sha256=
  workbench/web_interface/components/experiments/outlier_plot.py,sha256=5bWsmJEXyt50npeQxLHXCPtiq4WRVgg938Sl0DVjNWg,3647
  workbench/web_interface/components/plugins/ag_table.py,sha256=HrPOMotlOGigk0v8Cxx_doSHXdOKTT1-bzlsqDwwzng,3979
  workbench/web_interface/components/plugins/confusion_matrix.py,sha256=1K94JSlDwQwdf5uDYVydQzY-EQm89hYXchxbXoNvons,7176
- workbench/web_interface/components/plugins/dashboard_status.py,sha256=KWQW6EArOXhrUwZ0mMZydQk6nOpIoU6ftP4a0goN-Ew,5690
+ workbench/web_interface/components/plugins/dashboard_status.py,sha256=8Tu38lR5YgntxDjz_x2XfLiW7SOdreNLOFT5VkbYzKo,5748
  workbench/web_interface/components/plugins/data_details.py,sha256=pZm1AbM_0EXQwx77qUkfyrU9MedAs4Wlkp6iOtSrUtI,11104
  workbench/web_interface/components/plugins/endpoint_details.py,sha256=0A7g_Lx5-3XnDWOGT3YEDPNpmME_-WfYc65f-rRVjJE,3769
  workbench/web_interface/components/plugins/generated_compounds.py,sha256=hC0sh-1_rbN55Huno-E_2wF37kgIHi5Mtaer6Xk5fRM,8052
@@ -272,9 +275,9 @@ workbench/web_interface/page_views/main_page.py,sha256=QIGiQPXu5Q9TU15nPEAYa6sjQ
  workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
  workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
  workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
- workbench-0.8.156.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
- workbench-0.8.156.dist-info/METADATA,sha256=29854fjJPyoP6gHfmAt9DQFAI_KuS72Cjh2q133azrk,9497
- workbench-0.8.156.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- workbench-0.8.156.dist-info/entry_points.txt,sha256=oZykkheWiiIBjRE8cS5SdcxwmZKSFaQEGwMBjNh-eNM,238
- workbench-0.8.156.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
- workbench-0.8.156.dist-info/RECORD,,
+ workbench-0.8.157.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+ workbench-0.8.157.dist-info/METADATA,sha256=F0BA7wvUuJGEBJ-uj84XE6p_e8zM98UVjniG_rP56hE,9497
+ workbench-0.8.157.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ workbench-0.8.157.dist-info/entry_points.txt,sha256=oZykkheWiiIBjRE8cS5SdcxwmZKSFaQEGwMBjNh-eNM,238
+ workbench-0.8.157.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+ workbench-0.8.157.dist-info/RECORD,,