workbench-0.8.179-py3-none-any.whl → workbench-0.8.181-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- workbench/core/artifacts/endpoint_core.py +63 -4
- workbench/model_scripts/custom_models/uq_models/generated_model_script.py +5 -6
- workbench/model_scripts/custom_models/uq_models/mapie.template +3 -4
- workbench/model_scripts/xgb_model/xgb_model.template +2 -2
- workbench/utils/model_utils.py +25 -30
- workbench/utils/shap_utils.py +10 -2
- {workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/METADATA +1 -1
- {workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/RECORD +12 -12
- {workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/WHEEL +0 -0
- {workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/entry_points.txt +0 -0
- {workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/licenses/LICENSE +0 -0
- {workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/top_level.txt +0 -0
workbench/core/artifacts/endpoint_core.py
CHANGED

@@ -429,8 +429,6 @@ class EndpointCore(Artifact):
         # For UQ Models we also capture the uncertainty metrics
         if model_type in [ModelType.UQ_REGRESSOR]:
             metrics = uq_metrics(prediction_df, target_column)
-
-            # Now put into the Parameter Store Model Inference Namespace
             self.param_store.upsert(f"/workbench/models/{model.name}/inference/{capture_name}", metrics)

         # Return the prediction DataFrame
@@ -453,6 +451,55 @@ class EndpointCore(Artifact):
         cross_fold_metrics, out_of_fold_df = cross_fold_inference(model, nfolds=nfolds)
         if cross_fold_metrics:
             self.param_store.upsert(f"/workbench/models/{model.name}/inference/cross_fold", cross_fold_metrics)
+
+        # Capture the results
+        capture_name = "full_cross_fold"
+        description = capture_name.replace("_", " ").title()
+        target_column = model.target()
+        model_type = model.model_type
+
+        # Get the id_column from the model's FeatureSet
+        fs = FeatureSetCore(model.get_input())
+        id_column = fs.id_column
+
+        # Is this a UQ Model? If so, run full inference and merge the results
+        additional_columns = []
+        if model_type == ModelType.UQ_REGRESSOR:
+            self.log.important("UQ Regressor detected, running full inference to get uncertainty estimates...")
+
+            # Get the training view dataframe for inference
+            training_df = fs.view("training").pull_dataframe()
+
+            # Run inference on the endpoint to get UQ outputs
+            full_inference_df = self.inference(training_df)
+
+            # Identify UQ-specific columns (quantiles and prediction_std)
+            uq_columns = [col for col in full_inference_df.columns if col.startswith("q_") or col == "prediction_std"]
+
+            # Merge UQ columns with out-of-fold predictions
+            if uq_columns:
+                # Keep id_column and UQ columns, drop 'prediction' to avoid conflict
+                merge_columns = [id_column] + uq_columns
+                uq_df = full_inference_df[merge_columns]
+
+                out_of_fold_df = pd.merge(out_of_fold_df, uq_df, on=id_column, how="left")
+                additional_columns = uq_columns
+                self.log.info(f"Added UQ columns: {', '.join(additional_columns)}")
+
+            # Also compute UQ metrics
+            metrics = uq_metrics(out_of_fold_df, target_column)
+            self.param_store.upsert(f"/workbench/models/{model.name}/inference/{capture_name}", metrics)
+
+        self._capture_inference_results(
+            capture_name,
+            out_of_fold_df,
+            target_column,
+            model_type,
+            pd.DataFrame([cross_fold_metrics["summary_metrics"]]),
+            description,
+            features=additional_columns,
+            id_column=id_column,
+        )
         return cross_fold_metrics, out_of_fold_df

     def fast_inference(self, eval_df: pd.DataFrame, threads: int = 4) -> pd.DataFrame:
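The new cross-fold block left-joins the endpoint's UQ columns (quantiles like `q_025`/`q_975` plus `prediction_std`) onto the out-of-fold predictions, keyed on the FeatureSet's id column. A minimal standalone sketch of that merge pattern, with column names and data invented for illustration:

```python
import pandas as pd

# Out-of-fold predictions from cross-fold inference (illustrative data)
out_of_fold_df = pd.DataFrame(
    {"id": [1, 2, 3], "solubility": [1.0, 2.0, 3.0], "prediction": [1.1, 1.9, 3.2]}
)

# Full-inference results from the endpoint, which also carry UQ columns
full_inference_df = pd.DataFrame(
    {
        "id": [1, 2, 3],
        "prediction": [1.0, 2.1, 3.1],
        "q_025": [0.5, 1.4, 2.6],
        "q_975": [1.7, 2.6, 3.8],
        "prediction_std": [0.30, 0.28, 0.31],
    }
)

# Keep only the id and UQ columns; leaving out 'prediction' avoids a column clash
uq_columns = [c for c in full_inference_df.columns if c.startswith("q_") or c == "prediction_std"]
uq_df = full_inference_df[["id"] + uq_columns]

merged = pd.merge(out_of_fold_df, uq_df, on="id", how="left")
print(merged.columns.tolist())
# ['id', 'solubility', 'prediction', 'q_025', 'q_975', 'prediction_std']
```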
@@ -648,6 +695,10 @@ class EndpointCore(Artifact):
     @staticmethod
     def _hash_dataframe(df: pd.DataFrame, hash_length: int = 8):
         # Internal: Compute a data hash for the dataframe
+        if df.empty:
+            return "--hash--"
+
+        # Sort the dataframe by columns to ensure consistent ordering
         df = df.copy()
         df = df.sort_values(by=sorted(df.columns.tolist()))
         row_hashes = pd.util.hash_pandas_object(df, index=False)
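The new `df.empty` guard short-circuits before the sort-and-hash work, mapping empty frames to a fixed sentinel. The tail of `_hash_dataframe` is not shown in this diff, so the final digest step below is an assumption; a sketch of the overall approach:

```python
import hashlib

import pandas as pd

def hash_dataframe(df: pd.DataFrame, hash_length: int = 8) -> str:
    # Empty frames short-circuit to a fixed sentinel (as in the diff above)
    if df.empty:
        return "--hash--"

    # Sort rows by all columns so row order doesn't change the digest
    df = df.copy()
    df = df.sort_values(by=sorted(df.columns.tolist()))

    # Stable, index-independent per-row hashes
    row_hashes = pd.util.hash_pandas_object(df, index=False)

    # Assumed final step: fold the row hashes into one short hex digest
    return hashlib.sha256(row_hashes.values.tobytes()).hexdigest()[:hash_length]

print(hash_dataframe(pd.DataFrame({"a": [1, 2], "b": [3, 4]})))  # short hex digest
print(hash_dataframe(pd.DataFrame()))  # '--hash--'
```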
@@ -702,8 +753,8 @@ class EndpointCore(Artifact):
         wr.s3.to_csv(metrics, f"{inference_capture_path}/inference_metrics.csv", index=False)

         # Grab the target column, prediction column, any _proba columns, and the ID column (if present)
-
-        output_columns
+        output_columns = [target_column]
+        output_columns += [col for col in pred_results_df.columns if "prediction" in col]

         # Add any _proba columns to the output columns
         output_columns += [col for col in pred_results_df.columns if col.endswith("_proba")]
@@ -1134,6 +1185,10 @@ if __name__ == "__main__":
     # Run predictions using the fast_inference method
     fast_results = my_endpoint.fast_inference(my_eval_df)

+    # Test the cross_fold_inference method
+    print("Running Cross-Fold Inference...")
+    metrics, all_results = my_endpoint.cross_fold_inference()
+
     # Run Inference and metrics for a Classification Endpoint
     class_endpoint = EndpointCore("wine-classification")
     auto_predictions = class_endpoint.auto_inference()
@@ -1142,6 +1197,10 @@ if __name__ == "__main__":
     target = "wine_class"
     print(class_endpoint.generate_confusion_matrix(target, auto_predictions))

+    # Test the cross_fold_inference method
+    print("Running Cross-Fold Inference...")
+    metrics, all_results = class_endpoint.cross_fold_inference()
+
     # Test the class method delete (commented out for now)
     # from workbench.api import Model
     # model = Model("abalone-regression")
workbench/model_scripts/custom_models/uq_models/generated_model_script.py
CHANGED

@@ -21,8 +21,8 @@ TEMPLATE_PARAMS = {
     "target": "solubility",
     "features": ['molwt', 'mollogp', 'molmr', 'heavyatomcount', 'numhacceptors', 'numhdonors', 'numheteroatoms', 'numrotatablebonds', 'numvalenceelectrons', 'numaromaticrings', 'numsaturatedrings', 'numaliphaticrings', 'ringcount', 'tpsa', 'labuteasa', 'balabanj', 'bertzct'],
     "compressed_features": [],
-    "train_all_data":
-    "hyperparameters": {
+    "train_all_data": True,
+    "hyperparameters": {},
 }

@@ -303,7 +303,7 @@ if __name__ == "__main__":
     print(f" {conf_level * 100:.0f}% CI: Mean width={np.mean(widths):.3f}, Std={np.std(widths):.3f}")

     # Save the trained XGBoost model
-
+    joblib.dump(xgb_model, os.path.join(args.model_dir, "xgb_model.joblib"))

     # Save all MAPIE models
     for model_name, model in mapie_models.items():
@@ -349,9 +349,8 @@ def model_fn(model_dir) -> dict:
         config = json.load(fp)

     # Load XGBoost regressor
-    xgb_path = os.path.join(model_dir, "xgb_model.
-    xgb_model =
-    xgb_model.load_model(xgb_path)
+    xgb_path = os.path.join(model_dir, "xgb_model.joblib")
+    xgb_model = joblib.load(xgb_path)

     # Load all MAPIE models
     mapie_models = {}
workbench/model_scripts/custom_models/uq_models/mapie.template
CHANGED

@@ -303,7 +303,7 @@ if __name__ == "__main__":
     print(f" {conf_level * 100:.0f}% CI: Mean width={np.mean(widths):.3f}, Std={np.std(widths):.3f}")

     # Save the trained XGBoost model
-
+    joblib.dump(xgb_model, os.path.join(args.model_dir, "xgb_model.joblib"))

     # Save all MAPIE models
     for model_name, model in mapie_models.items():
@@ -349,9 +349,8 @@ def model_fn(model_dir) -> dict:
         config = json.load(fp)

     # Load XGBoost regressor
-    xgb_path = os.path.join(model_dir, "xgb_model.
-    xgb_model =
-    xgb_model.load_model(xgb_path)
+    xgb_path = os.path.join(model_dir, "xgb_model.joblib")
+    xgb_model = joblib.load(xgb_path)

     # Load all MAPIE models
     mapie_models = {}
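Both templates switch the XGBoost model from its native save format to a joblib round-trip of the whole sklearn wrapper, so `model_fn` gets back a ready-to-use fitted estimator instead of reconstructing one and calling `load_model`. A minimal sketch of the new save/load pattern; the path and training data here are invented for illustration:

```python
import os

import joblib
import numpy as np
from xgboost import XGBRegressor

model_dir = "/tmp/model_dir"  # illustrative stand-in for args.model_dir
os.makedirs(model_dir, exist_ok=True)

# Train a tiny regressor on synthetic data
X = np.random.rand(100, 4)
y = X @ np.array([1.0, -2.0, 0.5, 0.0])
xgb_model = XGBRegressor(n_estimators=20).fit(X, y)

# Training side: dump the full sklearn wrapper, not just the booster
joblib.dump(xgb_model, os.path.join(model_dir, "xgb_model.joblib"))

# model_fn side: a single call restores the fitted wrapper
restored = joblib.load(os.path.join(model_dir, "xgb_model.joblib"))
print(np.allclose(xgb_model.predict(X), restored.predict(X)))  # True
```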
workbench/model_scripts/xgb_model/xgb_model.template
CHANGED

@@ -145,7 +145,7 @@ def convert_categorical_types(df: pd.DataFrame, features: list, category_mapping
 def decompress_features(
     df: pd.DataFrame, features: List[str], compressed_features: List[str]
 ) -> Tuple[pd.DataFrame, List[str]]:
-    """Prepare features for the model
+    """Prepare features for the model by decompressing bitstring features

     Args:
         df (pd.DataFrame): The features DataFrame
@@ -170,7 +170,7 @@ def decompress_features(
     )

     # Decompress the specified compressed features
-    decompressed_features = features
+    decompressed_features = features.copy()
     for feature in compressed_features:
         if (feature not in df.columns) or (feature not in features):
             print(f"Feature '{feature}' not in the features list, skipping decompression.")
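The `.copy()` fixes a classic aliasing bug: `decompressed_features = features` only binds a second name to the same list object, so mutations made while building the decompressed feature list would silently mutate the caller's `features` as well. A short illustration:

```python
features = ["molwt", "fingerprint"]

aliased = features        # same list object
copied = features.copy()  # independent shallow copy

aliased.append("fp_bit_0")
copied.append("fp_bit_1")

print(features)  # ['molwt', 'fingerprint', 'fp_bit_0'] -- mutated through the alias
print(copied)    # ['molwt', 'fingerprint', 'fp_bit_1'] -- caller's list unaffected
```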
workbench/utils/model_utils.py
CHANGED
@@ -3,6 +3,7 @@
 import logging
 import pandas as pd
 import numpy as np
+from scipy.stats import spearmanr
 import importlib.resources
 from pathlib import Path
 import os
@@ -222,8 +223,8 @@ def uq_metrics(df: pd.DataFrame, target_col: str) -> Dict[str, Any]:
         lower_95, upper_95 = df["q_025"], df["q_975"]
         lower_90, upper_90 = df["q_05"], df["q_95"]
         lower_80, upper_80 = df["q_10"], df["q_90"]
-        lower_68 = df.get("q_16",
-        upper_68 = df.get("q_84",
+        lower_68 = df.get("q_16", df["q_10"])  # fallback to 80% interval
+        upper_68 = df.get("q_84", df["q_90"])  # fallback to 80% interval
         lower_50, upper_50 = df["q_25"], df["q_75"]
     elif "prediction_std" in df.columns:
         lower_95 = df["prediction"] - 1.96 * df["prediction_std"]
@@ -240,18 +241,16 @@ def uq_metrics(df: pd.DataFrame, target_col: str) -> Dict[str, Any]:
         raise ValueError(
             "Either quantile columns (q_025, q_975, q_25, q_75) or 'prediction_std' column must be present."
         )
-    avg_std = df["prediction_std"].mean()
     median_std = df["prediction_std"].median()
     coverage_95 = np.mean((df[target_col] >= lower_95) & (df[target_col] <= upper_95))
     coverage_90 = np.mean((df[target_col] >= lower_90) & (df[target_col] <= upper_90))
     coverage_80 = np.mean((df[target_col] >= lower_80) & (df[target_col] <= upper_80))
     coverage_68 = np.mean((df[target_col] >= lower_68) & (df[target_col] <= upper_68))
-
-
-
-
-
-    avg_width_68 = np.mean(upper_68 - lower_68)
+    median_width_95 = np.median(upper_95 - lower_95)
+    median_width_90 = np.median(upper_90 - lower_90)
+    median_width_80 = np.median(upper_80 - lower_80)
+    median_width_50 = np.median(upper_50 - lower_50)
+    median_width_68 = np.median(upper_68 - lower_68)

     # --- CRPS (measures calibration + sharpness) ---
     z = (df[target_col] - df["prediction"]) / df["prediction_std"]
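Switching from mean to median interval widths makes the sharpness numbers robust to a handful of extremely wide intervals, which can dominate a mean. A self-contained sketch of the coverage and median-width calculations on synthetic, well-calibrated data:

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
y = rng.normal(size=2000)
pred = y + rng.normal(scale=0.1, size=2000)

# Illustrative 95% interval: prediction +/- 1.96 * (true noise scale)
df = pd.DataFrame({"target": y, "prediction": pred,
                   "q_025": pred - 1.96 * 0.1, "q_975": pred + 1.96 * 0.1})

# Empirical coverage: fraction of targets that land inside the interval (~0.95 here)
coverage_95 = np.mean((df["target"] >= df["q_025"]) & (df["target"] <= df["q_975"]))

# Median width resists outlier intervals better than the old mean width
median_width_95 = np.median(df["q_975"] - df["q_025"])

print(f"coverage_95={coverage_95:.3f}, median_width_95={median_width_95:.3f}")
```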
@@ -267,47 +266,43 @@ def uq_metrics(df: pd.DataFrame, target_col: str) -> Dict[str, Any]:
     )
     mean_is_95 = np.mean(is_95)

-    # ---
+    # --- Interval to Error Correlation ---
     abs_residuals = np.abs(df[target_col] - df["prediction"])
-
-
+    width_68 = upper_68 - lower_68
+
+    # Spearman correlation for robustness
+    interval_to_error_corr = spearmanr(width_68, abs_residuals)[0]

     # Collect results
     results = {
-        "coverage_50": coverage_50,
         "coverage_68": coverage_68,
         "coverage_80": coverage_80,
         "coverage_90": coverage_90,
         "coverage_95": coverage_95,
         "median_std": median_std,
-        "
-        "
-        "
-        "
-        "
-        "
-        # "crps": mean_crps,
-        # "interval_score_95": mean_is_95,
-        # "adaptive_calibration": adaptive_calibration,
+        "median_width_50": median_width_50,
+        "median_width_68": median_width_68,
+        "median_width_80": median_width_80,
+        "median_width_90": median_width_90,
+        "median_width_95": median_width_95,
+        "interval_to_error_corr": interval_to_error_corr,
         "n_samples": len(df),
     }

     print("\n=== UQ Metrics ===")
-    print(f"Coverage @ 50%: {coverage_50:.3f} (target: 0.50)")
     print(f"Coverage @ 68%: {coverage_68:.3f} (target: 0.68)")
     print(f"Coverage @ 80%: {coverage_80:.3f} (target: 0.80)")
     print(f"Coverage @ 90%: {coverage_90:.3f} (target: 0.90)")
     print(f"Coverage @ 95%: {coverage_95:.3f} (target: 0.95)")
     print(f"Median Prediction StdDev: {median_std:.3f}")
-    print(f"
-    print(f"
-    print(f"
-    print(f"
-    print(f"
-    print(f"Average 95% Width: {avg_width_95:.3f}")
+    print(f"Median 50% Width: {median_width_50:.3f}")
+    print(f"Median 68% Width: {median_width_68:.3f}")
+    print(f"Median 80% Width: {median_width_80:.3f}")
+    print(f"Median 90% Width: {median_width_90:.3f}")
+    print(f"Median 95% Width: {median_width_95:.3f}")
     print(f"CRPS: {mean_crps:.3f} (lower is better)")
     print(f"Interval Score 95%: {mean_is_95:.3f} (lower is better)")
-    print(f"
+    print(f"Interval/Error Corr: {interval_to_error_corr:.3f} (higher is better, target: >0.5)")
     print(f"Samples: {len(df)}")
     return results

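The new `interval_to_error_corr` metric asks whether wider intervals actually track larger errors. Spearman's rank correlation fits here because it only assumes a monotone relationship and is robust to outliers. A sketch on synthetic heteroscedastic data where the intervals are genuinely informative:

```python
import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
n = 1000

# Per-sample noise scale varies; a good UQ model widens its intervals with it
noise_scale = rng.uniform(0.1, 1.0, size=n)
abs_residuals = np.abs(rng.normal(scale=noise_scale))
width_68 = 2.0 * noise_scale  # illustrative 68% interval width

# spearmanr returns (correlation, p-value); the diff keeps only the correlation
interval_to_error_corr = spearmanr(width_68, abs_residuals)[0]
print(f"interval_to_error_corr={interval_to_error_corr:.3f}  (target: > 0.5)")
```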
workbench/utils/shap_utils.py
CHANGED
@@ -212,6 +212,14 @@ def _calculate_shap_values(workbench_model, sample_df: pd.DataFrame = None):
         log.error("No XGBoost model found in the artifact.")
         return None, None, None, None

+    # Get the booster (SHAP requires the booster, not the sklearn wrapper)
+    if hasattr(xgb_model, "get_booster"):
+        # Full sklearn model - extract the booster
+        booster = xgb_model.get_booster()
+    else:
+        # Already a booster
+        booster = xgb_model
+
     # Load category mappings if available
     category_mappings = load_category_mappings_from_s3(model_artifact_uri)

@@ -229,8 +237,8 @@ def _calculate_shap_values(workbench_model, sample_df: pd.DataFrame = None):
     # Create a DMatrix with categorical support
     dmatrix = xgb.DMatrix(X, enable_categorical=True)

-    # Use XGBoost's built-in SHAP calculation
-    shap_values =
+    # Use XGBoost's built-in SHAP calculation (booster method, not sklearn)
+    shap_values = booster.predict(dmatrix, pred_contribs=True, strict_shape=True)
     features_with_bias = features + ["bias"]

     # Now we need to subset the columns based on top 10 SHAP values
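`pred_contribs=True` is a `Booster.predict` feature rather than part of the sklearn wrapper's `predict`, which is why the earlier hunk unwraps `get_booster()` first. Each output row holds one additive contribution per feature plus a trailing bias term, and with `strict_shape=True` XGBoost adds an explicit group axis. A minimal sketch on invented data:

```python
import numpy as np
import xgboost as xgb
from xgboost import XGBRegressor

X = np.random.rand(32, 4)
y = X @ np.array([1.0, 2.0, 0.0, -1.0])
sk_model = XGBRegressor(n_estimators=10).fit(X, y)

# Unwrap the booster when handed the sklearn wrapper (mirrors the diff above)
booster = sk_model.get_booster() if hasattr(sk_model, "get_booster") else sk_model

dmatrix = xgb.DMatrix(X)
shap_values = booster.predict(dmatrix, pred_contribs=True, strict_shape=True)

# strict_shape gives (rows, groups, features + bias); one group for regression
print(shap_values.shape)  # (32, 1, 5)

# Contributions plus bias sum to the raw prediction for each row
print(np.allclose(shap_values.sum(axis=2).ravel(), booster.predict(dmatrix)))
```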
{workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: workbench
-Version: 0.8.179
+Version: 0.8.181
 Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
 Author-email: SuperCowPowers LLC <support@supercowpowers.com>
 License-Expression: MIT
{workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/RECORD
CHANGED

@@ -54,7 +54,7 @@ workbench/core/artifacts/cached_artifact_mixin.py,sha256=ngqFLZ4cQx_TFouXZgXZQsv
 workbench/core/artifacts/data_capture_core.py,sha256=q8f79rRTYiZ7T4IQRWXl8ZvPpcvZyNxYERwvo8o0OQc,14858
 workbench/core/artifacts/data_source_abstract.py,sha256=5IRCzFVK-17cd4NXPMRfx99vQAmQ0WHE5jcm5RfsVTg,10619
 workbench/core/artifacts/data_source_factory.py,sha256=YL_tA5fsgubbB3dPF6T4tO0rGgz-6oo3ge4i_YXVC-M,2380
-workbench/core/artifacts/endpoint_core.py,sha256=
+workbench/core/artifacts/endpoint_core.py,sha256=iOBKnlfG3xVj9-Z9MX_IxxnSs6jMNXJXLgCsnWgyUqM,51657
 workbench/core/artifacts/feature_set_core.py,sha256=7b1o_PzxtwaYC-W2zxlkltiO0fYULA8CVGWwHNmqgtI,31457
 workbench/core/artifacts/model_core.py,sha256=ECDwQ0qM5qb1yGJ07U70BVdfkrW9m7p9e6YJWib3uR0,50855
 workbench/core/artifacts/monitor_core.py,sha256=M307yz7tEzOEHgv-LmtVy9jKjSbM98fHW3ckmNYrwlU,27897
@@ -140,8 +140,8 @@ workbench/model_scripts/custom_models/uq_models/Readme.md,sha256=UVpL-lvtTrLqwBe
 workbench/model_scripts/custom_models/uq_models/bayesian_ridge.template,sha256=ca3CaAk6HVuNv1HnPgABTzRY3oDrRxomjgD4V1ZDwoc,6448
 workbench/model_scripts/custom_models/uq_models/ensemble_xgb.template,sha256=xlKLHeLQkScONnrlbAGIsrCm2wwsvcfv4Vdrw4nlc_8,13457
 workbench/model_scripts/custom_models/uq_models/gaussian_process.template,sha256=3nMlCi8nEbc4N-MQTzjfIcljfDQkUmWeLBfmd18m5fg,6632
-workbench/model_scripts/custom_models/uq_models/generated_model_script.py,sha256=
-workbench/model_scripts/custom_models/uq_models/mapie.template,sha256=
+workbench/model_scripts/custom_models/uq_models/generated_model_script.py,sha256=PCCDF3DuiH13wMltuCzorVb79uLjKuX_9-ryuooQK5o,19131
+workbench/model_scripts/custom_models/uq_models/mapie.template,sha256=8VzoP-Wp3ECVIDqXVkiTS6bwmn3cd3dDZ2WjYPzXTi8,18955
 workbench/model_scripts/custom_models/uq_models/meta_uq.template,sha256=XTfhODRaHlI1jZGo9pSe-TqNsk2_nuSw0xMO2fKzDv8,14011
 workbench/model_scripts/custom_models/uq_models/ngboost.template,sha256=v1rviYTJGJnQRGgAyveXhOQlS-WFCTlc2vdnWq6HIXk,8241
 workbench/model_scripts/custom_models/uq_models/proximity.py,sha256=zqmNlX70LnWXr5fdtFFQppSNTLjlOciQVrjGr-g9jRE,13716
@@ -161,7 +161,7 @@ workbench/model_scripts/scikit_learn/requirements.txt,sha256=aVvwiJ3LgBUhM_PyFlb
 workbench/model_scripts/scikit_learn/scikit_learn.template,sha256=QQvqx-eX9ZTbYmyupq6R6vIQwosmsmY_MRBPaHyfjdk,12586
 workbench/model_scripts/xgb_model/generated_model_script.py,sha256=Tbn7EMXxZZO8rDdKQ5fYCbpltACsMXNvuusLL9p-U5c,22319
 workbench/model_scripts/xgb_model/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
-workbench/model_scripts/xgb_model/xgb_model.template,sha256=
+workbench/model_scripts/xgb_model/xgb_model.template,sha256=0uXknIEqgUaIFUfu2gfkxa3WHUr8HBBqBepGUTDvrhQ,17917
 workbench/repl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workbench/repl/workbench_shell.py,sha256=Vhg4BQr2r4D4ymekrVtOFi0MaRvaH4V2UgcWRgvN_3U,22122
 workbench/resources/open_source_api.key,sha256=3S0OTblsmC0msUPdE_dbBmI83xJNmYscuwLJ57JmuOc,433
@@ -219,7 +219,7 @@ workbench/utils/lambda_utils.py,sha256=7GhGRPyXn9o-toWb9HBGSnI8-DhK9YRkwhCSk_mNK
 workbench/utils/license_manager.py,sha256=sDuhk1mZZqUbFmnuFXehyGnui_ALxrmYBg7gYwoo7ho,6975
 workbench/utils/log_utils.py,sha256=7n1NJXO_jUX82e6LWAQug6oPo3wiPDBYsqk9gsYab_A,3167
 workbench/utils/markdown_utils.py,sha256=4lEqzgG4EVmLcvvKKNUwNxVCySLQKJTJmWDiaDroI1w,8306
-workbench/utils/model_utils.py,sha256=
+workbench/utils/model_utils.py,sha256=sbsWDaeTagHdrh8Z_A_yfWsiSIEqSl18sq67l9E2xiA,12602
 workbench/utils/monitor_utils.py,sha256=kVaJ7BgUXs3VPMFYfLC03wkIV4Dq-pEhoXS0wkJFxCc,7858
 workbench/utils/pandas_utils.py,sha256=uTUx-d1KYfjbS9PMQp2_9FogCV7xVZR6XLzU5YAGmfs,39371
 workbench/utils/performance_utils.py,sha256=WDNvz-bOdC99cDuXl0urAV4DJ7alk_V3yzKPwvqgST4,1329
@@ -231,7 +231,7 @@ workbench/utils/redis_cache.py,sha256=39LFSWmOlNNcah02D3sBnmibc-DPeKC3SNq71K4HaB
 workbench/utils/repl_utils.py,sha256=rWOMv2HiEIp8ZL6Ps6DlwiJlGr-pOhv9OZQhm3aR-1A,4668
 workbench/utils/resource_utils.py,sha256=EM4SrMmRUQnG80aR5M7hmzw86hYdP_S7fRPuqhpDSVo,1435
 workbench/utils/s3_utils.py,sha256=Xme_o_cftC_jWnw6R9YKS6-6C11zaCBAoQDlY3dZb5o,7337
-workbench/utils/shap_utils.py,sha256=
+workbench/utils/shap_utils.py,sha256=dtjSIwSyvYSaQjjvIp5A9LGS7pr-5Vt907rvVKOrqNY,12651
 workbench/utils/shapley_values.py,sha256=3DvQz4HIPnxW42idgtuQ5vtzU-oF4_lToaWzLRjU-E4,3673
 workbench/utils/symbols.py,sha256=PioF1yAQyOabw7kLg8nhvaZBPFe7ABkpfpPPE0qz_2k,1265
 workbench/utils/test_data_generator.py,sha256=gqRXL7IUKG4wVfO1onflY3wg7vLkgx402_Zy3iqY7NU,11921
@@ -287,9 +287,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
 workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
 workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
 workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
+workbench-0.8.181.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+workbench-0.8.181.dist-info/METADATA,sha256=DZJcZg7gfOSERy7Y-Qia0fY9l9jcloqrkwk9OGaNAc4,9210
+workbench-0.8.181.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+workbench-0.8.181.dist-info/entry_points.txt,sha256=zPFPruY9uayk8-wsKrhfnIyIB6jvZOW_ibyllEIsLWo,356
+workbench-0.8.181.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+workbench-0.8.181.dist-info/RECORD,,

{workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/WHEEL
File without changes

{workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/entry_points.txt
File without changes

{workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/licenses/LICENSE
File without changes

{workbench-0.8.179.dist-info → workbench-0.8.181.dist-info}/top_level.txt
File without changes