workbench 0.8.161__py3-none-any.whl → 0.8.192__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. workbench/algorithms/dataframe/proximity.py +143 -102
  2. workbench/algorithms/graph/light/proximity_graph.py +2 -1
  3. workbench/api/compound.py +1 -1
  4. workbench/api/endpoint.py +12 -0
  5. workbench/api/feature_set.py +4 -4
  6. workbench/api/meta.py +5 -2
  7. workbench/api/model.py +16 -12
  8. workbench/api/monitor.py +1 -16
  9. workbench/core/artifacts/artifact.py +11 -3
  10. workbench/core/artifacts/data_capture_core.py +355 -0
  11. workbench/core/artifacts/endpoint_core.py +168 -78
  12. workbench/core/artifacts/feature_set_core.py +72 -13
  13. workbench/core/artifacts/model_core.py +50 -15
  14. workbench/core/artifacts/monitor_core.py +33 -248
  15. workbench/core/cloud_platform/aws/aws_account_clamp.py +50 -1
  16. workbench/core/cloud_platform/aws/aws_meta.py +12 -5
  17. workbench/core/cloud_platform/aws/aws_session.py +4 -4
  18. workbench/core/transforms/data_to_features/light/molecular_descriptors.py +4 -4
  19. workbench/core/transforms/features_to_model/features_to_model.py +9 -4
  20. workbench/core/transforms/model_to_endpoint/model_to_endpoint.py +36 -6
  21. workbench/core/transforms/pandas_transforms/pandas_to_features.py +27 -0
  22. workbench/core/views/training_view.py +49 -53
  23. workbench/core/views/view.py +51 -1
  24. workbench/core/views/view_utils.py +4 -4
  25. workbench/model_scripts/custom_models/chem_info/mol_descriptors.py +483 -0
  26. workbench/model_scripts/custom_models/chem_info/mol_standardize.py +450 -0
  27. workbench/model_scripts/custom_models/chem_info/molecular_descriptors.py +7 -9
  28. workbench/model_scripts/custom_models/proximity/feature_space_proximity.template +3 -5
  29. workbench/model_scripts/custom_models/proximity/proximity.py +143 -102
  30. workbench/model_scripts/custom_models/uq_models/bayesian_ridge.template +7 -8
  31. workbench/model_scripts/custom_models/uq_models/ensemble_xgb.template +10 -17
  32. workbench/model_scripts/custom_models/uq_models/gaussian_process.template +5 -11
  33. workbench/model_scripts/custom_models/uq_models/meta_uq.template +156 -58
  34. workbench/model_scripts/custom_models/uq_models/ngboost.template +20 -14
  35. workbench/model_scripts/custom_models/uq_models/proximity.py +143 -102
  36. workbench/model_scripts/custom_models/uq_models/requirements.txt +1 -3
  37. workbench/model_scripts/ensemble_xgb/ensemble_xgb.template +5 -13
  38. workbench/model_scripts/pytorch_model/pytorch.template +19 -20
  39. workbench/model_scripts/scikit_learn/scikit_learn.template +4 -9
  40. workbench/model_scripts/script_generation.py +7 -2
  41. workbench/model_scripts/uq_models/mapie.template +492 -0
  42. workbench/model_scripts/uq_models/requirements.txt +1 -0
  43. workbench/model_scripts/xgb_model/xgb_model.template +31 -40
  44. workbench/repl/workbench_shell.py +11 -6
  45. workbench/scripts/lambda_launcher.py +63 -0
  46. workbench/scripts/ml_pipeline_batch.py +137 -0
  47. workbench/scripts/ml_pipeline_sqs.py +186 -0
  48. workbench/scripts/monitor_cloud_watch.py +20 -100
  49. workbench/utils/aws_utils.py +4 -3
  50. workbench/utils/chem_utils/__init__.py +0 -0
  51. workbench/utils/chem_utils/fingerprints.py +134 -0
  52. workbench/utils/chem_utils/misc.py +194 -0
  53. workbench/utils/chem_utils/mol_descriptors.py +483 -0
  54. workbench/utils/chem_utils/mol_standardize.py +450 -0
  55. workbench/utils/chem_utils/mol_tagging.py +348 -0
  56. workbench/utils/chem_utils/projections.py +209 -0
  57. workbench/utils/chem_utils/salts.py +256 -0
  58. workbench/utils/chem_utils/sdf.py +292 -0
  59. workbench/utils/chem_utils/toxicity.py +250 -0
  60. workbench/utils/chem_utils/vis.py +253 -0
  61. workbench/utils/cloudwatch_handler.py +1 -1
  62. workbench/utils/cloudwatch_utils.py +137 -0
  63. workbench/utils/config_manager.py +3 -7
  64. workbench/utils/endpoint_utils.py +5 -7
  65. workbench/utils/license_manager.py +2 -6
  66. workbench/utils/model_utils.py +76 -30
  67. workbench/utils/monitor_utils.py +44 -62
  68. workbench/utils/pandas_utils.py +3 -3
  69. workbench/utils/shap_utils.py +10 -2
  70. workbench/utils/workbench_logging.py +0 -3
  71. workbench/utils/workbench_sqs.py +1 -1
  72. workbench/utils/xgboost_model_utils.py +283 -145
  73. workbench/web_interface/components/plugins/dashboard_status.py +3 -1
  74. workbench/web_interface/components/plugins/generated_compounds.py +1 -1
  75. workbench/web_interface/components/plugins/scatter_plot.py +3 -3
  76. {workbench-0.8.161.dist-info → workbench-0.8.192.dist-info}/METADATA +4 -4
  77. {workbench-0.8.161.dist-info → workbench-0.8.192.dist-info}/RECORD +81 -76
  78. {workbench-0.8.161.dist-info → workbench-0.8.192.dist-info}/entry_points.txt +3 -0
  79. workbench/model_scripts/custom_models/chem_info/local_utils.py +0 -769
  80. workbench/model_scripts/custom_models/chem_info/tautomerize.py +0 -83
  81. workbench/model_scripts/custom_models/proximity/generated_model_script.py +0 -138
  82. workbench/model_scripts/custom_models/uq_models/generated_model_script.py +0 -393
  83. workbench/model_scripts/custom_models/uq_models/mapie_xgb.template +0 -203
  84. workbench/model_scripts/ensemble_xgb/generated_model_script.py +0 -279
  85. workbench/model_scripts/pytorch_model/generated_model_script.py +0 -565
  86. workbench/model_scripts/quant_regression/quant_regression.template +0 -279
  87. workbench/model_scripts/quant_regression/requirements.txt +0 -1
  88. workbench/model_scripts/scikit_learn/generated_model_script.py +0 -307
  89. workbench/model_scripts/xgb_model/generated_model_script.py +0 -477
  90. workbench/utils/chem_utils.py +0 -1556
  91. workbench/utils/execution_environment.py +0 -211
  92. workbench/utils/fast_inference.py +0 -167
  93. workbench/utils/resource_utils.py +0 -39
  94. {workbench-0.8.161.dist-info → workbench-0.8.192.dist-info}/WHEEL +0 -0
  95. {workbench-0.8.161.dist-info → workbench-0.8.192.dist-info}/licenses/LICENSE +0 -0
  96. {workbench-0.8.161.dist-info → workbench-0.8.192.dist-info}/top_level.txt +0 -0
@@ -7,9 +7,7 @@ from typing import Union, Optional
  import pandas as pd

  # Workbench Imports
- from workbench.api.feature_set import FeatureSet
- from workbench.api.model import Model
- from workbench.api.endpoint import Endpoint
+ from workbench.api import FeatureSet, Model, Endpoint

  # Set up the log
  log = logging.getLogger("workbench")
@@ -77,7 +75,7 @@ def internal_model_data_url(endpoint_config_name: str, session: boto3.Session) -
  return None


- def fs_training_data(end: Endpoint) -> pd.DataFrame:
+ def get_training_data(end: Endpoint) -> pd.DataFrame:
  """Code to get the training data from the FeatureSet used to train the Model

  Args:
@@ -100,7 +98,7 @@ def fs_training_data(end: Endpoint) -> pd.DataFrame:
  return train_df


- def fs_evaluation_data(end: Endpoint) -> pd.DataFrame:
+ def get_evaluation_data(end: Endpoint) -> pd.DataFrame:
  """Code to get the evaluation data from the FeatureSet NOT used for training

  Args:
@@ -178,11 +176,11 @@ if __name__ == "__main__":
  print(model_data_url)

  # Get the training data
- my_train_df = fs_training_data(my_endpoint)
+ my_train_df = get_training_data(my_endpoint)
  print(my_train_df)

  # Get the evaluation data
- my_eval_df = fs_evaluation_data(my_endpoint)
+ my_eval_df = get_evaluation_data(my_endpoint)
  print(my_eval_df)

  # Backtrack to the FeatureSet
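For orientation, a brief usage sketch of the renamed helpers; the module path (workbench.utils.endpoint_utils) is assumed from the file list above and the endpoint name is hypothetical:

from workbench.api import Endpoint
from workbench.utils.endpoint_utils import get_training_data, get_evaluation_data  # assumed module path

# Hypothetical endpoint name; any deployed Workbench endpoint should work
end = Endpoint("abalone-regression")

train_df = get_training_data(end)   # rows the backing Model was trained on
eval_df = get_evaluation_data(end)  # hold-out rows from the same FeatureSet
print(train_df.shape, eval_df.shape)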
@@ -6,15 +6,12 @@ import json
  import logging
  import requests
  from typing import Union
- import importlib.resources as resources # noqa: F401 Python 3.9 compatibility
  from datetime import datetime
  from cryptography.hazmat.primitives import hashes
  from cryptography.hazmat.primitives.asymmetric import padding
  from cryptography.hazmat.primitives import serialization
  from cryptography.hazmat.backends import default_backend
-
- # Python 3.9 compatibility
- from workbench.utils.resource_utils import get_resource_path
+ from importlib.resources import files, as_file


  class FatalLicenseError(Exception):
@@ -140,8 +137,7 @@ class LicenseManager:
  Returns:
      The public key as an object.
  """
- # Python 3.9 compatibility
- with get_resource_path("workbench.resources", "signature_verify_pub.pem") as public_key_path:
+ with as_file(files("workbench.resources").joinpath("signature_verify_pub.pem")) as public_key_path:
  with open(public_key_path, "rb") as key_file:
  public_key_data = key_file.read()

@@ -3,6 +3,7 @@
  import logging
  import pandas as pd
  import numpy as np
+ from scipy.stats import spearmanr
  import importlib.resources
  from pathlib import Path
  import os
@@ -92,6 +93,31 @@ def get_custom_script_path(package: str, script_name: str) -> Path:
  return script_path


+ def proximity_model_local(model: "Model"):
+ """Create a Proximity Model for this Model
+
+ Args:
+ model (Model): The Model/FeatureSet used to create the proximity model
+
+ Returns:
+ Proximity: The proximity model
+ """
+ from workbench.algorithms.dataframe.proximity import Proximity # noqa: F401 (avoid circular import)
+ from workbench.api import Model, FeatureSet # noqa: F401 (avoid circular import)
+
+ # Get Feature and Target Columns from the existing given Model
+ features = model.features()
+ target = model.target()
+
+ # Backtrack our FeatureSet to get the ID column
+ fs = FeatureSet(model.get_input())
+ id_column = fs.id_column
+
+ # Create the Proximity Model from our Training Data
+ df = model.training_view().pull_dataframe()
+ return Proximity(df, id_column, features, target, track_columns=features)
+
+
  def proximity_model(model: "Model", prox_model_name: str, track_columns: list = None) -> "Model":
  """Create a proximity model based on the given model

@@ -140,7 +166,7 @@ def uq_model(model: "Model", uq_model_name: str, train_all_data: bool = False) -
  from workbench.api import Model, ModelType, FeatureSet # noqa: F401 (avoid circular import)

  # Get the custom script path for the UQ model
- script_path = get_custom_script_path("uq_models", "meta_uq.template")
+ script_path = get_custom_script_path("uq_models", "mapie.template")

  # Get Feature and Target Columns from the existing given Model
  features = model.features()
@@ -181,7 +207,8 @@ def load_category_mappings_from_s3(model_artifact_uri: str) -> Optional[dict]:

  # Extract tarball
  with tarfile.open(local_tar_path, "r:gz") as tar:
- tar.extractall(path=tmpdir, filter="data")
+ # Note: For 3.12+, can use filter="data" argument
+ tar.extractall(path=tmpdir)

  # Look for category mappings in base directory only
  mappings_path = os.path.join(tmpdir, "category_mappings.json")
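As a side note on the extraction change above, a minimal version-guarded sketch (not from the package; the helper name is hypothetical) that keeps the safer filter="data" behavior on Python 3.12+ while remaining compatible with older interpreters:

import sys
import tarfile

def extract_model_tarball(tar_path: str, dest: str) -> None:
    # Hypothetical helper: pass filter="data" only where the argument exists (Python 3.12+)
    with tarfile.open(tar_path, "r:gz") as tar:
        if sys.version_info >= (3, 12):
            tar.extractall(path=dest, filter="data")
        else:
            tar.extractall(path=dest)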
@@ -220,28 +247,41 @@ def uq_metrics(df: pd.DataFrame, target_col: str) -> Dict[str, Any]:
  # --- Coverage and Interval Width ---
  if "q_025" in df.columns and "q_975" in df.columns:
  lower_95, upper_95 = df["q_025"], df["q_975"]
+ lower_90, upper_90 = df["q_05"], df["q_95"]
+ lower_80, upper_80 = df["q_10"], df["q_90"]
+ lower_68 = df.get("q_16", df["q_10"]) # fallback to 80% interval
+ upper_68 = df.get("q_84", df["q_90"]) # fallback to 80% interval
  lower_50, upper_50 = df["q_25"], df["q_75"]
  elif "prediction_std" in df.columns:
  lower_95 = df["prediction"] - 1.96 * df["prediction_std"]
  upper_95 = df["prediction"] + 1.96 * df["prediction_std"]
+ lower_90 = df["prediction"] - 1.645 * df["prediction_std"]
+ upper_90 = df["prediction"] + 1.645 * df["prediction_std"]
+ lower_80 = df["prediction"] - 1.282 * df["prediction_std"]
+ upper_80 = df["prediction"] + 1.282 * df["prediction_std"]
+ lower_68 = df["prediction"] - 1.0 * df["prediction_std"]
+ upper_68 = df["prediction"] + 1.0 * df["prediction_std"]
  lower_50 = df["prediction"] - 0.674 * df["prediction_std"]
  upper_50 = df["prediction"] + 0.674 * df["prediction_std"]
  else:
  raise ValueError(
  "Either quantile columns (q_025, q_975, q_25, q_75) or 'prediction_std' column must be present."
  )
+ median_std = df["prediction_std"].median()
  coverage_95 = np.mean((df[target_col] >= lower_95) & (df[target_col] <= upper_95))
- coverage_50 = np.mean((df[target_col] >= lower_50) & (df[target_col] <= upper_50))
- avg_width_95 = np.mean(upper_95 - lower_95)
- avg_width_50 = np.mean(upper_50 - lower_50)
+ coverage_90 = np.mean((df[target_col] >= lower_90) & (df[target_col] <= upper_90))
+ coverage_80 = np.mean((df[target_col] >= lower_80) & (df[target_col] <= upper_80))
+ coverage_68 = np.mean((df[target_col] >= lower_68) & (df[target_col] <= upper_68))
+ median_width_95 = np.median(upper_95 - lower_95)
+ median_width_90 = np.median(upper_90 - lower_90)
+ median_width_80 = np.median(upper_80 - lower_80)
+ median_width_50 = np.median(upper_50 - lower_50)
+ median_width_68 = np.median(upper_68 - lower_68)

  # --- CRPS (measures calibration + sharpness) ---
- if "prediction_std" in df.columns:
- z = (df[target_col] - df["prediction"]) / df["prediction_std"]
- crps = df["prediction_std"] * (z * (2 * norm.cdf(z) - 1) + 2 * norm.pdf(z) - 1 / np.sqrt(np.pi))
- mean_crps = np.mean(crps)
- else:
- mean_crps = np.nan
+ z = (df[target_col] - df["prediction"]) / df["prediction_std"]
+ crps = df["prediction_std"] * (z * (2 * norm.cdf(z) - 1) + 2 * norm.pdf(z) - 1 / np.sqrt(np.pi))
+ mean_crps = np.mean(crps)

  # --- Interval Score @ 95% (penalizes miscoverage) ---
  alpha_95 = 0.05
@@ -252,31 +292,43 @@ def uq_metrics(df: pd.DataFrame, target_col: str) -> Dict[str, Any]:
  )
  mean_is_95 = np.mean(is_95)

- # --- Adaptive Calibration (correlation between errors and uncertainty) ---
+ # --- Interval to Error Correlation ---
  abs_residuals = np.abs(df[target_col] - df["prediction"])
- width_95 = upper_95 - lower_95
- adaptive_calibration = np.corrcoef(abs_residuals, width_95)[0, 1]
+ width_68 = upper_68 - lower_68
+
+ # Spearman correlation for robustness
+ interval_to_error_corr = spearmanr(width_68, abs_residuals)[0]

  # Collect results
  results = {
+ "coverage_68": coverage_68,
+ "coverage_80": coverage_80,
+ "coverage_90": coverage_90,
  "coverage_95": coverage_95,
- "coverage_50": coverage_50,
- "avg_width_95": avg_width_95,
- "avg_width_50": avg_width_50,
- "crps": mean_crps,
- "interval_score_95": mean_is_95,
- "adaptive_calibration": adaptive_calibration,
+ "median_std": median_std,
+ "median_width_50": median_width_50,
+ "median_width_68": median_width_68,
+ "median_width_80": median_width_80,
+ "median_width_90": median_width_90,
+ "median_width_95": median_width_95,
+ "interval_to_error_corr": interval_to_error_corr,
  "n_samples": len(df),
  }

  print("\n=== UQ Metrics ===")
+ print(f"Coverage @ 68%: {coverage_68:.3f} (target: 0.68)")
+ print(f"Coverage @ 80%: {coverage_80:.3f} (target: 0.80)")
+ print(f"Coverage @ 90%: {coverage_90:.3f} (target: 0.90)")
  print(f"Coverage @ 95%: {coverage_95:.3f} (target: 0.95)")
- print(f"Coverage @ 50%: {coverage_50:.3f} (target: 0.50)")
- print(f"Average 95% Width: {avg_width_95:.3f}")
- print(f"Average 50% Width: {avg_width_50:.3f}")
+ print(f"Median Prediction StdDev: {median_std:.3f}")
+ print(f"Median 50% Width: {median_width_50:.3f}")
+ print(f"Median 68% Width: {median_width_68:.3f}")
+ print(f"Median 80% Width: {median_width_80:.3f}")
+ print(f"Median 90% Width: {median_width_90:.3f}")
+ print(f"Median 95% Width: {median_width_95:.3f}")
  print(f"CRPS: {mean_crps:.3f} (lower is better)")
  print(f"Interval Score 95%: {mean_is_95:.3f} (lower is better)")
- print(f"Adaptive Calibration: {adaptive_calibration:.3f} (higher is better, target: >0.5)")
+ print(f"Interval/Error Corr: {interval_to_error_corr:.3f} (higher is better, target: >0.5)")
  print(f"Samples: {len(df)}")
  return results

@@ -313,9 +365,3 @@ if __name__ == "__main__":
  df = end.auto_inference(capture=True)
  results = uq_metrics(df, target_col="solubility")
  print(results)
-
- # Test the uq_metrics function
- end = Endpoint("aqsol-uq-100")
- df = end.auto_inference(capture=True)
- results = uq_metrics(df, target_col="solubility")
- print(results)
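For reference, a small standalone sketch (not part of the package) of the Gaussian-interval math the new metrics rely on, using the same z-multipliers (1.0, 1.282, 1.645, 1.96) and the closed-form CRPS for a normal predictive distribution; the toy data and column names are illustrative:

import numpy as np
import pandas as pd
from scipy.stats import norm, spearmanr

# Toy data standing in for a UQ model's output
df = pd.DataFrame({
    "prediction": [1.0, 2.0, 3.0, 4.0],
    "prediction_std": [0.5, 0.4, 0.6, 0.3],
    "actual": [1.2, 1.9, 3.8, 4.1],
})

# 68% interval from +/- 1.0 standard deviations (1.282 -> 80%, 1.645 -> 90%, 1.96 -> 95%)
lower_68 = df["prediction"] - 1.0 * df["prediction_std"]
upper_68 = df["prediction"] + 1.0 * df["prediction_std"]
coverage_68 = np.mean((df["actual"] >= lower_68) & (df["actual"] <= upper_68))

# Closed-form CRPS for a Gaussian: sigma * [z * (2 * Phi(z) - 1) + 2 * phi(z) - 1 / sqrt(pi)]
z = (df["actual"] - df["prediction"]) / df["prediction_std"]
crps = df["prediction_std"] * (z * (2 * norm.cdf(z) - 1) + 2 * norm.pdf(z) - 1 / np.sqrt(np.pi))

# Rank correlation between interval width and absolute error (the adaptivity check)
corr = spearmanr(upper_68 - lower_68, np.abs(df["actual"] - df["prediction"]))[0]
print(coverage_68, crps.mean(), corr)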
@@ -14,7 +14,7 @@ from workbench.utils.s3_utils import read_content_from_s3
  log = logging.getLogger("workbench")


- def pull_data_capture(data_capture_path, max_files=1) -> Union[pd.DataFrame, None]:
+ def pull_data_capture_for_testing(data_capture_path, max_files=1) -> Union[pd.DataFrame, None]:
  """
  Read and process captured data from S3.

@@ -26,7 +26,12 @@ def pull_data_capture(data_capture_path, max_files=1) -> Union[pd.DataFrame, Non

  Returns:
  Union[pd.DataFrame, None]: A dataframe of the captured data (or None if no data is found).
+
+ Notes:
+ This method is really only for testing and debugging.
  """
+ log.important("This method is for testing and debugging only.")
+
  # List files in the specified S3 path
  files = wr.s3.list_objects(data_capture_path)
  if not files:
@@ -64,59 +69,53 @@ def process_data_capture(df: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]:
  def process_data_capture(df: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]:
  """
  Process the captured data DataFrame to extract input and output data.
- Continues processing even if individual files are malformed.
+ Handles cases where input or output might not be captured.
+
  Args:
  df (DataFrame): DataFrame with captured data.
  Returns:
  tuple[DataFrame, DataFrame]: Input and output DataFrames.
  """
+
+ def parse_endpoint_data(data: dict) -> pd.DataFrame:
+ """Parse endpoint data based on encoding type."""
+ encoding = data["encoding"].upper()
+
+ if encoding == "CSV":
+ return pd.read_csv(StringIO(data["data"]))
+ elif encoding == "JSON":
+ json_data = json.loads(data["data"])
+ if isinstance(json_data, dict):
+ return pd.DataFrame({k: [v] if not isinstance(v, list) else v for k, v in json_data.items()})
+ else:
+ return pd.DataFrame(json_data)
+ else:
+ return None # Unknown encoding
+
  input_dfs = []
  output_dfs = []

- for idx, row in df.iterrows():
+ # Use itertuples() instead of iterrows() for better performance
+ for row in df.itertuples(index=True):
  try:
- capture_data = row["captureData"]
-
- # Check if this capture has the required fields (all or nothing)
- if "endpointInput" not in capture_data:
- log.warning(f"Row {idx}: No endpointInput found in capture data.")
- continue
-
- if "endpointOutput" not in capture_data:
- log.critical(
- f"Row {idx}: No endpointOutput found in capture data. DataCapture needs to include Output capture!"
- )
- continue
-
- # Process input data
- input_data = capture_data["endpointInput"]
- if input_data["encoding"].upper() == "CSV":
- input_df = pd.read_csv(StringIO(input_data["data"]))
- elif input_data["encoding"].upper() == "JSON":
- json_data = json.loads(input_data["data"])
- if isinstance(json_data, dict):
- input_df = pd.DataFrame({k: [v] if not isinstance(v, list) else v for k, v in json_data.items()})
- else:
- input_df = pd.DataFrame(json_data)
-
- # Process output data
- output_data = capture_data["endpointOutput"]
- if output_data["encoding"].upper() == "CSV":
- output_df = pd.read_csv(StringIO(output_data["data"]))
- elif output_data["encoding"].upper() == "JSON":
- json_data = json.loads(output_data["data"])
- if isinstance(json_data, dict):
- output_df = pd.DataFrame({k: [v] if not isinstance(v, list) else v for k, v in json_data.items()})
- else:
- output_df = pd.DataFrame(json_data)
-
- # If we get here, both processed successfully
- input_dfs.append(input_df)
- output_dfs.append(output_df)
+ capture_data = row.captureData
+
+ # Process input data if present
+ if "endpointInput" in capture_data:
+ input_df = parse_endpoint_data(capture_data["endpointInput"])
+ if input_df is not None:
+ input_dfs.append(input_df)
+
+ # Process output data if present
+ if "endpointOutput" in capture_data:
+ output_df = parse_endpoint_data(capture_data["endpointOutput"])
+ if output_df is not None:
+ output_dfs.append(output_df)

  except Exception as e:
- log.error(f"Row {idx}: Failed to process row: {e}")
+ log.debug(f"Row {row.Index}: Failed to process row: {e}")
  continue
+
  # Combine and return results
  return (
  pd.concat(input_dfs, ignore_index=True) if input_dfs else pd.DataFrame(),
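To make the parsing above concrete, a small standalone sketch (not from the package) of the captured-record shape this code expects, with a CSV-encoded input and a JSON-encoded output; field values are illustrative:

import json
import pandas as pd
from io import StringIO

# One captured record shaped like the rows handled above
record = {
    "captureData": {
        "endpointInput": {"data": "feature1,feature2\n1.0,2.0\n", "encoding": "CSV"},
        "endpointOutput": {"data": json.dumps({"prediction": [0.7]}), "encoding": "JSON"},
    }
}

capture = record["captureData"]
input_df = pd.read_csv(StringIO(capture["endpointInput"]["data"]))       # CSV payload
output_df = pd.DataFrame(json.loads(capture["endpointOutput"]["data"]))  # JSON payload
print(input_df)
print(output_df)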
@@ -178,23 +177,6 @@ def parse_monitoring_results(results_json: str) -> Dict[str, Any]:
  return {"error": str(e)}


- """TEMP
- # If the status is "CompletedWithViolations", we grab the lastest
- # violation file and add it to the result
- if status == "CompletedWithViolations":
- violation_file = f"{self.monitoring_path}/
- {last_run['CreationTime'].strftime('%Y/%m/%d')}/constraint_violations.json"
- if wr.s3.does_object_exist(violation_file):
- violations_json = read_content_from_s3(violation_file)
- violations = parse_monitoring_results(violations_json)
- result["violations"] = violations.get("constraint_violations", [])
- result["violation_count"] = len(result["violations"])
- else:
- result["violations"] = []
- result["violation_count"] = 0
- """
-
-
  def preprocessing_script(feature_list: list[str]) -> str:
  """
  A preprocessing script for monitoring jobs.
@@ -245,8 +227,8 @@ if __name__ == "__main__":
  from workbench.api.monitor import Monitor

  # Test pulling data capture
- mon = Monitor("caco2-pappab-class-0")
- df = pull_data_capture(mon.data_capture_path)
+ mon = Monitor("abalone-regression-rt")
+ df = pull_data_capture_for_testing(mon.data_capture_path)
  print("Data Capture:")
  print(df.head())

@@ -262,4 +244,4 @@
  # Test preprocessing script
  script = preprocessing_script(["feature1", "feature2", "feature3"])
  print("\nPreprocessing Script:")
- print(script)
+ # print(script)
@@ -152,7 +152,7 @@ def compare_dataframes(df1: pd.DataFrame, df2: pd.DataFrame, display_columns: li

  # Check for differences in common columns
  for column in common_columns:
- if pd.api.types.is_string_dtype(df1[column]) or pd.api.types.is_string_dtype(df2[column]):
+ if pd.api.types.is_string_dtype(df1[column]) and pd.api.types.is_string_dtype(df2[column]):
  # String comparison with NaNs treated as equal
  differences = ~(df1[column].fillna("") == df2[column].fillna(""))
  elif pd.api.types.is_float_dtype(df1[column]) or pd.api.types.is_float_dtype(df2[column]):
@@ -161,8 +161,8 @@ def compare_dataframes(df1: pd.DataFrame, df2: pd.DataFrame, display_columns: li
  pd.isna(df1[column]) & pd.isna(df2[column])
  )
  else:
- # Other types (e.g., int) with NaNs treated as equal
- differences = ~(df1[column].fillna(0) == df2[column].fillna(0))
+ # Other types (int, Int64, etc.) - compare with NaNs treated as equal
+ differences = (df1[column] != df2[column]) & ~(pd.isna(df1[column]) & pd.isna(df2[column]))

  # If differences exist, display them
  if differences.any():
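A small sketch (not from the package) of the comparison idiom in the new else branch, showing that entries missing in both frames are not flagged as differences while real value changes are:

import pandas as pd

a = pd.Series([1, 2, None], dtype="Int64")
b = pd.Series([1, 3, None], dtype="Int64")

# Flag rows only where values genuinely differ; both-missing rows count as equal
diff = (a != b) & ~(pd.isna(a) & pd.isna(b))
print(diff.tolist())  # [False, True, False]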
@@ -212,6 +212,14 @@ def _calculate_shap_values(workbench_model, sample_df: pd.DataFrame = None):
  log.error("No XGBoost model found in the artifact.")
  return None, None, None, None

+ # Get the booster (SHAP requires the booster, not the sklearn wrapper)
+ if hasattr(xgb_model, "get_booster"):
+ # Full sklearn model - extract the booster
+ booster = xgb_model.get_booster()
+ else:
+ # Already a booster
+ booster = xgb_model
+
  # Load category mappings if available
  category_mappings = load_category_mappings_from_s3(model_artifact_uri)

@@ -229,8 +237,8 @@ def _calculate_shap_values(workbench_model, sample_df: pd.DataFrame = None):
  # Create a DMatrix with categorical support
  dmatrix = xgb.DMatrix(X, enable_categorical=True)

- # Use XGBoost's built-in SHAP calculation
- shap_values = xgb_model.predict(dmatrix, pred_contribs=True, strict_shape=True)
+ # Use XGBoost's built-in SHAP calculation (booster method, not sklearn)
+ shap_values = booster.predict(dmatrix, pred_contribs=True, strict_shape=True)
  features_with_bias = features + ["bias"]

  # Now we need to subset the columns based on top 10 SHAP values
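A minimal standalone sketch (not from the package) of why the booster is needed here: the sklearn wrapper's predict() does not expose pred_contribs, but the underlying Booster does; the toy dataset is illustrative:

import numpy as np
import pandas as pd
import xgboost as xgb

# Tiny illustrative regression dataset
X = pd.DataFrame({"f0": np.random.rand(50), "f1": np.random.rand(50)})
y = 2 * X["f0"] + X["f1"]

model = xgb.XGBRegressor(n_estimators=10).fit(X, y)

# Extract the Booster from the sklearn wrapper and ask for per-feature contributions
booster = model.get_booster()
contribs = booster.predict(xgb.DMatrix(X), pred_contribs=True)

# One column per feature plus a trailing bias column
print(contribs.shape)  # (50, 3)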
@@ -181,9 +181,6 @@ def logging_setup(color_logs=True):
  log.debug("Debugging enabled via WORKBENCH_DEBUG environment variable.")
  else:
  log.setLevel(logging.INFO)
- # Note: Not using the ThrottlingFilter for now
- # throttle_filter = ThrottlingFilter(rate_seconds=5)
- # handler.addFilter(throttle_filter)

  # Suppress specific logger
  logging.getLogger("sagemaker.config").setLevel(logging.WARNING)
@@ -12,7 +12,7 @@ class WorkbenchSQS:
  self.log = logging.getLogger("workbench")
  self.queue_url = queue_url

- # Grab a Workbench Session (this allows us to assume the Workbench-ExecutionRole)
+ # Grab a Workbench Session
  self.boto3_session = AWSAccountClamp().boto3_session
  print(self.boto3_session)