workbench 0.8.184__py3-none-any.whl → 0.8.186__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -475,17 +475,20 @@ class EndpointCore(Artifact):
         training_df = fs.view("training").pull_dataframe()
 
         # Run inference on the endpoint to get UQ outputs
-        full_inference_df = self.inference(training_df)
+        uq_df = self.inference(training_df)
 
         # Identify UQ-specific columns (quantiles and prediction_std)
-        uq_columns = [col for col in full_inference_df.columns if col.startswith("q_") or col == "prediction_std"]
+        uq_columns = [col for col in uq_df.columns if col.startswith("q_") or col == "prediction_std"]
 
         # Merge UQ columns with out-of-fold predictions
         if uq_columns:
-            # Keep id_column and UQ columns, drop 'prediction' to avoid conflict
-            merge_columns = [id_column] + uq_columns
-            uq_df = full_inference_df[merge_columns]
+            # Keep id_column and UQ columns, drop 'prediction' to avoid conflict when merging
+            uq_df = uq_df[[id_column] + uq_columns]
 
+            # Drop duplicates in uq_df based on id_column
+            uq_df = uq_df.drop_duplicates(subset=[id_column])
+
+            # Merge UQ columns into out_of_fold_df
             out_of_fold_df = pd.merge(out_of_fold_df, uq_df, on=id_column, how="left")
             additional_columns = uq_columns
             self.log.info(f"Added UQ columns: {', '.join(additional_columns)}")
@@ -990,9 +993,9 @@ class EndpointCore(Artifact):
         self.upsert_workbench_meta({"workbench_input": input})
 
     def delete(self):
-        """ "Delete an existing Endpoint: Underlying Models, Configuration, and Endpoint"""
+        """Delete an existing Endpoint: Underlying Models, Configuration, and Endpoint"""
         if not self.exists():
-            self.log.warning(f"Trying to delete an Model that doesn't exist: {self.name}")
+            self.log.warning(f"Trying to delete an Endpoint that doesn't exist: {self.name}")
 
         # Remove this endpoint from the list of registered endpoints
         self.log.info(f"Removing {self.name} from the list of registered endpoints...")
@@ -899,7 +899,7 @@ class ModelCore(Artifact):
     def delete(self):
         """Delete the Model Packages and the Model Group"""
         if not self.exists():
-            self.log.warning(f"Trying to delete an Model that doesn't exist: {self.name}")
+            self.log.warning(f"Trying to delete a Model that doesn't exist: {self.name}")
 
         # Call the Class Method to delete the Model Group
         ModelCore.managed_delete(model_group_name=self.name)
@@ -327,9 +327,36 @@ class PandasToFeatures(Transform):
         self.delete_existing()
         self.output_feature_group = self.create_feature_group()
 
+    def mac_spawn_hack(self):
+        """Workaround for macOS Tahoe fork/spawn issue with SageMaker FeatureStore ingest.
+
+        See: https://github.com/aws/sagemaker-python-sdk/issues/5312
+        macOS Tahoe 26+ has issues with forked processes creating boto3 sessions.
+        This forces spawn mode on macOS to avoid the hang.
+        """
+        import platform
+
+        if platform.system() == "Darwin":  # macOS
+            self.log.warning("macOS detected, forcing 'spawn' mode for multiprocessing (Tahoe hang workaround)")
+            import multiprocessing
+
+            try:
+                import multiprocess
+
+                multiprocess.set_start_method("spawn", force=True)
+            except (RuntimeError, ImportError):
+                pass  # Already set or multiprocess not available
+            try:
+                multiprocessing.set_start_method("spawn", force=True)
+            except RuntimeError:
+                pass  # Already set
+
     def transform_impl(self):
         """Transform Implementation: Ingest the data into the Feature Group"""
 
+        # Workaround for macOS Tahoe hang issue
+        self.mac_spawn_hack()
+
         # Now we actually push the data into the Feature Group (called ingestion)
         self.log.important(f"Ingesting rows into Feature Group {self.output_name}...")
         ingest_manager = self.output_feature_group.ingest(self.output_df, max_workers=8, max_processes=4, wait=False)
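
For context on the workaround: Python's multiprocessing locks in one start method per process, and "fork" copies the parent's state into child workers, which can deadlock when those children create network clients such as boto3 sessions. A standalone sketch of the same guard, independent of the Workbench classes (function name is illustrative):

    import multiprocessing
    import platform

    def force_spawn_on_macos() -> None:
        """Prefer 'spawn' over 'fork' on macOS, where forked workers that
        create boto3 sessions have been reported to hang."""
        if platform.system() == "Darwin":  # macOS
            try:
                multiprocessing.set_start_method("spawn", force=True)
            except RuntimeError:
                pass  # start method already fixed elsewhere

The double try/except in the diff covers both the stdlib multiprocessing module and the third-party multiprocess package, whichever one the ingest path ends up using.
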
@@ -466,7 +466,7 @@ def predict_fn(df, models) -> pd.DataFrame:
     df["q_50"] = df["prediction"]
 
     # Calculate a pseudo-standard deviation from the 68% interval width
-    df["prediction_std"] = (df["q_84"] - df["q_16"]) / 2.0
+    df["prediction_std"] = (df["q_84"] - df["q_16"]).abs() / 2.0
 
     # Reorder the quantile columns for easier reading
     quantile_cols = ["q_025", "q_05", "q_10", "q_16", "q_25", "q_75", "q_84", "q_90", "q_95", "q_975"]
@@ -466,7 +466,7 @@ def predict_fn(df, models) -> pd.DataFrame:
     df["q_50"] = df["prediction"]
 
     # Calculate a pseudo-standard deviation from the 68% interval width
-    df["prediction_std"] = (df["q_84"] - df["q_16"]) / 2.0
+    df["prediction_std"] = (df["q_84"] - df["q_16"]).abs() / 2.0
 
     # Reorder the quantile columns for easier reading
     quantile_cols = ["q_025", "q_05", "q_10", "q_16", "q_25", "q_75", "q_84", "q_90", "q_95", "q_975"]
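
Both UQ templates (the generated model script and the MAPIE template, per the RECORD changes below) apply the same fix. The underlying identity: for a roughly Gaussian predictive distribution, the 16th and 84th percentiles sit about one standard deviation either side of the mean, so q_84 - q_16 ≈ 2σ. The added .abs() guards against quantile crossing (a fitted q_16 landing above q_84), which would otherwise yield a negative "standard deviation". A quick numpy sanity check of the approximation:

    import numpy as np

    rng = np.random.default_rng(42)
    samples = rng.normal(loc=0.0, scale=2.0, size=100_000)

    # ±1 sigma corresponds to the 15.87th and 84.13th percentiles
    q_16, q_84 = np.percentile(samples, [15.87, 84.13])
    pseudo_std = abs(q_84 - q_16) / 2.0
    print(pseudo_std)  # ~2.0, close to the true scale
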
@@ -13,14 +13,21 @@ cm = ConfigManager()
 workbench_bucket = cm.get_config("WORKBENCH_BUCKET")
 
 
-def submit_to_sqs(script_path: str, size: str = "small", realtime: bool = False, recreate: bool = False) -> None:
+def submit_to_sqs(
+    script_path: str, size: str = "small", realtime: bool = False, dt: bool = False, promote: bool = False
+) -> None:
     """
     Upload script to S3 and submit message to SQS queue for processing.
+
     Args:
         script_path: Local path to the ML pipeline script
         size: Job size tier - "small" (default), "medium", or "large"
-        realtime: If True, sets serverless=False for real-time processing (default: False, meaning serverless=True)
-        recreate: If True, sets RECREATE=True in environment (default: False)
+        realtime: If True, sets serverless=False for real-time processing (default: False)
+        dt: If True, sets DT=True in environment (default: False)
+        promote: If True, sets PROMOTE=True in environment (default: False)
+
+    Raises:
+        ValueError: If size is invalid or script file not found
     """
     print(f"\n{'=' * 60}")
     print("🚀 SUBMITTING ML PIPELINE JOB")
@@ -36,7 +43,8 @@ def submit_to_sqs(script_path: str, size: str = "small", realtime: bool = False,
     print(f"📄 Script: {script_file.name}")
     print(f"📏 Size tier: {size}")
     print(f"⚡ Mode: {'Real-time' if realtime else 'Serverless'} (serverless={'False' if realtime else 'True'})")
-    print(f"🔄 Recreate: {recreate}")
+    print(f"🔄 DynamicTraining: {dt}")
+    print(f"🆕 Promote: {promote}")
     print(f"🪣 Bucket: {workbench_bucket}")
     sqs = AWSAccountClamp().boto3_session.client("sqs")
     script_name = script_file.name
@@ -94,14 +102,15 @@ def submit_to_sqs(script_path: str, size: str = "small", realtime: bool = False,
     message = {"script_path": s3_path, "size": size}
 
     # Set environment variables
-    message["environment"] = {"SERVERLESS": "False" if realtime else "True"}
-    if recreate:
-        message["environment"]["RECREATE"] = "True"
-
-    print("\n📨 Sending message to SQS...")
+    message["environment"] = {
+        "SERVERLESS": "False" if realtime else "True",
+        "DT": str(dt),
+        "PROMOTE": str(promote),
+    }
 
     # Send the message to SQS
     try:
+        print("\n📨 Sending message to SQS...")
         response = sqs.send_message(
             QueueUrl=queue_url,
             MessageBody=json.dumps(message, indent=2),
@@ -121,7 +130,8 @@ def submit_to_sqs(script_path: str, size: str = "small", realtime: bool = False,
     print(f"📄 Script: {script_name}")
     print(f"📏 Size: {size}")
     print(f"⚡ Mode: {'Real-time' if realtime else 'Serverless'} (SERVERLESS={'False' if realtime else 'True'})")
-    print(f"🔄 Recreate: {recreate}")
+    print(f"🔄 DynamicTraining: {dt}")
+    print(f"🆕 Promote: {promote}")
     print(f"🆔 Message ID: {message_id}")
     print("\n🔍 MONITORING LOCATIONS:")
     print(f"  • SQS Queue: AWS Console → SQS → {queue_name}")
@@ -144,13 +154,13 @@ def main():
         help="Create realtime endpoints (default is serverless)",
     )
     parser.add_argument(
-        "--recreate",
+        "--dt",
         action="store_true",
-        help="Set RECREATE=True (will force recreation of resources)",
+        help="Set DT=True (models and endpoints will have '-dt' suffix)",
     )
     args = parser.parse_args()
     try:
-        submit_to_sqs(args.script_file, args.size, realtime=args.realtime, recreate=args.recreate)
+        submit_to_sqs(args.script_file, args.size, realtime=args.realtime, dt=args.dt)
     except Exception as e:
         print(f"\n❌ ERROR: {e}")
         log.error(f"Error: {e}")
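
Taken together, the script now always sends all three environment flags (previously RECREATE was attached only when set), while main() wires up only --dt, so promote keeps its False default for CLI submissions. Based on the code above, a submitted message body would look roughly like this (bucket and key are placeholders, since the s3_path construction is not shown in this diff):

    {
      "script_path": "s3://<workbench-bucket>/<script-prefix>/my_pipeline.py",
      "size": "small",
      "environment": {
        "SERVERLESS": "True",
        "DT": "False",
        "PROMOTE": "False"
      }
    }
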
@@ -386,6 +386,106 @@ def cross_fold_inference(workbench_model: Any, nfolds: int = 5) -> Tuple[Dict[st
     return metrics_dict, predictions_df
 
 
+def leave_one_out_inference(workbench_model: Any) -> pd.DataFrame:
+    """
+    Performs leave-one-out cross-validation (parallelized).
+    For datasets > 1000 rows, first identifies the 1000 worst predictions via 10-fold CV,
+    then performs true leave-one-out on those samples.
+    Each model trains on ALL data except one sample.
+    """
+    from workbench.api import FeatureSet
+    from joblib import Parallel, delayed
+    from tqdm import tqdm
+
+    def train_and_predict_one(model_params, is_classifier, X, y, train_idx, val_idx):
+        """Train on train_idx, predict on val_idx."""
+        model = xgb.XGBClassifier(**model_params) if is_classifier else xgb.XGBRegressor(**model_params)
+        model.fit(X[train_idx], y[train_idx])
+        return model.predict(X[val_idx])[0]
+
+    # Load model and get params
+    model_artifact_uri = workbench_model.model_data_url()
+    loaded_model = xgboost_model_from_s3(model_artifact_uri)
+    if loaded_model is None:
+        log.error("No XGBoost model found in the artifact.")
+        return pd.DataFrame()
+
+    if isinstance(loaded_model, (xgb.XGBClassifier, xgb.XGBRegressor)):
+        is_classifier = isinstance(loaded_model, xgb.XGBClassifier)
+        model_params = loaded_model.get_params()
+    elif isinstance(loaded_model, xgb.Booster):
+        log.warning("Deprecated: Loaded model is a Booster, wrapping in sklearn model.")
+        is_classifier = workbench_model.model_type.value == "classifier"
+        model_params = {"enable_categorical": True}
+    else:
+        log.error(f"Unexpected model type: {type(loaded_model)}")
+        return pd.DataFrame()
+
+    # Load and prepare data
+    fs = FeatureSet(workbench_model.get_input())
+    df = fs.view("training").pull_dataframe()
+    id_col = fs.id_column
+    target_col = workbench_model.target()
+    feature_cols = workbench_model.features()
+
+    # Convert string features to categorical
+    for col in feature_cols:
+        if df[col].dtype in ["object", "string"]:
+            df[col] = df[col].astype("category")
+
+    # Determine which samples to run LOO on
+    if len(df) > 1000:
+        log.important(f"Dataset has {len(df)} rows. Running 10-fold CV to identify top 1000 worst predictions...")
+        _, predictions_df = cross_fold_inference(workbench_model, nfolds=10)
+        predictions_df["residual_abs"] = np.abs(predictions_df[target_col] - predictions_df["prediction"])
+        worst_samples = predictions_df.nlargest(1000, "residual_abs")
+        worst_ids = worst_samples[id_col].values
+        loo_indices = df[df[id_col].isin(worst_ids)].index.values
+        log.important(f"Running leave-one-out CV on 1000 worst samples. Each model trains on {len(df)-1} rows...")
+    else:
+        log.important(f"Running leave-one-out CV on all {len(df)} samples...")
+        loo_indices = df.index.values
+
+    # Prepare full dataset for training
+    X_full = df[feature_cols].values
+    y_full = df[target_col].values
+
+    # Encode target if classifier
+    label_encoder = LabelEncoder() if is_classifier else None
+    if label_encoder:
+        y_full = label_encoder.fit_transform(y_full)
+
+    # Generate LOO splits
+    splits = []
+    for loo_idx in loo_indices:
+        train_idx = np.delete(np.arange(len(X_full)), loo_idx)
+        val_idx = np.array([loo_idx])
+        splits.append((train_idx, val_idx))
+
+    # Parallel execution
+    predictions = Parallel(n_jobs=4)(
+        delayed(train_and_predict_one)(model_params, is_classifier, X_full, y_full, train_idx, val_idx)
+        for train_idx, val_idx in tqdm(splits, desc="LOO CV")
+    )
+
+    # Build results dataframe
+    predictions_array = np.array(predictions)
+    if label_encoder:
+        predictions_array = label_encoder.inverse_transform(predictions_array.astype(int))
+
+    predictions_df = pd.DataFrame(
+        {
+            id_col: df.loc[loo_indices, id_col].values,
+            target_col: df.loc[loo_indices, target_col].values,
+            "prediction": predictions_array,
+        }
+    )
+
+    predictions_df["residual_abs"] = np.abs(predictions_df[target_col] - predictions_df["prediction"])
+
+    return predictions_df
+
+
 if __name__ == "__main__":
     """Exercise the Model Utilities"""
     from workbench.api import Model, FeatureSet
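
The hand-rolled split generation above (np.delete over a positional range) matches scikit-learn's LeaveOneOut restricted to the selected rows, assuming the training view carries a plain RangeIndex, which the positional indexing in the diff also requires. An equivalent sketch using the sklearn API:

    import numpy as np
    from sklearn.model_selection import LeaveOneOut

    X = np.arange(10).reshape(-1, 1)  # stand-in feature matrix
    loo_indices = np.array([2, 5, 7])  # e.g. the worst-residual rows

    splits = [
        (train_idx, val_idx)
        for train_idx, val_idx in LeaveOneOut().split(X)
        if val_idx[0] in loo_indices
    ]
    # Each split trains on all rows but one and validates on exactly that one
    assert all(len(t) == len(X) - 1 and len(v) == 1 for t, v in splits)
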
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: workbench
-Version: 0.8.184
+Version: 0.8.186
 Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
 Author-email: SuperCowPowers LLC <support@supercowpowers.com>
 License-Expression: MIT
@@ -54,9 +54,9 @@ workbench/core/artifacts/cached_artifact_mixin.py,sha256=ngqFLZ4cQx_TFouXZgXZQsv
 workbench/core/artifacts/data_capture_core.py,sha256=q8f79rRTYiZ7T4IQRWXl8ZvPpcvZyNxYERwvo8o0OQc,14858
 workbench/core/artifacts/data_source_abstract.py,sha256=5IRCzFVK-17cd4NXPMRfx99vQAmQ0WHE5jcm5RfsVTg,10619
 workbench/core/artifacts/data_source_factory.py,sha256=YL_tA5fsgubbB3dPF6T4tO0rGgz-6oo3ge4i_YXVC-M,2380
-workbench/core/artifacts/endpoint_core.py,sha256=b3cNj1UnlHmQdG1C8bmD2jWpD4h-O6F-75fWSm01uGU,51850
+workbench/core/artifacts/endpoint_core.py,sha256=FUBs8z5l0D3UsVqmQzjlzkVDzN0fhMcqKXTLAAG1gmc,51966
 workbench/core/artifacts/feature_set_core.py,sha256=7b1o_PzxtwaYC-W2zxlkltiO0fYULA8CVGWwHNmqgtI,31457
-workbench/core/artifacts/model_core.py,sha256=wjoa2GQnzrrTM-E2VgYZHT9Ixebl3LaKbJL0YvEdrJY,51546
+workbench/core/artifacts/model_core.py,sha256=x_FloG9bMUTqUBDYdfl68AaakwNMBCl-BcP-1E9ZpuQ,51545
 workbench/core/artifacts/monitor_core.py,sha256=M307yz7tEzOEHgv-LmtVy9jKjSbM98fHW3ckmNYrwlU,27897
 workbench/core/cloud_platform/cloud_meta.py,sha256=-g4-LTC3D0PXb3VfaXdLR1ERijKuHdffeMK_zhD-koQ,8809
 workbench/core/cloud_platform/aws/README.md,sha256=QT5IQXoUHbIA0qQ2wO6_2P2lYjYQFVYuezc22mWY4i8,97
@@ -109,7 +109,7 @@ workbench/core/transforms/pandas_transforms/__init__.py,sha256=xL4MT8-fZ1SFqDbTL
 workbench/core/transforms/pandas_transforms/data_to_pandas.py,sha256=sJHPeuNF8Q8aQqgRnkdWkyvur5cbggdUVIwR-xF3Dlo,3621
 workbench/core/transforms/pandas_transforms/features_to_pandas.py,sha256=af6xdPt2V4zhh-SzQa_UYxdmNMzMLXbrbsznV5QoIJg,3441
 workbench/core/transforms/pandas_transforms/pandas_to_data.py,sha256=cqo6hQmzUGUFACvNuVLZQdgrlXrQIu4NjqK-ujPmoIc,9181
-workbench/core/transforms/pandas_transforms/pandas_to_features.py,sha256=XiUz2BqOX4N34g6hvFvcLswhkEouyU0AjyIy9EGv2zg,20440
+workbench/core/transforms/pandas_transforms/pandas_to_features.py,sha256=mj00L40PXhw-JHG2SZe53yJAzicgn4xuM2VbmOY-wsM,21480
 workbench/core/transforms/pandas_transforms/pandas_to_features_chunked.py,sha256=0R8mQlWfbIlTVmYUmrtu2gsw0AE815k6kqPgpd0bmyQ,4422
 workbench/core/views/__init__.py,sha256=UZJMAJBCMVM3uSYmnFg8c2LWtdu9-479WNAdVMIohAc,962
 workbench/core/views/column_subset_view.py,sha256=vGDKTTGrPIY-IFOeWvudJrhKiq0OjWDp5rTuuj-X40U,4261
@@ -156,8 +156,8 @@ workbench/model_scripts/pytorch_model/requirements.txt,sha256=ICS5nW0wix44EJO2tJ
 workbench/model_scripts/scikit_learn/generated_model_script.py,sha256=c73ZpJBlU5k13Nx-ZDkLXu7da40CYyhwjwwmuPq6uLg,12870
 workbench/model_scripts/scikit_learn/requirements.txt,sha256=aVvwiJ3LgBUhM_PyFlb2gHXu_kpGPho3ANBzlOkfcvs,107
 workbench/model_scripts/scikit_learn/scikit_learn.template,sha256=QQvqx-eX9ZTbYmyupq6R6vIQwosmsmY_MRBPaHyfjdk,12586
-workbench/model_scripts/uq_models/generated_model_script.py,sha256=OS_ufhyLR9IQcyRV2ukO_CfDnjp60UE9kwcAN4RY0Is,21191
-workbench/model_scripts/uq_models/mapie.template,sha256=8VzoP-Wp3ECVIDqXVkiTS6bwmn3cd3dDZ2WjYPzXTi8,18955
+workbench/model_scripts/uq_models/generated_model_script.py,sha256=U4_41APyNISnJ3EHnXiaSIEdb3E1M1JT7ECNjsoX4fI,21197
+workbench/model_scripts/uq_models/mapie.template,sha256=2HIwB_658IsZiLIV1RViIZBIGgXxDsJPZinDUu8SchU,18961
 workbench/model_scripts/uq_models/requirements.txt,sha256=fw7T7t_YJAXK3T6Ysbesxh_Agx_tv0oYx72cEBTqRDY,98
 workbench/model_scripts/xgb_model/generated_model_script.py,sha256=Tbn7EMXxZZO8rDdKQ5fYCbpltACsMXNvuusLL9p-U5c,22319
 workbench/model_scripts/xgb_model/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
@@ -169,7 +169,7 @@ workbench/resources/signature_verify_pub.pem,sha256=V3-u-3_z2PH-805ybkKvzDOBwAbv
 workbench/scripts/check_double_bond_stereo.py,sha256=p5hnL54Weq77ES0HCELq9JeoM-PyUGkvVSeWYF2dKyo,7776
 workbench/scripts/glue_launcher.py,sha256=bIKQvfGxpAhzbeNvTnHfRW_5kQhY-169_868ZnCejJk,10692
 workbench/scripts/ml_pipeline_batch.py,sha256=1T5JnLlUJR7bwAGBLHmLPOuj1xFRqVIQX8PsuDhHy8o,4907
-workbench/scripts/ml_pipeline_sqs.py,sha256=s1861q4zuvV-aSOhwy8xW1xid9yDVNGioDHxba80Qpg,6185
+workbench/scripts/ml_pipeline_sqs.py,sha256=LxZyaNKmwt3L7SURyO3lwO64FQ0Y0s4HwKPkkahlhUU,6395
 workbench/scripts/monitor_cloud_watch.py,sha256=s7MY4bsHts0nup9G0lWESCvgJZ9Mw1Eo-c8aKRgLjMw,9235
 workbench/scripts/redis_expire.py,sha256=DxI_RKSNlrW2BsJZXcsSbaWGBgPZdPhtzHjV9SUtElE,1120
 workbench/scripts/redis_report.py,sha256=iaJSuGPyLCs6e0TMcZDoT0YyJ43xJ1u74YD8FLnnUg4,990
@@ -242,7 +242,7 @@ workbench/utils/workbench_cache.py,sha256=IQchxB81iR4eVggHBxUJdXxUCRkqWz1jKe5gxN
 workbench/utils/workbench_event_bridge.py,sha256=z1GmXOB-Qs7VOgC6Hjnp2DI9nSEWepaSXejACxTIR7o,4150
 workbench/utils/workbench_logging.py,sha256=WCuMWhQwibrvcGAyj96h2wowh6dH7zNlDJ7sWUzdCeI,10263
 workbench/utils/workbench_sqs.py,sha256=RwM80z7YWwdtMaCKh7KWF8v38f7eBRU7kyC7ZhTRuI0,2072
-workbench/utils/xgboost_model_utils.py,sha256=wSUrs9VlftaTZ-cWZMEeHY6TmcLvxwrKk4S4lr7kWWw,17482
+workbench/utils/xgboost_model_utils.py,sha256=rPfbl5sCZOttkilAHSmYU_u9JIDeYsGi2JrEs-zzcwI,21597
 workbench/utils/chem_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workbench/utils/chem_utils/fingerprints.py,sha256=Qvs8jaUwguWUq3Q3j695MY0t0Wk3BvroW-oWBwalMUo,5255
 workbench/utils/chem_utils/misc.py,sha256=Nevf8_opu-uIPrv_1_0ubuFVVo2_fGUkMoLAHB3XAeo,7372
@@ -287,9 +287,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
 workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
 workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
 workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
-workbench-0.8.184.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
-workbench-0.8.184.dist-info/METADATA,sha256=3B5uP_y9cOctNaxDK6Z9Fwfcwzf7p9f3HyjJ35B-nqY,9210
-workbench-0.8.184.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-workbench-0.8.184.dist-info/entry_points.txt,sha256=zPFPruY9uayk8-wsKrhfnIyIB6jvZOW_ibyllEIsLWo,356
-workbench-0.8.184.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
-workbench-0.8.184.dist-info/RECORD,,
+workbench-0.8.186.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+workbench-0.8.186.dist-info/METADATA,sha256=r5n70ah6hgyfQfE3oM0gseinKWI4n6DCpvB7ZJXFLNM,9210
+workbench-0.8.186.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+workbench-0.8.186.dist-info/entry_points.txt,sha256=zPFPruY9uayk8-wsKrhfnIyIB6jvZOW_ibyllEIsLWo,356
+workbench-0.8.186.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+workbench-0.8.186.dist-info/RECORD,,