workbench 0.8.201__py3-none-any.whl → 0.8.204__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. workbench/api/df_store.py +17 -108
  2. workbench/api/feature_set.py +41 -7
  3. workbench/api/parameter_store.py +3 -52
  4. workbench/core/artifacts/artifact.py +5 -5
  5. workbench/core/artifacts/df_store_core.py +114 -0
  6. workbench/core/artifacts/endpoint_core.py +184 -75
  7. workbench/core/artifacts/model_core.py +11 -7
  8. workbench/core/artifacts/parameter_store_core.py +98 -0
  9. workbench/core/transforms/features_to_model/features_to_model.py +27 -13
  10. workbench/core/transforms/model_to_endpoint/model_to_endpoint.py +11 -0
  11. workbench/core/transforms/pandas_transforms/pandas_to_features.py +11 -2
  12. workbench/model_scripts/chemprop/chemprop.template +312 -293
  13. workbench/model_scripts/chemprop/generated_model_script.py +316 -297
  14. workbench/model_scripts/custom_models/uq_models/ensemble_xgb.template +11 -5
  15. workbench/model_scripts/custom_models/uq_models/meta_uq.template +11 -5
  16. workbench/model_scripts/custom_models/uq_models/ngboost.template +11 -5
  17. workbench/model_scripts/ensemble_xgb/ensemble_xgb.template +11 -5
  18. workbench/model_scripts/pytorch_model/generated_model_script.py +278 -128
  19. workbench/model_scripts/pytorch_model/pytorch.template +273 -123
  20. workbench/model_scripts/uq_models/generated_model_script.py +20 -11
  21. workbench/model_scripts/uq_models/mapie.template +17 -8
  22. workbench/model_scripts/xgb_model/generated_model_script.py +38 -9
  23. workbench/model_scripts/xgb_model/xgb_model.template +34 -5
  24. workbench/resources/open_source_api.key +1 -1
  25. workbench/utils/chemprop_utils.py +38 -1
  26. workbench/utils/pytorch_utils.py +38 -8
  27. workbench/web_interface/components/model_plot.py +7 -1
  28. {workbench-0.8.201.dist-info → workbench-0.8.204.dist-info}/METADATA +2 -2
  29. {workbench-0.8.201.dist-info → workbench-0.8.204.dist-info}/RECORD +33 -33
  30. workbench/core/cloud_platform/aws/aws_df_store.py +0 -404
  31. workbench/core/cloud_platform/aws/aws_parameter_store.py +0 -296
  32. {workbench-0.8.201.dist-info → workbench-0.8.204.dist-info}/WHEEL +0 -0
  33. {workbench-0.8.201.dist-info → workbench-0.8.204.dist-info}/entry_points.txt +0 -0
  34. {workbench-0.8.201.dist-info → workbench-0.8.204.dist-info}/licenses/LICENSE +0 -0
  35. {workbench-0.8.201.dist-info → workbench-0.8.204.dist-info}/top_level.txt +0 -0
@@ -5,7 +5,8 @@ from xgboost import XGBRegressor
5
5
  from sklearn.model_selection import train_test_split
6
6
 
7
7
  # Model Performance Scores
8
- from sklearn.metrics import mean_absolute_error, r2_score, root_mean_squared_error
8
+ from sklearn.metrics import mean_absolute_error, median_absolute_error, r2_score, root_mean_squared_error
9
+ from scipy.stats import spearmanr
9
10
 
10
11
  from io import StringIO
11
12
  import json
@@ -277,10 +278,15 @@ if __name__ == "__main__":
277
278
  xgb_mae = mean_absolute_error(y_validate, y_pred_xgb)
278
279
  xgb_r2 = r2_score(y_validate, y_pred_xgb)
279
280
 
281
+ xgb_medae = median_absolute_error(y_validate, y_pred_xgb)
282
+ xgb_spearman = spearmanr(y_validate, y_pred_xgb).correlation
283
+
280
284
  print(f"\nXGBoost Point Prediction Performance:")
281
- print(f"RMSE: {xgb_rmse:.3f}")
282
- print(f"MAE: {xgb_mae:.3f}")
283
- print(f"R2: {xgb_r2:.3f}")
285
+ print(f"rmse: {xgb_rmse:.3f}")
286
+ print(f"mae: {xgb_mae:.3f}")
287
+ print(f"medae: {xgb_medae:.3f}")
288
+ print(f"r2: {xgb_r2:.3f}")
289
+ print(f"spearmanr: {xgb_spearman:.3f}")
284
290
 
285
291
  # Define confidence levels we want to model
286
292
  confidence_levels = [0.50, 0.68, 0.80, 0.90, 0.95] # 50%, 68%, 80%, 90%, 95% confidence intervals
@@ -336,11 +342,14 @@ if __name__ == "__main__":
336
342
  coverage = np.mean((y_validate >= y_pis[:, 0, 0]) & (y_validate <= y_pis[:, 1, 0]))
337
343
  print(f" Coverage: Target={confidence_level * 100:.0f}%, Empirical={coverage * 100:.1f}%")
338
344
 
345
+ support = len(df_val)
339
346
  print(f"\nOverall Model Performance Summary:")
340
- print(f"XGBoost RMSE: {xgb_rmse:.3f}")
341
- print(f"XGBoost MAE: {xgb_mae:.3f}")
342
- print(f"XGBoost R2: {xgb_r2:.3f}")
343
- print(f"NumRows: {len(df_val)}")
347
+ print(f"rmse: {xgb_rmse:.3f}")
348
+ print(f"mae: {xgb_mae:.3f}")
349
+ print(f"medae: {xgb_medae:.3f}")
350
+ print(f"r2: {xgb_r2:.3f}")
351
+ print(f"spearmanr: {xgb_spearman:.3f}")
352
+ print(f"support: {support}")
344
353
 
345
354
  # Analyze interval widths across confidence levels
346
355
  print(f"\nInterval Width Analysis:")
@@ -6,11 +6,13 @@ import numpy as np
6
6
  # Model Performance Scores
7
7
  from sklearn.metrics import (
8
8
  mean_absolute_error,
9
+ median_absolute_error,
9
10
  r2_score,
10
11
  root_mean_squared_error,
11
12
  precision_recall_fscore_support,
12
13
  confusion_matrix,
13
14
  )
15
+ from scipy.stats import spearmanr
14
16
 
15
17
  # Classification Encoder
16
18
  from sklearn.preprocessing import LabelEncoder
@@ -26,13 +28,35 @@ import os
26
28
  import pandas as pd
27
29
  from typing import List, Tuple
28
30
 
31
+ # Default Hyperparameters for XGBoost
32
+ DEFAULT_HYPERPARAMETERS = {
33
+ # Core tree parameters
34
+ "n_estimators": 200, # More trees for better signal capture when we have lots of features
35
+ "max_depth": 6, # Medium depth
36
+ "learning_rate": 0.05, # Lower rate with more estimators for smoother learning
37
+
38
+ # Sampling parameters
39
+ "subsample": 0.7, # Moderate row sampling to reduce overfitting
40
+ "colsample_bytree": 0.6, # More aggressive feature sampling given lots of features
41
+ "colsample_bylevel": 0.8, # Additional feature sampling at each tree level
42
+
43
+ # Regularization
44
+ "min_child_weight": 5, # Higher to prevent overfitting on small groups
45
+ "gamma": 0.2, # Moderate pruning - you have real signal so don't over-prune
46
+ "reg_alpha": 0.5, # L1 for feature selection (useful with many features)
47
+ "reg_lambda": 2.0, # Strong L2 to smooth predictions
48
+
49
+ # Random seed
50
+ "random_state": 42,
51
+ }
52
+
29
53
  # Template Parameters
30
54
  TEMPLATE_PARAMS = {
31
- "model_type": "regressor",
32
- "target": "class_number_of_rings",
33
- "features": ['length', 'diameter', 'height', 'whole_weight', 'shucked_weight', 'viscera_weight', 'shell_weight', 'sex'],
55
+ "model_type": "classifier",
56
+ "target": "wine_class",
57
+ "features": ['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium', 'total_phenols', 'flavanoids', 'nonflavanoid_phenols', 'proanthocyanins', 'color_intensity', 'hue', 'od280_od315_of_diluted_wines', 'proline'],
34
58
  "compressed_features": [],
35
- "model_metrics_s3_path": "s3://sandbox-sageworks-artifacts/models/abalone-regression/training",
59
+ "model_metrics_s3_path": "s3://sandbox-sageworks-artifacts/models/wine-classification/training",
36
60
  "train_all_data": False,
37
61
  "hyperparameters": {},
38
62
  }
@@ -208,7 +232,7 @@ if __name__ == "__main__":
208
232
  model_type = TEMPLATE_PARAMS["model_type"]
209
233
  model_metrics_s3_path = TEMPLATE_PARAMS["model_metrics_s3_path"]
210
234
  train_all_data = TEMPLATE_PARAMS["train_all_data"]
211
- hyperparameters = TEMPLATE_PARAMS["hyperparameters"]
235
+ hyperparameters = {**DEFAULT_HYPERPARAMETERS, **TEMPLATE_PARAMS["hyperparameters"]}
212
236
  validation_split = 0.2
213
237
 
214
238
  # Script arguments for input/output directories
@@ -348,11 +372,16 @@ if __name__ == "__main__":
348
372
  # Calculate various model performance metrics (regression)
349
373
  rmse = root_mean_squared_error(y_validate, preds)
350
374
  mae = mean_absolute_error(y_validate, preds)
375
+ medae = median_absolute_error(y_validate, preds)
351
376
  r2 = r2_score(y_validate, preds)
352
- print(f"RMSE: {rmse:.3f}")
353
- print(f"MAE: {mae:.3f}")
354
- print(f"R2: {r2:.3f}")
355
- print(f"NumRows: {len(df_val)}")
377
+ spearman_corr = spearmanr(y_validate, preds).correlation
378
+ support = len(df_val)
379
+ print(f"rmse: {rmse:.3f}")
380
+ print(f"mae: {mae:.3f}")
381
+ print(f"medae: {medae:.3f}")
382
+ print(f"r2: {r2:.3f}")
383
+ print(f"spearmanr: {spearman_corr:.3f}")
384
+ print(f"support: {support}")
356
385
 
357
386
  # Now save the model to the standard place/name
358
387
  joblib.dump(xgb_model, os.path.join(args.model_dir, "xgb_model.joblib"))
@@ -6,11 +6,13 @@ import numpy as np
6
6
  # Model Performance Scores
7
7
  from sklearn.metrics import (
8
8
  mean_absolute_error,
9
+ median_absolute_error,
9
10
  r2_score,
10
11
  root_mean_squared_error,
11
12
  precision_recall_fscore_support,
12
13
  confusion_matrix,
13
14
  )
15
+ from scipy.stats import spearmanr
14
16
 
15
17
  # Classification Encoder
16
18
  from sklearn.preprocessing import LabelEncoder
@@ -26,6 +28,28 @@ import os
26
28
  import pandas as pd
27
29
  from typing import List, Tuple
28
30
 
31
+ # Default Hyperparameters for XGBoost
32
+ DEFAULT_HYPERPARAMETERS = {
33
+ # Core tree parameters
34
+ "n_estimators": 200, # More trees for better signal capture when we have lots of features
35
+ "max_depth": 6, # Medium depth
36
+ "learning_rate": 0.05, # Lower rate with more estimators for smoother learning
37
+
38
+ # Sampling parameters
39
+ "subsample": 0.7, # Moderate row sampling to reduce overfitting
40
+ "colsample_bytree": 0.6, # More aggressive feature sampling given lots of features
41
+ "colsample_bylevel": 0.8, # Additional feature sampling at each tree level
42
+
43
+ # Regularization
44
+ "min_child_weight": 5, # Higher to prevent overfitting on small groups
45
+ "gamma": 0.2, # Moderate pruning - you have real signal so don't over-prune
46
+ "reg_alpha": 0.5, # L1 for feature selection (useful with many features)
47
+ "reg_lambda": 2.0, # Strong L2 to smooth predictions
48
+
49
+ # Random seed
50
+ "random_state": 42,
51
+ }
52
+
29
53
  # Template Parameters
30
54
  TEMPLATE_PARAMS = {
31
55
  "model_type": "{{model_type}}",
@@ -208,7 +232,7 @@ if __name__ == "__main__":
208
232
  model_type = TEMPLATE_PARAMS["model_type"]
209
233
  model_metrics_s3_path = TEMPLATE_PARAMS["model_metrics_s3_path"]
210
234
  train_all_data = TEMPLATE_PARAMS["train_all_data"]
211
- hyperparameters = TEMPLATE_PARAMS["hyperparameters"]
235
+ hyperparameters = {**DEFAULT_HYPERPARAMETERS, **TEMPLATE_PARAMS["hyperparameters"]}
212
236
  validation_split = 0.2
213
237
 
214
238
  # Script arguments for input/output directories
@@ -348,11 +372,16 @@ if __name__ == "__main__":
348
372
  # Calculate various model performance metrics (regression)
349
373
  rmse = root_mean_squared_error(y_validate, preds)
350
374
  mae = mean_absolute_error(y_validate, preds)
375
+ medae = median_absolute_error(y_validate, preds)
351
376
  r2 = r2_score(y_validate, preds)
352
- print(f"RMSE: {rmse:.3f}")
353
- print(f"MAE: {mae:.3f}")
354
- print(f"R2: {r2:.3f}")
355
- print(f"NumRows: {len(df_val)}")
377
+ spearman_corr = spearmanr(y_validate, preds).correlation
378
+ support = len(df_val)
379
+ print(f"rmse: {rmse:.3f}")
380
+ print(f"mae: {mae:.3f}")
381
+ print(f"medae: {medae:.3f}")
382
+ print(f"r2: {r2:.3f}")
383
+ print(f"spearmanr: {spearman_corr:.3f}")
384
+ print(f"support: {support}")
356
385
 
357
386
  # Now save the model to the standard place/name
358
387
  joblib.dump(xgb_model, os.path.join(args.model_dir, "xgb_model.joblib"))
@@ -1 +1 @@
1
- eyJsaWNlbnNlX2lkIjogIk9wZW5fU291cmNlX0xpY2Vuc2UiLCAiY29tcGFueSI6ICIiLCAiYXdzX2FjY291bnRfaWQiOiAiIiwgInRpZXIiOiAiRW50ZXJwcmlzZSBQcm8iLCAiZmVhdHVyZXMiOiBbInBsdWdpbnMiLCAicGFnZXMiLCAidGhlbWVzIiwgInBpcGVsaW5lcyIsICJicmFuZGluZyJdLCAiZXhwaXJlcyI6ICIyMDI2LTAxLTE0In02zCDRy41wKRViRnGmodczFWexLyfXYrJWSuVQQbhbWeRttQRv6zpo9x4O2yBjdRfhb9E7mFUppNiOS_ZGK-bL71nGHt_Mc8niG8jkpvKX9qZ6BqkXF_vzDIOcI8iGiwB3wikeVO4zRLD1AI0U3cgYmIyGXI9QKJ9L7IHyQ0TWqw==
1
+ eyJsaWNlbnNlX2lkIjogIk9wZW5fU291cmNlX0xpY2Vuc2UiLCAiY29tcGFueSI6ICIiLCAiYXdzX2FjY291bnRfaWQiOiAiIiwgInRpZXIiOiAiRW50ZXJwcmlzZSBQcm8iLCAiZmVhdHVyZXMiOiBbInBsdWdpbnMiLCAicGFnZXMiLCAidGhlbWVzIiwgInBpcGVsaW5lcyIsICJicmFuZGluZyJdLCAiZXhwaXJlcyI6ICIyMDI2LTEyLTA1In1IsmpkuybFALADkRj_RfmkQ0LAIsQeXRE7Uoc3DL1UrDr-rSnwu-PDqsKBUkX6jPRFZV3DLxNjBapxPeEIFhfvxvjzz_sc6CwtxNpZ3bPmxSPs2W-j3xZS4-XyEqIilcwSkWh-NU1u27gCuuivn5eiUmIYJGAp0wdVkeE6_Z9dlg==
@@ -22,6 +22,7 @@ from sklearn.preprocessing import LabelEncoder
22
22
 
23
23
  from workbench.utils.model_utils import safe_extract_tarfile
24
24
  from workbench.utils.pandas_utils import expand_proba_column
25
+ from workbench.utils.aws_utils import pull_s3_data
25
26
 
26
27
  log = logging.getLogger("workbench")
27
28
 
@@ -160,7 +161,7 @@ def _build_mpnn_model(
160
161
  # Model hyperparameters with defaults
161
162
  hidden_dim = hyperparameters.get("hidden_dim", 300)
162
163
  depth = hyperparameters.get("depth", 3)
163
- dropout = hyperparameters.get("dropout", 0.0)
164
+ dropout = hyperparameters.get("dropout", 0.1)
164
165
  ffn_hidden_dim = hyperparameters.get("ffn_hidden_dim", 300)
165
166
  ffn_num_layers = hyperparameters.get("ffn_num_layers", 1)
166
167
 
@@ -283,6 +284,42 @@ def _get_n_extra_descriptors(loaded_model: Any) -> int:
283
284
  return 0
284
285
 
285
286
 
287
+ def pull_cv_results(workbench_model: Any) -> Tuple[pd.DataFrame, pd.DataFrame]:
288
+ """Pull cross-validation results from AWS training artifacts.
289
+
290
+ This retrieves the validation predictions and training metrics that were
291
+ saved during model training.
292
+
293
+ Args:
294
+ workbench_model: Workbench model object
295
+
296
+ Returns:
297
+ Tuple of:
298
+ - DataFrame with training metrics
299
+ - DataFrame with validation predictions
300
+ """
301
+ # Get the validation predictions from S3
302
+ s3_path = f"{workbench_model.model_training_path}/validation_predictions.csv"
303
+ predictions_df = pull_s3_data(s3_path)
304
+
305
+ if predictions_df is None:
306
+ raise ValueError(f"No validation predictions found at {s3_path}")
307
+
308
+ log.info(f"Pulled {len(predictions_df)} validation predictions from {s3_path}")
309
+
310
+ # Get training metrics from model metadata
311
+ training_metrics = workbench_model.workbench_meta().get("workbench_training_metrics")
312
+
313
+ if training_metrics is None:
314
+ log.warning(f"No training metrics found in model metadata for {workbench_model.model_name}")
315
+ metrics_df = pd.DataFrame({"error": [f"No training metrics found for {workbench_model.model_name}"]})
316
+ else:
317
+ metrics_df = pd.DataFrame.from_dict(training_metrics)
318
+ log.info(f"Metrics summary:\n{metrics_df.to_string(index=False)}")
319
+
320
+ return metrics_df, predictions_df
321
+
322
+
286
323
  def cross_fold_inference(
287
324
  workbench_model: Any,
288
325
  nfolds: int = 5,
@@ -29,6 +29,7 @@ from sklearn.preprocessing import LabelEncoder
29
29
 
30
30
  from workbench.utils.model_utils import safe_extract_tarfile
31
31
  from workbench.utils.pandas_utils import expand_proba_column
32
+ from workbench.utils.aws_utils import pull_s3_data
32
33
 
33
34
  log = logging.getLogger("workbench")
34
35
 
@@ -78,18 +79,11 @@ def load_pytorch_model_artifacts(model_dir: str) -> Tuple[Any, dict]:
78
79
  if not os.path.exists(model_path):
79
80
  raise FileNotFoundError(f"No tabular_model directory found in {model_dir}")
80
81
 
81
- # Remove callbacks.sav if it exists - it's not needed for inference and causes
82
- # GPU->CPU loading issues (joblib.load doesn't support map_location)
83
- callbacks_path = os.path.join(model_path, "callbacks.sav")
84
- if os.path.exists(callbacks_path):
85
- os.remove(callbacks_path)
86
-
87
82
  # PyTorch Tabular needs write access, so chdir to /tmp
88
83
  original_cwd = os.getcwd()
89
84
  try:
90
85
  os.chdir("/tmp")
91
- # map_location="cpu" ensures GPU-trained models work on CPU
92
- model = TabularModel.load_model(model_path, map_location="cpu")
86
+ model = TabularModel.load_model(model_path)
93
87
  finally:
94
88
  os.chdir(original_cwd)
95
89
 
@@ -158,6 +152,42 @@ def _extract_model_configs(loaded_model: Any, n_train: int) -> dict:
158
152
  return {"trainer": trainer_config, "model": model_config}
159
153
 
160
154
 
155
+ def pull_cv_results(workbench_model: Any) -> Tuple[pd.DataFrame, pd.DataFrame]:
156
+ """Pull cross-validation results from AWS training artifacts.
157
+
158
+ This retrieves the validation predictions and training metrics that were
159
+ saved during model training.
160
+
161
+ Args:
162
+ workbench_model: Workbench model object
163
+
164
+ Returns:
165
+ Tuple of:
166
+ - DataFrame with training metrics
167
+ - DataFrame with validation predictions
168
+ """
169
+ # Get the validation predictions from S3
170
+ s3_path = f"{workbench_model.model_training_path}/validation_predictions.csv"
171
+ predictions_df = pull_s3_data(s3_path)
172
+
173
+ if predictions_df is None:
174
+ raise ValueError(f"No validation predictions found at {s3_path}")
175
+
176
+ log.info(f"Pulled {len(predictions_df)} validation predictions from {s3_path}")
177
+
178
+ # Get training metrics from model metadata
179
+ training_metrics = workbench_model.workbench_meta().get("workbench_training_metrics")
180
+
181
+ if training_metrics is None:
182
+ log.warning(f"No training metrics found in model metadata for {workbench_model.model_name}")
183
+ metrics_df = pd.DataFrame({"error": [f"No training metrics found for {workbench_model.model_name}"]})
184
+ else:
185
+ metrics_df = pd.DataFrame.from_dict(training_metrics)
186
+ log.info(f"Metrics summary:\n{metrics_df.to_string(index=False)}")
187
+
188
+ return metrics_df, predictions_df
189
+
190
+
161
191
  def cross_fold_inference(
162
192
  workbench_model: Any,
163
193
  nfolds: int = 5,
@@ -36,8 +36,14 @@ class ModelPlot(ComponentInterface):
36
36
  if df is None:
37
37
  return self.display_text("No Data")
38
38
 
39
- # Calculate the distance from the diagonal for each point
39
+ # Grab the target(s) for this model
40
40
  target = model.target()
41
+
42
+ # For multi-task models, match target to inference_run name or default to first
43
+ if isinstance(target, list):
44
+ target = next((t for t in target if t in inference_run), target[0])
45
+
46
+ # Compute error for coloring
41
47
  df["error"] = abs(df["prediction"] - df[target])
42
48
  return ScatterPlot().update_properties(
43
49
  df,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: workbench
3
- Version: 0.8.201
3
+ Version: 0.8.204
4
4
  Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
5
5
  Author-email: SuperCowPowers LLC <support@supercowpowers.com>
6
6
  License: MIT License
@@ -52,7 +52,7 @@ Requires-Dist: joblib>=1.3.2
52
52
  Requires-Dist: requests>=2.26.0
53
53
  Requires-Dist: rdkit>=2024.9.5
54
54
  Requires-Dist: mordredcommunity>=2.0.6
55
- Requires-Dist: workbench-bridges>=0.1.10
55
+ Requires-Dist: workbench-bridges>=0.1.15
56
56
  Provides-Extra: ui
57
57
  Requires-Dist: plotly>=6.0.0; extra == "ui"
58
58
  Requires-Dist: dash>3.0.0; extra == "ui"
@@ -30,14 +30,14 @@ workbench/algorithms/sql/value_counts.py,sha256=F-rZoLTTKv1cHYl2_tDlvWDjczy76uLT
30
30
  workbench/api/__init__.py,sha256=KDKzFb4SL8AArtd9ucTkFYdCxbsBMbK1ZMkj0G2rACY,1065
31
31
  workbench/api/compound.py,sha256=kf5EaM5qjWwsZutcxqj9IC_MPnDV1uVHDMns9OA_GOo,2545
32
32
  workbench/api/data_source.py,sha256=Ngz36YZWxFfpJbmURhM1LQPYjh5kdpZNGo6_fCRePbA,8321
33
- workbench/api/df_store.py,sha256=Wybb3zO-jPpAi2Ns8Ks1-lagvXAaBlRpBZHhnnl3Lms,6131
33
+ workbench/api/df_store.py,sha256=1qSYM3Xb4MwMMTMaF3CX0hOCEzhIbnra5Deivg4cryk,3014
34
34
  workbench/api/endpoint.py,sha256=spLse2UoAsZdu_ZxmAvMJX_aX-zutAsQ5_SPm9Xt-nA,3839
35
- workbench/api/feature_set.py,sha256=igzOnDbdU-qkdrYZ4ldvfjiQtC1Mvj-4j8YbHuPcY2w,6903
35
+ workbench/api/feature_set.py,sha256=K0Sl59yAf_qZr8EH4rPjDotezCwP5Q7aG38FGaF4zi0,8062
36
36
  workbench/api/graph_store.py,sha256=LremJyPrQFgsHb7hxsctuCsoxx3p7TKtaY5qALHe6pc,4372
37
37
  workbench/api/meta.py,sha256=1_9989cPvf3hd3tA-83hLijOGNnhwXAF8aZF45adeDQ,8596
38
38
  workbench/api/model.py,sha256=fncUc8MJwXyteKeXOlAy5IMjE48sH_VmDBi3P2MPGG4,4458
39
39
  workbench/api/monitor.py,sha256=Cez89Uac7Tzt47FxkjoX-YDGccEhvBcxw3sZFtw4ud8,4506
40
- workbench/api/parameter_store.py,sha256=7BObkuATuP6C5AG_46kCWsmuCwuh1vgMJDBSN0gTkwM,4294
40
+ workbench/api/parameter_store.py,sha256=_3MmPxKiVy7_OIgCSRlUv9xbk8nuiOWiCtZgT-AxN1k,2574
41
41
  workbench/api/pipeline.py,sha256=MSYGrDSXrRB_oQELtAlOwBfxSBTw3REAkHy5XBHau0Y,6261
42
42
  workbench/cached/__init__.py,sha256=wvTyIFvusv2HjU3yop6OSr3js5_-SZuR8nPmlCuZQJ4,525
43
43
  workbench/cached/cached_data_source.py,sha256=A0o4H9g1aEms8HkOHWnb46vJ5fx6ebs1aCYaQcf8gPI,2649
@@ -48,23 +48,23 @@ workbench/cached/cached_model.py,sha256=iMc_fySUE5qau3feduVXMNb24JY0sBjt1g6WeLLc
48
48
  workbench/cached/cached_pipeline.py,sha256=QOVnEKu5RbIdlNpJUi-0Ebh0_-C68RigSPwKh4dvZTM,1948
49
49
  workbench/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
50
50
  workbench/core/artifacts/__init__.py,sha256=ukcgbYlI9m99bzwaBNO01K1h0-cQkzsbh_jT_GyQ-LY,1034
51
- workbench/core/artifacts/artifact.py,sha256=WFGC1F61d7uFSRB7UTWYOF8O_wk8F9rn__THJL2veLM,17752
51
+ workbench/core/artifacts/artifact.py,sha256=scWUbX2Sk1rxT8VEm_Z7YTxbOzkDASNyqqXB56xLZ2w,17721
52
52
  workbench/core/artifacts/athena_source.py,sha256=RNmCe7s6uH4gVHpcdJcL84aSbF5Q1ahJBLLGwHYRXEU,26081
53
53
  workbench/core/artifacts/cached_artifact_mixin.py,sha256=ngqFLZ4cQx_TFouXZgXZQsv_7W6XCvxVGXXSfzzaft8,3775
54
54
  workbench/core/artifacts/data_capture_core.py,sha256=q8f79rRTYiZ7T4IQRWXl8ZvPpcvZyNxYERwvo8o0OQc,14858
55
55
  workbench/core/artifacts/data_source_abstract.py,sha256=5IRCzFVK-17cd4NXPMRfx99vQAmQ0WHE5jcm5RfsVTg,10619
56
56
  workbench/core/artifacts/data_source_factory.py,sha256=YL_tA5fsgubbB3dPF6T4tO0rGgz-6oo3ge4i_YXVC-M,2380
57
- workbench/core/artifacts/endpoint_core.py,sha256=JNKSaJUlxJu3MaIUH1DWt4-3pq1mSKA_iz2O4Z-FhpE,54599
57
+ workbench/core/artifacts/df_store_core.py,sha256=AueNr_JvuLLu_ByE7cb3u-isH9u0Q7cMP-UCgCX-Ctg,3536
58
+ workbench/core/artifacts/endpoint_core.py,sha256=8HlZw4ZTsSIqYrCsbawfqrQMQo4Y9paiudq8ihN1Mzo,59317
58
59
  workbench/core/artifacts/feature_set_core.py,sha256=wZy-02WXWmSBet5t8mWXFRdv9O4MtW3hWqJuVv7Kok0,39330
59
- workbench/core/artifacts/model_core.py,sha256=FYI55IrOP_yFlcskykBJlusll6XudYw64n1vfuR2UjY,52272
60
+ workbench/core/artifacts/model_core.py,sha256=QIgV5MJr8aDY63in83thdNc5-bzkWLn5f5vvsS4aNYo,52348
60
61
  workbench/core/artifacts/monitor_core.py,sha256=M307yz7tEzOEHgv-LmtVy9jKjSbM98fHW3ckmNYrwlU,27897
62
+ workbench/core/artifacts/parameter_store_core.py,sha256=sHvjJMuybM4qdcKhH-Sx6Ur6Yn5ozA3QHwtidsnhyG8,2867
61
63
  workbench/core/cloud_platform/cloud_meta.py,sha256=-g4-LTC3D0PXb3VfaXdLR1ERijKuHdffeMK_zhD-koQ,8809
62
64
  workbench/core/cloud_platform/aws/README.md,sha256=QT5IQXoUHbIA0qQ2wO6_2P2lYjYQFVYuezc22mWY4i8,97
63
65
  workbench/core/cloud_platform/aws/aws_account_clamp.py,sha256=V5iVsoGvSRilARtTdExnt27QptzAcJaW0s3nm2B8-ow,8286
64
- workbench/core/cloud_platform/aws/aws_df_store.py,sha256=utRIlTCPwFneHHZ8_Z3Hw3rOJSeryiFA4wBtucxULRQ,15055
65
66
  workbench/core/cloud_platform/aws/aws_graph_store.py,sha256=ytYxQTplUmeWbsPmxyZbf6mO9qyTl60ewlJG8MyfyEY,9414
66
67
  workbench/core/cloud_platform/aws/aws_meta.py,sha256=eY9Pn6pl2yAyseACFb2nitR-0vLwG4i8CSEXe8Iaswc,34778
67
- workbench/core/cloud_platform/aws/aws_parameter_store.py,sha256=pOxlorMvtH_QeuvSlMJrQSeALcRaQSnskR4uPu_xStM,11075
68
68
  workbench/core/cloud_platform/aws/aws_secrets_manager.py,sha256=TUnddp1gX-OwxJ_oO5ONh7OI4Z2HC_6euGkJ-himCCk,8615
69
69
  workbench/core/cloud_platform/aws/aws_session.py,sha256=2Gc_k4Q87BBeQDgXgVR-w-qmsF6ncZR8wvTeNnixM6k,6926
70
70
  workbench/core/cloud_platform/aws/cache_dataframe.py,sha256=VnObkVqcjg7v4fegrIkXR1j-K2AHTBpSAoriUXDe12A,2314
@@ -102,14 +102,14 @@ workbench/core/transforms/features_to_features/__init__.py,sha256=47DEQpj8HBSa-_
102
102
  workbench/core/transforms/features_to_features/heavy/emr/Readme.md,sha256=YtQgCEQeKe0CQXQkhzMTYq9xOtCsCYb5P5LW2BmRKWQ,68
103
103
  workbench/core/transforms/features_to_features/heavy/glue/Readme.md,sha256=TuyCatWfoDr99zUwvOcxf-TqMkQzaMqXlj5nmFcRzfo,48
104
104
  workbench/core/transforms/features_to_model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
105
- workbench/core/transforms/features_to_model/features_to_model.py,sha256=hALtU7GniRch9-TuMKv-YwTBfq7pfXsgN9lKFs3fj_8,20296
105
+ workbench/core/transforms/features_to_model/features_to_model.py,sha256=JdKKz3eKrKhicA1WxTfmb1IqQNCdHJE0CKDs66bLHYU,21071
106
106
  workbench/core/transforms/model_to_endpoint/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
107
- workbench/core/transforms/model_to_endpoint/model_to_endpoint.py,sha256=TIYXvuK0s383PwJ4iS6fCRhuif6oIxsoWb4CpMGJjY4,6358
107
+ workbench/core/transforms/model_to_endpoint/model_to_endpoint.py,sha256=QjfUY_Ay2-W8OszWw2vGtsKfnMY7VjiWQmnjuzLBITk,7020
108
108
  workbench/core/transforms/pandas_transforms/__init__.py,sha256=xL4MT8-fZ1SFqDbTLc8XyxjupHtB1YR6Ej0AC2nwd7I,894
109
109
  workbench/core/transforms/pandas_transforms/data_to_pandas.py,sha256=sJHPeuNF8Q8aQqgRnkdWkyvur5cbggdUVIwR-xF3Dlo,3621
110
110
  workbench/core/transforms/pandas_transforms/features_to_pandas.py,sha256=af6xdPt2V4zhh-SzQa_UYxdmNMzMLXbrbsznV5QoIJg,3441
111
111
  workbench/core/transforms/pandas_transforms/pandas_to_data.py,sha256=cqo6hQmzUGUFACvNuVLZQdgrlXrQIu4NjqK-ujPmoIc,9181
112
- workbench/core/transforms/pandas_transforms/pandas_to_features.py,sha256=mj00L40PXhw-JHG2SZe53yJAzicgn4xuM2VbmOY-wsM,21480
112
+ workbench/core/transforms/pandas_transforms/pandas_to_features.py,sha256=AqXS4ZND7lg94enclRP9wGBrYm4AmhL3c--q0o-6_JM,21972
113
113
  workbench/core/transforms/pandas_transforms/pandas_to_features_chunked.py,sha256=0R8mQlWfbIlTVmYUmrtu2gsw0AE815k6kqPgpd0bmyQ,4422
114
114
  workbench/core/views/__init__.py,sha256=UZJMAJBCMVM3uSYmnFg8c2LWtdu9-479WNAdVMIohAc,962
115
115
  workbench/core/views/column_subset_view.py,sha256=vGDKTTGrPIY-IFOeWvudJrhKiq0OjWDp5rTuuj-X40U,4261
@@ -123,8 +123,8 @@ workbench/core/views/view.py,sha256=DvmEA1xdvL980GET_cnbmHzqSy6IhlNaZcoQnVTtYis,
123
123
  workbench/core/views/view_utils.py,sha256=CwOlpqXpumCr6REi-ey7Qjz5_tpg-s4oWHmlOVu8POQ,12270
124
124
  workbench/core/views/storage/mdq_view.py,sha256=qf_ep1KwaXOIfO930laEwNIiCYP7VNOqjE3VdHfopRE,5195
125
125
  workbench/model_scripts/script_generation.py,sha256=_AhzM2qzjBuI7pIaXBRZ1YOOs2lwsKQGVM_ovL6T1bo,8135
126
- workbench/model_scripts/chemprop/chemprop.template,sha256=WjuF61kRxn5wuRDQkxLkcc8voccW8s6oo1XXmDqksYA,37076
127
- workbench/model_scripts/chemprop/generated_model_script.py,sha256=z79wCajIHYWzWB3dVGGaiQ3M3sPPxcMVY78sx35PQvQ,37079
126
+ workbench/model_scripts/chemprop/chemprop.template,sha256=XcRBEz_JYS1Vjv9MI_5BalvrWL9v2vTq1eRlVpLAtPE,38883
127
+ workbench/model_scripts/chemprop/generated_model_script.py,sha256=lSr5qHZljCzttxlq4YwypUYmYbIAl7flo5RT8nXt_vs,39755
128
128
  workbench/model_scripts/chemprop/requirements.txt,sha256=PIuUdPAeDUH3I2M_5nIrCnCfs3FL1l9V5kzHqgCcu7s,281
129
129
  workbench/model_scripts/custom_models/chem_info/Readme.md,sha256=mH1lxJ4Pb7F5nBnVXaiuxpi8zS_yjUw_LBJepVKXhlA,574
130
130
  workbench/model_scripts/custom_models/chem_info/fingerprints.py,sha256=Qvs8jaUwguWUq3Q3j695MY0t0Wk3BvroW-oWBwalMUo,5255
@@ -141,31 +141,31 @@ workbench/model_scripts/custom_models/proximity/proximity.py,sha256=dPTYD1N-JTIq
141
141
  workbench/model_scripts/custom_models/proximity/requirements.txt,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
142
142
  workbench/model_scripts/custom_models/uq_models/Readme.md,sha256=UVpL-lvtTrLqwBeQFinLhd_uNrEw4JUlggIdUSDrd-w,188
143
143
  workbench/model_scripts/custom_models/uq_models/bayesian_ridge.template,sha256=ca3CaAk6HVuNv1HnPgABTzRY3oDrRxomjgD4V1ZDwoc,6448
144
- workbench/model_scripts/custom_models/uq_models/ensemble_xgb.template,sha256=xlKLHeLQkScONnrlbAGIsrCm2wwsvcfv4Vdrw4nlc_8,13457
144
+ workbench/model_scripts/custom_models/uq_models/ensemble_xgb.template,sha256=449Enh4-7RrMrxt1oS_SHJHGV8yYcFlWHsLrCVTFQGI,13778
145
145
  workbench/model_scripts/custom_models/uq_models/gaussian_process.template,sha256=3nMlCi8nEbc4N-MQTzjfIcljfDQkUmWeLBfmd18m5fg,6632
146
- workbench/model_scripts/custom_models/uq_models/meta_uq.template,sha256=XTfhODRaHlI1jZGo9pSe-TqNsk2_nuSw0xMO2fKzDv8,14011
147
- workbench/model_scripts/custom_models/uq_models/ngboost.template,sha256=v1rviYTJGJnQRGgAyveXhOQlS-WFCTlc2vdnWq6HIXk,8241
146
+ workbench/model_scripts/custom_models/uq_models/meta_uq.template,sha256=RIC90o9iI37ylOOJBUVDVF2FmYs9kJl8AifL-AYIwAI,14282
147
+ workbench/model_scripts/custom_models/uq_models/ngboost.template,sha256=_ukYcsL4pnWvFV1oA89_wfVpxWbvoEx6MGwKxc38kSI,8512
148
148
  workbench/model_scripts/custom_models/uq_models/proximity.py,sha256=dPTYD1N-JTIqg6iL7ak_JSouaCdfmBPjG08IRRvTLXU,15836
149
149
  workbench/model_scripts/custom_models/uq_models/requirements.txt,sha256=fw7T7t_YJAXK3T6Ysbesxh_Agx_tv0oYx72cEBTqRDY,98
150
150
  workbench/model_scripts/custom_script_example/custom_model_script.py,sha256=T8aydawgRVAdSlDimoWpXxG2YuWWQkbcjBVjAeSG2_0,6408
151
151
  workbench/model_scripts/custom_script_example/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
152
- workbench/model_scripts/ensemble_xgb/ensemble_xgb.template,sha256=pWmuo-EVz0owvkRI-h9mUTYt1-ouyD-_yyQu6SQbYZ4,10350
152
+ workbench/model_scripts/ensemble_xgb/ensemble_xgb.template,sha256=lMEx0IkawcpTI52gSjCp1Wr0g2vWd4kIGuIqjXhA2GA,10671
153
153
  workbench/model_scripts/ensemble_xgb/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
154
- workbench/model_scripts/pytorch_model/generated_model_script.py,sha256=IP6yRgd3LrtKyCHRPpRJiA0PGwtxZec8PTa8ZLmmVW0,26256
155
- workbench/model_scripts/pytorch_model/pytorch.template,sha256=FYLIvuSG93yOXNH-Su_1NYRcQ-5wI_DEAuL7Sk-hIGA,23620
154
+ workbench/model_scripts/pytorch_model/generated_model_script.py,sha256=nst6kRN8T_LmmDANAaFYSC9GdGQtrDYdVBs4mU1RJ-U,32883
155
+ workbench/model_scripts/pytorch_model/pytorch.template,sha256=PFmGO_jP8S6RKvAzAXiuogkVXYTb5MKajJk_57qQDcc,30718
156
156
  workbench/model_scripts/pytorch_model/requirements.txt,sha256=ICS5nW0wix44EJO2tJszJSaUrSvhSfdedn6FcRInGx4,181
157
157
  workbench/model_scripts/scikit_learn/generated_model_script.py,sha256=xhQIglpAgPRCH9iwI3wI0N0V6p9AgqW0mVOMuSXzUCk,17187
158
158
  workbench/model_scripts/scikit_learn/requirements.txt,sha256=aVvwiJ3LgBUhM_PyFlb2gHXu_kpGPho3ANBzlOkfcvs,107
159
159
  workbench/model_scripts/scikit_learn/scikit_learn.template,sha256=QQvqx-eX9ZTbYmyupq6R6vIQwosmsmY_MRBPaHyfjdk,12586
160
- workbench/model_scripts/uq_models/generated_model_script.py,sha256=QZtOyxQeUdA10Im8mYsbWzxwQbNjYNq0f4c9DDvnQ2M,27275
161
- workbench/model_scripts/uq_models/mapie.template,sha256=gAY-cMt3E-uWILA6YBQWMeaOrpWSi3ijd35HvB0sDRA,23087
160
+ workbench/model_scripts/uq_models/generated_model_script.py,sha256=ivKtB-3MdJVnppxKez4Vz7jLW5i_sb0gLTGOcYJZ_PM,26758
161
+ workbench/model_scripts/uq_models/mapie.template,sha256=on3I40D7zyNfvfqBf5k8VXCFtmepcxKmqVWCH5Q9S84,23432
162
162
  workbench/model_scripts/uq_models/requirements.txt,sha256=fw7T7t_YJAXK3T6Ysbesxh_Agx_tv0oYx72cEBTqRDY,98
163
- workbench/model_scripts/xgb_model/generated_model_script.py,sha256=ILrmzBENoK1g9QFFXpFz4DGwiF9hwedl8olWI4DT1A8,17982
163
+ workbench/model_scripts/xgb_model/generated_model_script.py,sha256=qUGg5R-boaswzXtgKp_J7JPxFzMdRNv51QeF-lMWL-4,19334
164
164
  workbench/model_scripts/xgb_model/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
165
- workbench/model_scripts/xgb_model/xgb_model.template,sha256=hVsPDZkY_2ZL0xNM3YMOew_tgkGa6YfI1KuxHPRJsMg,17909
165
+ workbench/model_scripts/xgb_model/xgb_model.template,sha256=gOXHsymCZjde6L2LvrlTtMRprJ-mXczpE4ZB8mhZZ0s,19168
166
166
  workbench/repl/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
167
167
  workbench/repl/workbench_shell.py,sha256=__FOnBqe3I6Luzb-N9mAecOUfcPEkctzxBfJSKTqDDA,22504
168
- workbench/resources/open_source_api.key,sha256=3S0OTblsmC0msUPdE_dbBmI83xJNmYscuwLJ57JmuOc,433
168
+ workbench/resources/open_source_api.key,sha256=vi9099CjkNnZ1IXB6AQWcG83iFYn2db0iTfTlpGVA1o,432
169
169
  workbench/resources/signature_verify_pub.pem,sha256=V3-u-3_z2PH-805ybkKvzDOBwAbvHxcKn0jLBImEtzM,272
170
170
  workbench/scripts/check_double_bond_stereo.py,sha256=p5hnL54Weq77ES0HCELq9JeoM-PyUGkvVSeWYF2dKyo,7776
171
171
  workbench/scripts/endpoint_test.py,sha256=G4GdQMa7KlKX7WiUSFX_OHAzDdCyf8ZbVYbZBkAPiSo,5339
@@ -202,7 +202,7 @@ workbench/utils/athena_utils.py,sha256=DDyLhJujzh1PfejtGU7ZzOf5hLPOgoXmi4Lrn-_AJ
202
202
  workbench/utils/aws_utils.py,sha256=x8c_WxtdSKmBqNg8P_Z6K2m4AsSMEiD_kh2nVaUZ28c,22077
203
203
  workbench/utils/bulk_utils.py,sha256=s1lYN2Uk536MNGetekLYL_VL0N34hUjk1FX9BAz3Qu0,1182
204
204
  workbench/utils/cache.py,sha256=0R5RXYEz_XHARK3anmQC4VRMawMks_cJ8S4vwC2roAE,5524
205
- workbench/utils/chemprop_utils.py,sha256=mQ4_loPHg_yEe7GMzTpsI6vTYenRgCg2BC-v2w6Qe8Q,27368
205
+ workbench/utils/chemprop_utils.py,sha256=0eszF9K2DYB5bOxbWSomr9SuX3QANdF7ROmWa0tikzY,28805
206
206
  workbench/utils/cloudwatch_handler.py,sha256=t0L280Qa1nMq95dwnf8lB5g8FHrQAyGY5S4JwP3yIa8,5165
207
207
  workbench/utils/cloudwatch_utils.py,sha256=wXSqKcJlSnHyC0D6d4RsH8wwmx_0CsffcetUgXlZ_78,4828
208
208
  workbench/utils/color_utils.py,sha256=TmDGLK44t975lkfjt_1O-ee02QxrKfke7vPuXb-V-Uo,11779
@@ -231,7 +231,7 @@ workbench/utils/pipeline_utils.py,sha256=yzR5tgAzz6zNqvxzZR6YqsbS7r3QDKzBXozaM_A
231
231
  workbench/utils/plot_utils.py,sha256=yFveic-4aY7lKT-CPhYdbIkBr-mZqjbhaRmCySWG_kE,6537
232
232
  workbench/utils/plugin_manager.py,sha256=JWfyFHQih_J_MMtAT1cgjGVnNVPk9bM917LkfH8Z-_A,13873
233
233
  workbench/utils/prox_utils.py,sha256=V0YSxI6lboZl8Bed1GUobFqfMhfpehn2FtgqHpkuhDQ,6170
234
- workbench/utils/pytorch_utils.py,sha256=TWYybSmhHpjqdukWXQlviUcBhMXi2lqRGugy1xM1Ajw,19204
234
+ workbench/utils/pytorch_utils.py,sha256=ig91xlAaWaCp06N4Ml2yoteDQGMJkAfysktbFEImNII,20260
235
235
  workbench/utils/redis_cache.py,sha256=39LFSWmOlNNcah02D3sBnmibc-DPeKC3SNq71K4HaB4,12893
236
236
  workbench/utils/repl_utils.py,sha256=rWOMv2HiEIp8ZL6Ps6DlwiJlGr-pOhv9OZQhm3aR-1A,4668
237
237
  workbench/utils/s3_utils.py,sha256=Xme_o_cftC_jWnw6R9YKS6-6C11zaCBAoQDlY3dZb5o,7337
@@ -262,7 +262,7 @@ workbench/web_interface/components/component_interface.py,sha256=QCPWqiZLkVsAEzQ
262
262
  workbench/web_interface/components/correlation_matrix.py,sha256=Lv4vRta5-TdxBsu0G8Ea7hyyR3XyPes-k5AfL6qZWEc,6376
263
263
  workbench/web_interface/components/data_details_markdown.py,sha256=axDs6eXniglBmvFwIKjpJ5oyT-3D4FO9IcfA_cl-EJ8,9706
264
264
  workbench/web_interface/components/endpoint_metric_plots.py,sha256=H0cXuj9UQrrh_2JvRHtq7O8pMXFXKs7o9XpzySENylw,3441
265
- workbench/web_interface/components/model_plot.py,sha256=Rojx_ZED4P9gvgeEsUm6xnwMNPoeOyn0evw45BWTITc,2536
265
+ workbench/web_interface/components/model_plot.py,sha256=9KSILXvq1L_DUZszj5ozWwi43jEtJlpWdqSs3mXBPeQ,2774
266
266
  workbench/web_interface/components/plugin_interface.py,sha256=jGRq4igUTVXUT4sDqqsKKI2yjilV0ORNBQq6CjEWE84,9563
267
267
  workbench/web_interface/components/plugin_unit_test.py,sha256=Lx3HhIMHzrwDUYs2bADSFYzQq3sFHS9RyA415hyUOdc,7747
268
268
  workbench/web_interface/components/regression_plot.py,sha256=k18Bd0fcH7ig6kL5GqC_dINci3_YLle_fSEM32zXtzY,3342
@@ -291,9 +291,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
291
291
  workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
292
292
  workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
293
293
  workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
294
- workbench-0.8.201.dist-info/licenses/LICENSE,sha256=RTBoTMeEwTgEhS-n8vgQ-VUo5qig0PWVd8xFPKU6Lck,1080
295
- workbench-0.8.201.dist-info/METADATA,sha256=nion57QnFNU_nJbuNaBEewn4vrxwzX1ykJhgOxXvjPc,10500
296
- workbench-0.8.201.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
297
- workbench-0.8.201.dist-info/entry_points.txt,sha256=j02NCuno2Y_BuE4jEvw-IL73WZ9lkTpLwom29uKcLCw,458
298
- workbench-0.8.201.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
299
- workbench-0.8.201.dist-info/RECORD,,
294
+ workbench-0.8.204.dist-info/licenses/LICENSE,sha256=RTBoTMeEwTgEhS-n8vgQ-VUo5qig0PWVd8xFPKU6Lck,1080
295
+ workbench-0.8.204.dist-info/METADATA,sha256=M0chxsmVPAHxPCzxcL9fzH_nS3fu2Bc_DCkDGpaPyeU,10500
296
+ workbench-0.8.204.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
297
+ workbench-0.8.204.dist-info/entry_points.txt,sha256=j02NCuno2Y_BuE4jEvw-IL73WZ9lkTpLwom29uKcLCw,458
298
+ workbench-0.8.204.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
299
+ workbench-0.8.204.dist-info/RECORD,,