workbench 0.8.197__py3-none-any.whl → 0.8.201__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. workbench/algorithms/dataframe/proximity.py +19 -12
  2. workbench/api/__init__.py +2 -1
  3. workbench/api/feature_set.py +7 -4
  4. workbench/api/model.py +1 -1
  5. workbench/core/artifacts/__init__.py +11 -2
  6. workbench/core/artifacts/endpoint_core.py +84 -46
  7. workbench/core/artifacts/feature_set_core.py +69 -1
  8. workbench/core/artifacts/model_core.py +37 -7
  9. workbench/core/cloud_platform/aws/aws_parameter_store.py +18 -2
  10. workbench/core/transforms/features_to_model/features_to_model.py +23 -20
  11. workbench/core/views/view.py +2 -2
  12. workbench/model_scripts/chemprop/chemprop.template +931 -0
  13. workbench/model_scripts/chemprop/generated_model_script.py +931 -0
  14. workbench/model_scripts/chemprop/requirements.txt +11 -0
  15. workbench/model_scripts/custom_models/chem_info/fingerprints.py +134 -0
  16. workbench/model_scripts/custom_models/chem_info/morgan_fingerprints.py +1 -1
  17. workbench/model_scripts/custom_models/proximity/proximity.py +19 -12
  18. workbench/model_scripts/custom_models/uq_models/proximity.py +19 -12
  19. workbench/model_scripts/pytorch_model/generated_model_script.py +130 -88
  20. workbench/model_scripts/pytorch_model/pytorch.template +128 -86
  21. workbench/model_scripts/scikit_learn/generated_model_script.py +302 -0
  22. workbench/model_scripts/script_generation.py +10 -7
  23. workbench/model_scripts/uq_models/generated_model_script.py +25 -18
  24. workbench/model_scripts/uq_models/mapie.template +23 -16
  25. workbench/model_scripts/xgb_model/generated_model_script.py +6 -6
  26. workbench/model_scripts/xgb_model/xgb_model.template +2 -2
  27. workbench/repl/workbench_shell.py +14 -5
  28. workbench/scripts/endpoint_test.py +162 -0
  29. workbench/scripts/{lambda_launcher.py → lambda_test.py} +10 -0
  30. workbench/utils/chemprop_utils.py +724 -0
  31. workbench/utils/pytorch_utils.py +497 -0
  32. workbench/utils/xgboost_model_utils.py +12 -5
  33. {workbench-0.8.197.dist-info → workbench-0.8.201.dist-info}/METADATA +2 -2
  34. {workbench-0.8.197.dist-info → workbench-0.8.201.dist-info}/RECORD +38 -30
  35. {workbench-0.8.197.dist-info → workbench-0.8.201.dist-info}/entry_points.txt +2 -1
  36. {workbench-0.8.197.dist-info → workbench-0.8.201.dist-info}/WHEEL +0 -0
  37. {workbench-0.8.197.dist-info → workbench-0.8.201.dist-info}/licenses/LICENSE +0 -0
  38. {workbench-0.8.197.dist-info → workbench-0.8.201.dist-info}/top_level.txt +0 -0
workbench/model_scripts/chemprop/requirements.txt
@@ -0,0 +1,11 @@
+ # Requirements for ChemProp model scripts
+ # Note: These are the local dev requirements. The Docker images have their own requirements.txt
+ chemprop==2.2.1
+ rdkit==2025.9.1
+ torch>=2.0.0
+ lightning>=2.0.0
+ pandas>=2.0.0
+ numpy>=1.24.0
+ scikit-learn>=1.3.0
+ awswrangler>=3.0.0
+ joblib>=1.3.0
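Since the Docker images carry their own pins, a quick way to confirm a local dev environment matches this file is a small smoke test. This is a hypothetical check, not part of the package, and it assumes each library exposes a `__version__` attribute:

# smoke_test.py -- hedged sketch: verify the pinned ChemProp stack imports cleanly
import chemprop
import rdkit
import torch

print(chemprop.__version__)  # expect 2.2.1 per requirements.txt
print(rdkit.__version__)     # expect the 2025.9.1 release (RDKit reports e.g. "2025.09.1")
print(torch.__version__)     # any version >= 2.0.0 satisfies the pin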
workbench/model_scripts/custom_models/chem_info/fingerprints.py
@@ -0,0 +1,134 @@
+ """Molecular fingerprint computation utilities"""
+
+ import logging
+ import pandas as pd
+
+ # Molecular Descriptor Imports
+ from rdkit import Chem
+ from rdkit.Chem import rdFingerprintGenerator
+ from rdkit.Chem.MolStandardize import rdMolStandardize
+
+ # Set up the logger
+ log = logging.getLogger("workbench")
+
+
+ def compute_morgan_fingerprints(df: pd.DataFrame, radius=2, n_bits=2048, counts=True) -> pd.DataFrame:
+     """Compute and add Morgan fingerprints to the DataFrame.
+
+     Args:
+         df (pd.DataFrame): Input DataFrame containing SMILES strings.
+         radius (int): Radius for the Morgan fingerprint.
+         n_bits (int): Number of bits for the fingerprint.
+         counts (bool): Count simulation for the fingerprint.
+
+     Returns:
+         pd.DataFrame: The input DataFrame with the Morgan fingerprints added as bit strings.
+
+     Note:
+         See: https://greglandrum.github.io/rdkit-blog/posts/2021-07-06-simulating-counts.html
+     """
+     delete_mol_column = False
+
+     # Check for the SMILES column (case-insensitive)
+     smiles_column = next((col for col in df.columns if col.lower() == "smiles"), None)
+     if smiles_column is None:
+         raise ValueError("Input DataFrame must have a 'smiles' column")
+
+     # Sanity check the molecule column (sometimes it gets serialized, which doesn't work)
+     if "molecule" in df.columns and df["molecule"].dtype == "string":
+         log.warning("Detected serialized molecules in 'molecule' column. Removing...")
+         del df["molecule"]
+
+     # Convert SMILES to RDKit molecule objects (vectorized)
+     if "molecule" not in df.columns:
+         log.info("Converting SMILES to RDKit Molecules...")
+         delete_mol_column = True
+         df["molecule"] = df[smiles_column].apply(Chem.MolFromSmiles)
+         # Make sure our molecules are not None
+         failed_smiles = df[df["molecule"].isnull()][smiles_column].tolist()
+         if failed_smiles:
+             log.error(f"Failed to convert the following SMILES to molecules: {failed_smiles}")
+         df = df.dropna(subset=["molecule"])
+
+     # If we have fragments in our compounds, get the largest fragment before computing fingerprints
+     largest_frags = df["molecule"].apply(
+         lambda mol: rdMolStandardize.LargestFragmentChooser().choose(mol) if mol else None
+     )
+
+     # Create a Morgan fingerprint generator
+     if counts:
+         n_bits *= 4  # Multiply by 4 to simulate counts
+     morgan_generator = rdFingerprintGenerator.GetMorganGenerator(radius=radius, fpSize=n_bits, countSimulation=counts)
+
+     # Compute Morgan fingerprints (vectorized)
+     fingerprints = largest_frags.apply(
+         lambda mol: (morgan_generator.GetFingerprint(mol).ToBitString() if mol else pd.NA)
+     )
+
+     # Add the fingerprints to the DataFrame
+     df["fingerprint"] = fingerprints
+
+     # Drop the intermediate 'molecule' column if it was added
+     if delete_mol_column:
+         del df["molecule"]
+     return df
+
+
+ if __name__ == "__main__":
+     print("Running molecular fingerprint tests...")
+     print("Note: This requires molecular_screening module to be available")
+
+     # Test molecules
+     test_molecules = {
+         "aspirin": "CC(=O)OC1=CC=CC=C1C(=O)O",
+         "caffeine": "CN1C=NC2=C1C(=O)N(C(=O)N2C)C",
+         "glucose": "C([C@@H]1[C@H]([C@@H]([C@H](C(O1)O)O)O)O)O",  # With stereochemistry
+         "sodium_acetate": "CC(=O)[O-].[Na+]",  # Salt
+         "benzene": "c1ccccc1",
+         "butene_e": "C/C=C/C",  # E-butene
+         "butene_z": "C/C=C\\C",  # Z-butene
+     }
+
+     # Test 1: Morgan Fingerprints
+     print("\n1. Testing Morgan fingerprint generation...")
+
+     test_df = pd.DataFrame({"SMILES": list(test_molecules.values()), "name": list(test_molecules.keys())})
+
+     fp_df = compute_morgan_fingerprints(test_df.copy(), radius=2, n_bits=512, counts=False)
+
+     print(" Fingerprint generation results:")
+     for _, row in fp_df.iterrows():
+         fp = row.get("fingerprint", "N/A")
+         fp_len = len(fp) if fp != "N/A" else 0
+         print(f" {row['name']:15} → {fp_len} bits")
+
+     # Test 2: Different fingerprint parameters
+     print("\n2. Testing different fingerprint parameters...")
+
+     # Test with counts enabled
+     fp_counts_df = compute_morgan_fingerprints(test_df.copy(), radius=3, n_bits=256, counts=True)
+
+     print(" With count simulation (256 bits * 4):")
+     for _, row in fp_counts_df.iterrows():
+         fp = row.get("fingerprint", "N/A")
+         fp_len = len(fp) if fp != "N/A" else 0
+         print(f" {row['name']:15} → {fp_len} bits")
+
+     # Test 3: Edge cases
+     print("\n3. Testing edge cases...")
+
+     # Invalid SMILES
+     invalid_df = pd.DataFrame({"SMILES": ["INVALID", ""]})
+     try:
+         fp_invalid = compute_morgan_fingerprints(invalid_df.copy())
+         print(f" ✓ Invalid SMILES handled: {len(fp_invalid)} valid molecules")
+     except Exception as e:
+         print(f" ✓ Invalid SMILES properly raised error: {type(e).__name__}")
+
+     # Test with pre-existing molecule column
+     mol_df = test_df.copy()
+     mol_df["molecule"] = mol_df["SMILES"].apply(Chem.MolFromSmiles)
+     fp_with_mol = compute_morgan_fingerprints(mol_df)
+     print(f" ✓ Pre-existing molecule column handled: {len(fp_with_mol)} fingerprints generated")
+
+     print("\n✅ All fingerprint tests completed!")
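For orientation, here is a minimal usage sketch of the new `compute_morgan_fingerprints` helper. The DataFrame below is illustrative (the function accepts any case variant of a "smiles" column), and the expected output follows directly from the count-simulation logic in the diff:

import pandas as pd
from fingerprints import compute_morgan_fingerprints

# Two example molecules: aspirin and caffeine
df = pd.DataFrame({"smiles": ["CC(=O)OC1=CC=CC=C1C(=O)O", "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"]})

# With counts=True (the default) the requested 2048 bits are multiplied by 4,
# so each fingerprint comes back as an 8192-character bit string
fp_df = compute_morgan_fingerprints(df, radius=2, n_bits=2048, counts=True)
print(fp_df["fingerprint"].str.len().unique())  # -> [8192]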
workbench/model_scripts/custom_models/chem_info/morgan_fingerprints.py
@@ -15,7 +15,7 @@ import pandas as pd
  import json

  # Local imports
- from local_utils import compute_morgan_fingerprints
+ from fingerprints import compute_morgan_fingerprints


  # TRAINING SECTION
@@ -68,7 +68,8 @@ class Proximity:
          self,
          top_percent: float = 1.0,
          min_delta: Optional[float] = None,
-         k_neighbors: int = 5,
+         k_neighbors: int = 4,
+         only_coincident: bool = False,
      ) -> pd.DataFrame:
          """
          Find compounds with steep target gradients (data quality issues and activity cliffs).
@@ -80,7 +81,8 @@ class Proximity:
          Args:
              top_percent: Percentage of compounds with steepest gradients to return (e.g., 1.0 = top 1%)
              min_delta: Minimum absolute target difference to consider. If None, defaults to target_range/100
-             k_neighbors: Number of neighbors to use for median calculation (default: 5)
+             k_neighbors: Number of neighbors to use for median calculation (default: 4)
+             only_coincident: If True, only consider compounds that are coincident (default: False)

          Returns:
              DataFrame of compounds with steepest gradients, sorted by gradient (descending)
@@ -99,10 +101,15 @@ class Proximity:
              min_delta = self.target_range / 100.0 if self.target_range > 0 else 0.0
          candidates = candidates[candidates["nn_target_diff"] >= min_delta]

-         # Get top X% by initial gradient
-         percentile = 100 - top_percent
-         threshold = np.percentile(candidates["gradient"], percentile)
-         candidates = candidates[candidates["gradient"] >= threshold].copy()
+         # Filter based on mode
+         if only_coincident:
+             # Only keep coincident points (nn_distance ~= 0)
+             candidates = candidates[candidates["nn_distance"] < epsilon].copy()
+         else:
+             # Get top X% by initial gradient
+             percentile = 100 - top_percent
+             threshold = np.percentile(candidates["gradient"], percentile)
+             candidates = candidates[candidates["gradient"] >= threshold].copy()

          # Phase 2: Verify with k-neighbor median to filter out cases where nearest neighbor is the outlier
          results = []
@@ -113,23 +120,23 @@ class Proximity:
              # Get k nearest neighbors (excluding self)
              nbrs = self.neighbors(cmpd_id, n_neighbors=k_neighbors, include_self=False)

-             # Calculate median target of k nearest neighbors
-             neighbor_median = nbrs.head(k_neighbors)[self.target].median()
+             # Calculate median target of k neighbors, excluding the nearest neighbor (index 0)
+             neighbor_median = nbrs.iloc[1:k_neighbors][self.target].median()
              median_diff = abs(cmpd_target - neighbor_median)

              # Only keep if compound differs from neighborhood median
              # This filters out cases where the nearest neighbor is the outlier
              if median_diff >= min_delta:
-                 mean_distance = nbrs.head(k_neighbors)["distance"].mean()
-
                  results.append(
                      {
                          self.id_column: cmpd_id,
                          self.target: cmpd_target,
+                         "nn_target": row["nn_target"],
+                         "nn_target_diff": row["nn_target_diff"],
+                         "nn_distance": row["nn_distance"],
+                         "gradient": row["gradient"],  # Keep Phase 1 gradient
                          "neighbor_median": neighbor_median,
                          "neighbor_median_diff": median_diff,
-                         "mean_distance": mean_distance,
-                         "gradient": median_diff / (mean_distance + epsilon),
                      }
                  )
@@ -68,7 +68,8 @@ class Proximity:
          self,
          top_percent: float = 1.0,
          min_delta: Optional[float] = None,
-         k_neighbors: int = 5,
+         k_neighbors: int = 4,
+         only_coincident: bool = False,
      ) -> pd.DataFrame:
          """
          Find compounds with steep target gradients (data quality issues and activity cliffs).
@@ -80,7 +81,8 @@ class Proximity:
          Args:
              top_percent: Percentage of compounds with steepest gradients to return (e.g., 1.0 = top 1%)
              min_delta: Minimum absolute target difference to consider. If None, defaults to target_range/100
-             k_neighbors: Number of neighbors to use for median calculation (default: 5)
+             k_neighbors: Number of neighbors to use for median calculation (default: 4)
+             only_coincident: If True, only consider compounds that are coincident (default: False)

          Returns:
              DataFrame of compounds with steepest gradients, sorted by gradient (descending)
@@ -99,10 +101,15 @@ class Proximity:
              min_delta = self.target_range / 100.0 if self.target_range > 0 else 0.0
          candidates = candidates[candidates["nn_target_diff"] >= min_delta]

-         # Get top X% by initial gradient
-         percentile = 100 - top_percent
-         threshold = np.percentile(candidates["gradient"], percentile)
-         candidates = candidates[candidates["gradient"] >= threshold].copy()
+         # Filter based on mode
+         if only_coincident:
+             # Only keep coincident points (nn_distance ~= 0)
+             candidates = candidates[candidates["nn_distance"] < epsilon].copy()
+         else:
+             # Get top X% by initial gradient
+             percentile = 100 - top_percent
+             threshold = np.percentile(candidates["gradient"], percentile)
+             candidates = candidates[candidates["gradient"] >= threshold].copy()

          # Phase 2: Verify with k-neighbor median to filter out cases where nearest neighbor is the outlier
          results = []
@@ -113,23 +120,23 @@ class Proximity:
              # Get k nearest neighbors (excluding self)
              nbrs = self.neighbors(cmpd_id, n_neighbors=k_neighbors, include_self=False)

-             # Calculate median target of k nearest neighbors
-             neighbor_median = nbrs.head(k_neighbors)[self.target].median()
+             # Calculate median target of k neighbors, excluding the nearest neighbor (index 0)
+             neighbor_median = nbrs.iloc[1:k_neighbors][self.target].median()
              median_diff = abs(cmpd_target - neighbor_median)

              # Only keep if compound differs from neighborhood median
              # This filters out cases where the nearest neighbor is the outlier
              if median_diff >= min_delta:
-                 mean_distance = nbrs.head(k_neighbors)["distance"].mean()
-
                  results.append(
                      {
                          self.id_column: cmpd_id,
                          self.target: cmpd_target,
+                         "nn_target": row["nn_target"],
+                         "nn_target_diff": row["nn_target_diff"],
+                         "nn_distance": row["nn_distance"],
+                         "gradient": row["gradient"],  # Keep Phase 1 gradient
                          "neighbor_median": neighbor_median,
                          "neighbor_median_diff": median_diff,
-                         "mean_distance": mean_distance,
-                         "gradient": median_diff / (mean_distance + epsilon),
                      }
                  )
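The same `Proximity` change lands in multiple copies of proximity.py (the file list shows three paths with identical +19 -12 counts). To make the two behavioral changes concrete, here is a small standalone sketch; the column names follow the diff, while `epsilon` and the sample values are assumptions, since the full class is not shown:

import pandas as pd

epsilon = 1e-6  # tolerance for "coincident"; the class's actual value is not visible in this hunk

# only_coincident=True path: keep only compounds whose nearest neighbor sits at
# (near-)zero distance, i.e. duplicate feature vectors with conflicting targets
candidates = pd.DataFrame({
    "nn_distance": [0.0, 0.0, 0.35],
    "gradient": [120.0, 95.0, 4.0],
})
coincident = candidates[candidates["nn_distance"] < epsilon]
print(len(coincident))  # -> 2

# Phase 2 change: the nearest neighbor (row 0) is now excluded from the median,
# since it is the other half of the suspected gradient pair
nbrs = pd.DataFrame({"target": [9.4, 1.1, 1.2, 1.3], "distance": [0.0, 0.2, 0.25, 0.3]})
k_neighbors = 4
old_median = nbrs.head(k_neighbors)["target"].median()    # 1.25 (paired outlier included)
new_median = nbrs.iloc[1:k_neighbors]["target"].median()  # 1.2  (neighborhood consensus only)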