alchemist-nrel 0.2.1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. alchemist_core/__init__.py +14 -7
  2. alchemist_core/acquisition/botorch_acquisition.py +15 -6
  3. alchemist_core/audit_log.py +594 -0
  4. alchemist_core/data/experiment_manager.py +76 -5
  5. alchemist_core/models/botorch_model.py +6 -4
  6. alchemist_core/models/sklearn_model.py +74 -8
  7. alchemist_core/session.py +788 -39
  8. alchemist_core/utils/doe.py +200 -0
  9. alchemist_nrel-0.3.1.dist-info/METADATA +185 -0
  10. alchemist_nrel-0.3.1.dist-info/RECORD +66 -0
  11. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/entry_points.txt +1 -0
  12. api/example_client.py +7 -2
  13. api/main.py +21 -4
  14. api/models/requests.py +95 -1
  15. api/models/responses.py +167 -0
  16. api/routers/acquisition.py +25 -0
  17. api/routers/experiments.py +134 -6
  18. api/routers/sessions.py +438 -10
  19. api/routers/visualizations.py +10 -5
  20. api/routers/websocket.py +132 -0
  21. api/run_api.py +56 -0
  22. api/services/session_store.py +285 -54
  23. api/static/NEW_ICON.ico +0 -0
  24. api/static/NEW_ICON.png +0 -0
  25. api/static/NEW_LOGO_DARK.png +0 -0
  26. api/static/NEW_LOGO_LIGHT.png +0 -0
  27. api/static/assets/api-vcoXEqyq.js +1 -0
  28. api/static/assets/index-DWfIKU9j.js +4094 -0
  29. api/static/assets/index-sMIa_1hV.css +1 -0
  30. api/static/index.html +14 -0
  31. api/static/vite.svg +1 -0
  32. ui/gpr_panel.py +7 -2
  33. ui/notifications.py +197 -10
  34. ui/ui.py +1117 -68
  35. ui/variables_setup.py +47 -2
  36. ui/visualizations.py +60 -3
  37. alchemist_core/models/ax_model.py +0 -159
  38. alchemist_nrel-0.2.1.dist-info/METADATA +0 -206
  39. alchemist_nrel-0.2.1.dist-info/RECORD +0 -54
  40. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/WHEEL +0 -0
  41. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/licenses/LICENSE +0 -0
  42. {alchemist_nrel-0.2.1.dist-info → alchemist_nrel-0.3.1.dist-info}/top_level.txt +0 -0
api/models/responses.py CHANGED
@@ -88,6 +88,31 @@ class SessionInfoResponse(BaseModel):
     )
 
 
+class SessionStateResponse(BaseModel):
+    """Current state of an optimization session for monitoring."""
+    session_id: str
+    n_variables: int
+    n_experiments: int
+    model_trained: bool
+    model_backend: Optional[str] = None
+    last_suggestion: Optional[Dict[str, Any]] = None
+    last_acquisition_value: Optional[float] = None
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "session_id": "550e8400-e29b-41d4-a716-446655440000",
+                "n_variables": 2,
+                "n_experiments": 15,
+                "model_trained": True,
+                "model_backend": "botorch",
+                "last_suggestion": {"temperature": 385.2, "flow_rate": 4.3},
+                "last_acquisition_value": 0.025
+            }
+        }
+    )
+
+
 # ============================================================
 # Variable Models
 # ============================================================
@@ -112,6 +137,11 @@ class ExperimentResponse(BaseModel):
     """Response when adding an experiment."""
     message: str = "Experiment added successfully"
     n_experiments: int
+    model_trained: bool = Field(default=False, description="Whether model was auto-trained")
+    training_metrics: Optional[Dict[str, Any]] = Field(
+        None,
+        description="Training metrics if auto-train was enabled"
+    )
 
 
 class ExperimentsListResponse(BaseModel):
@@ -146,6 +176,30 @@ class ExperimentsSummaryResponse(BaseModel):
     )
 
 
+# ============================================================
+# Initial Design (DoE) Models
+# ============================================================
+
+class InitialDesignResponse(BaseModel):
+    """Response containing generated initial design points."""
+    points: List[Dict[str, Any]] = Field(..., description="Generated experimental points")
+    method: str = Field(..., description="Sampling method used")
+    n_points: int = Field(..., description="Number of points generated")
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "points": [
+                    {"temperature": 350.2, "flow_rate": 2.47},
+                    {"temperature": 421.8, "flow_rate": 7.92}
+                ],
+                "method": "lhs",
+                "n_points": 2
+            }
+        }
+    )
+
+
 # ============================================================
 # Model Training Models
 # ============================================================
@@ -290,3 +344,116 @@ class ErrorResponse(BaseModel):
             }
         }
     )
+
+
+# ============================================================
+# Audit Log & Session Management Responses
+# ============================================================
+
+class SessionMetadataResponse(BaseModel):
+    """Response containing session metadata."""
+    session_id: str
+    name: str
+    created_at: str
+    last_modified: str
+    description: str
+    tags: List[str]
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "session_id": "550e8400-e29b-41d4-a716-446655440000",
+                "name": "Catalyst_Screening_Nov2025",
+                "created_at": "2025-11-19T09:00:00",
+                "last_modified": "2025-11-19T14:30:00",
+                "description": "Pt/Pd ratio optimization",
+                "tags": ["catalyst", "CO2"]
+            }
+        }
+    )
+
+
+class AuditEntryResponse(BaseModel):
+    """Response containing a single audit log entry."""
+    timestamp: str
+    entry_type: str
+    parameters: Dict[str, Any]
+    hash: str
+    notes: str
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "timestamp": "2025-11-19T09:15:00",
+                "entry_type": "data_locked",
+                "parameters": {
+                    "n_experiments": 15,
+                    "variables": [],
+                    "data_hash": "abc123"
+                },
+                "hash": "a1b2c3d4",
+                "notes": "Initial screening dataset"
+            }
+        }
+    )
+
+
+class AuditLogResponse(BaseModel):
+    """Response containing complete audit log."""
+    entries: List[AuditEntryResponse]
+    n_entries: int
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "entries": [],
+                "n_entries": 0
+            }
+        }
+    )
+
+
+class LockDecisionResponse(BaseModel):
+    """Response after locking a decision."""
+    success: bool
+    entry: AuditEntryResponse
+    message: str
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "success": True,
+                "entry": {
+                    "timestamp": "2025-11-19T09:15:00",
+                    "entry_type": "data_locked",
+                    "parameters": {},
+                    "hash": "a1b2c3d4",
+                    "notes": ""
+                },
+                "message": "Data decision locked successfully"
+            }
+        }
+    )
+
+
+# ============================================================
+# Session Lock Models
+# ============================================================
+
+class SessionLockResponse(BaseModel):
+    """Response for session lock operations."""
+    locked: bool = Field(..., description="Whether the session is locked")
+    locked_by: Optional[str] = Field(None, description="Identifier of who locked the session")
+    locked_at: Optional[str] = Field(None, description="When the session was locked")
+    lock_token: Optional[str] = Field(None, description="Token for unlocking (only on lock)")
+
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "locked": True,
+                "locked_by": "Reactor Controller v1.2",
+                "locked_at": "2025-12-04T16:30:00",
+                "lock_token": "550e8400-e29b-41d4-a716-446655440000"
+            }
+        }
+    )
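
For API consumers, the additions above are plain Pydantic v2 models, so responses can be parsed and validated client-side with the same schema. The following is a minimal sketch, not part of the package: it re-declares InitialDesignResponse exactly as defined in the diff above and validates an illustrative payload.

    from typing import Any, Dict, List
    from pydantic import BaseModel, Field

    class InitialDesignResponse(BaseModel):
        """Local copy of the model added above, for illustration only."""
        points: List[Dict[str, Any]] = Field(..., description="Generated experimental points")
        method: str = Field(..., description="Sampling method used")
        n_points: int = Field(..., description="Number of points generated")

    # Illustrative payload shaped like the endpoint's JSON response.
    payload = {
        "points": [{"temperature": 350.2, "flow_rate": 2.47}],
        "method": "lhs",
        "n_points": 1,
    }
    resp = InitialDesignResponse.model_validate(payload)  # Pydantic v2 validation
    print(resp.model_dump_json(indent=2))
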
api/routers/acquisition.py CHANGED
@@ -59,6 +59,31 @@ async def suggest_next_experiments(
     # Convert to list of dicts
     suggestions = suggestions_df.to_dict('records')
 
+    # Record acquisition in audit log
+    if suggestions:
+        # Get current max iteration from experiments
+        iteration = None
+        if not session.experiment_manager.df.empty and 'Iteration' in session.experiment_manager.df.columns:
+            iteration = int(session.experiment_manager.df['Iteration'].max()) + 1
+
+        # Build parameters dict with only fields that exist
+        acq_params = {
+            "goal": request.goal,
+            "n_suggestions": request.n_suggestions
+        }
+        if request.xi is not None:
+            acq_params["xi"] = request.xi
+        if request.kappa is not None:
+            acq_params["kappa"] = request.kappa
+
+        session.audit_log.lock_acquisition(
+            strategy=request.strategy,
+            parameters=acq_params,
+            suggestions=suggestions,
+            iteration=iteration,
+            notes=f"Suggested {len(suggestions)} point(s) using {request.strategy}"
+        )
+
     logger.info(f"Generated {len(suggestions)} suggestions for session {session_id} using {request.strategy}")
 
     return AcquisitionResponse(
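
The only non-obvious step in the hunk above is the iteration bookkeeping: the audit entry is stamped with one past the largest 'Iteration' value already stored, or None when no iteration column exists yet. A standalone sketch of that logic follows; the sample DataFrame and its 'Output' column are invented for illustration.

    from typing import Optional
    import pandas as pd

    def next_iteration(df: pd.DataFrame) -> Optional[int]:
        """Return the next iteration index, or None if none are recorded yet."""
        if df.empty or "Iteration" not in df.columns:
            return None
        return int(df["Iteration"].max()) + 1

    history = pd.DataFrame({"Iteration": [0, 1, 1, 2], "Output": [0.42, 0.51, 0.49, 0.58]})
    print(next_iteration(history))  # -> 3
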
api/routers/experiments.py CHANGED
@@ -2,9 +2,14 @@
 Experiments router - Experimental data management.
 """
 
-from fastapi import APIRouter, Depends, UploadFile, File
-from ..models.requests import AddExperimentRequest, AddExperimentsBatchRequest
-from ..models.responses import ExperimentResponse, ExperimentsListResponse, ExperimentsSummaryResponse
+from fastapi import APIRouter, Depends, UploadFile, File, Query
+from ..models.requests import AddExperimentRequest, AddExperimentsBatchRequest, InitialDesignRequest
+from ..models.responses import (
+    ExperimentResponse,
+    ExperimentsListResponse,
+    ExperimentsSummaryResponse,
+    InitialDesignResponse
+)
 from ..dependencies import get_session
 from ..middleware.error_handlers import NoVariablesError
 from alchemist_core.session import OptimizationSession
@@ -12,6 +17,7 @@ import logging
 import pandas as pd
 import tempfile
 import os
+from typing import Optional
 
 logger = logging.getLogger(__name__)
 
@@ -22,6 +28,9 @@ router = APIRouter()
 async def add_experiment(
     session_id: str,
     experiment: AddExperimentRequest,
+    auto_train: bool = Query(False, description="Auto-train model after adding data"),
+    training_backend: Optional[str] = Query(None, description="Model backend (sklearn/botorch)"),
+    training_kernel: Optional[str] = Query(None, description="Kernel type (rbf/matern)"),
     session: OptimizationSession = Depends(get_session)
 ):
     """
@@ -29,6 +38,11 @@ async def add_experiment(
 
     The experiment must include values for all defined variables.
     Output value is optional for candidate experiments.
+
+    Args:
+        auto_train: If True, retrain model after adding data
+        training_backend: Model backend (uses last if None)
+        training_kernel: Kernel type (uses last or 'rbf' if None)
     """
     # Check if variables are defined
     if len(session.search_space.variables) == 0:
@@ -37,15 +51,56 @@
     session.add_experiment(
         inputs=experiment.inputs,
         output=experiment.output,
-        noise=experiment.noise
+        noise=experiment.noise,
+        iteration=experiment.iteration,
+        reason=experiment.reason
     )
 
     n_experiments = len(session.experiment_manager.df)
     logger.info(f"Added experiment to session {session_id}. Total: {n_experiments}")
 
+    # Auto-train if requested (need at least 5 points to train)
+    model_trained = False
+    training_metrics = None
+
+    if auto_train and n_experiments >= 5:
+        try:
+            # Use previous config or provided config
+            backend = training_backend or (session.model_backend if session.model else "sklearn")
+            kernel = training_kernel or "rbf"
+
+            # Note: Input/output transforms are now automatically applied by core Session.train_model()
+            # for BoTorch models. No need to specify them here unless overriding defaults.
+            result = session.train_model(backend=backend, kernel=kernel)
+            model_trained = True
+            metrics = result.get("metrics", {})
+            hyperparameters = result.get("hyperparameters", {})
+            training_metrics = {
+                "rmse": metrics.get("rmse"),
+                "r2": metrics.get("r2"),
+                "backend": backend
+            }
+            logger.info(f"Auto-trained model for session {session_id}: {training_metrics}")
+
+            # Record in audit log if this is an optimization iteration
+            if experiment.iteration is not None and experiment.iteration > 0:
+                session.audit_log.lock_model(
+                    backend=backend,
+                    kernel=kernel,
+                    hyperparameters=hyperparameters,
+                    cv_metrics=metrics,
+                    iteration=experiment.iteration,
+                    notes=f"Auto-trained after iteration {experiment.iteration}"
+                )
+        except Exception as e:
+            logger.error(f"Auto-train failed for session {session_id}: {e}")
+            # Don't fail the whole request, just log it
+
     return ExperimentResponse(
         message="Experiment added successfully",
-        n_experiments=n_experiments
+        n_experiments=n_experiments,
+        model_trained=model_trained,
+        training_metrics=training_metrics
     )
 
 
@@ -53,12 +108,20 @@ async def add_experiment(
 async def add_experiments_batch(
     session_id: str,
     batch: AddExperimentsBatchRequest,
+    auto_train: bool = Query(False, description="Auto-train model after adding data"),
+    training_backend: Optional[str] = Query(None, description="Model backend (sklearn/botorch)"),
+    training_kernel: Optional[str] = Query(None, description="Kernel type (rbf/matern)"),
     session: OptimizationSession = Depends(get_session)
 ):
     """
     Add multiple experiments at once.
 
     Useful for bulk data import or initialization.
+
+    Args:
+        auto_train: If True, retrain model after adding data
+        training_backend: Model backend (uses last if None)
+        training_kernel: Kernel type (uses last or 'rbf' if None)
     """
     # Check if variables are defined
     if len(session.search_space.variables) == 0:
@@ -74,9 +137,74 @@ async def add_experiments_batch(
     n_experiments = len(session.experiment_manager.df)
     logger.info(f"Added {len(batch.experiments)} experiments to session {session_id}. Total: {n_experiments}")
 
+    # Auto-train if requested
+    model_trained = False
+    training_metrics = None
+
+    if auto_train and n_experiments >= 5:  # Minimum data for training
+        try:
+            backend = training_backend or (session.model_backend if session.model else "sklearn")
+            kernel = training_kernel or "rbf"
+
+            result = session.train_model(backend=backend, kernel=kernel)
+            model_trained = True
+            metrics = result.get("metrics", {})
+            training_metrics = {
+                "rmse": metrics.get("rmse"),
+                "r2": metrics.get("r2"),
+                "backend": backend
+            }
+            logger.info(f"Auto-trained model for session {session_id}: {training_metrics}")
+        except Exception as e:
+            logger.error(f"Auto-train failed for session {session_id}: {e}")
+
     return ExperimentResponse(
         message=f"Added {len(batch.experiments)} experiments successfully",
-        n_experiments=n_experiments
+        n_experiments=n_experiments,
+        model_trained=model_trained,
+        training_metrics=training_metrics
+    )
+
+
+@router.post("/{session_id}/initial-design", response_model=InitialDesignResponse)
+async def generate_initial_design(
+    session_id: str,
+    request: InitialDesignRequest,
+    session: OptimizationSession = Depends(get_session)
+):
+    """
+    Generate initial experimental design (DoE) for autonomous operation.
+
+    Generates space-filling experimental designs before Bayesian optimization begins.
+    Useful for autonomous controllers to get initial points to evaluate.
+
+    Methods:
+    - random: Random sampling
+    - lhs: Latin Hypercube Sampling (space-filling)
+    - sobol: Sobol sequence (quasi-random)
+    - halton: Halton sequence (quasi-random)
+    - hammersly: Hammersly sequence (quasi-random)
+
+    Returns list of experiments (input combinations) to evaluate.
+    """
+    # Check if variables are defined
+    if len(session.search_space.variables) == 0:
+        raise NoVariablesError("No variables defined. Add variables to search space first.")
+
+    # Generate design
+    design_points = session.generate_initial_design(
+        method=request.method,
+        n_points=request.n_points,
+        random_seed=request.random_seed,
+        lhs_criterion=request.lhs_criterion
+    )
+
+    logger.info(f"Generated {len(design_points)} initial design points using {request.method} for session {session_id}")
+
+    return InitialDesignResponse(
+        points=design_points,
+        method=request.method,
+        n_points=len(design_points)
     )
 
 
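
Taken together, the new initial-design endpoint and the auto_train query flag support a simple autonomous loop: request a space-filling design, measure each point, and post results back with auto-training enabled. The sketch below is hedged: the request and response field names come from this diff, but the base URL, the route prefixes, and the run_experiment helper are assumptions and may differ from the actual API wiring (see api/main.py and api/example_client.py in the package).

    import requests  # third-party HTTP client

    BASE = "http://localhost:8000"                       # assumed local dev server
    SESSION_ID = "550e8400-e29b-41d4-a716-446655440000"  # example session id

    def run_experiment(inputs):
        """Hypothetical measurement stub; a real controller would run the experiment."""
        return 0.0

    # Request an initial design (method/n_points/random_seed fields per InitialDesignRequest).
    design = requests.post(
        f"{BASE}/sessions/{SESSION_ID}/initial-design",  # route prefix is an assumption
        json={"method": "lhs", "n_points": 5, "random_seed": 42},
    ).json()

    # Evaluate each suggested point and feed it back, letting the server auto-train.
    for point in design["points"]:
        result = requests.post(
            f"{BASE}/sessions/{SESSION_ID}/experiments",          # assumed experiments route
            params={"auto_train": "true", "training_backend": "botorch"},
            json={"inputs": point, "output": run_experiment(point)},
        ).json()
        print(result["n_experiments"], result["model_trained"])
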