adamops 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. adamops/__init__.py +40 -0
  2. adamops/cli.py +163 -0
  3. adamops/data/__init__.py +24 -0
  4. adamops/data/feature_engineering.py +284 -0
  5. adamops/data/loaders.py +922 -0
  6. adamops/data/preprocessors.py +227 -0
  7. adamops/data/splitters.py +218 -0
  8. adamops/data/validators.py +148 -0
  9. adamops/deployment/__init__.py +21 -0
  10. adamops/deployment/api.py +237 -0
  11. adamops/deployment/cloud.py +191 -0
  12. adamops/deployment/containerize.py +262 -0
  13. adamops/deployment/exporters.py +148 -0
  14. adamops/evaluation/__init__.py +24 -0
  15. adamops/evaluation/comparison.py +133 -0
  16. adamops/evaluation/explainability.py +143 -0
  17. adamops/evaluation/metrics.py +233 -0
  18. adamops/evaluation/reports.py +165 -0
  19. adamops/evaluation/visualization.py +238 -0
  20. adamops/models/__init__.py +21 -0
  21. adamops/models/automl.py +277 -0
  22. adamops/models/ensembles.py +228 -0
  23. adamops/models/modelops.py +308 -0
  24. adamops/models/registry.py +250 -0
  25. adamops/monitoring/__init__.py +21 -0
  26. adamops/monitoring/alerts.py +200 -0
  27. adamops/monitoring/dashboard.py +117 -0
  28. adamops/monitoring/drift.py +212 -0
  29. adamops/monitoring/performance.py +195 -0
  30. adamops/pipelines/__init__.py +15 -0
  31. adamops/pipelines/orchestrators.py +183 -0
  32. adamops/pipelines/workflows.py +212 -0
  33. adamops/utils/__init__.py +18 -0
  34. adamops/utils/config.py +457 -0
  35. adamops/utils/helpers.py +663 -0
  36. adamops/utils/logging.py +412 -0
  37. adamops-0.1.0.dist-info/METADATA +310 -0
  38. adamops-0.1.0.dist-info/RECORD +42 -0
  39. adamops-0.1.0.dist-info/WHEEL +5 -0
  40. adamops-0.1.0.dist-info/entry_points.txt +2 -0
  41. adamops-0.1.0.dist-info/licenses/LICENSE +21 -0
  42. adamops-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,212 @@
1
+ """
2
+ AdamOps Workflows Module
3
+
4
+ Define ML workflows as DAGs.
5
+ """
6
+
7
+ from typing import Any, Callable, Dict, List, Optional, Tuple
8
+ from datetime import datetime
9
+ from enum import Enum
10
+ import traceback
11
+
12
+ from adamops.utils.logging import get_logger
13
+
14
+ logger = get_logger(__name__)
15
+
16
+
17
class TaskStatus(Enum):
    """Lifecycle states shared by tasks and workflows.

    The string values are used verbatim in serialized status reports
    (see ``Task.to_dict`` / ``Workflow.get_status``).
    """
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    SKIPPED = "skipped"
23
+
24
+
25
class Task:
    """A single unit of work in a workflow DAG.

    Wraps a callable together with its dependency list, retry policy and
    execution bookkeeping (status, result, error message, timing).
    """

    def __init__(self, name: str, func: Callable, dependencies: Optional[List[str]] = None,
                 retry: int = 0, timeout: Optional[int] = None):
        """
        Args:
            name: Unique task name within the workflow.
            func: Callable invoked as ``func(context)``; its return value is
                stored as the task result.
            dependencies: Names of tasks that must complete before this one.
            retry: Number of additional attempts after the first failure.
            timeout: Per-task timeout in seconds.
                NOTE(review): currently stored but never enforced — confirm
                whether enforcement is intended elsewhere.
        """
        self.name = name
        self.func = func
        self.dependencies = dependencies or []
        self.retry = retry
        self.timeout = timeout
        self.status = TaskStatus.PENDING
        self.result: Any = None
        self.error: Optional[str] = None
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None

    @property
    def duration(self) -> Optional[float]:
        """Wall-clock runtime in seconds, or None if the task has not finished."""
        if self.start_time and self.end_time:
            return (self.end_time - self.start_time).total_seconds()
        return None

    def run(self, context: Dict) -> Any:
        """Execute the task, retrying up to ``self.retry`` extra times on failure.

        Args:
            context: Shared workflow context passed to the task callable.

        Returns:
            The value returned by ``func(context)``.

        Raises:
            Exception: Re-raises the last error once all attempts are exhausted;
                status/error/end_time are recorded before propagating.
        """
        self.status = TaskStatus.RUNNING
        # start_time is set once; duration therefore includes retry attempts.
        self.start_time = datetime.now()

        last_attempt = self.retry  # attempt indices run 0..retry inclusive
        for attempt in range(self.retry + 1):
            try:
                self.result = self.func(context)
                self.status = TaskStatus.COMPLETED
                self.end_time = datetime.now()
                logger.info(f"Task '{self.name}' completed in {self.duration:.2f}s")
                return self.result
            except Exception as e:
                if attempt == last_attempt:
                    self.status = TaskStatus.FAILED
                    self.error = str(e)
                    self.end_time = datetime.now()
                    logger.error(f"Task '{self.name}' failed: {e}")
                    # Keep the full stack trace available at debug level
                    # (the module-level `traceback` import was previously unused).
                    logger.debug(traceback.format_exc())
                    raise
                logger.warning(f"Task '{self.name}' failed, retrying ({attempt + 1}/{self.retry})")

    def to_dict(self) -> Dict:
        """Serialize task state for status reporting."""
        return {
            "name": self.name,
            "status": self.status.value,
            "dependencies": self.dependencies,
            "duration": self.duration,
            "error": self.error,
        }
78
+
79
+
80
class Workflow:
    """DAG-based workflow for ML pipelines.

    Tasks are registered with explicit dependencies and executed in
    topological order; each task's return value is stored in the shared
    context under the task's name.
    """

    def __init__(self, name: str, description: str = ""):
        """
        Args:
            name: Workflow name (used in logs and status reports).
            description: Optional human-readable description.
        """
        self.name = name
        self.description = description
        self.tasks: Dict[str, Task] = {}
        self.context: Dict = {}
        self.status = TaskStatus.PENDING

    def add_task(self, name: str, func: Callable, dependencies: Optional[List[str]] = None,
                 **kwargs) -> "Workflow":
        """Add a task to the workflow. Returns self to allow chaining."""
        task = Task(name, func, dependencies, **kwargs)
        self.tasks[name] = task
        return self

    def task(self, name: Optional[str] = None, dependencies: Optional[List[str]] = None, **kwargs):
        """Decorator to add a task; defaults the task name to the function name."""
        def decorator(func):
            task_name = name or func.__name__
            self.add_task(task_name, func, dependencies, **kwargs)
            return func
        return decorator

    def _get_execution_order(self) -> List[str]:
        """Topological sort for task execution order.

        Returns:
            Task names ordered so every task appears after its dependencies.

        Raises:
            ValueError: If a dependency is unknown or the dependency graph
                contains a cycle.
        """
        visited = set()
        in_progress = set()  # nodes on the current DFS path, for cycle detection
        order = []

        def visit(name: str):
            if name in visited:
                return
            if name in in_progress:
                # Previously a cyclic graph was silently accepted and executed
                # in an arbitrary order; fail fast instead.
                raise ValueError(f"Cycle detected involving task: {name}")
            in_progress.add(name)

            task = self.tasks[name]
            for dep in task.dependencies:
                if dep not in self.tasks:
                    raise ValueError(f"Unknown dependency: {dep}")
                visit(dep)

            in_progress.discard(name)
            visited.add(name)
            order.append(name)

        for name in self.tasks:
            visit(name)

        return order

    def run(self, initial_context: Optional[Dict] = None) -> Dict:
        """Execute the workflow.

        Args:
            initial_context: Seed values for the shared task context.

        Returns:
            The final context, with each task's result keyed by task name.

        Raises:
            Exception: Re-raised from the first failing task (workflow status
                is set to FAILED before propagating).
        """
        self.context = initial_context or {}
        self.status = TaskStatus.RUNNING

        logger.info(f"Starting workflow: {self.name}")
        start_time = datetime.now()

        try:
            execution_order = self._get_execution_order()

            for task_name in execution_order:
                task = self.tasks[task_name]

                # Only run a task whose dependencies all completed.
                # NOTE(review): a task failure re-raises out of this loop, so
                # the SKIPPED branch is only reachable if Task.run is changed
                # to swallow errors — confirm intended failure policy.
                deps_ok = all(
                    self.tasks[dep].status == TaskStatus.COMPLETED
                    for dep in task.dependencies
                )

                if not deps_ok:
                    task.status = TaskStatus.SKIPPED
                    logger.warning(f"Skipping '{task_name}' due to failed dependencies")
                    continue

                result = task.run(self.context)
                self.context[task_name] = result

            self.status = TaskStatus.COMPLETED
            logger.info(f"Workflow '{self.name}' completed in {(datetime.now() - start_time).total_seconds():.2f}s")

        except Exception as e:
            self.status = TaskStatus.FAILED
            logger.error(f"Workflow '{self.name}' failed: {e}")
            raise

        return self.context

    def get_status(self) -> Dict:
        """Get workflow status, including per-task details."""
        return {
            "name": self.name,
            "status": self.status.value,
            "tasks": {name: task.to_dict() for name, task in self.tasks.items()},
        }

    def visualize(self) -> str:
        """Generate an ASCII visualization of the workflow in execution order."""
        lines = [f"Workflow: {self.name}", "=" * 40]

        for name in self._get_execution_order():
            task = self.tasks[name]
            deps = ", ".join(task.dependencies) if task.dependencies else "None"
            status = task.status.value.upper()
            lines.append(f" [{status}] {name} <- {deps}")

        return "\n".join(lines)
186
+
187
+
188
def create_ml_pipeline(name: str = "ml_pipeline") -> Workflow:
    """Create a standard ML pipeline workflow.

    Builds a four-stage placeholder pipeline:
    load_data -> preprocess -> train -> evaluate.
    """
    workflow = Workflow(name, "Standard ML Training Pipeline")

    def load_data(ctx):
        logger.info("Loading data...")
        return ctx.get("data_path")

    def preprocess(ctx):
        logger.info("Preprocessing data...")
        return {"preprocessed": True}

    def train(ctx):
        logger.info("Training model...")
        return {"model": "trained"}

    def evaluate(ctx):
        logger.info("Evaluating model...")
        return {"metrics": {"accuracy": 0.95}}

    workflow.add_task("load_data", load_data)
    workflow.add_task("preprocess", preprocess, dependencies=["load_data"])
    workflow.add_task("train", train, dependencies=["preprocess"])
    workflow.add_task("evaluate", evaluate, dependencies=["train"])

    return workflow
@@ -0,0 +1,18 @@
1
+ """
2
+ AdamOps Utils Module
3
+
4
+ Provides utility functions:
5
+ - config: Configuration management
6
+ - logging: Centralized logging
7
+ - helpers: Common helper functions
8
+ """
9
+
10
+ from adamops.utils import config
11
+ from adamops.utils import logging
12
+ from adamops.utils import helpers
13
+
14
# Public submodules re-exported by ``adamops.utils``.
__all__ = [
    "config",
    "logging",
    "helpers",
]
@@ -0,0 +1,457 @@
1
+ """
2
+ AdamOps Configuration Module
3
+
4
+ Provides centralized configuration management for the entire library.
5
+ Supports YAML, JSON, and environment variable configurations.
6
+ """
7
+
8
+ import os
9
+ import json
10
+ from pathlib import Path
11
+ from typing import Any, Dict, Optional, Union
12
+ from dataclasses import dataclass, field
13
+
14
# Optional dependency: PyYAML enables reading/writing YAML config files.
try:
    import yaml
    YAML_AVAILABLE = True
except ImportError:
    YAML_AVAILABLE = False

# Optional dependency: python-dotenv loads a .env file into the environment
# before configuration is read from environment variables.
try:
    from dotenv import load_dotenv
    DOTENV_AVAILABLE = True
except ImportError:
    DOTENV_AVAILABLE = False
25
+
26
+
27
@dataclass
class DataConfig:
    """Configuration for the data module."""
    # Text encoding used when reading data files.
    default_encoding: str = "utf-8"
    # Missing-value ratio threshold; exact semantics defined by the data
    # module consumers — TODO confirm.
    missing_threshold: float = 0.5
    # Outlier detection method name.
    outlier_method: str = "iqr"
    # Cutoff used by the outlier method (1.5 is the classic IQR fence).
    outlier_threshold: float = 1.5
    # Sample size used during validation.
    validation_sample_size: int = 10000
    # Whether to infer column types automatically.
    auto_detect_types: bool = True
36
+
37
+
38
@dataclass
class ModelConfig:
    """Configuration for the models module."""
    # Default RNG seed for reproducibility.
    default_random_state: int = 42
    # Number of cross-validation folds.
    cv_folds: int = 5
    # Rounds without improvement before early stopping (where supported).
    early_stopping_rounds: int = 50
    # Parallel workers; -1 presumably means "all cores" (sklearn convention)
    # — TODO confirm against consumers.
    n_jobs: int = -1
    # Verbosity level forwarded to estimators — TODO confirm.
    verbose: int = 0
46
+
47
+
48
@dataclass
class AutoMLConfig:
    """Configuration for the AutoML module."""
    # Overall search budget (presumably seconds, given the 3600 default)
    # — TODO confirm.
    time_limit: int = 3600
    # Maximum number of tuning trials.
    max_trials: int = 100
    # Hyperparameter search strategy name.
    tuning_method: str = "bayesian"
    # Metric to optimize; "auto" defers the choice to the library.
    optimization_metric: str = "auto"
    # Whether to stop the search early.
    early_stopping: bool = True
56
+
57
+
58
@dataclass
class DeploymentConfig:
    """Configuration for the deployment module."""
    # Port the serving API binds to.
    default_port: int = 8000
    # Bind address; 0.0.0.0 exposes the API on all network interfaces.
    default_host: str = "0.0.0.0"
    # Web framework used to build the API.
    api_framework: str = "fastapi"
    # Whether to enable CORS.
    enable_cors: bool = True
    # Whether to log incoming requests.
    log_requests: bool = True
66
+
67
+
68
@dataclass
class MonitoringConfig:
    """Configuration for the monitoring module."""
    # Drift-detection threshold; whether this is a p-value or a score depends
    # on the drift module — TODO confirm.
    drift_threshold: float = 0.05
    # Destination address for alert emails; None disables email alerts.
    alert_email: Optional[str] = None
    # Interval between checks (presumably seconds, given the 3600 default).
    check_interval: int = 3600
    # Whether predictions are logged for monitoring.
    log_predictions: bool = True
75
+
76
+
77
@dataclass
class LoggingConfig:
    """Configuration for logging."""
    # Root log level name (e.g. "DEBUG", "INFO").
    level: str = "INFO"
    # Log record format string (stdlib logging %-style).
    format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    # Log file path; None means no file handler.
    file: Optional[str] = None
    # Whether to also log to the console.
    console: bool = True
    # Max size per log file before rotation.
    max_bytes: int = 10485760  # 10MB
    # Number of rotated log files to keep.
    backup_count: int = 5
86
+
87
+
88
@dataclass
class AdamOpsConfig:
    """Main configuration class for AdamOps.

    Aggregates the per-module section configs plus a handful of top-level
    settings. An instance of this class is held as the process-wide global
    (see ``get_config`` / ``set_config``).
    """
    data: DataConfig = field(default_factory=DataConfig)
    model: ModelConfig = field(default_factory=ModelConfig)
    automl: AutoMLConfig = field(default_factory=AutoMLConfig)
    deployment: DeploymentConfig = field(default_factory=DeploymentConfig)
    monitoring: MonitoringConfig = field(default_factory=MonitoringConfig)
    logging: LoggingConfig = field(default_factory=LoggingConfig)

    # Registry settings
    registry_backend: str = "json"  # json or sqlite
    registry_path: str = ".adamops_registry"

    # General settings
    cache_enabled: bool = True
    cache_path: str = ".adamops_cache"
105
+
106
+
107
+ # Global configuration instance
108
+ _config: Optional[AdamOpsConfig] = None
109
+
110
+
111
def get_config() -> AdamOpsConfig:
    """
    Return the process-wide configuration, creating it on first use.

    Returns:
        AdamOpsConfig: The global configuration object.

    Example:
        >>> config = get_config()
        >>> print(config.model.cv_folds)
        5
    """
    global _config
    _config = _config if _config is not None else AdamOpsConfig()
    return _config
127
+
128
+
129
def set_config(config: AdamOpsConfig) -> None:
    """
    Replace the global configuration instance.

    Args:
        config: The configuration object to install as the process-wide
            configuration returned by ``get_config``.

    Example:
        >>> custom_config = AdamOpsConfig()
        >>> custom_config.model.cv_folds = 10
        >>> set_config(custom_config)
    """
    global _config
    _config = config
143
+
144
+
145
def reset_config() -> None:
    """
    Reset the global configuration to a fresh default instance.

    Example:
        >>> reset_config()
        >>> config = get_config()
        >>> print(config.model.cv_folds)
        5
    """
    global _config
    _config = AdamOpsConfig()
157
+
158
+
159
def load_config_from_file(filepath: Union[str, Path]) -> AdamOpsConfig:
    """
    Load configuration from a YAML or JSON file.

    Args:
        filepath: Path to the configuration file (.yaml, .yml or .json).

    Returns:
        AdamOpsConfig: Loaded configuration object; keys missing from the
        file keep their default values.

    Raises:
        FileNotFoundError: If the file doesn't exist.
        ImportError: If a YAML file is given but PyYAML is not installed.
        ValueError: If the file format is not supported.

    Example:
        >>> config = load_config_from_file("config.yaml")
    """
    filepath = Path(filepath)

    if not filepath.exists():
        raise FileNotFoundError(f"Configuration file not found: {filepath}")

    suffix = filepath.suffix
    if suffix in (".yaml", ".yml"):
        # Check the optional dependency before touching the file.
        if not YAML_AVAILABLE:
            raise ImportError("PyYAML is required to load YAML config files. Install with: pip install pyyaml")
        with open(filepath, "r", encoding="utf-8") as f:
            # safe_load returns None for an empty document; treat that as an
            # empty config rather than crashing in _dict_to_config.
            config_dict = yaml.safe_load(f) or {}
    elif suffix == ".json":
        with open(filepath, "r", encoding="utf-8") as f:
            config_dict = json.load(f)
    else:
        raise ValueError(f"Unsupported config file format: {suffix}")

    return _dict_to_config(config_dict)
192
+
193
+
194
def save_config_to_file(config: AdamOpsConfig, filepath: Union[str, Path]) -> None:
    """
    Save configuration to a YAML or JSON file.

    Args:
        config: Configuration object to save.
        filepath: Destination path; the format is chosen by suffix
            (.yaml/.yml/.json).

    Raises:
        ImportError: If a YAML path is given but PyYAML is not installed.
        ValueError: If the file format is not supported.

    Example:
        >>> config = get_config()
        >>> save_config_to_file(config, "config.yaml")
    """
    filepath = Path(filepath)
    suffix = filepath.suffix

    # Validate the requested format before creating/truncating the file, so
    # an unsupported suffix or missing PyYAML doesn't leave an empty file
    # behind (the previous version opened the file first).
    is_yaml = suffix in (".yaml", ".yml")
    if is_yaml and not YAML_AVAILABLE:
        raise ImportError("PyYAML is required to save YAML config files. Install with: pip install pyyaml")
    if not is_yaml and suffix != ".json":
        raise ValueError(f"Unsupported config file format: {suffix}")

    filepath.parent.mkdir(parents=True, exist_ok=True)
    config_dict = _config_to_dict(config)

    with open(filepath, "w", encoding="utf-8") as f:
        if is_yaml:
            yaml.dump(config_dict, f, default_flow_style=False, indent=2)
        else:
            json.dump(config_dict, f, indent=2)
220
+
221
+
222
def load_config_from_env(prefix: str = "ADAMOPS") -> AdamOpsConfig:
    """
    Build a configuration from environment variables.

    Variables are named ``{prefix}_{SECTION}_{KEY}``, e.g.
    ``ADAMOPS_MODEL_CV_FOLDS=10``. Unset variables leave the corresponding
    defaults untouched; values that fail type conversion are ignored.

    Args:
        prefix: Prefix for environment variables.

    Returns:
        AdamOpsConfig: Configuration with values taken from the environment.

    Example:
        >>> # Set env: ADAMOPS_MODEL_CV_FOLDS=10
        >>> config = load_config_from_env()
        >>> print(config.model.cv_folds)
        10
    """
    if DOTENV_AVAILABLE:
        load_dotenv()

    config = AdamOpsConfig()

    # (env var suffix, section attribute or None for top-level, field, converter)
    mappings = [
        ("DATA_DEFAULT_ENCODING", "data", "default_encoding", str),
        ("DATA_MISSING_THRESHOLD", "data", "missing_threshold", float),
        ("DATA_OUTLIER_METHOD", "data", "outlier_method", str),
        ("DATA_OUTLIER_THRESHOLD", "data", "outlier_threshold", float),
        ("MODEL_RANDOM_STATE", "model", "default_random_state", int),
        ("MODEL_CV_FOLDS", "model", "cv_folds", int),
        ("MODEL_N_JOBS", "model", "n_jobs", int),
        ("AUTOML_TIME_LIMIT", "automl", "time_limit", int),
        ("AUTOML_MAX_TRIALS", "automl", "max_trials", int),
        ("AUTOML_TUNING_METHOD", "automl", "tuning_method", str),
        ("DEPLOY_PORT", "deployment", "default_port", int),
        ("DEPLOY_HOST", "deployment", "default_host", str),
        ("DEPLOY_FRAMEWORK", "deployment", "api_framework", str),
        ("MONITOR_DRIFT_THRESHOLD", "monitoring", "drift_threshold", float),
        ("MONITOR_CHECK_INTERVAL", "monitoring", "check_interval", int),
        ("LOG_LEVEL", "logging", "level", str),
        ("LOG_FILE", "logging", "file", str),
        ("REGISTRY_BACKEND", None, "registry_backend", str),
        ("REGISTRY_PATH", None, "registry_path", str),
    ]

    for suffix, section, attr, convert in mappings:
        raw = os.environ.get(f"{prefix}_{suffix}")
        if raw is None:
            continue
        try:
            value = convert(raw)
        except (ValueError, TypeError):
            continue  # skip values that don't parse
        target = config if section is None else getattr(config, section)
        setattr(target, attr, value)

    return config
295
+
296
+
297
def _config_to_dict(config: AdamOpsConfig) -> Dict[str, Any]:
    """Convert a configuration object to a plain nested dictionary.

    Uses ``dataclasses.asdict``, which recursively converts the nested
    section dataclasses in field-declaration order, producing the same
    structure the previous hand-written mapping did — without a field list
    that must be kept in sync with the dataclass definitions by hand.
    """
    from dataclasses import asdict  # local: module only imports dataclass/field

    return asdict(config)
348
+
349
+
350
def _dict_to_config(config_dict: Dict[str, Any]) -> AdamOpsConfig:
    """Convert a (possibly partial) dictionary to a configuration object.

    Keys absent from ``config_dict`` keep their default values; unknown keys
    inside a section are ignored. The previous version repeated the same
    merge logic once per section; it is now a single generic helper.
    """
    config = AdamOpsConfig()

    # Each of these top-level keys maps to a section dataclass attribute.
    for section in ("data", "model", "automl", "deployment", "monitoring", "logging"):
        if section in config_dict:
            current = getattr(config, section)
            setattr(config, section, _merge_section(current, config_dict[section]))

    # General (non-section) settings.
    config.registry_backend = config_dict.get("registry_backend", config.registry_backend)
    config.registry_path = config_dict.get("registry_path", config.registry_path)
    config.cache_enabled = config_dict.get("cache_enabled", config.cache_enabled)
    config.cache_path = config_dict.get("cache_path", config.cache_path)

    return config


def _merge_section(current: Any, overrides: Dict[str, Any]) -> Any:
    """Rebuild a section dataclass, overriding fields present in ``overrides``.

    Fields missing from ``overrides`` keep the value from ``current``;
    keys in ``overrides`` that are not dataclass fields are ignored.
    """
    from dataclasses import fields  # local: module only imports dataclass/field

    values = {f.name: overrides.get(f.name, getattr(current, f.name)) for f in fields(current)}
    return type(current)(**values)
428
+
429
+
430
def update_config(**kwargs) -> AdamOpsConfig:
    """
    Update values on the global configuration.

    Args:
        **kwargs: ``section__key=value`` pairs for nested settings (e.g.
            ``model__cv_folds=10``) or plain ``key=value`` for top-level
            settings. Unknown names are silently ignored.

    Returns:
        AdamOpsConfig: The updated global configuration object.

    Example:
        >>> config = update_config(model__cv_folds=10, automl__time_limit=7200)
        >>> print(config.model.cv_folds)
        10
    """
    config = get_config()

    for key, value in kwargs.items():
        if "__" not in key:
            # Top-level setting (e.g. registry_backend).
            if hasattr(config, key):
                setattr(config, key, value)
            continue
        section_name, attr = key.split("__", 1)
        section = getattr(config, section_name, None)
        if section is not None and hasattr(section, attr):
            setattr(section, attr, value)

    return config