ml-dash 0.0.11__py3-none-any.whl → 0.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. ml_dash/__init__.py +59 -1
  2. ml_dash/auto_start.py +42 -0
  3. ml_dash/cli.py +67 -0
  4. ml_dash/cli_commands/__init__.py +1 -0
  5. ml_dash/cli_commands/download.py +797 -0
  6. ml_dash/cli_commands/list.py +343 -0
  7. ml_dash/cli_commands/upload.py +1298 -0
  8. ml_dash/client.py +955 -0
  9. ml_dash/config.py +114 -11
  10. ml_dash/experiment.py +1020 -0
  11. ml_dash/files.py +688 -0
  12. ml_dash/log.py +181 -0
  13. ml_dash/metric.py +292 -0
  14. ml_dash/params.py +188 -0
  15. ml_dash/storage.py +1115 -0
  16. ml_dash-0.5.9.dist-info/METADATA +244 -0
  17. ml_dash-0.5.9.dist-info/RECORD +20 -0
  18. ml_dash-0.5.9.dist-info/WHEEL +4 -0
  19. ml_dash-0.5.9.dist-info/entry_points.txt +3 -0
  20. ml_dash/app.py +0 -33
  21. ml_dash/file_events.py +0 -71
  22. ml_dash/file_handlers.py +0 -141
  23. ml_dash/file_utils.py +0 -5
  24. ml_dash/file_watcher.py +0 -30
  25. ml_dash/main.py +0 -60
  26. ml_dash/mime_types.py +0 -20
  27. ml_dash/schema/__init__.py +0 -110
  28. ml_dash/schema/archive.py +0 -165
  29. ml_dash/schema/directories.py +0 -59
  30. ml_dash/schema/experiments.py +0 -65
  31. ml_dash/schema/files/__init__.py +0 -204
  32. ml_dash/schema/files/file_helpers.py +0 -79
  33. ml_dash/schema/files/images.py +0 -27
  34. ml_dash/schema/files/metrics.py +0 -64
  35. ml_dash/schema/files/parameters.py +0 -50
  36. ml_dash/schema/files/series.py +0 -235
  37. ml_dash/schema/files/videos.py +0 -27
  38. ml_dash/schema/helpers.py +0 -66
  39. ml_dash/schema/projects.py +0 -65
  40. ml_dash/schema/schema_helpers.py +0 -19
  41. ml_dash/schema/users.py +0 -33
  42. ml_dash/sse.py +0 -18
  43. ml_dash-0.0.11.dist-info/METADATA +0 -67
  44. ml_dash-0.0.11.dist-info/RECORD +0 -30
  45. ml_dash-0.0.11.dist-info/WHEEL +0 -5
  46. ml_dash-0.0.11.dist-info/top_level.txt +0 -1
  47. /ml_dash/{example.py → py.typed} +0 -0
ml_dash/log.py ADDED
@@ -0,0 +1,181 @@
+ """
+ Log API for ML-Dash SDK.
+
+ Provides a fluent interface for structured logging with validation.
+ """
+
+ from typing import Optional, Dict, Any, TYPE_CHECKING
+ from enum import Enum
+
+ if TYPE_CHECKING:
+     from .experiment import Experiment
+
+
+ class LogLevel(Enum):
+     """
+     Standard log levels for ML-Dash.
+
+     Supported levels:
+     - INFO: Informational messages
+     - WARN: Warning messages
+     - ERROR: Error messages
+     - DEBUG: Debug messages
+     - FATAL: Fatal error messages
+     """
+     INFO = "info"
+     WARN = "warn"
+     ERROR = "error"
+     DEBUG = "debug"
+     FATAL = "fatal"
+
+     @classmethod
+     def validate(cls, level: str) -> str:
+         """
+         Validate and normalize a log level.
+
+         Args:
+             level: Log level string (case-insensitive)
+
+         Returns:
+             Normalized log level string (lowercase)
+
+         Raises:
+             ValueError: If level is not one of the 5 standard levels
+
+         Example:
+             >>> LogLevel.validate("INFO")
+             'info'
+             >>> LogLevel.validate("invalid")
+             ValueError: Invalid log level: 'invalid'. Must be one of: info, warn, error, debug, fatal
+         """
+         try:
+             return cls[level.upper()].value
+         except KeyError:
+             valid_levels = ", ".join(l.value for l in cls)
+             raise ValueError(
+                 f"Invalid log level: '{level}'. "
+                 f"Must be one of: {valid_levels}"
+             )
+
+
+ class LogBuilder:
+     """
+     Fluent builder for creating log entries.
+
+     This class is returned by Experiment.log() when no message is provided.
+     It allows a fluent API style where metadata is set first, then a
+     log-level method is called to write the entry.
+
+     Example:
+         experiment.log(metadata={"epoch": 1}).info("Training started")
+         experiment.log().error("Failed", error_code=500)
+     """
+
+     def __init__(self, experiment: 'Experiment', metadata: Optional[Dict[str, Any]] = None):
+         """
+         Initialize LogBuilder.
+
+         Args:
+             experiment: Parent Experiment instance
+             metadata: Optional metadata dict from the log() call
+         """
+         self._experiment = experiment
+         self._metadata = metadata
+
+     def info(self, message: str, **extra_metadata) -> None:
+         """
+         Write an info-level log.
+
+         Args:
+             message: Log message
+             **extra_metadata: Additional metadata as keyword arguments
+
+         Example:
+             experiment.log().info("Training started")
+             experiment.log().info("Epoch complete", epoch=1, loss=0.5)
+         """
+         self._write(LogLevel.INFO.value, message, extra_metadata)
+
+     def warn(self, message: str, **extra_metadata) -> None:
+         """
+         Write a warning-level log.
+
+         Args:
+             message: Log message
+             **extra_metadata: Additional metadata as keyword arguments
+
+         Example:
+             experiment.log().warn("High loss detected", loss=1.5)
+         """
+         self._write(LogLevel.WARN.value, message, extra_metadata)
+
+     def error(self, message: str, **extra_metadata) -> None:
+         """
+         Write an error-level log.
+
+         Args:
+             message: Log message
+             **extra_metadata: Additional metadata as keyword arguments
+
+         Example:
+             experiment.log().error("Failed to save", path="/models/checkpoint.pth")
+         """
+         self._write(LogLevel.ERROR.value, message, extra_metadata)
+
+     def debug(self, message: str, **extra_metadata) -> None:
+         """
+         Write a debug-level log.
+
+         Args:
+             message: Log message
+             **extra_metadata: Additional metadata as keyword arguments
+
+         Example:
+             experiment.log().debug("Memory usage", memory_mb=2500)
+         """
+         self._write(LogLevel.DEBUG.value, message, extra_metadata)
+
+     def fatal(self, message: str, **extra_metadata) -> None:
+         """
+         Write a fatal-level log.
+
+         Args:
+             message: Log message
+             **extra_metadata: Additional metadata as keyword arguments
+
+         Example:
+             experiment.log().fatal("Unrecoverable error", exit_code=1)
+         """
+         self._write(LogLevel.FATAL.value, message, extra_metadata)
+
+     def _write(self, level: str, message: str, extra_metadata: Dict[str, Any]) -> None:
+         """
+         Internal: execute the actual log write.
+
+         Merges metadata from the log() call with metadata from the
+         level method, then writes immediately (no buffering).
+
+         Args:
+             level: Log level (already validated)
+             message: Log message
+             extra_metadata: Additional metadata from level-method kwargs
+         """
+         # Merge metadata from the log() call with per-call kwargs;
+         # per-call kwargs take precedence on key collisions.
+         if self._metadata and extra_metadata:
+             final_metadata = {**self._metadata, **extra_metadata}
+         elif self._metadata:
+             final_metadata = self._metadata
+         elif extra_metadata:
+             final_metadata = extra_metadata
+         else:
+             final_metadata = None
+
+         # Write immediately (no buffering)
+         self._experiment._write_log(
+             message=message,
+             level=level,
+             metadata=final_metadata,
+             timestamp=None
+         )
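Putting the two classes together, a minimal usage sketch (the `experiment` handle comes from the SDK's Experiment API, which is not shown in this file):

    # Metadata passed to log() is merged with per-call kwargs;
    # per-call kwargs win on key collisions (see LogBuilder._write).
    experiment.log(metadata={"epoch": 1}).info("Training started")
    experiment.log(metadata={"epoch": 1}).error("Step failed", epoch=2, code=500)
    # metadata written: {"epoch": 2, "code": 500}

    # LogLevel.validate normalizes case and rejects unknown levels.
    from ml_dash.log import LogLevel
    LogLevel.validate("WARN")   # returns "warn"
    LogLevel.validate("trace")  # raises ValueError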
ml_dash/metric.py ADDED
@@ -0,0 +1,292 @@
+ """
+ Metric API - Time-series data tracking for ML experiments.
+
+ Metrics are used for storing continuous data series like training metrics,
+ validation losses, system measurements, etc.
+ """
+
+ from typing import Dict, Any, List, Optional, TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from .experiment import Experiment
+
+
+ class MetricsManager:
+     """
+     Manager for metric operations that supports both named and unnamed usage.
+
+     Supports three usage patterns:
+     1. Named via call: experiment.metrics("loss").append(value=0.5, step=1)
+     2. Named via argument: experiment.metrics.append(name="loss", value=0.5, step=1)
+     3. Unnamed: experiment.metrics.append(value=0.5, step=1)  # name=None
+
+     Usage:
+         # With explicit metric name (via call)
+         experiment.metrics("train_loss").append(value=0.5, step=100)
+
+         # With explicit metric name (via argument)
+         experiment.metrics.append(name="train_loss", value=0.5, step=100)
+
+         # Without name (uses None as metric name)
+         experiment.metrics.append(value=0.5, step=100)
+     """
+
+     def __init__(self, experiment: 'Experiment'):
+         """
+         Initialize MetricsManager.
+
+         Args:
+             experiment: Parent Experiment instance
+         """
+         self._experiment = experiment
+
+     def __call__(self, name: str, description: Optional[str] = None,
+                  tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None) -> 'MetricBuilder':
+         """
+         Get a MetricBuilder for a specific metric name.
+
+         Args:
+             name: Metric name (unique within experiment)
+             description: Optional metric description
+             tags: Optional tags for categorization
+             metadata: Optional structured metadata
+
+         Returns:
+             MetricBuilder instance for the named metric
+
+         Examples:
+             experiment.metrics("loss").append(value=0.5, step=1)
+         """
+         return MetricBuilder(self._experiment, name, description, tags, metadata)
+
+     def append(self, name: Optional[str] = None, data: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         """
+         Append a data point to a metric (name is optional).
+
+         Args:
+             name: Metric name (optional; None for unnamed metrics)
+             data: Data dict (alternative to kwargs)
+             **kwargs: Data as keyword arguments
+
+         Returns:
+             Response dict with metric metadata
+
+         Examples:
+             experiment.metrics.append(name="loss", value=0.5, step=1)
+             experiment.metrics.append(value=0.5, step=1)  # name=None
+             experiment.metrics.append(name="loss", data={"value": 0.5, "step": 1})
+         """
+         if data is None:
+             data = kwargs
+         return self._experiment._append_to_metric(name, data, None, None, None)
+
+     def append_batch(self, name: Optional[str] = None, data_points: Optional[List[Dict[str, Any]]] = None,
+                      description: Optional[str] = None,
+                      tags: Optional[List[str]] = None,
+                      metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+         """
+         Append multiple data points to a metric.
+
+         Args:
+             name: Metric name (optional; None for unnamed metrics)
+             data_points: List of data point dicts
+             description: Optional metric description
+             tags: Optional tags for categorization
+             metadata: Optional structured metadata
+
+         Returns:
+             Response dict with metric metadata
+
+         Examples:
+             experiment.metrics.append_batch(
+                 name="loss",
+                 data_points=[
+                     {"value": 0.5, "step": 1},
+                     {"value": 0.4, "step": 2}
+                 ]
+             )
+             experiment.metrics.append_batch(
+                 data_points=[
+                     {"value": 0.5, "step": 1},
+                     {"value": 0.4, "step": 2}
+                 ]
+             )  # name=None
+         """
+         if data_points is None:
+             data_points = []
+         return self._experiment._append_batch_to_metric(name, data_points, description, tags, metadata)
+
+
+ class MetricBuilder:
+     """
+     Builder for metric operations.
+
+     Provides a fluent API for appending, reading, and querying metric data.
+
+     Usage:
+         # Append single data point
+         experiment.metrics("train_loss").append(value=0.5, step=100)
+
+         # Append batch
+         experiment.metrics("train_loss").append_batch([
+             {"value": 0.5, "step": 100},
+             {"value": 0.45, "step": 101}
+         ])
+
+         # Read data
+         data = experiment.metrics("train_loss").read(start_index=0, limit=100)
+
+         # Get statistics
+         stats = experiment.metrics("train_loss").stats()
+     """
+
+     def __init__(self, experiment: 'Experiment', name: str, description: Optional[str] = None,
+                  tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None):
+         """
+         Initialize MetricBuilder.
+
+         Args:
+             experiment: Parent Experiment instance
+             name: Metric name (unique within experiment)
+             description: Optional metric description
+             tags: Optional tags for categorization
+             metadata: Optional structured metadata (units, type, etc.)
+         """
+         self._experiment = experiment
+         self._name = name
+         self._description = description
+         self._tags = tags
+         self._metadata = metadata
+
+     def append(self, **kwargs) -> Dict[str, Any]:
+         """
+         Append a single data point to the metric.
+
+         The data point can have any structure; common patterns:
+         - {"value": 0.5, "step": 100}
+         - {"loss": 0.3, "accuracy": 0.92, "epoch": 5}
+         - {"timestamp": "...", "temperature": 25.5, "humidity": 60}
+
+         Args:
+             **kwargs: Data point fields (flexible schema)
+
+         Returns:
+             Dict with metricId, index, bufferedDataPoints, chunkSize
+
+         Example:
+             result = experiment.metrics("train_loss").append(value=0.5, step=100, epoch=1)
+             print(f"Appended at index {result['index']}")
+         """
+         result = self._experiment._append_to_metric(
+             name=self._name,
+             data=kwargs,
+             description=self._description,
+             tags=self._tags,
+             metadata=self._metadata
+         )
+         return result
+
+     def append_batch(self, data_points: List[Dict[str, Any]]) -> Dict[str, Any]:
+         """
+         Append multiple data points in batch (more efficient than repeated append calls).
+
+         Args:
+             data_points: List of data point dicts
+
+         Returns:
+             Dict with metricId, startIndex, endIndex, count, bufferedDataPoints, chunkSize
+
+         Example:
+             result = experiment.metrics("metrics").append_batch([
+                 {"loss": 0.5, "acc": 0.8, "step": 1},
+                 {"loss": 0.4, "acc": 0.85, "step": 2},
+                 {"loss": 0.3, "acc": 0.9, "step": 3}
+             ])
+             print(f"Appended {result['count']} points")
+         """
+         if not data_points:
+             raise ValueError("data_points cannot be empty")
+
+         result = self._experiment._append_batch_to_metric(
+             name=self._name,
+             data_points=data_points,
+             description=self._description,
+             tags=self._tags,
+             metadata=self._metadata
+         )
+         return result
+
+     def read(self, start_index: int = 0, limit: int = 1000) -> Dict[str, Any]:
+         """
+         Read data points from the metric by index range.
+
+         Args:
+             start_index: Starting index (inclusive, default 0)
+             limit: Maximum number of points to read (default 1000, max 10000)
+
+         Returns:
+             Dict with keys:
+             - data: List of {index: str, data: dict, createdAt: str}
+             - startIndex: Starting index
+             - endIndex: Ending index
+             - total: Number of points returned
+             - hasMore: Whether more data exists beyond this range
+
+         Example:
+             result = experiment.metrics("train_loss").read(start_index=0, limit=100)
+             for point in result['data']:
+                 print(f"Index {point['index']}: {point['data']}")
+         """
+         return self._experiment._read_metric_data(
+             name=self._name,
+             start_index=start_index,
+             limit=limit
+         )
+
+     def stats(self) -> Dict[str, Any]:
+         """
+         Get metric statistics and metadata.
+
+         Returns:
+             Dict with metric info:
+             - metricId: Unique metric ID
+             - name: Metric name
+             - description: Metric description (if set)
+             - tags: Tags list
+             - metadata: User metadata
+             - totalDataPoints: Total points (buffered + chunked)
+             - bufferedDataPoints: Points in MongoDB (hot storage)
+             - chunkedDataPoints: Points in S3 (cold storage)
+             - totalChunks: Number of chunks in S3
+             - chunkSize: Chunking threshold
+             - firstDataAt: Timestamp of first point (if data has timestamp)
+             - lastDataAt: Timestamp of last point (if data has timestamp)
+             - createdAt: Metric creation time
+             - updatedAt: Last update time
+
+         Example:
+             stats = experiment.metrics("train_loss").stats()
+             print(f"Total points: {stats['totalDataPoints']}")
+             print(f"Buffered: {stats['bufferedDataPoints']}, Chunked: {stats['chunkedDataPoints']}")
+         """
+         return self._experiment._get_metric_stats(name=self._name)
+
+     def list_all(self) -> List[Dict[str, Any]]:
+         """
+         List all metrics in the experiment.
+
+         Returns:
+             List of metric summaries with keys:
+             - metricId: Unique metric ID
+             - name: Metric name
+             - description: Metric description
+             - tags: Tags list
+             - totalDataPoints: Total data points
+             - createdAt: Creation timestamp
+
+         Example:
+             metrics = experiment.metrics("train_loss").list_all()
+             for metric in metrics:
+                 print(f"{metric['name']}: {metric['totalDataPoints']} points")
+         """
+         return self._experiment._list_metrics()
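A minimal end-to-end sketch of the metric workflow (assumes an open `experiment`; the field names are illustrative, since data points have a flexible schema):

    loss = experiment.metrics("train_loss", tags=["training"])
    loss.append(value=0.5, step=1)                      # single point
    loss.append_batch([{"value": 0.4, "step": 2},
                       {"value": 0.3, "step": 3}])      # batched, one request

    page = loss.read(start_index=0, limit=100)          # paged read-back
    for point in page["data"]:
        print(point["index"], point["data"])

    stats = loss.stats()                                # hot vs. cold storage counts
    print(stats["totalDataPoints"], stats["bufferedDataPoints"])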
ml_dash/params.py ADDED
@@ -0,0 +1,188 @@
+ """
+ Parameters module for ML-Dash SDK.
+
+ Provides a fluent API for parameter management with automatic dict flattening.
+ Nested dicts are flattened to dot notation: {"model": {"lr": 0.001}} → {"model.lr": 0.001}
+ """
+
+ from typing import Dict, Any, Optional, TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from .experiment import Experiment
+
+
+ class ParametersBuilder:
+     """
+     Fluent interface for parameter operations.
+
+     Usage:
+         experiment.parameters().set(model={"lr": 0.001}, optimizer="adam")
+         params = experiment.parameters().get()
+         params_nested = experiment.parameters().get(flatten=False)
+     """
+
+     def __init__(self, experiment: 'Experiment'):
+         """
+         Initialize parameters builder.
+
+         Args:
+             experiment: Parent experiment instance
+         """
+         self._experiment = experiment
+
+     def set(self, **kwargs) -> 'ParametersBuilder':
+         """
+         Set/merge parameters. Always merges with existing parameters (upsert behavior).
+
+         Nested dicts are automatically flattened:
+             set(model={"lr": 0.001, "batch_size": 32})
+             → {"model.lr": 0.001, "model.batch_size": 32}
+
+         Args:
+             **kwargs: Parameters to set (can be nested dicts)
+
+         Returns:
+             Self, to allow chaining
+
+         Raises:
+             RuntimeError: If experiment is not open
+             RuntimeError: If experiment is write-protected
+
+         Examples:
+             # Set nested parameters
+             experiment.parameters().set(
+                 model={"lr": 0.001, "batch_size": 32},
+                 optimizer="adam"
+             )
+
+             # Merge/update specific parameters
+             experiment.parameters().set(model={"lr": 0.0001})  # Only updates model.lr
+
+             # Set flat parameters with dot notation
+             experiment.parameters().set(**{"model.lr": 0.001, "model.batch_size": 32})
+         """
+         if not self._experiment._is_open:
+             raise RuntimeError("Experiment not open. Use experiment.run.start() or context manager.")
+
+         if self._experiment._write_protected:
+             raise RuntimeError("Experiment is write-protected and cannot be modified.")
+
+         # Flatten the kwargs
+         flattened = self.flatten_dict(kwargs)
+
+         if not flattened:
+             # No parameters to set, just return
+             return self
+
+         # Write parameters through experiment
+         self._experiment._write_params(flattened)
+
+         return self
+
+     def get(self, flatten: bool = True) -> Dict[str, Any]:
+         """
+         Get parameters from the experiment.
+
+         Args:
+             flatten: If True, returns a flattened dict with dot-notation keys.
+                      If False, returns a nested dict structure.
+
+         Returns:
+             Parameters dict (flattened or nested, per the flatten arg)
+
+         Raises:
+             RuntimeError: If experiment is not open
+
+         Examples:
+             # Get flattened parameters
+             params = experiment.parameters().get()
+             # → {"model.lr": 0.001, "model.batch_size": 32, "optimizer": "adam"}
+
+             # Get nested parameters
+             params = experiment.parameters().get(flatten=False)
+             # → {"model": {"lr": 0.001, "batch_size": 32}, "optimizer": "adam"}
+         """
+         if not self._experiment._is_open:
+             raise RuntimeError("Experiment not open. Use experiment.open() or context manager.")
+
+         # Read parameters through experiment
+         params = self._experiment._read_params()
+
+         if params is None:
+             return {}
+
+         # Parameters are stored flattened; unflatten only on request
+         if flatten:
+             return params
+         else:
+             return self.unflatten_dict(params)
+
+     @staticmethod
+     def flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]:
+         """
+         Flatten a nested dictionary into dot-notation keys.
+
+         Args:
+             d: Dictionary to flatten (can contain nested dicts)
+             parent_key: Prefix for keys (used in recursion)
+             sep: Separator character (default: '.')
+
+         Returns:
+             Flattened dictionary with dot-notation keys
+
+         Examples:
+             >>> ParametersBuilder.flatten_dict({"a": {"b": 1, "c": 2}, "d": 3})
+             {'a.b': 1, 'a.c': 2, 'd': 3}
+
+             >>> ParametersBuilder.flatten_dict({"model": {"lr": 0.001, "layers": {"hidden": 128}}})
+             {'model.lr': 0.001, 'model.layers.hidden': 128}
+         """
+         items = []
+
+         for k, v in d.items():
+             new_key = f"{parent_key}{sep}{k}" if parent_key else k
+
+             if isinstance(v, dict):
+                 # Recursively flatten nested dicts
+                 items.extend(ParametersBuilder.flatten_dict(v, new_key, sep=sep).items())
+             else:
+                 # Keep non-dict values as-is
+                 items.append((new_key, v))
+
+         return dict(items)
+
+     @staticmethod
+     def unflatten_dict(d: Dict[str, Any], sep: str = '.') -> Dict[str, Any]:
+         """
+         Unflatten a dot-notation dictionary into a nested structure.
+
+         Args:
+             d: Flattened dictionary with dot-notation keys
+             sep: Separator character (default: '.')
+
+         Returns:
+             Nested dictionary structure
+
+         Examples:
+             >>> ParametersBuilder.unflatten_dict({"a.b": 1, "a.c": 2, "d": 3})
+             {'a': {'b': 1, 'c': 2}, 'd': 3}
+
+             >>> ParametersBuilder.unflatten_dict({"model.lr": 0.001, "model.layers.hidden": 128})
+             {'model': {'lr': 0.001, 'layers': {'hidden': 128}}}
+         """
+         result = {}
+
+         for key, value in d.items():
+             parts = key.split(sep)
+             current = result
+
+             # Navigate/create nested structure
+             for part in parts[:-1]:
+                 if part not in current:
+                     current[part] = {}
+                 current = current[part]
+
+             # Set the final value
+             current[parts[-1]] = value
+
+         return result
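Since flatten_dict and unflatten_dict are pure static methods, the round-trip can be checked standalone (it holds as long as the original keys contain no '.' themselves):

    from ml_dash.params import ParametersBuilder

    nested = {"model": {"lr": 0.001, "layers": {"hidden": 128}}, "optimizer": "adam"}
    flat = ParametersBuilder.flatten_dict(nested)
    # {'model.lr': 0.001, 'model.layers.hidden': 128, 'optimizer': 'adam'}

    assert ParametersBuilder.unflatten_dict(flat) == nested  # round-trips cleanly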