ml-dash 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ml_dash/params.py ADDED
@@ -0,0 +1,277 @@
+ """
+ Parameters module for ML-Dash SDK.
+
+ Provides a fluent API for parameter management with automatic dict flattening.
+ Nested dicts are flattened to dot-notation: {"model": {"lr": 0.001}} → {"model.lr": 0.001}
+ """
+
+ from typing import Dict, Any, Optional, TYPE_CHECKING
+ import inspect
+
+ if TYPE_CHECKING:
+     from .experiment import Experiment
+
+
+ class ParametersBuilder:
+     """
+     Fluent interface for parameter operations.
+
+     Usage:
+         experiment.parameters().set(model={"lr": 0.001}, optimizer="adam")
+         params = experiment.parameters().get()
+         params_nested = experiment.parameters().get(flatten=False)
+     """
+
+     def __init__(self, experiment: 'Experiment'):
+         """
+         Initialize parameters builder.
+
+         Args:
+             experiment: Parent experiment instance
+         """
+         self._experiment = experiment
+
+     def set(self, **kwargs) -> 'ParametersBuilder':
+         """
+         Set/merge parameters. Always merges with existing parameters (upsert behavior).
+
+         Nested dicts are automatically flattened:
+             set(model={"lr": 0.001, "batch_size": 32})
+             → {"model.lr": 0.001, "model.batch_size": 32}
+
+         Args:
+             **kwargs: Parameters to set (can be nested dicts)
+
+         Returns:
+             Self for potential chaining
+
+         Raises:
+             RuntimeError: If experiment is not open
+             RuntimeError: If experiment is write-protected
+
+         Examples:
+             # Set nested parameters
+             experiment.parameters().set(
+                 model={"lr": 0.001, "batch_size": 32},
+                 optimizer="adam"
+             )
+
+             # Merge/update specific parameters
+             experiment.parameters().set(model={"lr": 0.0001}) # Only updates model.lr
+
+             # Set flat parameters with dot notation
+             experiment.parameters().set(**{"model.lr": 0.001, "model.batch_size": 32})
+         """
+         if not self._experiment._is_open:
+             raise RuntimeError(
+                 "Experiment not started. Use 'with experiment.run:' or call experiment.run.start() first.\n"
+                 "Example:\n"
+                 " with dxp.run:\n"
+                 " dxp.params.set(lr=0.001)"
+             )
+
+         if self._experiment._write_protected:
+             raise RuntimeError("Experiment is write-protected and cannot be modified.")
+
+         # Convert class objects to dicts (for params_proto support)
+         processed_kwargs = self._process_class_objects(kwargs)
+
+         # Flatten the kwargs
+         flattened = self.flatten_dict(processed_kwargs)
+
+         if not flattened:
+             # No parameters to set, just return
+             return self
+
+         # Write parameters through experiment
+         self._experiment._write_params(flattened)
+
+         return self
+
+     def log(self, **kwargs) -> 'ParametersBuilder':
+         """
+         Alias for set(). Sets/merges parameters.
+
+         This method exists for better parameter organization and semantic clarity.
+         It behaves exactly the same as set().
+
+         Nested dicts are automatically flattened:
+             log(model={"lr": 0.001, "batch_size": 32})
+             → {"model.lr": 0.001, "model.batch_size": 32}
+
+         Args:
+             **kwargs: Parameters to set (can be nested dicts)
+
+         Returns:
+             Self for potential chaining
+
+         Raises:
+             RuntimeError: If experiment is not open
+             RuntimeError: If experiment is write-protected
+
+         Examples:
+             # Set parameters using log() - same as set()
+             experiment.params.log(
+                 learning_rate=0.001,
+                 batch_size=32,
+                 model="resnet50"
+             )
+
+             # Track parameter changes during training
+             for epoch in range(10):
+                 if epoch == 5:
+                     experiment.params.log(learning_rate=0.0001) # Log LR decay
+         """
+         # Just call set() - they behave exactly the same
+         return self.set(**kwargs)
+
+     def get(self, flatten: bool = True) -> Dict[str, Any]:
+         """
+         Get parameters from the experiment.
+
+         Args:
+             flatten: If True, returns flattened dict with dot notation.
+                 If False, returns nested dict structure.
+
+         Returns:
+             Parameters dict (flattened or nested based on flatten arg)
+
+         Raises:
+             RuntimeError: If experiment is not open
+
+         Examples:
+             # Get flattened parameters
+             params = experiment.parameters().get()
+             # → {"model.lr": 0.001, "model.batch_size": 32, "optimizer": "adam"}
+
+             # Get nested parameters
+             params = experiment.parameters().get(flatten=False)
+             # → {"model": {"lr": 0.001, "batch_size": 32}, "optimizer": "adam"}
+         """
+         if not self._experiment._is_open:
+             raise RuntimeError(
+                 "Experiment not started. Use 'with experiment.run:' or call experiment.run.start() first.\n"
+                 "Example:\n"
+                 " with dxp.run:\n"
+                 " dxp.params.get()"
+             )
+
+         # Read parameters through experiment
+         params = self._experiment._read_params()
+
+         if params is None:
+             return {}
+
+         # Return as-is if flatten=True (stored flattened), or unflatten if needed
+         if flatten:
+             return params
+         else:
+             return self.unflatten_dict(params)
+
+     @staticmethod
+     def flatten_dict(d: Dict[str, Any], parent_key: str = '', sep: str = '.') -> Dict[str, Any]:
+         """
+         Flatten a nested dictionary into dot-notation keys.
+
+         Args:
+             d: Dictionary to flatten (can contain nested dicts)
+             parent_key: Prefix for keys (used in recursion)
+             sep: Separator character (default: '.')
+
+         Returns:
+             Flattened dictionary with dot-notation keys
+
+         Examples:
+             >>> flatten_dict({"a": {"b": 1, "c": 2}, "d": 3})
+             {"a.b": 1, "a.c": 2, "d": 3}
+
+             >>> flatten_dict({"model": {"lr": 0.001, "layers": {"hidden": 128}}})
+             {"model.lr": 0.001, "model.layers.hidden": 128}
+         """
+         items = []
+
+         for k, v in d.items():
+             new_key = f"{parent_key}{sep}{k}" if parent_key else k
+
+             if isinstance(v, dict):
+                 # Recursively flatten nested dicts
+                 items.extend(ParametersBuilder.flatten_dict(v, new_key, sep=sep).items())
+             else:
+                 # Keep non-dict values as-is
+                 items.append((new_key, v))
+
+         return dict(items)
+
+     @staticmethod
+     def unflatten_dict(d: Dict[str, Any], sep: str = '.') -> Dict[str, Any]:
+         """
+         Unflatten a dot-notation dictionary into nested structure.
+
+         Args:
+             d: Flattened dictionary with dot-notation keys
+             sep: Separator character (default: '.')
+
+         Returns:
+             Nested dictionary structure
+
+         Examples:
+             >>> unflatten_dict({"a.b": 1, "a.c": 2, "d": 3})
+             {"a": {"b": 1, "c": 2}, "d": 3}
+
+             >>> unflatten_dict({"model.lr": 0.001, "model.layers.hidden": 128})
+             {"model": {"lr": 0.001, "layers": {"hidden": 128}}}
+         """
+         result = {}
+
+         for key, value in d.items():
+             parts = key.split(sep)
+             current = result
+
+             # Navigate/create nested structure
+             for part in parts[:-1]:
+                 if part not in current:
+                     current[part] = {}
+                 current = current[part]
+
+             # Set the final value
+             current[parts[-1]] = value
+
+         return result
+
+     @staticmethod
+     def _process_class_objects(d: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Convert class objects to dicts by extracting their attributes.
+
+         This enables passing configuration classes directly:
+             dxp.params.log(Args=Args) # Args is a class
+             → {"Args": {"batch_size": 64, "lr": 0.001, ...}}
+
+         Args:
+             d: Dictionary that may contain class objects as values
+
+         Returns:
+             Dictionary with class objects converted to attribute dicts
+
+         Examples:
+             >>> class Args:
+             ...     batch_size = 64
+             ...     lr = 0.001
+             >>> _process_class_objects({"Args": Args})
+             {"Args": {"batch_size": 64, "lr": 0.001}}
+         """
+         result = {}
+         for key, value in d.items():
+             if inspect.isclass(value):
+                 # Extract class attributes (skip private/magic and callables)
+                 attrs = {}
+                 for attr_name, attr_value in vars(value).items():
+                     if not attr_name.startswith('_') and not callable(attr_value):
+                         # Skip attribute values that are themselves classes (type references)
+                         if isinstance(attr_value, type):
+                             continue # Skip type annotations
+                         attrs[attr_name] = attr_value
+                 result[key] = attrs
+             else:
+                 result[key] = value
+         return result
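
For reference, a minimal usage sketch (not part of the package diff) assembled from the docstrings above. It assumes the wheel is installed as ml_dash and exercises only the two static helpers, so it needs no running experiment or server:

    from ml_dash.params import ParametersBuilder

    # Nested dict -> dot-notation keys, per the flatten_dict() examples above
    flat = ParametersBuilder.flatten_dict({"model": {"lr": 0.001, "batch_size": 32}, "optimizer": "adam"})
    assert flat == {"model.lr": 0.001, "model.batch_size": 32, "optimizer": "adam"}

    # Dot-notation keys -> nested dict, per the unflatten_dict() examples above
    nested = ParametersBuilder.unflatten_dict(flat)
    assert nested == {"model": {"lr": 0.001, "batch_size": 32}, "optimizer": "adam"}
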
ml_dash/py.typed ADDED
File without changes
@@ -0,0 +1,55 @@
+ """
+ Pre-configured remote experiment singleton for ML-Dash SDK.
+
+ Provides a pre-configured experiment singleton named 'rdxp' that uses remote mode.
+ Requires a manual start using a 'with' statement or an explicit start() call.
+
+ IMPORTANT: Before using rdxp, you must authenticate with the ML-Dash server:
+     # First time setup - authenticate with the server
+     python -m ml_dash.cli login
+
+ Usage:
+     from ml_dash import rdxp
+
+     # Use with statement (recommended)
+     with rdxp.run:
+         rdxp.log().info("Hello from rdxp!")
+         rdxp.params.set(lr=0.001)
+         rdxp.metrics("loss").append(step=0, value=0.5)
+     # Automatically completes on exit from with block
+
+     # Or start/complete manually
+     rdxp.run.start()
+     rdxp.log().info("Training...")
+     rdxp.run.complete()
+
+ Configuration:
+     - Default server: https://api.dash.ml
+     - To use a different server, set the MLDASH_API_URL environment variable
+     - Authentication token is auto-loaded from secure storage
+ """
+
+ import atexit
+ from .experiment import Experiment
+
+ # Create pre-configured singleton experiment for remote mode
+ # Uses remote API server - token auto-loaded from storage
+ rdxp = Experiment(
+     name="rdxp",
+     project="scratch",
+     remote="https://api.dash.ml"
+ )
+
+ # Register cleanup handler to complete experiment on Python exit (if still open)
+ def _cleanup():
+     """Complete the rdxp experiment on exit if still open."""
+     if rdxp._is_open:
+         try:
+             rdxp.run.complete()
+         except Exception:
+             # Silently ignore errors during cleanup
+             pass
+
+ atexit.register(_cleanup)
+
+ __all__ = ["rdxp"]
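
As a usage note (not part of the diff), the pieces above compose into a short end-to-end run. This is a minimal sketch assembled only from calls shown in the docstrings; it assumes prior authentication via 'python -m ml_dash.cli login' and network access to the default server:

    from ml_dash import rdxp

    with rdxp.run:
        # Parameters are flattened to dot notation, e.g. model.lr / model.batch_size
        rdxp.params.set(model={"lr": 0.001, "batch_size": 32}, optimizer="adam")
        for step in range(3):
            rdxp.metrics("loss").append(step=step, value=1.0 / (step + 1))
        rdxp.log().info("run finished")
    # Exiting the with-block completes the run; the atexit hook is only a fallback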