ml-dash 0.0.11__py3-none-any.whl → 0.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. ml_dash/__init__.py +59 -1
  2. ml_dash/auto_start.py +42 -0
  3. ml_dash/cli.py +67 -0
  4. ml_dash/cli_commands/__init__.py +1 -0
  5. ml_dash/cli_commands/download.py +797 -0
  6. ml_dash/cli_commands/list.py +343 -0
  7. ml_dash/cli_commands/upload.py +1298 -0
  8. ml_dash/client.py +955 -0
  9. ml_dash/config.py +114 -11
  10. ml_dash/experiment.py +1020 -0
  11. ml_dash/files.py +688 -0
  12. ml_dash/log.py +181 -0
  13. ml_dash/metric.py +292 -0
  14. ml_dash/params.py +188 -0
  15. ml_dash/storage.py +1115 -0
  16. ml_dash-0.5.9.dist-info/METADATA +244 -0
  17. ml_dash-0.5.9.dist-info/RECORD +20 -0
  18. ml_dash-0.5.9.dist-info/WHEEL +4 -0
  19. ml_dash-0.5.9.dist-info/entry_points.txt +3 -0
  20. ml_dash/app.py +0 -33
  21. ml_dash/file_events.py +0 -71
  22. ml_dash/file_handlers.py +0 -141
  23. ml_dash/file_utils.py +0 -5
  24. ml_dash/file_watcher.py +0 -30
  25. ml_dash/main.py +0 -60
  26. ml_dash/mime_types.py +0 -20
  27. ml_dash/schema/__init__.py +0 -110
  28. ml_dash/schema/archive.py +0 -165
  29. ml_dash/schema/directories.py +0 -59
  30. ml_dash/schema/experiments.py +0 -65
  31. ml_dash/schema/files/__init__.py +0 -204
  32. ml_dash/schema/files/file_helpers.py +0 -79
  33. ml_dash/schema/files/images.py +0 -27
  34. ml_dash/schema/files/metrics.py +0 -64
  35. ml_dash/schema/files/parameters.py +0 -50
  36. ml_dash/schema/files/series.py +0 -235
  37. ml_dash/schema/files/videos.py +0 -27
  38. ml_dash/schema/helpers.py +0 -66
  39. ml_dash/schema/projects.py +0 -65
  40. ml_dash/schema/schema_helpers.py +0 -19
  41. ml_dash/schema/users.py +0 -33
  42. ml_dash/sse.py +0 -18
  43. ml_dash-0.0.11.dist-info/METADATA +0 -67
  44. ml_dash-0.0.11.dist-info/RECORD +0 -30
  45. ml_dash-0.0.11.dist-info/WHEEL +0 -5
  46. ml_dash-0.0.11.dist-info/top_level.txt +0 -1
  47. /ml_dash/{example.py → py.typed} +0 -0
ml_dash/experiment.py ADDED
@@ -0,0 +1,1020 @@
1
+ """
2
+ Experiment class for ML-Dash SDK.
3
+
4
+ Supports three usage styles:
5
+ 1. Decorator: @ml_dash_experiment(...)
6
+ 2. Context manager: with Experiment(...) as exp:
7
+ 3. Direct instantiation: exp = Experiment(...)
8
+ """
9
+
10
import functools
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional

from .client import RemoteClient
from .files import FileBuilder
from .log import LogBuilder, LogLevel
from .params import ParametersBuilder
from .storage import LocalStorage
21
+
22
+
23
class OperationMode(Enum):
    """Operation mode for the experiment.

    Selected by Experiment.__init__ from its `remote` / `local_path`
    arguments and used to decide which backend(s) receive writes.
    """
    LOCAL = "local"    # filesystem only (local_path given)
    REMOTE = "remote"  # API only (remote given)
    HYBRID = "hybrid"  # Future: sync local to remote
28
+
29
+
30
class RunManager:
    """
    Lifecycle manager for experiments.

    Three interchangeable usage patterns:
      1. Explicit calls:   experiment.run.start() / experiment.run.complete()
      2. Context manager:  with Experiment(...).run as exp: ...
      3. Decorator:        @exp.run (the wrapped function receives the experiment)
    """

    def __init__(self, experiment: "Experiment"):
        """Bind this manager to its parent Experiment."""
        self._experiment = experiment

    def _finish(self, status: str) -> None:
        # Every terminal transition funnels through the experiment's _close().
        self._experiment._close(status=status)

    def start(self) -> "Experiment":
        """Open the experiment (status RUNNING) and return it for chaining."""
        return self._experiment._open()

    def complete(self) -> None:
        """Terminal transition: mark the experiment COMPLETED."""
        self._finish("COMPLETED")

    def fail(self) -> None:
        """Terminal transition: mark the experiment FAILED."""
        self._finish("FAILED")

    def cancel(self) -> None:
        """Terminal transition: mark the experiment CANCELLED."""
        self._finish("CANCELLED")

    def __enter__(self) -> "Experiment":
        """Entering the context starts the experiment."""
        return self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Complete on clean exit, fail when an exception escaped; never suppress it."""
        self.fail() if exc_type is not None else self.complete()
        return False

    def __call__(self, func: Callable) -> Callable:
        """
        Use the manager as a decorator.

        The wrapped callable receives the open experiment as its first
        positional argument:

            @exp.run
            def train(exp):
                exp.log("Training...")
        """
        @functools.wraps(func)
        def runner(*args, **kwargs):
            with self as active:
                return func(active, *args, **kwargs)
        return runner
96
+
97
+
98
+ class Experiment:
99
+ """
100
+ ML-Dash experiment for tracking experiments.
101
+
102
+ Usage examples:
103
+
104
+ # Remote mode
105
+ experiment = Experiment(
106
+ name="my-experiment",
107
+ project="my-project",
108
+ remote="http://localhost:3000",
109
+ api_key="your-jwt-token"
110
+ )
111
+
112
+ # Local mode
113
+ experiment = Experiment(
114
+ name="my-experiment",
115
+ project="my-project",
116
+ local_path=".ml-dash"
117
+ )
118
+
119
+ # Context manager
120
+ with Experiment(...) as exp:
121
+ exp.log(...)
122
+
123
+ # Decorator
124
+ @ml_dash_experiment(name="exp", project="ws", remote="...")
125
+ def train():
126
+ ...
127
+ """
128
+
129
    def __init__(
        self,
        name: str,
        project: str,
        *,
        description: Optional[str] = None,
        tags: Optional[List[str]] = None,
        bindrs: Optional[List[str]] = None,
        folder: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        # Mode configuration
        remote: Optional[str] = None,
        api_key: Optional[str] = None,
        user_name: Optional[str] = None,
        local_path: Optional[str] = None,
        # Internal parameters
        _write_protected: bool = False,
    ):
        """
        Initialize an ML-Dash experiment.

        The operation mode is derived from `remote` / `local_path`:
        both -> HYBRID, only `remote` -> REMOTE, only `local_path` -> LOCAL,
        neither -> ValueError. Nothing is created on the server or on disk
        here; that happens in _open() (via experiment.run / context manager).

        Args:
            name: Experiment name (unique within project)
            project: Project name
            description: Optional experiment description
            tags: Optional list of tags
            bindrs: Optional list of bindrs
            folder: Optional folder path (e.g., "/experiments/baseline")
            metadata: Optional metadata dict
            remote: Remote API URL (e.g., "http://localhost:3000")
            api_key: JWT token for authentication (if not provided, will be generated from user_name)
            user_name: Username for authentication (generates API key if api_key not provided)
            local_path: Local storage root path (for local mode)
            _write_protected: Internal parameter - if True, experiment becomes immutable after creation

        Raises:
            ValueError: If neither `remote` nor `local_path` is given, or if
                remote mode is requested without an api_key/user_name.
        """
        self.name = name
        self.project = project
        self.description = description
        self.tags = tags
        self.bindrs = bindrs
        self.folder = folder
        self._write_protected = _write_protected
        self.metadata = metadata

        # Generate API key from username if not provided
        # (only relevant when a remote endpoint is configured).
        if remote and not api_key and user_name:
            api_key = self._generate_api_key_from_username(user_name)

        # Determine operation mode from which backends were configured.
        if remote and local_path:
            self.mode = OperationMode.HYBRID
        elif remote:
            self.mode = OperationMode.REMOTE
        elif local_path:
            self.mode = OperationMode.LOCAL
        else:
            raise ValueError(
                "Must specify either 'remote' (with api_key/user_name) or 'local_path'"
            )

        # Initialize backend handles; populated below depending on mode.
        self._client: Optional[RemoteClient] = None
        self._storage: Optional[LocalStorage] = None
        self._experiment_id: Optional[str] = None      # set by _open() in remote mode
        self._experiment_data: Optional[Dict[str, Any]] = None  # server response from _open()
        self._is_open = False

        if self.mode in (OperationMode.REMOTE, OperationMode.HYBRID):
            if not api_key:
                raise ValueError("Either api_key or user_name is required for remote mode")
            self._client = RemoteClient(base_url=remote, api_key=api_key)

        if self.mode in (OperationMode.LOCAL, OperationMode.HYBRID):
            if not local_path:
                raise ValueError("local_path is required for local mode")
            self._storage = LocalStorage(root_path=Path(local_path))
205
+
206
+ @staticmethod
207
+ def _generate_api_key_from_username(user_name: str) -> str:
208
+ """
209
+ Generate a deterministic API key (JWT) from username.
210
+
211
+ This is a temporary solution until proper user authentication is implemented.
212
+ Generates a unique user ID from the username and creates a JWT token.
213
+
214
+ Args:
215
+ user_name: Username to generate API key from
216
+
217
+ Returns:
218
+ JWT token string
219
+ """
220
+ import hashlib
221
+ import time
222
+ import jwt
223
+
224
+ # Generate deterministic user ID from username (first 10 digits of SHA256 hash)
225
+ user_id = str(int(hashlib.sha256(user_name.encode()).hexdigest()[:16], 16))[:10]
226
+
227
+ # JWT payload
228
+ payload = {
229
+ "userId": user_id,
230
+ "userName": user_name,
231
+ "iat": int(time.time()),
232
+ "exp": int(time.time()) + (30 * 24 * 60 * 60) # 30 days expiration
233
+ }
234
+
235
+ # Secret key for signing (should match server's JWT_SECRET)
236
+ secret = "your-secret-key-change-this-in-production"
237
+
238
+ # Generate JWT
239
+ token = jwt.encode(payload, secret, algorithm="HS256")
240
+
241
+ return token
242
+
243
    def _open(self) -> "Experiment":
        """
        Internal method to open the experiment (create or update on server/filesystem).

        Idempotent: returns immediately if already open. In HYBRID mode both
        the remote create/update and the local directory creation run.

        Returns:
            self for chaining
        """
        if self._is_open:
            return self

        if self._client:
            # Remote mode: create/update experiment via API
            response = self._client.create_or_update_experiment(
                project=self.project,
                name=self.name,
                description=self.description,
                tags=self.tags,
                bindrs=self.bindrs,
                folder=self.folder,
                write_protected=self._write_protected,
                metadata=self.metadata,
            )
            self._experiment_data = response
            # Server response nests the record under "experiment".
            self._experiment_id = response["experiment"]["id"]

        if self._storage:
            # Local mode: create experiment directory structure.
            # NOTE(review): _write_protected is not forwarded here, unlike the
            # remote call above -- confirm this asymmetry is intended.
            self._storage.create_experiment(
                project=self.project,
                name=self.name,
                description=self.description,
                tags=self.tags,
                bindrs=self.bindrs,
                folder=self.folder,
                metadata=self.metadata,
            )

        self._is_open = True
        return self
282
+
283
    def _close(self, status: str = "COMPLETED"):
        """
        Internal method to close the experiment and update status.

        Idempotent: does nothing if the experiment is not open. Local writes
        are flushed first; the remote status update is best-effort and never
        raises out of this method.

        Args:
            status: Status to set - "COMPLETED" (default), "FAILED", or "CANCELLED"
        """
        if not self._is_open:
            return

        # Flush any pending writes
        if self._storage:
            self._storage.flush()

        # Update experiment status in remote mode (requires an id from _open)
        if self._client and self._experiment_id:
            try:
                self._client.update_experiment_status(
                    experiment_id=self._experiment_id,
                    status=status
                )
            except Exception as e:
                # Log error but don't fail the close operation
                print(f"Warning: Failed to update experiment status: {e}")

        self._is_open = False
309
+
310
+ @property
311
+ def run(self) -> RunManager:
312
+ """
313
+ Get the RunManager for lifecycle operations.
314
+
315
+ Usage:
316
+ # Method calls
317
+ experiment.run.start()
318
+ experiment.run.complete()
319
+
320
+ # Context manager
321
+ with Experiment(...).run as exp:
322
+ exp.log("Training...")
323
+
324
+ # Decorator
325
+ @experiment.run
326
+ def train(exp):
327
+ exp.log("Training...")
328
+
329
+ Returns:
330
+ RunManager instance
331
+ """
332
+ return RunManager(self)
333
+
334
+ @property
335
+ def params(self) -> ParametersBuilder:
336
+ """
337
+ Get a ParametersBuilder for parameter operations.
338
+
339
+ Usage:
340
+ # Set parameters
341
+ experiment.params.set(lr=0.001, batch_size=32)
342
+
343
+ # Get parameters
344
+ params = experiment.params.get()
345
+
346
+ Returns:
347
+ ParametersBuilder instance
348
+
349
+ Raises:
350
+ RuntimeError: If experiment is not open
351
+ """
352
+ if not self._is_open:
353
+ raise RuntimeError("Experiment not open. Use experiment.run.start() or context manager.")
354
+
355
+ return ParametersBuilder(self)
356
+
357
+ def log(
358
+ self,
359
+ message: Optional[str] = None,
360
+ level: Optional[str] = None,
361
+ metadata: Optional[Dict[str, Any]] = None,
362
+ **extra_metadata
363
+ ) -> Optional[LogBuilder]:
364
+ """
365
+ Create a log entry or return a LogBuilder for fluent API.
366
+
367
+ This method supports two styles:
368
+
369
+ 1. Fluent style (no message provided):
370
+ Returns a LogBuilder that allows chaining with level methods.
371
+
372
+ Examples:
373
+ experiment.log(metadata={"epoch": 1}).info("Training started")
374
+ experiment.log().error("Failed", error_code=500)
375
+
376
+ 2. Traditional style (message provided):
377
+ Writes the log immediately and returns None.
378
+
379
+ Examples:
380
+ experiment.log("Training started", level="info", epoch=1)
381
+ experiment.log("Training started") # Defaults to "info"
382
+
383
+ Args:
384
+ message: Optional log message (for traditional style)
385
+ level: Optional log level (for traditional style, defaults to "info")
386
+ metadata: Optional metadata dict
387
+ **extra_metadata: Additional metadata as keyword arguments
388
+
389
+ Returns:
390
+ LogBuilder if no message provided (fluent mode)
391
+ None if log was written directly (traditional mode)
392
+
393
+ Raises:
394
+ RuntimeError: If experiment is not open
395
+ ValueError: If log level is invalid
396
+ """
397
+ if not self._is_open:
398
+ raise RuntimeError("Experiment not open. Use experiment.open() or context manager.")
399
+
400
+ # Fluent mode: return LogBuilder
401
+ if message is None:
402
+ combined_metadata = {**(metadata or {}), **extra_metadata}
403
+ return LogBuilder(self, combined_metadata if combined_metadata else None)
404
+
405
+ # Traditional mode: write immediately
406
+ level = level or LogLevel.INFO.value # Default to "info"
407
+ level = LogLevel.validate(level) # Validate level
408
+
409
+ combined_metadata = {**(metadata or {}), **extra_metadata}
410
+ self._write_log(
411
+ message=message,
412
+ level=level,
413
+ metadata=combined_metadata if combined_metadata else None,
414
+ timestamp=None
415
+ )
416
+ return None
417
+
418
+ def _write_log(
419
+ self,
420
+ message: str,
421
+ level: str,
422
+ metadata: Optional[Dict[str, Any]],
423
+ timestamp: Optional[datetime]
424
+ ) -> None:
425
+ """
426
+ Internal method to write a log entry immediately.
427
+ No buffering - writes directly to storage/remote.
428
+
429
+ Args:
430
+ message: Log message
431
+ level: Log level (already validated)
432
+ metadata: Optional metadata dict
433
+ timestamp: Optional custom timestamp (defaults to now)
434
+ """
435
+ log_entry = {
436
+ "timestamp": (timestamp or datetime.utcnow()).isoformat() + "Z",
437
+ "level": level,
438
+ "message": message,
439
+ }
440
+
441
+ if metadata:
442
+ log_entry["metadata"] = metadata
443
+
444
+ # Write immediately (no buffering)
445
+ if self._client:
446
+ # Remote mode: send to API (wrapped in array for batch API)
447
+ self._client.create_log_entries(
448
+ experiment_id=self._experiment_id,
449
+ logs=[log_entry] # Single log in array
450
+ )
451
+
452
+ if self._storage:
453
+ # Local mode: write to file immediately
454
+ self._storage.write_log(
455
+ project=self.project,
456
+ experiment=self.name,
457
+ message=log_entry["message"],
458
+ level=log_entry["level"],
459
+ metadata=log_entry.get("metadata"),
460
+ timestamp=log_entry["timestamp"]
461
+ )
462
+
463
+ def files(self, **kwargs) -> FileBuilder:
464
+ """
465
+ Get a FileBuilder for fluent file operations.
466
+
467
+ Returns:
468
+ FileBuilder instance for chaining
469
+
470
+ Raises:
471
+ RuntimeError: If experiment is not open
472
+
473
+ Examples:
474
+ # Upload file
475
+ experiment.files(file_path="./model.pt", prefix="/models").save()
476
+
477
+ # List files
478
+ files = experiment.files().list()
479
+ files = experiment.files(prefix="/models").list()
480
+
481
+ # Download file
482
+ experiment.files(file_id="123").download()
483
+
484
+ # Delete file
485
+ experiment.files(file_id="123").delete()
486
+ """
487
+ if not self._is_open:
488
+ raise RuntimeError("Experiment not open. Use experiment.open() or context manager.")
489
+
490
+ return FileBuilder(self, **kwargs)
491
+
492
+ def _upload_file(
493
+ self,
494
+ file_path: str,
495
+ prefix: str,
496
+ filename: str,
497
+ description: Optional[str],
498
+ tags: Optional[List[str]],
499
+ metadata: Optional[Dict[str, Any]],
500
+ checksum: str,
501
+ content_type: str,
502
+ size_bytes: int
503
+ ) -> Dict[str, Any]:
504
+ """
505
+ Internal method to upload a file.
506
+
507
+ Args:
508
+ file_path: Local file path
509
+ prefix: Logical path prefix
510
+ filename: Original filename
511
+ description: Optional description
512
+ tags: Optional tags
513
+ metadata: Optional metadata
514
+ checksum: SHA256 checksum
515
+ content_type: MIME type
516
+ size_bytes: File size in bytes
517
+
518
+ Returns:
519
+ File metadata dict
520
+ """
521
+ result = None
522
+
523
+ if self._client:
524
+ # Remote mode: upload to API
525
+ result = self._client.upload_file(
526
+ experiment_id=self._experiment_id,
527
+ file_path=file_path,
528
+ prefix=prefix,
529
+ filename=filename,
530
+ description=description,
531
+ tags=tags,
532
+ metadata=metadata,
533
+ checksum=checksum,
534
+ content_type=content_type,
535
+ size_bytes=size_bytes
536
+ )
537
+
538
+ if self._storage:
539
+ # Local mode: copy to local storage
540
+ result = self._storage.write_file(
541
+ project=self.project,
542
+ experiment=self.name,
543
+ file_path=file_path,
544
+ prefix=prefix,
545
+ filename=filename,
546
+ description=description,
547
+ tags=tags,
548
+ metadata=metadata,
549
+ checksum=checksum,
550
+ content_type=content_type,
551
+ size_bytes=size_bytes
552
+ )
553
+
554
+ return result
555
+
556
+ def _list_files(
557
+ self,
558
+ prefix: Optional[str] = None,
559
+ tags: Optional[List[str]] = None
560
+ ) -> List[Dict[str, Any]]:
561
+ """
562
+ Internal method to list files.
563
+
564
+ Args:
565
+ prefix: Optional prefix filter
566
+ tags: Optional tags filter
567
+
568
+ Returns:
569
+ List of file metadata dicts
570
+ """
571
+ files = []
572
+
573
+ if self._client:
574
+ # Remote mode: fetch from API
575
+ files = self._client.list_files(
576
+ experiment_id=self._experiment_id,
577
+ prefix=prefix,
578
+ tags=tags
579
+ )
580
+
581
+ if self._storage:
582
+ # Local mode: read from metadata file
583
+ files = self._storage.list_files(
584
+ project=self.project,
585
+ experiment=self.name,
586
+ prefix=prefix,
587
+ tags=tags
588
+ )
589
+
590
+ return files
591
+
592
+ def _download_file(
593
+ self,
594
+ file_id: str,
595
+ dest_path: Optional[str] = None
596
+ ) -> str:
597
+ """
598
+ Internal method to download a file.
599
+
600
+ Args:
601
+ file_id: File ID
602
+ dest_path: Optional destination path (defaults to original filename)
603
+
604
+ Returns:
605
+ Path to downloaded file
606
+ """
607
+ if self._client:
608
+ # Remote mode: download from API
609
+ return self._client.download_file(
610
+ experiment_id=self._experiment_id,
611
+ file_id=file_id,
612
+ dest_path=dest_path
613
+ )
614
+
615
+ if self._storage:
616
+ # Local mode: copy from local storage
617
+ return self._storage.read_file(
618
+ project=self.project,
619
+ experiment=self.name,
620
+ file_id=file_id,
621
+ dest_path=dest_path
622
+ )
623
+
624
+ raise RuntimeError("No client or storage configured")
625
+
626
+ def _delete_file(self, file_id: str) -> Dict[str, Any]:
627
+ """
628
+ Internal method to delete a file.
629
+
630
+ Args:
631
+ file_id: File ID
632
+
633
+ Returns:
634
+ Dict with id and deletedAt
635
+ """
636
+ result = None
637
+
638
+ if self._client:
639
+ # Remote mode: delete via API
640
+ result = self._client.delete_file(
641
+ experiment_id=self._experiment_id,
642
+ file_id=file_id
643
+ )
644
+
645
+ if self._storage:
646
+ # Local mode: soft delete in metadata
647
+ result = self._storage.delete_file(
648
+ project=self.project,
649
+ experiment=self.name,
650
+ file_id=file_id
651
+ )
652
+
653
+ return result
654
+
655
+ def _update_file(
656
+ self,
657
+ file_id: str,
658
+ description: Optional[str],
659
+ tags: Optional[List[str]],
660
+ metadata: Optional[Dict[str, Any]]
661
+ ) -> Dict[str, Any]:
662
+ """
663
+ Internal method to update file metadata.
664
+
665
+ Args:
666
+ file_id: File ID
667
+ description: Optional description
668
+ tags: Optional tags
669
+ metadata: Optional metadata
670
+
671
+ Returns:
672
+ Updated file metadata dict
673
+ """
674
+ result = None
675
+
676
+ if self._client:
677
+ # Remote mode: update via API
678
+ result = self._client.update_file(
679
+ experiment_id=self._experiment_id,
680
+ file_id=file_id,
681
+ description=description,
682
+ tags=tags,
683
+ metadata=metadata
684
+ )
685
+
686
+ if self._storage:
687
+ # Local mode: update in metadata file
688
+ result = self._storage.update_file_metadata(
689
+ project=self.project,
690
+ experiment=self.name,
691
+ file_id=file_id,
692
+ description=description,
693
+ tags=tags,
694
+ metadata=metadata
695
+ )
696
+
697
+ return result
698
+
699
+
700
+ def _write_params(self, flattened_params: Dict[str, Any]) -> None:
701
+ """
702
+ Internal method to write/merge parameters.
703
+
704
+ Args:
705
+ flattened_params: Already-flattened parameter dict with dot notation
706
+ """
707
+ if self._client:
708
+ # Remote mode: send to API
709
+ self._client.set_parameters(
710
+ experiment_id=self._experiment_id,
711
+ data=flattened_params
712
+ )
713
+
714
+ if self._storage:
715
+ # Local mode: write to file
716
+ self._storage.write_parameters(
717
+ project=self.project,
718
+ experiment=self.name,
719
+ data=flattened_params
720
+ )
721
+
722
+ def _read_params(self) -> Optional[Dict[str, Any]]:
723
+ """
724
+ Internal method to read parameters.
725
+
726
+ Returns:
727
+ Flattened parameters dict, or None if no parameters exist
728
+ """
729
+ params = None
730
+
731
+ if self._client:
732
+ # Remote mode: fetch from API
733
+ try:
734
+ params = self._client.get_parameters(experiment_id=self._experiment_id)
735
+ except Exception:
736
+ # Parameters don't exist yet
737
+ params = None
738
+
739
+ if self._storage:
740
+ # Local mode: read from file
741
+ params = self._storage.read_parameters(
742
+ project=self.project,
743
+ experiment=self.name
744
+ )
745
+
746
+ return params
747
+
748
+ @property
749
+ def metrics(self) -> 'MetricsManager':
750
+ """
751
+ Get a MetricsManager for metric operations.
752
+
753
+ Supports two usage patterns:
754
+ 1. Named: experiment.metrics("loss").append(value=0.5, step=1)
755
+ 2. Unnamed: experiment.metrics.append(name="loss", value=0.5, step=1)
756
+
757
+ Returns:
758
+ MetricsManager instance
759
+
760
+ Raises:
761
+ RuntimeError: If experiment is not open
762
+
763
+ Examples:
764
+ # Named metric
765
+ experiment.metrics("train_loss").append(value=0.5, step=100)
766
+
767
+ # Unnamed (name in append call)
768
+ experiment.metrics.append(name="train_loss", value=0.5, step=100)
769
+
770
+ # Append batch
771
+ experiment.metrics("metrics").append_batch([
772
+ {"loss": 0.5, "acc": 0.8, "step": 1},
773
+ {"loss": 0.4, "acc": 0.85, "step": 2}
774
+ ])
775
+
776
+ # Read data
777
+ data = experiment.metrics("train_loss").read(start_index=0, limit=100)
778
+
779
+ # Get statistics
780
+ stats = experiment.metrics("train_loss").stats()
781
+ """
782
+ from .metric import MetricsManager
783
+
784
+ if not self._is_open:
785
+ raise RuntimeError(
786
+ "Cannot use metrics on closed experiment. "
787
+ "Use 'with Experiment(...).run as experiment:' or call experiment.run.start() first."
788
+ )
789
+
790
+ return MetricsManager(self)
791
+
792
+ def _append_to_metric(
793
+ self,
794
+ name: Optional[str],
795
+ data: Dict[str, Any],
796
+ description: Optional[str],
797
+ tags: Optional[List[str]],
798
+ metadata: Optional[Dict[str, Any]]
799
+ ) -> Dict[str, Any]:
800
+ """
801
+ Internal method to append a single data point to a metric.
802
+
803
+ Args:
804
+ name: Metric name (can be None for unnamed metrics)
805
+ data: Data point (flexible schema)
806
+ description: Optional metric description
807
+ tags: Optional tags
808
+ metadata: Optional metadata
809
+
810
+ Returns:
811
+ Dict with metricId, index, bufferedDataPoints, chunkSize
812
+ """
813
+ result = None
814
+
815
+ if self._client:
816
+ # Remote mode: append via API
817
+ result = self._client.append_to_metric(
818
+ experiment_id=self._experiment_id,
819
+ metric_name=name,
820
+ data=data,
821
+ description=description,
822
+ tags=tags,
823
+ metadata=metadata
824
+ )
825
+
826
+ if self._storage:
827
+ # Local mode: append to local storage
828
+ result = self._storage.append_to_metric(
829
+ project=self.project,
830
+ experiment=self.name,
831
+ metric_name=name,
832
+ data=data,
833
+ description=description,
834
+ tags=tags,
835
+ metadata=metadata
836
+ )
837
+
838
+ return result
839
+
840
+ def _append_batch_to_metric(
841
+ self,
842
+ name: Optional[str],
843
+ data_points: List[Dict[str, Any]],
844
+ description: Optional[str],
845
+ tags: Optional[List[str]],
846
+ metadata: Optional[Dict[str, Any]]
847
+ ) -> Dict[str, Any]:
848
+ """
849
+ Internal method to append multiple data points to a metric.
850
+
851
+ Args:
852
+ name: Metric name (can be None for unnamed metrics)
853
+ data_points: List of data points
854
+ description: Optional metric description
855
+ tags: Optional tags
856
+ metadata: Optional metadata
857
+
858
+ Returns:
859
+ Dict with metricId, startIndex, endIndex, count
860
+ """
861
+ result = None
862
+
863
+ if self._client:
864
+ # Remote mode: append batch via API
865
+ result = self._client.append_batch_to_metric(
866
+ experiment_id=self._experiment_id,
867
+ metric_name=name,
868
+ data_points=data_points,
869
+ description=description,
870
+ tags=tags,
871
+ metadata=metadata
872
+ )
873
+
874
+ if self._storage:
875
+ # Local mode: append batch to local storage
876
+ result = self._storage.append_batch_to_metric(
877
+ project=self.project,
878
+ experiment=self.name,
879
+ metric_name=name,
880
+ data_points=data_points,
881
+ description=description,
882
+ tags=tags,
883
+ metadata=metadata
884
+ )
885
+
886
+ return result
887
+
888
+ def _read_metric_data(
889
+ self,
890
+ name: str,
891
+ start_index: int,
892
+ limit: int
893
+ ) -> Dict[str, Any]:
894
+ """
895
+ Internal method to read data points from a metric.
896
+
897
+ Args:
898
+ name: Metric name
899
+ start_index: Starting index
900
+ limit: Max points to read
901
+
902
+ Returns:
903
+ Dict with data, startIndex, endIndex, total, hasMore
904
+ """
905
+ result = None
906
+
907
+ if self._client:
908
+ # Remote mode: read via API
909
+ result = self._client.read_metric_data(
910
+ experiment_id=self._experiment_id,
911
+ metric_name=name,
912
+ start_index=start_index,
913
+ limit=limit
914
+ )
915
+
916
+ if self._storage:
917
+ # Local mode: read from local storage
918
+ result = self._storage.read_metric_data(
919
+ project=self.project,
920
+ experiment=self.name,
921
+ metric_name=name,
922
+ start_index=start_index,
923
+ limit=limit
924
+ )
925
+
926
+ return result
927
+
928
+ def _get_metric_stats(self, name: str) -> Dict[str, Any]:
929
+ """
930
+ Internal method to get metric statistics.
931
+
932
+ Args:
933
+ name: Metric name
934
+
935
+ Returns:
936
+ Dict with metric stats
937
+ """
938
+ result = None
939
+
940
+ if self._client:
941
+ # Remote mode: get stats via API
942
+ result = self._client.get_metric_stats(
943
+ experiment_id=self._experiment_id,
944
+ metric_name=name
945
+ )
946
+
947
+ if self._storage:
948
+ # Local mode: get stats from local storage
949
+ result = self._storage.get_metric_stats(
950
+ project=self.project,
951
+ experiment=self.name,
952
+ metric_name=name
953
+ )
954
+
955
+ return result
956
+
957
+ def _list_metrics(self) -> List[Dict[str, Any]]:
958
+ """
959
+ Internal method to list all metrics in experiment.
960
+
961
+ Returns:
962
+ List of metric summaries
963
+ """
964
+ result = None
965
+
966
+ if self._client:
967
+ # Remote mode: list via API
968
+ result = self._client.list_metrics(experiment_id=self._experiment_id)
969
+
970
+ if self._storage:
971
+ # Local mode: list from local storage
972
+ result = self._storage.list_metrics(
973
+ project=self.project,
974
+ experiment=self.name
975
+ )
976
+
977
+ return result or []
978
+
979
+ @property
980
+ def id(self) -> Optional[str]:
981
+ """Get the experiment ID (only available after open in remote mode)."""
982
+ return self._experiment_id
983
+
984
+ @property
985
+ def data(self) -> Optional[Dict[str, Any]]:
986
+ """Get the full experiment data (only available after open in remote mode)."""
987
+ return self._experiment_data
988
+
989
+
990
def ml_dash_experiment(
    name: str,
    project: str,
    **kwargs
) -> Callable:
    """
    Decorator that runs the wrapped function inside an ML-Dash experiment.

    The experiment is opened before the function body runs, injected into it
    as the keyword argument 'experiment', and closed automatically afterwards
    (COMPLETED on normal return, FAILED if an exception escapes).

    Usage:
        @ml_dash_experiment(
            name="my-experiment",
            project="my-project",
            remote="http://localhost:3000",
            api_key="your-token"
        )
        def train_model(experiment):
            ...
    """
    def decorate(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapped(*args, **call_kwargs):
            tracker = Experiment(name=name, project=project, **kwargs)
            with tracker.run as experiment:
                # Inject the open experiment into the callee's kwargs.
                call_kwargs['experiment'] = experiment
                return func(*args, **call_kwargs)
        return wrapped
    return decorate