ml-dash 0.0.17__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. ml_dash/__init__.py +58 -1
  2. ml_dash/client.py +562 -0
  3. ml_dash/experiment.py +916 -0
  4. ml_dash/files.py +313 -0
  5. ml_dash/log.py +181 -0
  6. ml_dash/metric.py +186 -0
  7. ml_dash/params.py +188 -0
  8. ml_dash/storage.py +922 -0
  9. ml_dash-0.2.1.dist-info/METADATA +237 -0
  10. ml_dash-0.2.1.dist-info/RECORD +12 -0
  11. ml_dash-0.2.1.dist-info/WHEEL +4 -0
  12. app-build/asset-manifest.json +0 -15
  13. app-build/favicon.ico +0 -0
  14. app-build/github-markdown.css +0 -957
  15. app-build/index.html +0 -1
  16. app-build/manifest.json +0 -15
  17. app-build/monaco-editor-worker-loader-proxy.js +0 -6
  18. app-build/precache-manifest.ffc09f8a591c529a1bd5c6f21f49815f.js +0 -26
  19. app-build/service-worker.js +0 -34
  20. ml_dash/app.py +0 -60
  21. ml_dash/config.py +0 -16
  22. ml_dash/file_events.py +0 -71
  23. ml_dash/file_handlers.py +0 -141
  24. ml_dash/file_utils.py +0 -5
  25. ml_dash/file_watcher.py +0 -30
  26. ml_dash/main.py +0 -60
  27. ml_dash/mime_types.py +0 -20
  28. ml_dash/schema/__init__.py +0 -110
  29. ml_dash/schema/archive.py +0 -165
  30. ml_dash/schema/directories.py +0 -59
  31. ml_dash/schema/experiments.py +0 -65
  32. ml_dash/schema/files/__init__.py +0 -204
  33. ml_dash/schema/files/file_helpers.py +0 -79
  34. ml_dash/schema/files/images.py +0 -27
  35. ml_dash/schema/files/metrics.py +0 -64
  36. ml_dash/schema/files/parameters.py +0 -50
  37. ml_dash/schema/files/series.py +0 -235
  38. ml_dash/schema/files/videos.py +0 -27
  39. ml_dash/schema/helpers.py +0 -66
  40. ml_dash/schema/projects.py +0 -65
  41. ml_dash/schema/schema_helpers.py +0 -19
  42. ml_dash/schema/users.py +0 -33
  43. ml_dash/sse.py +0 -18
  44. ml_dash-0.0.17.dist-info/METADATA +0 -67
  45. ml_dash-0.0.17.dist-info/RECORD +0 -38
  46. ml_dash-0.0.17.dist-info/WHEEL +0 -5
  47. ml_dash-0.0.17.dist-info/top_level.txt +0 -2
  48. ml_dash/{example.py → py.typed} +0 -0
ml_dash/experiment.py ADDED
@@ -0,0 +1,916 @@
"""
Experiment class for ML-Dash SDK.

Supports three usage styles:
1. Decorator: @ml_dash_experiment(...)
2. Context manager: with Experiment(...) as exp:
3. Direct instantiation: exp = Experiment(...)
"""

from typing import Optional, Dict, Any, List, Callable
from enum import Enum
import functools
from pathlib import Path
from datetime import datetime

from .client import RemoteClient
from .storage import LocalStorage
from .log import LogLevel, LogBuilder
from .params import ParametersBuilder
from .files import FileBuilder


class OperationMode(Enum):
    """Operation mode for the experiment."""
    LOCAL = "local"
    REMOTE = "remote"
    HYBRID = "hybrid"  # Future: sync local to remote


class Experiment:
    """
    ML-Dash experiment for tracking experiments.

    Usage examples:

        # Remote mode
        experiment = Experiment(
            name="my-experiment",
            project="my-project",
            remote="http://localhost:3000",
            api_key="your-jwt-token"
        )

        # Local mode
        experiment = Experiment(
            name="my-experiment",
            project="my-project",
            local_path=".ml-dash"
        )

        # Context manager
        with Experiment(...) as exp:
            exp.log(...)

        # Decorator
        @ml_dash_experiment(name="exp", project="ws", remote="...")
        def train():
            ...
    """

    def __init__(
        self,
        name: str,
        project: str,
        *,
        description: Optional[str] = None,
        tags: Optional[List[str]] = None,
        folder: Optional[str] = None,
        write_protected: bool = False,
        metadata: Optional[Dict[str, Any]] = None,
        # Mode configuration
        remote: Optional[str] = None,
        api_key: Optional[str] = None,
        user_name: Optional[str] = None,
        local_path: Optional[str] = None,
    ):
        """
        Initialize an ML-Dash experiment.

        Args:
            name: Experiment name (unique within project)
            project: Project name
            description: Optional experiment description
            tags: Optional list of tags
            folder: Optional folder path (e.g., "/experiments/baseline")
            write_protected: If True, experiment becomes immutable after creation
            metadata: Optional metadata dict
            remote: Remote API URL (e.g., "http://localhost:3000")
            api_key: JWT token for authentication (if not provided, will be generated from user_name)
            user_name: Username for authentication (generates API key if api_key not provided)
            local_path: Local storage root path (for local mode)
        """
        self.name = name
        self.project = project
        self.description = description
        self.tags = tags
        self.folder = folder
        self.write_protected = write_protected
        self.metadata = metadata

        # Generate API key from username if not provided
        if remote and not api_key and user_name:
            api_key = self._generate_api_key_from_username(user_name)

        # Determine operation mode
        if remote and local_path:
            self.mode = OperationMode.HYBRID
        elif remote:
            self.mode = OperationMode.REMOTE
        elif local_path:
            self.mode = OperationMode.LOCAL
        else:
            raise ValueError(
                "Must specify either 'remote' (with api_key/user_name) or 'local_path'"
            )

        # Initialize backend
        self._client: Optional[RemoteClient] = None
        self._storage: Optional[LocalStorage] = None
        self._experiment_id: Optional[str] = None
        self._experiment_data: Optional[Dict[str, Any]] = None
        self._is_open = False

        if self.mode in (OperationMode.REMOTE, OperationMode.HYBRID):
            if not api_key:
                raise ValueError("Either api_key or user_name is required for remote mode")
            self._client = RemoteClient(base_url=remote, api_key=api_key)

        if self.mode in (OperationMode.LOCAL, OperationMode.HYBRID):
            if not local_path:
                raise ValueError("local_path is required for local mode")
            self._storage = LocalStorage(root_path=Path(local_path))

    @staticmethod
    def _generate_api_key_from_username(user_name: str) -> str:
        """
        Generate a deterministic API key (JWT) from username.

        This is a temporary solution until proper user authentication is implemented.
        Generates a unique user ID from the username and creates a JWT token.

        Args:
            user_name: Username to generate API key from

        Returns:
            JWT token string
        """
        import hashlib
        import time
        import jwt

        # Generate deterministic user ID from username (first 10 digits of SHA256 hash)
        user_id = str(int(hashlib.sha256(user_name.encode()).hexdigest()[:16], 16))[:10]

        # JWT payload
        payload = {
            "userId": user_id,
            "userName": user_name,
            "iat": int(time.time()),
            "exp": int(time.time()) + (30 * 24 * 60 * 60)  # 30 days expiration
        }

        # Secret key for signing (should match server's JWT_SECRET)
        secret = "your-secret-key-change-this-in-production"

        # Generate JWT
        token = jwt.encode(payload, secret, algorithm="HS256")

        return token

    def open(self) -> "Experiment":
        """
        Open the experiment (create or update on server/filesystem).

        Returns:
            self for chaining
        """
        if self._is_open:
            return self

        if self._client:
            # Remote mode: create/update experiment via API
            response = self._client.create_or_update_experiment(
                project=self.project,
                name=self.name,
                description=self.description,
                tags=self.tags,
                folder=self.folder,
                write_protected=self.write_protected,
                metadata=self.metadata,
            )
            self._experiment_data = response
            self._experiment_id = response["experiment"]["id"]

        if self._storage:
            # Local mode: create experiment directory structure
            self._storage.create_experiment(
                project=self.project,
                name=self.name,
                description=self.description,
                tags=self.tags,
                folder=self.folder,
                metadata=self.metadata,
            )

        self._is_open = True
        return self

    def close(self):
        """Close the experiment."""
        if not self._is_open:
            return

        # Flush any pending writes
        if self._storage:
            self._storage.flush()

        self._is_open = False

    def __enter__(self) -> "Experiment":
        """Context manager entry."""
        return self.open()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit."""
        self.close()
        return False

    def log(
        self,
        message: Optional[str] = None,
        level: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **extra_metadata
    ) -> Optional[LogBuilder]:
        """
        Create a log entry or return a LogBuilder for fluent API.

        This method supports two styles:

        1. Fluent style (no message provided):
           Returns a LogBuilder that allows chaining with level methods.

           Examples:
               experiment.log(metadata={"epoch": 1}).info("Training started")
               experiment.log().error("Failed", error_code=500)

        2. Traditional style (message provided):
           Writes the log immediately and returns None.

           Examples:
               experiment.log("Training started", level="info", epoch=1)
               experiment.log("Training started")  # Defaults to "info"

        Args:
            message: Optional log message (for traditional style)
            level: Optional log level (for traditional style, defaults to "info")
            metadata: Optional metadata dict
            **extra_metadata: Additional metadata as keyword arguments

        Returns:
            LogBuilder if no message provided (fluent mode)
            None if log was written directly (traditional mode)

        Raises:
            RuntimeError: If experiment is not open
            ValueError: If log level is invalid
        """
        if not self._is_open:
            raise RuntimeError("Experiment not open. Use experiment.open() or context manager.")

        # Fluent mode: return LogBuilder
        if message is None:
            combined_metadata = {**(metadata or {}), **extra_metadata}
            return LogBuilder(self, combined_metadata if combined_metadata else None)

        # Traditional mode: write immediately
        level = level or LogLevel.INFO.value  # Default to "info"
        level = LogLevel.validate(level)  # Validate level

        combined_metadata = {**(metadata or {}), **extra_metadata}
        self._write_log(
            message=message,
            level=level,
            metadata=combined_metadata if combined_metadata else None,
            timestamp=None
        )
        return None

    def _write_log(
        self,
        message: str,
        level: str,
        metadata: Optional[Dict[str, Any]],
        timestamp: Optional[datetime]
    ) -> None:
        """
        Internal method to write a log entry immediately.
        No buffering - writes directly to storage/remote.

        Args:
            message: Log message
            level: Log level (already validated)
            metadata: Optional metadata dict
            timestamp: Optional custom timestamp (defaults to now)
        """
        log_entry = {
            "timestamp": (timestamp or datetime.utcnow()).isoformat() + "Z",
            "level": level,
            "message": message,
        }

        if metadata:
            log_entry["metadata"] = metadata

        # Write immediately (no buffering)
        if self._client:
            # Remote mode: send to API (wrapped in array for batch API)
            self._client.create_log_entries(
                experiment_id=self._experiment_id,
                logs=[log_entry]  # Single log in array
            )

        if self._storage:
            # Local mode: write to file immediately
            self._storage.write_log(
                project=self.project,
                experiment=self.name,
                message=log_entry["message"],
                level=log_entry["level"],
                metadata=log_entry.get("metadata"),
                timestamp=log_entry["timestamp"]
            )

    def file(self, **kwargs) -> FileBuilder:
        """
        Get a FileBuilder for fluent file operations.

        Returns:
            FileBuilder instance for chaining

        Raises:
            RuntimeError: If experiment is not open

        Examples:
            # Upload file
            experiment.file(file_path="./model.pt", prefix="/models").save()

            # List files
            files = experiment.file().list()
            files = experiment.file(prefix="/models").list()

            # Download file
            experiment.file(file_id="123").download()

            # Delete file
            experiment.file(file_id="123").delete()
        """
        if not self._is_open:
            raise RuntimeError("Experiment not open. Use experiment.open() or context manager.")

        return FileBuilder(self, **kwargs)

    def _upload_file(
        self,
        file_path: str,
        prefix: str,
        filename: str,
        description: Optional[str],
        tags: Optional[List[str]],
        metadata: Optional[Dict[str, Any]],
        checksum: str,
        content_type: str,
        size_bytes: int
    ) -> Dict[str, Any]:
        """
        Internal method to upload a file.

        Args:
            file_path: Local file path
            prefix: Logical path prefix
            filename: Original filename
            description: Optional description
            tags: Optional tags
            metadata: Optional metadata
            checksum: SHA256 checksum
            content_type: MIME type
            size_bytes: File size in bytes

        Returns:
            File metadata dict
        """
        result = None

        if self._client:
            # Remote mode: upload to API
            result = self._client.upload_file(
                experiment_id=self._experiment_id,
                file_path=file_path,
                prefix=prefix,
                filename=filename,
                description=description,
                tags=tags,
                metadata=metadata,
                checksum=checksum,
                content_type=content_type,
                size_bytes=size_bytes
            )

        if self._storage:
            # Local mode: copy to local storage
            result = self._storage.write_file(
                project=self.project,
                experiment=self.name,
                file_path=file_path,
                prefix=prefix,
                filename=filename,
                description=description,
                tags=tags,
                metadata=metadata,
                checksum=checksum,
                content_type=content_type,
                size_bytes=size_bytes
            )

        return result

    def _list_files(
        self,
        prefix: Optional[str] = None,
        tags: Optional[List[str]] = None
    ) -> List[Dict[str, Any]]:
        """
        Internal method to list files.

        Args:
            prefix: Optional prefix filter
            tags: Optional tags filter

        Returns:
            List of file metadata dicts
        """
        files = []

        if self._client:
            # Remote mode: fetch from API
            files = self._client.list_files(
                experiment_id=self._experiment_id,
                prefix=prefix,
                tags=tags
            )

        if self._storage:
            # Local mode: read from metadata file
            files = self._storage.list_files(
                project=self.project,
                experiment=self.name,
                prefix=prefix,
                tags=tags
            )

        return files

    def _download_file(
        self,
        file_id: str,
        dest_path: Optional[str] = None
    ) -> str:
        """
        Internal method to download a file.

        Args:
            file_id: File ID
            dest_path: Optional destination path (defaults to original filename)

        Returns:
            Path to downloaded file
        """
        if self._client:
            # Remote mode: download from API
            return self._client.download_file(
                experiment_id=self._experiment_id,
                file_id=file_id,
                dest_path=dest_path
            )

        if self._storage:
            # Local mode: copy from local storage
            return self._storage.read_file(
                project=self.project,
                experiment=self.name,
                file_id=file_id,
                dest_path=dest_path
            )

        raise RuntimeError("No client or storage configured")

    def _delete_file(self, file_id: str) -> Dict[str, Any]:
        """
        Internal method to delete a file.

        Args:
            file_id: File ID

        Returns:
            Dict with id and deletedAt
        """
        result = None

        if self._client:
            # Remote mode: delete via API
            result = self._client.delete_file(
                experiment_id=self._experiment_id,
                file_id=file_id
            )

        if self._storage:
            # Local mode: soft delete in metadata
            result = self._storage.delete_file(
                project=self.project,
                experiment=self.name,
                file_id=file_id
            )

        return result

    def _update_file(
        self,
        file_id: str,
        description: Optional[str],
        tags: Optional[List[str]],
        metadata: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Internal method to update file metadata.

        Args:
            file_id: File ID
            description: Optional description
            tags: Optional tags
            metadata: Optional metadata

        Returns:
            Updated file metadata dict
        """
        result = None

        if self._client:
            # Remote mode: update via API
            result = self._client.update_file(
                experiment_id=self._experiment_id,
                file_id=file_id,
                description=description,
                tags=tags,
                metadata=metadata
            )

        if self._storage:
            # Local mode: update in metadata file
            result = self._storage.update_file_metadata(
                project=self.project,
                experiment=self.name,
                file_id=file_id,
                description=description,
                tags=tags,
                metadata=metadata
            )

        return result

    def parameters(self) -> ParametersBuilder:
        """
        Get a ParametersBuilder for fluent parameter operations.

        Returns:
            ParametersBuilder instance for chaining

        Raises:
            RuntimeError: If experiment is not open

        Examples:
            # Set parameters
            experiment.parameters().set(
                model={"lr": 0.001, "batch_size": 32},
                optimizer="adam"
            )

            # Get parameters
            params = experiment.parameters().get()  # Flattened
            params = experiment.parameters().get(flatten=False)  # Nested
        """
        if not self._is_open:
            raise RuntimeError("Experiment not open. Use experiment.open() or context manager.")

        return ParametersBuilder(self)

    def _write_params(self, flattened_params: Dict[str, Any]) -> None:
        """
        Internal method to write/merge parameters.

        Args:
            flattened_params: Already-flattened parameter dict with dot notation
        """
        if self._client:
            # Remote mode: send to API
            self._client.set_parameters(
                experiment_id=self._experiment_id,
                data=flattened_params
            )

        if self._storage:
            # Local mode: write to file
            self._storage.write_parameters(
                project=self.project,
                experiment=self.name,
                data=flattened_params
            )

    def _read_params(self) -> Optional[Dict[str, Any]]:
        """
        Internal method to read parameters.

        Returns:
            Flattened parameters dict, or None if no parameters exist
        """
        params = None

        if self._client:
            # Remote mode: fetch from API
            try:
                params = self._client.get_parameters(experiment_id=self._experiment_id)
            except Exception:
                # Parameters don't exist yet
                params = None

        if self._storage:
            # Local mode: read from file
            params = self._storage.read_parameters(
                project=self.project,
                experiment=self.name
            )

        return params

    def metric(self, name: str, description: Optional[str] = None,
               tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None) -> 'MetricBuilder':
        """
        Get a MetricBuilder for fluent metric operations.

        Args:
            name: Metric name (unique within experiment)
            description: Optional metric description
            tags: Optional tags for categorization
            metadata: Optional structured metadata

        Returns:
            MetricBuilder instance for chaining

        Raises:
            RuntimeError: If experiment is not open

        Examples:
            # Append single data point
            experiment.metric(name="train_loss").append(value=0.5, step=100)

            # Append batch
            experiment.metric(name="metrics").append_batch([
                {"loss": 0.5, "acc": 0.8, "step": 1},
                {"loss": 0.4, "acc": 0.85, "step": 2}
            ])

            # Read data
            data = experiment.metric(name="train_loss").read(start_index=0, limit=100)

            # Get statistics
            stats = experiment.metric(name="train_loss").stats()
        """
        from .metric import MetricBuilder

        if not self._is_open:
            raise RuntimeError(
                "Cannot use metric on closed experiment. "
                "Use 'with Experiment(...) as experiment:' or call experiment.open() first."
            )

        return MetricBuilder(self, name, description, tags, metadata)

    def _append_to_metric(
        self,
        name: str,
        data: Dict[str, Any],
        description: Optional[str],
        tags: Optional[List[str]],
        metadata: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Internal method to append a single data point to a metric.

        Args:
            name: Metric name
            data: Data point (flexible schema)
            description: Optional metric description
            tags: Optional tags
            metadata: Optional metadata

        Returns:
            Dict with metricId, index, bufferedDataPoints, chunkSize
        """
        result = None

        if self._client:
            # Remote mode: append via API
            result = self._client.append_to_metric(
                experiment_id=self._experiment_id,
                metric_name=name,
                data=data,
                description=description,
                tags=tags,
                metadata=metadata
            )

        if self._storage:
            # Local mode: append to local storage
            result = self._storage.append_to_metric(
                project=self.project,
                experiment=self.name,
                metric_name=name,
                data=data,
                description=description,
                tags=tags,
                metadata=metadata
            )

        return result

    def _append_batch_to_metric(
        self,
        name: str,
        data_points: List[Dict[str, Any]],
        description: Optional[str],
        tags: Optional[List[str]],
        metadata: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Internal method to append multiple data points to a metric.

        Args:
            name: Metric name
            data_points: List of data points
            description: Optional metric description
            tags: Optional tags
            metadata: Optional metadata

        Returns:
            Dict with metricId, startIndex, endIndex, count
        """
        result = None

        if self._client:
            # Remote mode: append batch via API
            result = self._client.append_batch_to_metric(
                experiment_id=self._experiment_id,
                metric_name=name,
                data_points=data_points,
                description=description,
                tags=tags,
                metadata=metadata
            )

        if self._storage:
            # Local mode: append batch to local storage
            result = self._storage.append_batch_to_metric(
                project=self.project,
                experiment=self.name,
                metric_name=name,
                data_points=data_points,
                description=description,
                tags=tags,
                metadata=metadata
            )

        return result

    def _read_metric_data(
        self,
        name: str,
        start_index: int,
        limit: int
    ) -> Dict[str, Any]:
        """
        Internal method to read data points from a metric.

        Args:
            name: Metric name
            start_index: Starting index
            limit: Max points to read

        Returns:
            Dict with data, startIndex, endIndex, total, hasMore
        """
        result = None

        if self._client:
            # Remote mode: read via API
            result = self._client.read_metric_data(
                experiment_id=self._experiment_id,
                metric_name=name,
                start_index=start_index,
                limit=limit
            )

        if self._storage:
            # Local mode: read from local storage
            result = self._storage.read_metric_data(
                project=self.project,
                experiment=self.name,
                metric_name=name,
                start_index=start_index,
                limit=limit
            )

        return result

    def _get_metric_stats(self, name: str) -> Dict[str, Any]:
        """
        Internal method to get metric statistics.

        Args:
            name: Metric name

        Returns:
            Dict with metric stats
        """
        result = None

        if self._client:
            # Remote mode: get stats via API
            result = self._client.get_metric_stats(
                experiment_id=self._experiment_id,
                metric_name=name
            )

        if self._storage:
            # Local mode: get stats from local storage
            result = self._storage.get_metric_stats(
                project=self.project,
                experiment=self.name,
                metric_name=name
            )

        return result

    def _list_metrics(self) -> List[Dict[str, Any]]:
        """
        Internal method to list all metrics in experiment.

        Returns:
            List of metric summaries
        """
        result = None

        if self._client:
            # Remote mode: list via API
            result = self._client.list_metrics(experiment_id=self._experiment_id)

        if self._storage:
            # Local mode: list from local storage
            result = self._storage.list_metrics(
                project=self.project,
                experiment=self.name
            )

        return result or []

    @property
    def id(self) -> Optional[str]:
        """Get the experiment ID (only available after open in remote mode)."""
        return self._experiment_id

    @property
    def data(self) -> Optional[Dict[str, Any]]:
        """Get the full experiment data (only available after open in remote mode)."""
        return self._experiment_data


def ml_dash_experiment(
    name: str,
    project: str,
    **kwargs
) -> Callable:
    """
    Decorator for wrapping functions with an ML-Dash experiment.

    Usage:
        @ml_dash_experiment(
            name="my-experiment",
            project="my-project",
            remote="http://localhost:3000",
            api_key="your-token"
        )
        def train_model():
            # Function code here
            pass

    The decorated function will receive an 'experiment' keyword argument
    with the active Experiment instance.
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **func_kwargs):
            with Experiment(name=name, project=project, **kwargs) as experiment:
                # Inject experiment into function kwargs
                func_kwargs['experiment'] = experiment
                return func(*args, **func_kwargs)
        return wrapper
    return decorator
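
Taken together, the new experiment.py turns the package into a tracking SDK: one Experiment class with three entry styles (direct instantiation, context manager, decorator) and fluent builders for logs, parameters, metrics, and files. The sketch below strings the documented calls together in local mode. It is assembled only from the docstrings above, not from the other new modules in this release (client.py, storage.py, log.py, params.py, metric.py, files.py), so treat the builder methods and the ".ml-dash" output layout as illustrative rather than authoritative; the project, experiment, metric, and file names are placeholders.

# Usage sketch based on the docstrings in ml_dash/experiment.py (0.2.1); names are placeholders.
from ml_dash.experiment import Experiment, ml_dash_experiment

# Local mode: everything is written under the given root path.
with Experiment(name="my-experiment", project="my-project", local_path=".ml-dash") as exp:
    # Logging: traditional style writes immediately; fluent style returns a LogBuilder.
    exp.log("Training started", level="info", epoch=1)
    exp.log(metadata={"epoch": 1}).info("Training started")

    # Parameters: set nested values, read them back flattened (dot notation).
    exp.parameters().set(model={"lr": 0.001, "batch_size": 32}, optimizer="adam")
    params = exp.parameters().get()

    # Metrics: append points one at a time or in batches, then read/summarize.
    exp.metric(name="train_loss").append(value=0.5, step=100)
    stats = exp.metric(name="train_loss").stats()

    # Files: upload an artifact under a logical prefix, then list what was stored.
    exp.file(file_path="./model.pt", prefix="/models").save()
    files = exp.file(prefix="/models").list()

# Decorator style: the wrapped function receives the open experiment as a keyword argument.
@ml_dash_experiment(name="my-experiment", project="my-project", local_path=".ml-dash")
def train(experiment=None):
    experiment.log("Starting decorated run")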