ml-dash 0.6.2__py3-none-any.whl → 0.6.2rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ml_dash/log.py CHANGED
@@ -69,8 +69,8 @@ class LogBuilder:
  the log level method is called to write the log.

  Example:
- exp.logs.info("Training started", epoch=1)
- exp.logs.error("Failed", error_code=500)
+ experiment.log(metadata={"epoch": 1}).info("Training started")
+ experiment.log().error("Failed", error_code=500)
  """

  def __init__(self, experiment: 'Experiment', metadata: Optional[Dict[str, Any]] = None):
@@ -93,8 +93,8 @@ class LogBuilder:
  **extra_metadata: Additional metadata as keyword arguments

  Example:
- exp.log("Training started", level="info")
- exp.log("Epoch complete", level="info", epoch=1, loss=0.5)
+ experiment.log().info("Training started")
+ experiment.log().info("Epoch complete", epoch=1, loss=0.5)
  """
  self._write(LogLevel.INFO.value, message, extra_metadata)
@@ -107,7 +107,7 @@ class LogBuilder:
  **extra_metadata: Additional metadata as keyword arguments

  Example:
- exp.logs.warn("High loss detected", loss=1.5)
+ experiment.log().warn("High loss detected", loss=1.5)
  """
  self._write(LogLevel.WARN.value, message, extra_metadata)
@@ -120,7 +120,7 @@ class LogBuilder:
  **extra_metadata: Additional metadata as keyword arguments

  Example:
- exp.logs.error("Failed to save", path="/models/checkpoint.pth")
+ experiment.log().error("Failed to save", path="/models/checkpoint.pth")
  """
  self._write(LogLevel.ERROR.value, message, extra_metadata)
@@ -133,7 +133,7 @@ class LogBuilder:
  **extra_metadata: Additional metadata as keyword arguments

  Example:
- exp.logs.debug("Memory usage", memory_mb=2500)
+ experiment.log().debug("Memory usage", memory_mb=2500)
  """
  self._write(LogLevel.DEBUG.value, message, extra_metadata)
 
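Read together, the log.py hunks replace the old exp.logs.<level>(...) helpers with a log() builder on the experiment object. A minimal sketch of the new calls, transcribed from the updated docstrings above (the already-open `experiment` handle is assumed and its construction is not shown; nothing beyond the documented calls is implied):

# Assumes `experiment` is an open ml-dash experiment handle (construction omitted).
experiment.log(metadata={"epoch": 1}).info("Training started")   # metadata attached via log(...)
experiment.log().warn("High loss detected", loss=1.5)            # extra fields passed as keyword arguments
experiment.log().error("Failed to save", path="/models/checkpoint.pth")
experiment.log().debug("Memory usage", memory_mb=2500)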
ml_dash/metric.py CHANGED
@@ -1,5 +1,5 @@
  """
- Metric API - Time-series data logging for ML experiments.
+ Metric API - Time-series data metricing for ML experiments.

  Metrics are used for storing continuous data series like training metrics,
  validation losses, system measurements, etc.
@@ -13,203 +13,6 @@ if TYPE_CHECKING:
  from .experiment import Experiment


- class BufferManager:
- """
- Global buffer manager for collecting metric values across prefixes.
-
- Accumulates values via metrics("prefix").buffer(...) and computes
- statistics when log_summary() is called.
-
- Usage:
- # Accumulate with prefix
- metrics("train").buffer(loss=0.5, accuracy=0.81)
- metrics("val").buffer(loss=0.6, accuracy=0.78)
-
- # Log summaries (all buffered prefixes)
- metrics.buffer.log_summary() # default: "mean"
- metrics.buffer.log_summary("mean", "std", "p95")
-
- # Log non-buffered values directly
- metrics.log(epoch=epoch, lr=lr)
-
- # Final flush to storage
- metrics.flush()
- """
-
- # Supported aggregation functions
- SUPPORTED_AGGS = {
- "mean", "std", "min", "max", "count",
- "median", "sum",
- "p50", "p90", "p95", "p99",
- "last", "first"
- }
-
- def __init__(self, metrics_manager: 'MetricsManager'):
- """
- Initialize BufferManager.
-
- Args:
- metrics_manager: Parent MetricsManager instance
- """
- self._metrics_manager = metrics_manager
- # Buffers per prefix: {prefix: {key: [values]}}
- self._buffers: Dict[Optional[str], Dict[str, List[float]]] = defaultdict(lambda: defaultdict(list))
-
- def _store(self, prefix: Optional[str], **kwargs) -> None:
- """
- Store values in buffer for a specific prefix.
-
- Args:
- prefix: Metric prefix (e.g., "train", "val")
- **kwargs: Metric values to buffer (e.g., loss=0.5, accuracy=0.9)
- """
- for key, value in kwargs.items():
- # Handle None values gracefully
- if value is None:
- value = float('nan')
- try:
- self._buffers[prefix][key].append(float(value))
- except (TypeError, ValueError):
- # Skip non-numeric values silently
- continue
-
- def _compute_stats(self, values: List[float], aggs: tuple) -> Dict[str, float]:
- """
- Compute statistics for a list of values.
-
- Args:
- values: List of numeric values
- aggs: Tuple of aggregation names
-
- Returns:
- Dict with computed statistics
- """
- # Filter out NaN values
- clean_values = [v for v in values if not (isinstance(v, float) and v != v)]
-
- if not clean_values:
- return {}
-
- stats = {}
- for agg in aggs:
- if agg == "mean":
- stats["mean"] = statistics.mean(clean_values)
- elif agg == "std":
- if len(clean_values) >= 2:
- stats["std"] = statistics.stdev(clean_values)
- else:
- stats["std"] = 0.0
- elif agg == "min":
- stats["min"] = min(clean_values)
- elif agg == "max":
- stats["max"] = max(clean_values)
- elif agg == "count":
- stats["count"] = len(clean_values)
- elif agg == "median" or agg == "p50":
- stats[agg] = statistics.median(clean_values)
- elif agg == "sum":
- stats["sum"] = sum(clean_values)
- elif agg == "p90":
- stats["p90"] = self._percentile(clean_values, 90)
- elif agg == "p95":
- stats["p95"] = self._percentile(clean_values, 95)
- elif agg == "p99":
- stats["p99"] = self._percentile(clean_values, 99)
- elif agg == "last":
- stats["last"] = clean_values[-1]
- elif agg == "first":
- stats["first"] = clean_values[0]
-
- return stats
-
- def _percentile(self, values: List[float], p: int) -> float:
- """Compute percentile of values."""
- sorted_vals = sorted(values)
- k = (len(sorted_vals) - 1) * p / 100
- f = int(k)
- c = f + 1 if f + 1 < len(sorted_vals) else f
- return sorted_vals[f] + (k - f) * (sorted_vals[c] - sorted_vals[f])
-
- def log_summary(self, *aggs: str) -> None:
- """
- Compute statistics from buffered values and log them.
-
- Args:
- *aggs: Aggregation functions to compute. Defaults to ("mean",).
- Supported: "mean", "std", "min", "max", "count",
- "median", "sum", "p50", "p90", "p95", "p99",
- "last", "first"
-
- Example:
- metrics.buffer.log_summary() # default: mean
- metrics.buffer.log_summary("mean", "std") # mean and std
- metrics.buffer.log_summary("mean", "p95") # mean and 95th percentile
- """
- # Default to mean
- if not aggs:
- aggs = ("mean",)
-
- # Validate aggregations
- for agg in aggs:
- if agg not in self.SUPPORTED_AGGS:
- raise ValueError(f"Unsupported aggregation: {agg}. Supported: {self.SUPPORTED_AGGS}")
-
- # Process each prefix's buffer
- for prefix, buffer in list(self._buffers.items()):
- if not buffer:
- continue
-
- output_data = {}
-
- for key, values in buffer.items():
- if not values:
- continue
-
- stats = self._compute_stats(values, aggs)
-
- # Add stats with hierarchical naming (key.agg)
- for stat_name, stat_value in stats.items():
- output_data[f"{key}.{stat_name}"] = stat_value
-
- if output_data:
- # Log to the appropriate metric
- self._metrics_manager(prefix).log(**output_data)
-
- # Clear all buffers
- self._buffers.clear()
-
- def peek(self, prefix: Optional[str] = None, *keys: str, limit: int = 5) -> Dict[str, List[float]]:
- """
- Non-destructive inspection of buffered values.
-
- Args:
- prefix: Specific prefix to peek at (None for all)
- *keys: Optional specific keys to peek at. If empty, shows all.
- limit: Number of most recent values to show (default 5)
-
- Returns:
- Dict of buffered values (truncated to last `limit` items)
- """
- if prefix is not None:
- buffer = self._buffers.get(prefix, {})
- keys_to_show = keys if keys else buffer.keys()
- return {
- k: buffer[k][-limit:] if limit else buffer[k]
- for k in keys_to_show
- if k in buffer and buffer[k]
- }
- else:
- # Return all buffers
- result = {}
- for p, buffer in self._buffers.items():
- prefix_str = p if p else "(default)"
- keys_to_show = keys if keys else buffer.keys()
- for k in keys_to_show:
- if k in buffer and buffer[k]:
- result[f"{prefix_str}/{k}"] = buffer[k][-limit:] if limit else buffer[k]
- return result
-
-
  class SummaryCache:
  """
  Buffer for collecting metric values and computing statistics periodically.
@@ -332,8 +135,8 @@ class SummaryCache:
  if not output_data:
  return

- # Log combined data as a single metric data point
- self._metric_builder.log(**output_data)
+ # Append combined data as a single metric data point
+ self._metric_builder.append(**output_data)

  # Clear buffer if requested (default behavior for "tiled" mode)
  if clear:
@@ -366,23 +169,20 @@ class MetricsManager:
  """
  Manager for metric operations that supports both named and unnamed usage.

- Supports two usage patterns:
- 1. Named via call: experiment.metrics("train").log(loss=0.5, accuracy=0.9)
- 2. Unnamed: experiment.metrics.log(epoch=1).flush()
+ Supports three usage patterns:
+ 1. Named via call: experiment.metrics("loss").append(value=0.5, step=1)
+ 2. Named via argument: experiment.metrics.append(name="loss", value=0.5, step=1)
+ 3. Unnamed: experiment.metrics.append(value=0.5, step=1) # name=None

  Usage:
  # With explicit metric name (via call)
- experiment.metrics("train").log(loss=0.5, accuracy=0.9)
+ experiment.metrics("train_loss").append(value=0.5, step=100)

- # With epoch context (unnamed metric)
- experiment.metrics.log(epoch=epoch).flush()
+ # With explicit metric name (via argument)
+ experiment.metrics.append(name="train_loss", value=0.5, step=100)

- # Nested dict pattern (single call for all metrics)
- experiment.metrics.log(
- epoch=100,
- train=dict(loss=0.142, accuracy=0.80),
- eval=dict(loss=0.201, accuracy=0.76)
- )
+ # Without name (uses None as metric name)
+ experiment.metrics.append(value=0.5, step=100)
  """

  def __init__(self, experiment: 'Experiment'):
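The rewritten MetricsManager docstring above lists three call patterns. As a quick, hedged sketch, with the calls copied from that docstring rather than verified against the released wheel (an already-open `experiment` handle is assumed):

# Assumes `experiment` is an open ml-dash experiment handle.
experiment.metrics("train_loss").append(value=0.5, step=100)        # 1. named via call
experiment.metrics.append(name="train_loss", value=0.5, step=100)   # 2. named via argument
experiment.metrics.append(value=0.5, step=100)                      # 3. unnamed (name=None)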
@@ -394,31 +194,6 @@ class MetricsManager:
  """
  self._experiment = experiment
  self._metric_builders: Dict[str, 'MetricBuilder'] = {} # Cache for MetricBuilder instances
- self._buffer_manager: Optional[BufferManager] = None # Lazy initialization
-
- @property
- def buffer(self) -> BufferManager:
- """
- Get the global BufferManager for buffered metric operations.
-
- The buffer manager collects values across prefixes and computes
- statistics when log_summary() is called.
-
- Returns:
- BufferManager instance
-
- Example:
- # Accumulate values
- metrics("train").buffer(loss=0.5, accuracy=0.81)
- metrics("val").buffer(loss=0.6, accuracy=0.78)
-
- # Log summaries
- metrics.buffer.log_summary() # default: mean
- metrics.buffer.log_summary("mean", "std", "p95")
- """
- if self._buffer_manager is None:
- self._buffer_manager = BufferManager(self)
- return self._buffer_manager

  def __call__(self, name: str, description: Optional[str] = None,
  tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None) -> 'MetricBuilder':
@@ -435,7 +210,7 @@ class MetricsManager:
  MetricBuilder instance for the named metric (same instance on repeated calls)

  Examples:
- experiment.metrics("train").log(loss=0.5, accuracy=0.9)
+ experiment.metrics("loss").append(value=0.5, step=1)

  Note:
  MetricBuilder instances are cached by name, so repeated calls with the
@@ -444,110 +219,92 @@ class MetricsManager:
  """
  # Cache key includes name only (description/tags/metadata are set once on first call)
  if name not in self._metric_builders:
- self._metric_builders[name] = MetricBuilder(
- self._experiment, name, description, tags, metadata,
- metrics_manager=self
- )
+ self._metric_builders[name] = MetricBuilder(self._experiment, name, description, tags, metadata)
  return self._metric_builders[name]

- def log(self, _flush: bool = False, **kwargs) -> 'MetricsManager':
+ def append(self, name: Optional[str] = None, data: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
  """
- Log a data point to the unnamed (root) metric.
-
- Supports two patterns:
-
- 1. Simple key-value pairs:
- experiment.metrics.log(epoch=epoch).flush()
-
- 2. Nested dict pattern (logs to multiple prefixed metrics):
- experiment.metrics.log(
- epoch=100,
- train=dict(loss=0.142, accuracy=0.80),
- eval=dict(loss=0.201, accuracy=0.76)
- )
+ Append a data point to a metric (name can be optional).

  Args:
- _flush: If True, flush after logging (equivalent to calling .flush())
- **kwargs: Data point fields. Dict values are expanded to prefixed metrics.
+ name: Metric name (optional, can be None for unnamed metrics)
+ data: Data dict (alternative to kwargs)
+ **kwargs: Data as keyword arguments

  Returns:
- Self for method chaining
+ Response dict with metric metadata

  Examples:
- # Log epoch context and flush
- experiment.metrics.log(epoch=epoch).flush()
-
- # Log with nested dicts (single call for all metrics)
- experiment.metrics.log(
- epoch=100,
- train=dict(loss=0.142, accuracy=0.80),
- eval=dict(loss=0.201, accuracy=0.76)
- )
-
- # Equivalent to _flush=True
- experiment.metrics.log(epoch=100, _flush=True)
+ experiment.metrics.append(name="loss", value=0.5, step=1)
+ experiment.metrics.append(value=0.5, step=1) # name=None
+ experiment.metrics.append(name="loss", data={"value": 0.5, "step": 1})
  """
- # Separate nested dicts from scalar values
- scalar_data = {}
- nested_data = {}
-
- for key, value in kwargs.items():
- if isinstance(value, dict):
- nested_data[key] = value
- else:
- scalar_data[key] = value
-
- # Log scalar data to unnamed metric
- if scalar_data:
- self._experiment._append_to_metric(None, scalar_data, None, None, None)
-
- # Log nested dicts to their respective prefixed metrics
- for prefix, data in nested_data.items():
- # Include scalar data (like epoch) with each nested metric
- combined_data = {**scalar_data, **data}
- self(prefix).log(**combined_data)
-
- if _flush:
- self.flush()
+ if data is None:
+ data = kwargs
+ return self._experiment._append_to_metric(name, data, None, None, None)

- return self
-
- def flush(self) -> 'MetricsManager':
+ def append_batch(self, name: Optional[str] = None, data_points: Optional[List[Dict[str, Any]]] = None,
+ description: Optional[str] = None,
+ tags: Optional[List[str]] = None,
+ metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
  """
- Flush buffered data (for method chaining).
+ Append multiple data points to a metric.

- Currently a no-op as data is written immediately, but supports
- the fluent API pattern:
- experiment.metrics.log(epoch=epoch).flush()
+ Args:
+ name: Metric name (optional, can be None for unnamed metrics)
+ data_points: List of data point dicts
+ description: Optional metric description
+ tags: Optional tags for categorization
+ metadata: Optional structured metadata

  Returns:
- Self for method chaining
+ Response dict with metric metadata
+
+ Examples:
+ experiment.metrics.append_batch(
+ name="loss",
+ data_points=[
+ {"value": 0.5, "step": 1},
+ {"value": 0.4, "step": 2}
+ ]
+ )
+ experiment.metrics.append_batch(
+ data_points=[
+ {"value": 0.5, "step": 1},
+ {"value": 0.4, "step": 2}
+ ]
+ ) # name=None
  """
- # Data is written immediately, so nothing to flush
- # This method exists for API consistency and chaining
- return self
+ if data_points is None:
+ data_points = []
+ return self._experiment._append_batch_to_metric(name, data_points, description, tags, metadata)


  class MetricBuilder:
  """
  Builder for metric operations.

- Provides fluent API for logging, reading, and querying metric data.
+ Provides fluent API for appending, reading, and querying metric data.

  Usage:
- # Log single data point
- experiment.metrics("train").log(loss=0.5, accuracy=0.9)
+ # Append single data point
+ experiment.metric(name="train_loss").append(value=0.5, step=100)
+
+ # Append batch
+ experiment.metric(name="train_loss").append_batch([
+ {"value": 0.5, "step": 100},
+ {"value": 0.45, "step": 101}
+ ])

  # Read data
- data = experiment.metrics("train").read(start_index=0, limit=100)
+ data = experiment.metric(name="train_loss").read(start_index=0, limit=100)

  # Get statistics
- stats = experiment.metrics("train").stats()
+ stats = experiment.metric(name="train_loss").stats()
  """

  def __init__(self, experiment: 'Experiment', name: str, description: Optional[str] = None,
- tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None,
- metrics_manager: Optional['MetricsManager'] = None):
+ tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None):
  """
  Initialize MetricBuilder.
 
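The MetricBuilder docstring above, and the hunk that follows, replace the chained log()/flush() calls with append()/append_batch(), which per the docstrings return a response dict rather than self. A sketch based only on those docstring examples (the experiment.metric(name=...) accessor and the returned keys are taken from the docstrings, not independently verified against the wheel):

# Assumes `experiment` is an open ml-dash experiment handle.
result = experiment.metric(name="train_loss").append(value=0.5, step=100)
print(result["index"])                      # docstring: response dict with metricId, index, ...
experiment.metric(name="train_loss").append_batch([
    {"value": 0.5, "step": 100},
    {"value": 0.45, "step": 101},
])
data = experiment.metric(name="train_loss").read(start_index=0, limit=100)
stats = experiment.metric(name="train_loss").stats()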
@@ -557,87 +314,71 @@ class MetricBuilder:
  description: Optional metric description
  tags: Optional tags for categorization
  metadata: Optional structured metadata (units, type, etc.)
- metrics_manager: Parent MetricsManager (for buffer access)
  """
  self._experiment = experiment
  self._name = name
  self._description = description
  self._tags = tags
  self._metadata = metadata
- self._metrics_manager = metrics_manager
  self._summary_cache = None # Lazy initialization

- def buffer(self, **kwargs) -> 'MetricBuilder':
- """
- Buffer values for later aggregation via metrics.buffer.log_summary().
-
- Values are accumulated and statistics are computed when log_summary() is called.
-
- Args:
- **kwargs: Metric values to buffer (e.g., loss=0.5, accuracy=0.9)
-
- Returns:
- Self for method chaining
-
- Example:
- # Accumulate values during training
- for batch in dataloader:
- metrics("train").buffer(loss=loss, acc=acc)
-
- # Log summary at end of epoch
- metrics.buffer.log_summary() # logs loss.mean, acc.mean
- metrics.buffer.log_summary("mean", "std") # logs loss.mean, loss.std, etc.
- """
- if self._metrics_manager is None:
- raise RuntimeError("buffer() requires MetricsManager reference")
- self._metrics_manager.buffer._store(self._name, **kwargs)
- return self
-
- def log(self, **kwargs) -> 'MetricBuilder':
+ def append(self, **kwargs) -> 'MetricBuilder':
  """
- Log a single data point to the metric.
+ Append a single data point to the metric.

  The data point can have any structure - common patterns:
- - {loss: 0.3, accuracy: 0.92}
  - {value: 0.5, step: 100}
+ - {loss: 0.3, accuracy: 0.92, epoch: 5}
  - {timestamp: "...", temperature: 25.5, humidity: 60}

- Supports method chaining for fluent API:
- experiment.metrics("train").log(loss=0.5, accuracy=0.9)
-
  Args:
  **kwargs: Data point fields (flexible schema)

  Returns:
- Self for method chaining
+ Dict with metricId, index, bufferedDataPoints, chunkSize

  Example:
- experiment.metrics("train").log(loss=0.5, accuracy=0.9)
- experiment.metrics.log(epoch=epoch).flush()
+ result = experiment.metric(name="train_loss").append(value=0.5, step=100, epoch=1)
+ print(f"Appended at index {result['index']}")
  """
- self._experiment._append_to_metric(
+ result = self._experiment._append_to_metric(
  name=self._name,
  data=kwargs,
  description=self._description,
  tags=self._tags,
  metadata=self._metadata
  )
- return self
+ return result

- def flush(self) -> 'MetricBuilder':
+ def append_batch(self, data_points: List[Dict[str, Any]]) -> Dict[str, Any]:
  """
- Flush buffered data (for method chaining).
+ Append multiple data points in batch (more efficient than multiple append calls).

- Currently a no-op as data is written immediately, but supports
- the fluent API pattern:
- experiment.metrics.log(epoch=epoch).flush()
+ Args:
+ data_points: List of data point dicts

  Returns:
- Self for method chaining
+ Dict with metricId, startIndex, endIndex, count, bufferedDataPoints, chunkSize
+
+ Example:
+ result = experiment.metric(name="metrics").append_batch([
+ {"loss": 0.5, "acc": 0.8, "step": 1},
+ {"loss": 0.4, "acc": 0.85, "step": 2},
+ {"loss": 0.3, "acc": 0.9, "step": 3}
+ ])
+ print(f"Appended {result['count']} points")
  """
- # Data is written immediately, so nothing to flush
- # This method exists for API consistency and chaining
- return self
+ if not data_points:
+ raise ValueError("data_points cannot be empty")
+
+ result = self._experiment._append_batch_to_metric(
+ name=self._name,
+ data_points=data_points,
+ description=self._description,
+ tags=self._tags,
+ metadata=self._metadata
+ )
+ return result

  def read(self, start_index: int = 0, limit: int = 1000) -> Dict[str, Any]:
  """
ml_dash/params.py CHANGED
@@ -17,9 +17,9 @@ class ParametersBuilder:
  Fluent interface for parameter operations.

  Usage:
- exp.params.set(model={"lr": 0.001}, optimizer="adam")
- params = exp.params.get()
- params_nested = exp.params.get(flatten=False)
+ experiment.parameters().set(model={"lr": 0.001}, optimizer="adam")
+ params = experiment.parameters().get()
+ params_nested = experiment.parameters().get(flatten=False)
  """

  def __init__(self, experiment: 'Experiment'):
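The params.py hunks swap the exp.params shortcut for an experiment.parameters() accessor. A brief sketch taken from the docstring examples in this hunk and the next one (assuming an already-open `experiment` handle; not verified against the released wheel):

# Assumes `experiment` is an open ml-dash experiment handle.
experiment.parameters().set(model={"lr": 0.001, "batch_size": 32}, optimizer="adam")
experiment.parameters().set(**{"model.lr": 0.001, "model.batch_size": 32})  # dot-notation keys
params = experiment.parameters().get()                      # flattened dict
params_nested = experiment.parameters().get(flatten=False)  # nested dict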
@@ -51,16 +51,16 @@ class ParametersBuilder:

  Examples:
  # Set nested parameters
- exp.params.set(
+ experiment.parameters().set(
  model={"lr": 0.001, "batch_size": 32},
  optimizer="adam"
  )

  # Merge/update specific parameters
- exp.params.set(model={"lr": 0.0001}) # Only updates model.lr
+ experiment.parameters().set(model={"lr": 0.0001}) # Only updates model.lr

  # Set flat parameters with dot notation
- exp.params.set(**{"model.lr": 0.001, "model.batch_size": 32})
+ experiment.parameters().set(**{"model.lr": 0.001, "model.batch_size": 32})
  """
  if not self._experiment._is_open:
  raise RuntimeError(