ml-dash 0.0.17__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ml_dash/ARCHITECTURE.md +382 -0
- ml_dash/__init__.py +14 -1
- ml_dash/autolog.py +32 -0
- ml_dash/backends/__init__.py +11 -0
- ml_dash/backends/base.py +124 -0
- ml_dash/backends/dash_backend.py +571 -0
- ml_dash/backends/local_backend.py +90 -0
- ml_dash/components/__init__.py +13 -0
- ml_dash/components/files.py +246 -0
- ml_dash/components/logs.py +104 -0
- ml_dash/components/metrics.py +169 -0
- ml_dash/components/parameters.py +144 -0
- ml_dash/job_logger.py +42 -0
- ml_dash/ml_logger.py +234 -0
- ml_dash/run.py +331 -0
- ml_dash-0.4.0.dist-info/METADATA +1424 -0
- ml_dash-0.4.0.dist-info/RECORD +19 -0
- ml_dash-0.4.0.dist-info/WHEEL +4 -0
- ml_dash-0.4.0.dist-info/entry_points.txt +3 -0
- app-build/asset-manifest.json +0 -15
- app-build/favicon.ico +0 -0
- app-build/github-markdown.css +0 -957
- app-build/index.html +0 -1
- app-build/manifest.json +0 -15
- app-build/monaco-editor-worker-loader-proxy.js +0 -6
- app-build/precache-manifest.ffc09f8a591c529a1bd5c6f21f49815f.js +0 -26
- app-build/service-worker.js +0 -34
- ml_dash/app.py +0 -60
- ml_dash/config.py +0 -16
- ml_dash/example.py +0 -0
- ml_dash/file_events.py +0 -71
- ml_dash/file_handlers.py +0 -141
- ml_dash/file_utils.py +0 -5
- ml_dash/file_watcher.py +0 -30
- ml_dash/main.py +0 -60
- ml_dash/mime_types.py +0 -20
- ml_dash/schema/__init__.py +0 -110
- ml_dash/schema/archive.py +0 -165
- ml_dash/schema/directories.py +0 -59
- ml_dash/schema/experiments.py +0 -65
- ml_dash/schema/files/__init__.py +0 -204
- ml_dash/schema/files/file_helpers.py +0 -79
- ml_dash/schema/files/images.py +0 -27
- ml_dash/schema/files/metrics.py +0 -64
- ml_dash/schema/files/parameters.py +0 -50
- ml_dash/schema/files/series.py +0 -235
- ml_dash/schema/files/videos.py +0 -27
- ml_dash/schema/helpers.py +0 -66
- ml_dash/schema/projects.py +0 -65
- ml_dash/schema/schema_helpers.py +0 -19
- ml_dash/schema/users.py +0 -33
- ml_dash/sse.py +0 -18
- ml_dash-0.0.17.dist-info/METADATA +0 -67
- ml_dash-0.0.17.dist-info/RECORD +0 -38
- ml_dash-0.0.17.dist-info/WHEEL +0 -5
- ml_dash-0.0.17.dist-info/top_level.txt +0 -2
ml_dash/ARCHITECTURE.md
ADDED
@@ -0,0 +1,382 @@

# ML-Logger Architecture

## Class Hierarchy and Composition

<details open>
<summary><strong>🏗️ System Overview</strong></summary>

```
ML-Logger System
│
├── Storage Backends (existing implementations removed pending redesign;
│     local, S3, GCP, and ml_dash backends are stubbed out as empty files,
│     along with an empty base class)
├── Logger Components (file and data types)
├── ML_Logger (Main Interface)
└── Supporting and Utility Classes
```

</details>

<details>
<summary><strong>💾 Storage Backends</strong> (Where to store)</summary>

```
Storage Backends
│
├── StorageBackend (Abstract Base)
│   ├── exists()
│   ├── write_bytes()
│   ├── read_bytes()
│   ├── write_text()
│   ├── read_text()
│   ├── append_text()
│   ├── list_dir()
│   ├── get_url()
│   ├── makedirs()
│   └── delete()
│
├── LocalBackend(StorageBackend)
│   └── Implements file system operations
│
├── S3Backend(StorageBackend)
│   └── Implements AWS S3 operations
│
└── GCPBackend(StorageBackend)
    └── Implements Google Cloud Storage operations
```

</details>
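
The usage examples later in this document pass destinations such as `s3://experiments/mnist` and `./debug_logs` to `get_logger`, which suggests the backend is selected from the URI scheme. A minimal, self-contained sketch of that dispatch idea; the scheme-to-backend mapping below is an assumption, and any real factory lives in code not shown in this diff:

```python
from urllib.parse import urlparse


def backend_family(uri: str) -> str:
    """Map a destination URI to a backend family from the tree above.

    Illustrative only: the scheme-to-backend mapping is assumed, not
    taken from ml_dash source.
    """
    scheme = urlparse(uri).scheme
    if scheme == "s3":
        return "S3Backend"
    if scheme == "gs":
        return "GCPBackend"
    return "LocalBackend"  # bare paths fall back to the local filesystem


assert backend_family("s3://experiments/mnist") == "S3Backend"
assert backend_family("./debug_logs") == "LocalBackend"
```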

<details>
<summary><strong>📝 Logger Components</strong> (What to log)</summary>

```
Experiment
│
├── logs: TextLogger
│   ├── log(level, message)
│   ├── error(message)
│   ├── warning(message)
│   ├── info(message)
│   └── debug(message)
│
├── metrics: ScalarLogger (accessed via experiment.metrics)
│   ├── log(step, **metrics) - Log metrics immediately
│   ├── collect(step, **metrics) - Collect for later aggregation
│   ├── flush(_aggregation, step) - Aggregate and log collected metrics
│   ├── get_summary(name, frequency)
│   ├── __call__(namespace) - Return namespaced logger
│   └── Uses: ScalarCache, Series
│
├── files: ArtifactLogger (accessed via experiment.files)
│   ├── save(data, filename) - Save generic data
│   ├── save_pkl(data, filename) - Save pickled data
│   ├── save_image(name, image) - Save image
│   ├── save_video(name, video, fps) - Save video
│   ├── save_audio(name, audio) - Save audio
│   ├── savefig(fig, filename) - Save matplotlib figure
│   ├── load_torch(filename) - Load PyTorch data
│   ├── make_video(pattern, output, fps, codec, quality, sort) - Create video from frames
│   ├── __call__(namespace) - Return namespaced logger
│   └── File management and artifact storage
│
├── params: ParameterIndex
│   ├── set(params) - Set/overwrite parameters
│   ├── extend(params) - Merge with existing parameters
│   ├── update(key, value) - Update a single parameter
│   ├── read() - Read all parameters
│   └── Manages experiment configuration
│
└── charts: ChartBuilder  # PLANNING PHASE, subject to change.
    ├── line_chart(query)
    ├── scatter_plot(query)
    ├── bar_chart(query)
    └── video/images(query)
```

A sketch of the `set`/`extend`/`update` semantics follows this section.

</details>
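
To make the `params: ParameterIndex` contract above concrete, here is a toy, self-contained model of the described set/extend/update semantics. The real class lives in `ml_dash/components/parameters.py` (not shown in this diff) and may differ:

```python
class ToyParameterIndex:
    """Toy restatement of the ParameterIndex contract, not the real class."""

    def __init__(self):
        self._params = {}

    def set(self, **params):       # overwrite everything
        self._params = dict(params)

    def extend(self, **params):    # merge into existing parameters
        self._params.update(params)

    def update(self, key, value):  # change a single parameter
        self._params[key] = value

    def read(self):                # read all parameters
        return dict(self._params)


p = ToyParameterIndex()
p.set(lr=0.001, batch_size=32)
p.extend(epochs=100)     # lr and batch_size survive the merge
p.update("lr", 0.01)     # only lr changes
assert p.read() == {"lr": 0.01, "batch_size": 32, "epochs": 100}
```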

<details>
<summary><strong>🎯 Composite Logger</strong> (Main Interface)</summary>

```
MLLogger
├── __init__(backend: StorageBackend)
├── params: ParameterIndex - Parameter management
├── metrics: ScalarLogger - Metrics logging
├── readme: MarkdownLogger - Rich-text logging (PLANNING PHASE)
├── files: ArtifactLogger - File and artifact management
├── logs: TextLogger - Text logging
│
├── Convenience Methods: (could simply be hidden under logs)
│   ├── error() -> logs.error()
│   ├── warning() -> logs.warning()
│   ├── info() -> logs.info()
│   └── debug() -> logs.debug()
│
└── Context Managers:
    ├── experiment(name)
    └── run(id)
```

A sketch of this delegation pattern follows this section.

</details>
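
The convenience methods above are plain delegation. A self-contained sketch of that composition pattern, with a stub standing in for `TextLogger`; the wiring in the real `MLLogger` is not shown in this diff:

```python
class StubTextLogger:
    """Stand-in for TextLogger; records messages instead of writing files."""

    def __init__(self):
        self.lines = []

    def log(self, level, message):
        self.lines.append((level, message))

    def error(self, message):   self.log("ERROR", message)
    def warning(self, message): self.log("WARNING", message)
    def info(self, message):    self.log("INFO", message)
    def debug(self, message):   self.log("DEBUG", message)


class SketchMLLogger:
    """Composition sketch: convenience methods delegate to `logs`."""

    def __init__(self):
        self.logs = StubTextLogger()

    def error(self, message):   self.logs.error(message)
    def warning(self, message): self.logs.warning(message)
    def info(self, message):    self.logs.info(message)
    def debug(self, message):   self.logs.debug(message)


logger = SketchMLLogger()
logger.info("Starting epoch 1")
assert logger.logs.lines == [("INFO", "Starting epoch 1")]
```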

<details>
<summary><strong>⚙️ Supporting Classes</strong></summary>

```
Supporting Classes
│
└── Serialization (serdes/) (NOT USED)
    ├── serialize()
    ├── deserialize()
    └── Type registry with $t, $s keys
```

</details>

## Usage Examples

<details>
<summary><strong>📊 Logging Different Data Types</strong></summary>

```python
# Text logging (errors, warnings, info)
experiment.logs.error("Training failed")
experiment.logs.warning("Low GPU memory")
experiment.logs.info("Starting epoch 1")

# Parameter logging
experiment.params.set(learning_rate=0.001, batch_size=32)

# Metrics logging
experiment.metrics.log(step=100, loss=0.523, accuracy=0.95)

# Collect metrics for aggregation
experiment.metrics.collect(step=101, loss=0.521)
experiment.metrics.flush(_aggregation="mean", step=100)

# Namespaced metrics
experiment.metrics("train").log(step=100, loss=0.5)
experiment.metrics("val").log(step=100, accuracy=0.95)

# File operations
experiment.files.save_image("confusion_matrix", image_array)
experiment.files.save(model_state, "checkpoint.pt")
experiment.files("checkpoints").save(model_state, "model_epoch_10.pt")
```

</details>

<details>
<summary><strong>🎛️ Direct Component Access</strong></summary>

```python
# Access components directly for advanced usage
experiment.logs.error("Direct text logging")
experiment.metrics.log(step=50, lr=0.001)
experiment.files.save_video("training_progress", video_array, fps=30)

# Namespaced file operations
experiment.files("videos").save_video("training_progress", video_array, fps=30)
experiment.files("checkpoints").save(model_state, "model.pt")

# Get statistics
stats = experiment.metrics.get_stats("loss")
percentile_95 = experiment.metrics.get_percentile("loss", 95)
```

</details>

## File Organization

<details>
<summary><strong>📁 Project Structure</strong></summary>

```
ml-logger/
├── src/ml_logger/
│   ├── __init__.py
│   ├── experiment.py        # Main MLLogger class
│   │
│   ├── backends/
│   │   ├── __init__.py
│   │   ├── base.py          # StorageBackend ABC
│   │   ├── local.py         # LocalBackend
│   │   ├── s3.py            # S3Backend
│   │   └── gcp.py           # GCPBackend
│   │
│   ├── loggers/
│   │   ├── __init__.py
│   │   ├── text.py          # TextLogger
│   │   ├── scalar.py        # ScalarLogger
│   │   └── artifact.py      # ArtifactLogger
│   │
│   ├── scalar_cache.py      # ScalarCache, Series, RollingStats
│   │
│   └── serdes/
│       ├── __init__.py
│       └── ndjson.py        # Serialization with $t, $s
│
└── tests/
    ├── test_backends.py
    ├── test_loggers.py
    ├── test_scalar_cache.py
    └── test_integration.py
```

</details>

## Advanced Features

<details>
<summary><strong>📈 Statistical Features</strong></summary>

### Rolling Statistics
- **Window-based metrics**: Configurable window size over recent data (see the sketch after this section)
- **Automatic calculation**: Mean, variance, std, min, max
- **Percentiles**: p0, p1, p5, p10, p20, p25, p40, p50, p60, p75, p80, p90, p95, p99, p100

### Summary Frequencies
Automatic summaries at steps 1, 5, 10, 15, 20, 25, 30, 40, 50, 75, 80, 100, 120, 150, 200, 250, 300, 400, 500, 600, 1000, 1200, 1500, 2000, 2500, ...

```python
# Access statistics
stats = experiment.metrics.get_stats("loss")
print(f"Mean: {stats.mean}, Std: {stats.std}")

# Get percentiles
p95 = experiment.metrics.get_percentile("accuracy", 95)

# Get summaries at a specific frequency
summaries = experiment.metrics.get_summary("loss", frequency=100)
```

</details>
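
As referenced above, here is a self-contained sketch of window-based rolling statistics. The real implementation is `RollingStats` in `scalar_cache.py` (not shown in this diff); the window default, API, and nearest-rank percentile below are assumptions:

```python
from collections import deque
import statistics


class SketchRollingStats:
    """Window-based rolling statistics, illustrative only."""

    def __init__(self, window: int = 100):
        self.values = deque(maxlen=window)  # keep only the most recent points

    def append(self, value: float) -> None:
        self.values.append(value)

    def summary(self) -> dict:
        vs = sorted(self.values)
        n = len(vs)
        return {
            "mean": statistics.fmean(vs),
            "std": statistics.pstdev(vs) if n > 1 else 0.0,
            "min": vs[0],
            "max": vs[-1],
            "p95": vs[min(n - 1, int(0.95 * (n - 1)))],  # nearest-rank percentile
        }


stats = SketchRollingStats(window=5)
for v in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]:  # the first value falls out of the window
    stats.append(v)
print(stats.summary())  # mean of [2.0 .. 6.0] == 4.0
```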

<details>
<summary><strong>🔄 Serialization System</strong></summary>

### Type-Annotated Serialization
- Uses `$t` for type keys
- Uses `$s` for shape keys (arrays)
- Recursive serialization for nested structures
- Supports: primitives, datetime, numpy, Path, bytes, collections

```python
from datetime import datetime
from pathlib import Path

import numpy as np

from ml_dash.serdes import serialize, deserialize

# Serialize complex objects
data = {
    "array": np.array([[1, 2], [3, 4]]),
    "date": datetime.now(),
    "path": Path("/tmp/file.txt"),
}
serialized = serialize(data)

# Deserialize back
original = deserialize(serialized)
```

</details>
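
For intuition, the `$t`/`$s` envelope for a numpy array might look like the following. This is purely illustrative; the actual layout is defined in `serdes/ndjson.py` (not shown in this diff) and may differ:

```python
import numpy as np

arr = np.array([[1, 2], [3, 4]])
envelope = {
    "$t": "ndarray",                 # "$t": type tag (assumed value)
    "$s": list(arr.shape),           # "$s": shape key for arrays
    "data": arr.flatten().tolist(),  # flattened payload (assumed encoding)
}
restored = np.array(envelope["data"]).reshape(envelope["$s"])
assert (restored == arr).all()
```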

## Examples

<details>
<summary><strong>🤖 ML Training Example</strong></summary>

```python
# train.py - Define your training function
from ml_dash import get_logger

experiment = get_logger("s3://experiments/mnist")  # shared logger instance


@experiment.run
def train(config):
    """Training function that will be wrapped by the experiment."""
    # NOTE: config is assumed to be wrapped into attribute-style access
    # by the run decorator.
    model = create_model(config.model_type)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)

    best_acc = 0
    for epoch in range(config.epochs):
        # Training loop
        for batch_idx, (data, target) in enumerate(train_loader):
            loss = train_step(model, data, target, optimizer)

            step = epoch * len(train_loader) + batch_idx
            with experiment.step(step):
                # Log metrics
                experiment.log_metric("train/loss", loss.item())

                # Log histograms periodically
                if step % 100 == 0:
                    experiment.log_histogram("gradients", get_gradients(model))

                # Save visualizations
                if step % 500 == 0:
                    fig = plot_predictions(model, data)
                    experiment.log_image("predictions", fig)

        # Validation
        val_loss, val_acc = validate(model, val_loader)
        experiment.log_metrics({
            "val/loss": val_loss,
            "val/accuracy": val_acc,
        }, step=epoch)

        # Save checkpoint
        if val_acc > best_acc:
            experiment.log_model("best_model", model.state_dict())
            best_acc = val_acc

    # Final summary
    experiment.info(f"Training completed. Best accuracy: {best_acc}")
    return {"best_accuracy": best_acc}
```

**experiment.py** - Launch experiments with different configs:

```python
from ml_dash import get_logger
from train import train

# Initialize logger
experiment = get_logger("s3://experiments/mnist")

# Define experiment configurations
configs = [
    {"model_type": "CNN", "lr": 0.001, "batch_size": 32, "epochs": 100},
    {"model_type": "CNN", "lr": 0.01, "batch_size": 64, "epochs": 100},
    {"model_type": "ResNet", "lr": 0.001, "batch_size": 32, "epochs": 150},
]

# Run the experiment with multiple configurations
with experiment.experiment("model_comparison"):
    for i, config in enumerate(configs):
        # Each config gets its own run
        run_name = f"{config['model_type']}_lr{config['lr']}"

        # The decorator handles run creation and lifecycle
        result = train(
            config=config,
            _run_name=run_name,
            _hyperparams=config,
            _tags=["baseline", config["model_type"].lower()],
        )

        print(f"Run {run_name} completed with accuracy: {result['best_accuracy']}")
```

</details>

<details>
<summary><strong>🔍 Debugging Example</strong></summary>

```python
from ml_dash import LogLevel, get_logger

# Set up logger with debug level
experiment = get_logger("./debug_logs")
experiment.logs.set_level(LogLevel.DEBUG)

try:
    # Your code here
    result = risky_operation()
    experiment.debug(f"Operation result: {result}")

except Exception as e:
    # Log exception with full traceback
    experiment.exception("Operation failed", exc_info=True)

    # Log additional context
    experiment.error("Failed at step", step=current_step,
                     input_shape=data.shape)

    # Save problematic data for debugging
    experiment.log_file("failed_input", "debug_data.pkl")

finally:
    # Get recent logs
    errors = experiment.get_logs(level="ERROR", limit=50)
    print(f"Found {len(errors)} errors")
```

</details>
ml_dash/__init__.py
CHANGED
@@ -1 +1,14 @@

```python
"""ML-Logger: A minimal, local-first experiment tracking library."""

from .run import Experiment
from .ml_logger import ML_Logger, LogLevel
from .job_logger import JobLogger

__version__ = "0.4.0"

__all__ = [
    "Experiment",
    "ML_Logger",
    "LogLevel",
    "JobLogger",
]
```
ml_dash/autolog.py
ADDED
@@ -0,0 +1,32 @@

```python
"""Auto-configured experiment for ML-Logger.

This module provides a pre-configured global `experiment` instance that can be
imported and used immediately without manual setup.

Example:
    from ml_dash.autolog import experiment

    # No setup needed!
    experiment.params.set(learning_rate=0.001)
    experiment.metrics.log(step=0, loss=0.5)
    experiment.files.save(model.state_dict(), "checkpoint.pt")

Configuration:
    The auto-experiment is configured from environment variables:
    - ML_LOGGER_NAMESPACE: User/team namespace (default: "default")
    - ML_LOGGER_WORKSPACE: Project workspace (default: "experiments")
    - ML_LOGGER_PREFIX: Experiment prefix (default: auto-generated timestamp+uuid)
    - ML_LOGGER_REMOTE: Remote server URL (optional)

    Or from ~/.ml-logger/config.yaml:
        namespace: alice
        workspace: my-project
        remote: http://localhost:3001
"""

from .run import Experiment

# Auto-configured global experiment instance
experiment = Experiment._auto_configure()

__all__ = ["experiment"]
```
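
Because the global `experiment` is built when the module is imported, the environment variables presumably have to be set before that first import. A hedged usage sketch:

```python
import os

# Configure before the first import: `experiment` is created at import time
# by Experiment._auto_configure(), so later changes to these variables would
# presumably have no effect (assumption based on the module above).
os.environ["ML_LOGGER_NAMESPACE"] = "alice"
os.environ["ML_LOGGER_WORKSPACE"] = "my-project"

from ml_dash.autolog import experiment

experiment.params.set(learning_rate=0.001)
experiment.metrics.log(step=0, loss=0.5)
```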
ml_dash/backends/base.py
ADDED
@@ -0,0 +1,124 @@

```python
"""Storage backend abstract base class for ML-Logger.

This module defines the abstract interface that all storage backends must implement.
"""

from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional, List


class StorageBackend(ABC):
    """Abstract base class for storage backends.

    All storage backends (local, S3, GCP, ML-Dash) must implement these methods.
    """

    @abstractmethod
    def exists(self, path: str) -> bool:
        """Check if a file or directory exists.

        Args:
            path: Path to check

        Returns:
            True if path exists, False otherwise
        """
        pass

    @abstractmethod
    def write_bytes(self, path: str, data: bytes) -> None:
        """Write binary data to a file.

        Args:
            path: File path
            data: Binary data to write
        """
        pass

    @abstractmethod
    def read_bytes(self, path: str) -> bytes:
        """Read binary data from a file.

        Args:
            path: File path

        Returns:
            Binary data from file
        """
        pass

    @abstractmethod
    def write_text(self, path: str, text: str) -> None:
        """Write text to a file.

        Args:
            path: File path
            text: Text to write
        """
        pass

    @abstractmethod
    def read_text(self, path: str) -> str:
        """Read text from a file.

        Args:
            path: File path

        Returns:
            Text content from file
        """
        pass

    @abstractmethod
    def append_text(self, path: str, text: str) -> None:
        """Append text to a file.

        Args:
            path: File path
            text: Text to append
        """
        pass

    @abstractmethod
    def list_dir(self, path: str = "") -> List[str]:
        """List contents of a directory.

        Args:
            path: Directory path (empty string for root)

        Returns:
            List of file/directory names
        """
        pass

    @abstractmethod
    def get_url(self, path: str) -> Optional[str]:
        """Get a URL for accessing a file (if applicable).

        Args:
            path: File path

        Returns:
            URL string or None if not applicable
        """
        pass

    @abstractmethod
    def makedirs(self, path: str, exist_ok: bool = True) -> None:
        """Create directories recursively.

        Args:
            path: Directory path to create
            exist_ok: Don't raise an error if the directory exists
        """
        pass

    @abstractmethod
    def delete(self, path: str) -> None:
        """Delete a file.

        Args:
            path: File path to delete
        """
        pass
```