mlx-tracker 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. mlx_tracker-0.1.0/.github/ISSUE_TEMPLATE/PULL_REQUEST_TEMPLATE.md +13 -0
  2. mlx_tracker-0.1.0/.github/ISSUE_TEMPLATE/bug_report.md +21 -0
  3. mlx_tracker-0.1.0/.github/ISSUE_TEMPLATE/feature_request.md +17 -0
  4. mlx_tracker-0.1.0/.gitignore +65 -0
  5. mlx_tracker-0.1.0/CHANGELOG.md +23 -0
  6. mlx_tracker-0.1.0/CONTRIBUTING.md +0 -0
  7. mlx_tracker-0.1.0/LICENSE +21 -0
  8. mlx_tracker-0.1.0/PKG-INFO +50 -0
  9. mlx_tracker-0.1.0/README.md +4 -0
  10. mlx_tracker-0.1.0/docs/commands/init.md +40 -0
  11. mlx_tracker-0.1.0/docs/commands/log.md +37 -0
  12. mlx_tracker-0.1.0/docs/commands/run.md +57 -0
  13. mlx_tracker-0.1.0/docs/index.md +54 -0
  14. mlx_tracker-0.1.0/docs/quickstart.md +55 -0
  15. mlx_tracker-0.1.0/examples/catboost_example.py +79 -0
  16. mlx_tracker-0.1.0/examples/pytorch_example.py +92 -0
  17. mlx_tracker-0.1.0/examples/sklearn_example.py +59 -0
  18. mlx_tracker-0.1.0/mlx/__init__.py +8 -0
  19. mlx_tracker-0.1.0/mlx/cli.py +46 -0
  20. mlx_tracker-0.1.0/mlx/commands/__init__.py +1 -0
  21. mlx_tracker-0.1.0/mlx/commands/compare.py +346 -0
  22. mlx_tracker-0.1.0/mlx/commands/export.py +264 -0
  23. mlx_tracker-0.1.0/mlx/commands/init.py +179 -0
  24. mlx_tracker-0.1.0/mlx/commands/log.py +174 -0
  25. mlx_tracker-0.1.0/mlx/commands/ls.py +218 -0
  26. mlx_tracker-0.1.0/mlx/commands/run.py +306 -0
  27. mlx_tracker-0.1.0/mlx/commands/status.py +124 -0
  28. mlx_tracker-0.1.0/mlx/core/__init__.py +7 -0
  29. mlx_tracker-0.1.0/mlx/core/experiment.py +43 -0
  30. mlx_tracker-0.1.0/mlx/core/metrics.py +149 -0
  31. mlx_tracker-0.1.0/mlx/core/params.py +150 -0
  32. mlx_tracker-0.1.0/mlx/core/run.py +162 -0
  33. mlx_tracker-0.1.0/mlx/storage/__init__.py +1 -0
  34. mlx_tracker-0.1.0/mlx/storage/db.py +121 -0
  35. mlx_tracker-0.1.0/mlx/storage/filesystem.py +232 -0
  36. mlx_tracker-0.1.0/mlx/utils/__init__.py +1 -0
  37. mlx_tracker-0.1.0/mlx/utils/config.py +0 -0
  38. mlx_tracker-0.1.0/mlx/utils/display.py +133 -0
  39. mlx_tracker-0.1.0/pyproject.toml +49 -0
  40. mlx_tracker-0.1.0/test_metrics_params.py +137 -0
  41. mlx_tracker-0.1.0/test_today.py +94 -0
  42. mlx_tracker-0.1.0/tests/__init__.py +0 -0
  43. mlx_tracker-0.1.0/tests/conftest.py +64 -0
  44. mlx_tracker-0.1.0/tests/test_cli.py +171 -0
  45. mlx_tracker-0.1.0/tests/test_experiment.py +61 -0
  46. mlx_tracker-0.1.0/tests/test_run.py +128 -0
@@ -0,0 +1,13 @@
1
+ ## What does this PR do?
2
+ <!-- Brief description -->
3
+
4
+ ## Type of change
5
+ - [ ] Bug fix
6
+ - [ ] New feature
7
+ - [ ] Documentation update
8
+ - [ ] Refactor
9
+
10
+ ## Checklist
11
+ - [ ] I ran `pytest` and all tests pass
12
+ - [ ] I ran `ruff check .` and fixed any issues
13
+ - [ ] I updated `CHANGELOG.md`
@@ -0,0 +1,21 @@
1
+ ---
2
+ name: Bug Report
3
+ about: Something isn't working
4
+ labels: bug
5
+ ---
6
+
7
+ ## What happened?
8
+ <!-- Describe the bug clearly -->
9
+
10
+ ## Steps to reproduce
11
+ ```bash
12
+ # Paste the exact commands you ran
13
+ ```
14
+
15
+ ## Expected behavior
16
+ <!-- What should have happened? -->
17
+
18
+ ## Environment
19
+ - OS:
20
+ - Python version:
21
+ - mlx version: (`mlx version`)
@@ -0,0 +1,17 @@
1
+ ---
2
+ name: Feature Request
3
+ about: Suggest something new
4
+ labels: enhancement
5
+ ---
6
+
7
+ ## What problem does this solve?
8
+ <!-- Describe the problem you're facing -->
9
+
10
+ ## Proposed solution
11
+ <!-- How would you like it to work? -->
12
+
13
+ ## Example usage
14
+ ```bash
15
+ # Show what the command would look like
16
+ mlx your-new-command --option value
17
+ ```
@@ -0,0 +1,65 @@
1
+ # ─────────────────────────────────────
2
+ # Python
3
+ # ─────────────────────────────────────
4
+
5
+ # Virtual environment — never commit this
6
+ venv
7
+ venv/
8
+ env/
9
+
10
+ # Python compiled files — auto-generated by Python
11
+ __pycache__/
12
+ *.py[cod]
13
+ *.pyo
14
+
15
+ # Build artifacts — generated when you run `pip install` or `python -m build`
16
+ dist/
17
+ build/
18
+ *.egg-info/
19
+ .eggs/
20
+
21
+ # ─────────────────────────────────────
22
+ # MLX — local experiment data
23
+ # ─────────────────────────────────────
24
+
25
+ # This is each user's private local data
26
+ # Everyone has their own .mlx/ folder on their own machine
27
+ .mlx/
28
+
29
+ # ─────────────────────────────────────
30
+ # Testing & Coverage
31
+ # ─────────────────────────────────────
32
+ .pytest_cache/
33
+ .coverage
34
+ htmlcov/
35
+ coverage.xml
36
+
37
+ # ─────────────────────────────────────
38
+ # IDE / Editor files
39
+ # ─────────────────────────────────────
40
+ .vscode/
41
+ .idea/
42
+ *.swp
43
+ *.swo
44
+
45
+ # ─────────────────────────────────────
46
+ # OS generated files
47
+ # ─────────────────────────────────────
48
+
49
+ # Mac
50
+ .DS_Store
51
+ .AppleDouble
52
+
53
+ # Windows
54
+ Thumbs.db
55
+ ehthumbs.db
56
+ Desktop.ini
57
+
58
+ # ─────────────────────────────────────
59
+ # Secrets — NEVER commit these
60
+ # ─────────────────────────────────────
61
+ .env
62
+ .env.local
63
+ *.pem
64
+ *.key
65
+ secrets.toml
@@ -0,0 +1,23 @@
1
+ # Changelog
2
+
3
+ All notable changes to mlx will be documented here.
4
+
5
+ Format based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
6
+ Versioning follows [Semantic Versioning](https://semver.org/).
7
+
8
+ ---
9
+
10
+ ## [Unreleased]
11
+
12
+ ### Added
13
+ - Initial project structure
14
+ - `pyproject.toml` with modern Python packaging
15
+ - CLI entry point via Typer
16
+ - MIT License
17
+
18
+ ---
19
+
20
+ ## Version Guide
21
+
22
+ - `0.x.x` — Early development, APIs may change
23
+ - `1.0.0` — Stable release, ready for production use
File without changes
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Your Name
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,50 @@
1
+ Metadata-Version: 2.4
2
+ Name: mlx-tracker
3
+ Version: 0.1.0
4
+ Summary: Local ML experiment tracker for engineers who live in the terminal
5
+ Project-URL: Homepage, https://github.com/adityasingh345/mlx
6
+ Project-URL: Repository, https://github.com/adityasingh345/mlx
7
+ Author-email: Your Real Name <your@email.com>
8
+ License: MIT License
9
+
10
+ Copyright (c) 2024 Your Name
11
+
12
+ Permission is hereby granted, free of charge, to any person obtaining a copy
13
+ of this software and associated documentation files (the "Software"), to deal
14
+ in the Software without restriction, including without limitation the rights
15
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16
+ copies of the Software, and to permit persons to whom the Software is
17
+ furnished to do so, subject to the following conditions:
18
+
19
+ The above copyright notice and this permission notice shall be included in all
20
+ copies or substantial portions of the Software.
21
+
22
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28
+ SOFTWARE.
29
+ License-File: LICENSE
30
+ Keywords: cli,experiment-tracking,machine-learning,mlops
31
+ Classifier: Development Status :: 3 - Alpha
32
+ Classifier: Environment :: Console
33
+ Classifier: License :: OSI Approved :: MIT License
34
+ Classifier: Programming Language :: Python :: 3.9
35
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
36
+ Requires-Python: >=3.9
37
+ Requires-Dist: rich>=13
38
+ Requires-Dist: sqlmodel>=0.0.16
39
+ Requires-Dist: toml>=0.10
40
+ Requires-Dist: typer[all]>=0.12
41
+ Provides-Extra: dev
42
+ Requires-Dist: pytest; extra == 'dev'
43
+ Requires-Dist: pytest-cov; extra == 'dev'
44
+ Requires-Dist: ruff; extra == 'dev'
45
+ Description-Content-Type: text/markdown
46
+
47
+ # 🧪 mlx — ML Experiment Manager
48
+
49
+ > Track experiments, runs, params & metrics. 100% local. No server needed.
50
+ ```bash
@@ -0,0 +1,4 @@
1
+ # 🧪 mlx — ML Experiment Manager
2
+
3
+ > Track experiments, runs, params & metrics. 100% local. No server needed.
4
+ ```bash
@@ -0,0 +1,40 @@
1
+ # mlx init
2
+
3
+ Initialize a new mlx project in the current directory.
4
+
5
+ ## Usage
6
+ ```bash
7
+ mlx init [OPTIONS]
8
+ ```
9
+
10
+ ## Options
11
+
12
+ | Option | Short | Default | Description |
13
+ |--------|-------|---------|-------------|
14
+ | `--name` | `-n` | folder name | Project name |
15
+ | `--desc` | `-d` | `""` | Project description |
16
+ | `--force` | `-f` | `False` | Re-initialize existing project |
17
+
18
+ ## Examples
19
+ ```bash
20
+ # Basic init
21
+ mlx init
22
+
23
+ # With custom name
24
+ mlx init --name "fraud-detection"
25
+
26
+ # With name and description
27
+ mlx init --name "fraud-detection" --desc "Fraud model v2"
28
+
29
+ # Re-initialize (keeps existing data)
30
+ mlx init --force
31
+ ```
32
+
33
+ ## What it creates
34
+ ```
35
+ .mlx/
36
+ ├── mlx.db ← SQLite database
37
+ ├── config.toml ← project config
38
+ ├── runs/ ← log files per run
39
+ └── artifacts/ ← saved models (future)
40
+ ```
@@ -0,0 +1,37 @@
1
+ # mlx log
2
+
3
+ Log metrics, params and notes to the active run.
4
+
5
+ ## mlx log metric
6
+ ```bash
7
+ mlx log metric accuracy 0.94
8
+ mlx log metric accuracy 0.94 --step 100
9
+ mlx log metric val_loss 0.21 --step 50
10
+ ```
11
+
12
+ | Argument | Description |
13
+ |----------|-------------|
14
+ | `key` | Metric name — accuracy, loss, auc |
15
+ | `value` | Numeric value — 0.94 |
16
+ | `--step` | Training step or epoch |
17
+
18
+ ---
19
+
20
+ ## mlx log param
21
+ ```bash
22
+ mlx log param learning_rate 0.05
23
+ mlx log param depth 6
24
+ mlx log param optimizer adam
25
+ ```
26
+
27
+ Logging the same param twice **updates** it — no duplicates.
28
+
29
+ ---
30
+
31
+ ## mlx log note
32
+ ```bash
33
+ mlx log note "val loss stopped improving at step 150"
34
+ mlx log note "model saved to artifacts/model.cbm"
35
+ ```
36
+
37
+ Notes are saved to the run's log file and shown in `mlx logs`.
@@ -0,0 +1,57 @@
1
+ # mlx run
2
+
3
+ Manage experiment runs.
4
+
5
+ ## Subcommands
6
+
7
+ ### mlx run start
8
+
9
+ Start a new run and begin tracking.
10
+ ```bash
11
+ mlx run start --name "catboost-v1"
12
+ mlx run start --name "catboost-v1" --experiment "fraud"
13
+ mlx run start --name "catboost-v1" --tags "catboost,baseline"
14
+ ```
15
+
16
+ | Option | Short | Description |
17
+ |--------|-------|-------------|
18
+ | `--name` | `-n` | **Required.** Run name |
19
+ | `--experiment` | `-e` | Experiment group (default: "default") |
20
+ | `--tags` | `-t` | Comma-separated tags |
21
+
22
+ ---
23
+
24
+ ### mlx run stop
25
+
26
+ Stop the active run.
27
+ ```bash
28
+ mlx run stop
29
+ mlx run stop --status failed
30
+ ```
31
+
32
+ | Option | Description |
33
+ |--------|-------------|
34
+ | `--status` | `done` or `failed` (default: done) |
35
+ | `--run-id` | Stop specific run instead of active |
36
+
37
+ ---
38
+
39
+ ### mlx run list
40
+
41
+ List all runs.
42
+ ```bash
43
+ mlx run list
44
+ mlx run list --experiment fraud
45
+ mlx run list --status done
46
+ mlx run list --limit 10
47
+ ```
48
+
49
+ ---
50
+
51
+ ### mlx run delete
52
+
53
+ Delete a run and all its data permanently.
54
+ ```bash
55
+ mlx run delete --run-id "catboost-v1_..."
56
+ mlx run delete --run-id "catboost-v1_..." --yes
57
+ ```
@@ -0,0 +1,54 @@
1
+ # mlx — ML Experiment Manager
2
+
3
+ > Track experiments, runs, params and metrics.
4
+ > 100% local. No server. No account. No cloud.
5
+
6
+ ## Why mlx?
7
+
8
+ Every ML engineer has this problem:
9
+
10
+ - Trained 20 models last week
11
+ - Can't remember which settings gave the best result
12
+ - No way to compare runs side by side
13
+ - Results scattered across notebooks and print statements
14
+
15
+ mlx fixes this with one simple workflow:
16
+ ```bash
17
+ mlx run start --name "catboost-v1"
18
+ python train.py
19
+ mlx run stop
20
+ mlx compare catboost-v1 catboost-v2
21
+ ```
22
+
23
+ ## How it works
24
+
25
+ mlx stores everything in a local SQLite database at `.mlx/mlx.db`.
26
+ No internet connection needed. Your data never leaves your machine.
27
+ ```
28
+ your-project/
29
+ └── .mlx/
30
+ ├── mlx.db ← all runs, params, metrics
31
+ ├── config.toml ← project settings
32
+ └── runs/
33
+ └── catboost-v1_20240301/
34
+ └── stdout.log
35
+ ```
36
+
37
+ ## Install
38
+ ```bash
39
+ pip install mlx-tracker
40
+ ```
41
+
42
+ ## Commands
43
+
44
+ | Command | Description |
45
+ |---------|-------------|
46
+ | `mlx init` | Initialize project |
47
+ | `mlx run start` | Start a run |
48
+ | `mlx run stop` | Stop active run |
49
+ | `mlx log metric` | Log a metric |
50
+ | `mlx log param` | Log a param |
51
+ | `mlx ls` | List all runs |
52
+ | `mlx status` | Show active run |
53
+ | `mlx compare` | Compare runs |
54
+ | `mlx export` | Export to CSV/JSON |
@@ -0,0 +1,55 @@
1
+ # Quickstart
2
+
3
+ Get tracking in under 5 minutes.
4
+
5
+ ## 1. Install
6
+ ```bash
7
+ pip install mlx-tracker
8
+ ```
9
+
10
+ ## 2. Initialize your project
11
+ ```bash
12
+ cd your-ml-project
13
+ mlx init
14
+ ```
15
+
16
+ ## 3. Start a run
17
+ ```bash
18
+ mlx run start --name "my-first-run" --experiment "fraud-detection"
19
+ ```
20
+
21
+ ## 4. Log params and metrics
22
+ ```bash
23
+ mlx log param learning_rate 0.05
24
+ mlx log param depth 6
25
+
26
+ mlx log metric accuracy 0.94 --step 100
27
+ mlx log metric loss 0.21 --step 100
28
+ ```
29
+
30
+ ## 5. Stop the run
31
+ ```bash
32
+ mlx run stop
33
+ ```
34
+
35
+ ## 6. See your results
36
+ ```bash
37
+ mlx ls
38
+ mlx status --run-id "my-first-run_..."
39
+ ```
40
+
41
+ ## 7. Train another model and compare
42
+ ```bash
43
+ mlx run start --name "my-second-run"
44
+ mlx log param learning_rate 0.01
45
+ mlx log metric accuracy 0.97 --step 100
46
+ mlx run stop
47
+
48
+ mlx compare my-first-run_... my-second-run_...
49
+ ```
50
+
51
+ ## Next steps
52
+
53
+ - [Commands reference](commands/init.md)
54
+ - [CatBoost example](../examples/catboost_example.py)
55
+ - [sklearn example](../examples/sklearn_example.py)
@@ -0,0 +1,79 @@
1
+ """
2
+ examples/catboost_example.py
3
+
4
+ Full CatBoost training tracked with mlx.
5
+
6
+ Setup:
7
+ pip install catboost scikit-learn
8
+
9
+ Usage:
10
+ cd your-project
11
+ mlx init
12
+ mlx run start --name "catboost-v1" --tags "catboost,baseline"
13
+ python examples/catboost_example.py
14
+ mlx run stop
15
+ mlx ls
16
+ """
17
+
18
+ import subprocess
19
+ from sklearn.datasets import make_classification
20
+ from sklearn.model_selection import train_test_split
21
+ from sklearn.metrics import accuracy_score, roc_auc_score
22
+ from catboost import CatBoostClassifier
23
+
24
+
25
def mlx(cmd: str):
    """Call the mlx CLI from Python and echo its stdout.

    The command string is tokenized with shlex (so quoted arguments such as
    "log note 'msg'" survive intact) and executed WITHOUT a shell, avoiding
    the injection/quoting pitfalls of shell=True with an f-string.

    If the mlx CLI is not on PATH the call is silently skipped, matching the
    original best-effort behavior (nothing was printed in that case either).
    """
    import shlex  # local import keeps the example's top-level imports unchanged

    try:
        result = subprocess.run(
            ["mlx", *shlex.split(cmd)],
            capture_output=True, text=True,
        )
    except FileNotFoundError:
        # mlx not installed — tracking is optional for running the example
        return
    if result.stdout.strip():
        print(f" {result.stdout.strip()}")
33
+
34
+
35
# ── Data ────────────────────────────────────
# Synthetic binary-classification dataset; seeded so the example reproduces.
X, y = make_classification(
    n_samples=10000, n_features=20, n_informative=10, random_state=42
)
# Seed the splits too — otherwise each run trains on different data even
# though the model itself uses random_seed=42.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.2, random_state=42
)

# ── Params ───────────────────────────────────
# A single dict drives both model construction and what gets logged, so the
# tracked params can never drift from what was actually trained.
params = {
    "learning_rate": 0.05,
    "depth": 6,
    "iterations": 200,
    "loss_function": "Logloss",
    "eval_metric": "AUC",
    "random_seed": 42,
    "verbose": False,
}

for key, value in params.items():
    mlx(f"log param {key} {value}")

# ── Train ────────────────────────────────────
model = CatBoostClassifier(**params)
model.fit(X_train, y_train, eval_set=(X_val, y_val), use_best_model=True)

# ── Log metrics per iteration ────────────────
# CatBoost records train/validation loss per boosting iteration; log every
# 20th step to keep the run history compact.
evals = model.get_evals_result()
curves = zip(evals["learn"]["Logloss"], evals["validation"]["Logloss"])
for step, (train_ll, val_ll) in enumerate(curves, start=1):
    if step % 20 == 0:
        mlx(f"log metric train_logloss {train_ll:.4f} --step {step}")
        mlx(f"log metric val_logloss {val_ll:.4f} --step {step}")

# ── Final metrics ─────────────────────────────
y_pred = model.predict(X_test)
y_pred_prob = model.predict_proba(X_test)[:, 1]  # probability of class 1

mlx(f"log metric test_accuracy {accuracy_score(y_test, y_pred):.4f}")
mlx(f"log metric test_auc {roc_auc_score(y_test, y_pred_prob):.4f}")
mlx(f"log param best_iteration {model.get_best_iteration()}")
mlx("log note 'Training complete'")

print("\nDone! Run: mlx run stop && mlx ls")
@@ -0,0 +1,92 @@
1
+ """
2
+ examples/pytorch_example.py
3
+
4
+ PyTorch training loop tracked with mlx.
5
+ Logs metrics at every epoch.
6
+
7
+ Usage:
8
+ pip install torch
9
+ mlx run start --name "pytorch-v1" --tags "pytorch,mlp"
10
+ python examples/pytorch_example.py
11
+ mlx run stop
12
+ """
13
+
14
+ import subprocess
15
+ import torch
16
+ import torch.nn as nn
17
+ from torch.utils.data import DataLoader, TensorDataset
18
+ from sklearn.datasets import make_classification
19
+ from sklearn.model_selection import train_test_split
20
+ from sklearn.preprocessing import StandardScaler
21
+ import numpy as np
22
+
23
+
24
+ def mlx(cmd):
25
+ subprocess.run(f"mlx {cmd}", shell=True, capture_output=True)
26
+
27
+
28
+ # ── Data ─────────────────────────────────────
29
+ X, y = make_classification(n_samples=5000, n_features=20,
30
+ n_informative=10, random_state=42)
31
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
32
+
33
+ scaler = StandardScaler()
34
+ X_train = scaler.fit_transform(X_train)
35
+ X_test = scaler.transform(X_test)
36
+
37
+ # Convert to tensors
38
+ X_tr = torch.FloatTensor(X_train)
39
+ y_tr = torch.FloatTensor(y_train)
40
+ X_te = torch.FloatTensor(X_test)
41
+ y_te = torch.FloatTensor(y_test)
42
+
43
+ loader = DataLoader(TensorDataset(X_tr, y_tr), batch_size=64, shuffle=True)
44
+
45
+ # ── Model ─────────────────────────────────────
46
+ model = nn.Sequential(
47
+ nn.Linear(20, 64), nn.ReLU(),
48
+ nn.Linear(64, 32), nn.ReLU(),
49
+ nn.Linear(32, 1), nn.Sigmoid(),
50
+ )
51
+
52
+ lr = 0.001
53
+ epochs = 20
54
+ optimizer = torch.optim.Adam(model.parameters(), lr=lr)
55
+ criterion = nn.BCELoss()
56
+
57
+ # ── Log params ────────────────────────────────
58
+ mlx(f"log param learning_rate {lr}")
59
+ mlx(f"log param epochs {epochs}")
60
+ mlx(f"log param batch_size 64")
61
+ mlx(f"log param optimizer adam")
62
+ mlx(f"log param architecture MLP-64-32-1")
63
+
64
+ # ── Training loop ─────────────────────────────
65
+ for epoch in range(1, epochs + 1):
66
+
67
+ model.train()
68
+ total_loss = 0
69
+ for xb, yb in loader:
70
+ optimizer.zero_grad()
71
+ loss = criterion(model(xb).squeeze(), yb)
72
+ loss.backward()
73
+ optimizer.step()
74
+ total_loss += loss.item()
75
+
76
+ avg_loss = total_loss / len(loader)
77
+
78
+ # Evaluate
79
+ model.eval()
80
+ with torch.no_grad():
81
+ preds = model(X_te).squeeze()
82
+ test_loss = criterion(preds, y_te).item()
83
+ accuracy = ((preds > 0.5).float() == y_te).float().mean().item()
84
+
85
+ # Log every epoch
86
+ mlx(f"log metric train_loss {avg_loss:.4f} --step {epoch}")
87
+ mlx(f"log metric test_loss {test_loss:.4f} --step {epoch}")
88
+ mlx(f"log metric accuracy {accuracy:.4f} --step {epoch}")
89
+
90
+ print(f"Epoch {epoch:2d} | train={avg_loss:.4f} | test={test_loss:.4f} | acc={accuracy:.4f}")
91
+
92
+ print("\nDone! Run: mlx run stop && mlx ls")