mlxsmith 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlxsmith/accel/__init__.py +0 -3
- mlxsmith/bench.py +12 -2
- mlxsmith/cli.py +188 -3
- mlxsmith/config_models.py +16 -2
- mlxsmith/integrations/__init__.py +19 -0
- mlxsmith/integrations/mlx_lm_lora.py +117 -0
- mlxsmith/llm/backend.py +8 -1
- mlxsmith/llm/mlx_lm_backend.py +59 -2
- mlxsmith/llm/mock_backend.py +8 -1
- mlxsmith/optim/__init__.py +3 -0
- mlxsmith/optim/muon.py +93 -0
- mlxsmith/orchestrator/daemon.py +44 -377
- mlxsmith/orchestrator/trainer_worker.py +4 -0
- mlxsmith/rlm/loop.py +53 -92
- mlxsmith/sdk/__init__.py +18 -2
- mlxsmith/sdk/losses.py +102 -1
- mlxsmith/sdk/training_client.py +24 -5
- mlxsmith/train/distill.py +6 -1
- mlxsmith/train/online_dpo.py +249 -0
- mlxsmith/train/pref.py +31 -29
- mlxsmith/train/rft.py +123 -38
- mlxsmith/train/self_verify.py +199 -0
- mlxsmith/train/sft.py +13 -2
- mlxsmith/util.py +0 -6
- mlxsmith/verifiers/llm_judge.py +278 -0
- mlxsmith/verifiers/prime.py +127 -0
- {mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/METADATA +29 -13
- {mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/RECORD +32 -25
- mlxsmith/accel/zmlx_backend.py +0 -42
- {mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/WHEEL +0 -0
- {mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/entry_points.txt +0 -0
- {mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/top_level.txt +0 -0
{mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mlxsmith
-Version: 0.1.1
+Version: 0.1.3
 Summary: Apple Silicon MLX fine-tuning toolkit — SFT, DPO/ORPO, GRPO, distillation, and OpenAI-compatible serving.
 Author-email: Shannon Labs <hmbown@gmail.com>
 License: MIT
@@ -36,18 +36,19 @@ Provides-Extra: llm
 Requires-Dist: mlx-lm>=0.30.5; extra == "llm"
 Requires-Dist: transformers>=5.0.0; extra == "llm"
 Requires-Dist: datasets>=3.0.0; extra == "llm"
+Provides-Extra: lora
+Requires-Dist: mlx-lm-lora>=1.0.0; extra == "lora"
 Provides-Extra: serve
 Requires-Dist: fastapi>=0.128.0; extra == "serve"
 Requires-Dist: uvicorn>=0.40.0; extra == "serve"
 Requires-Dist: httpx>=0.28.0; extra == "serve"
-Provides-Extra: zmlx
-Requires-Dist: zmlx; extra == "zmlx"
 Provides-Extra: dev
 Requires-Dist: pytest>=9.0.0; extra == "dev"
 Requires-Dist: ruff>=0.14.0; extra == "dev"
 Provides-Extra: all
 Requires-Dist: mlx>=0.30.4; extra == "all"
 Requires-Dist: mlx-lm>=0.30.5; extra == "all"
+Requires-Dist: mlx-lm-lora>=1.0.0; extra == "all"
 Requires-Dist: transformers>=5.0.0; extra == "all"
 Requires-Dist: datasets>=3.0.0; extra == "all"
 Requires-Dist: fastapi>=0.128.0; extra == "all"
@@ -59,7 +60,7 @@ Dynamic: license-file
 
 Apple Silicon MLX fine-tuning toolkit — SFT, DPO/ORPO, GRPO, distillation, and OpenAI-compatible serving.
 
-**Status:** alpha (v0.1.
+**Status:** alpha (v0.1.2). Full training pipeline validated on Qwen3-4B.
 
 ## Install
 
@@ -76,6 +77,9 @@ pip install mlxsmith
 # Apple Silicon training + serving
 pip install "mlxsmith[mlx,llm,serve]"
 
+# mlx-lm-lora passthrough (advanced training methods)
+pip install "mlxsmith[lora]"
+
 # Everything
 pip install "mlxsmith[all]"
 ```
@@ -85,7 +89,7 @@ pip install "mlxsmith[all]"
 ```bash
 mlxsmith init myproj
 cd myproj
-mlxsmith doctor # check Python, MLX, Metal
+mlxsmith doctor # check Python, MLX, Metal
 ```
 
 ## Training
@@ -133,6 +137,22 @@ mlxsmith distill --teacher large-model --student small-model --mode opd
 mlxsmith pipeline
 ```
 
+### mlx-lm-lora parity (all methods)
+
+Use the passthrough to access mlx-lm-lora features (DPO variants, GRPO variants,
+PPO, synthetic datasets, judge training, etc.):
+
+```bash
+# Train with mlx-lm-lora directly
+mlxsmith lora train --model Qwen/Qwen3-4B-Instruct-2507 --data data/prefs --train-mode dpo -- --beta 0.1
+
+# Generate synthetic datasets
+mlxsmith lora synthetic prompts -- --model mlx-community/Qwen3-4B-Instruct-2507-4bit --num-samples 1000
+
+# Train judge model
+mlxsmith lora judge -- --model mlx-community/Qwen3-4B-Instruct-2507-4bit --data data/prefs
+```
+
 ## Serving
 
 OpenAI-compatible `/v1/chat/completions` endpoint.
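In these commands the standalone `--` splits mlxsmith's own flags from flags that are forwarded verbatim to mlx-lm-lora. A minimal sketch of that argv split (illustrative only, not the actual CLI code in `mlxsmith/integrations/mlx_lm_lora.py`):

```python
# Illustrative sketch of the "--" passthrough convention shown above: the tool
# parses its own flags before "--" and forwards everything after it unchanged.
# This is not mlxsmith's real implementation.
def split_passthrough(argv: list[str]) -> tuple[list[str], list[str]]:
    if "--" in argv:
        i = argv.index("--")
        return argv[:i], argv[i + 1:]
    return argv, []

# e.g. split_passthrough(["--model", "Qwen/Qwen3-4B-Instruct-2507", "--", "--beta", "0.1"])
# -> (["--model", "Qwen/Qwen3-4B-Instruct-2507"], ["--beta", "0.1"])
```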
@@ -204,6 +224,7 @@ Built-in verifiers for eval, RFT, and preference tuning:
 - **pytest** — sandboxed test execution
 - **docker** — containerized verification
 - **compose** — multi-verifier composition (AND/OR/weighted)
+- **llm_judge** — LLM-based self-verification / ThinkPRM-style verifier
 
 See `docs/VERIFIERS.md` for the verifier API.
 
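The compose and llm_judge entries above imply a small verifier contract (model output in, pass/fail plus score out). The following is only a hypothetical sketch of weighted composition; the actual interface is defined in `mlxsmith/verifiers/` and `docs/VERIFIERS.md` and may differ.

```python
# Hypothetical verifier contract and weighted composition; names are illustrative.
from dataclasses import dataclass
from typing import Callable

@dataclass
class VerifyResult:
    passed: bool
    score: float = 0.0

Verifier = Callable[[str], VerifyResult]

def weighted_compose(verifiers: list[tuple[Verifier, float]], threshold: float = 0.5) -> Verifier:
    # Combine per-verifier scores by weight and pass when the blend clears the threshold.
    def run(output: str) -> VerifyResult:
        total = sum(w for _, w in verifiers) or 1.0
        score = sum(w * v(output).score for v, w in verifiers) / total
        return VerifyResult(passed=score >= threshold, score=score)
    return run
```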
@@ -232,6 +253,9 @@ mlxsmith config env # show environment variable mapping
 
 Config sources (in priority order): CLI flags > environment variables (`MLXSMITH__SECTION__KEY`) > config file > defaults.
 
+Training optimizers are configurable via `train.optimizer` and `train.optimizer_kwargs`
+(for example `adamw`, `adam`, `qhadam`, `muon` when available in MLX).
+
 ## SDK (programmatic API)
 
 For building custom training loops:
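The new `train.optimizer` knob suggests a simple name-to-class lookup over MLX optimizers. A minimal sketch, assuming only the stock `mlx.optimizers` classes (this is not mlxsmith's actual resolver, and the Muon/QHAdam variants shipped in `mlxsmith/optim/` are omitted):

```python
# Illustrative name-to-optimizer lookup over stock MLX optimizers.
import mlx.optimizers as optim

def build_optimizer(name: str, learning_rate: float = 1e-5, **kwargs):
    registry = {
        "adamw": optim.AdamW,
        "adam": optim.Adam,
    }
    cls = registry.get(name.lower())
    if cls is None:
        raise ValueError(f"unknown optimizer: {name}")
    # kwargs would come from train.optimizer_kwargs in the config.
    return cls(learning_rate=learning_rate, **kwargs)
```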
@@ -269,14 +293,6 @@ mlxsmith rlm history # view history
 
 Includes task generation, mutation for data diversity, corpus management, EMA-based gating, and weight pointer IPC for multi-process coordination. See `docs/orchestrator.md`.
 
-### ZMLX acceleration
-
-Optional zero-copy MLX acceleration backend.
-
-```bash
-mlxsmith accel status
-```
-
 ## Docs
 
 - `docs/PROJECT_FORMAT.md` — project layout and artifacts
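"EMA-based gating" above means a candidate checkpoint is promoted only when a smoothed reward estimate improves. A minimal sketch, assuming a plain exponential moving average with an illustrative 0.9 decay (not the orchestrator's real gate):

```python
# Minimal EMA gate: accept a candidate only when the smoothed metric beats the best so far.
class EMAGate:
    def __init__(self, decay: float = 0.9):
        self.decay = decay
        self.ema: float | None = None
        self.best: float = float("-inf")

    def update(self, metric: float) -> bool:
        if self.ema is None:
            self.ema = metric
        else:
            self.ema = self.decay * self.ema + (1 - self.decay) * metric
        if self.ema > self.best:
            self.best = self.ema
            return True  # gate passes: promote the candidate weights
        return False
```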
{mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/RECORD
@@ -1,69 +1,76 @@
 mlxsmith/__init__.py,sha256=CJZKl9Hp16DYlQR7yqstir-cL4n7GCw90d-meXliCHk,48
 mlxsmith/adapters.py,sha256=wkQ2q_ugaxCviNARSmxehwBcc2_NKVJ7mOofT-y30TY,1318
 mlxsmith/auth.py,sha256=_j_gx5ccZfpHs0_Xmpcgh_ELhX3ZBJLg2YYpjA-aPRI,2195
-mlxsmith/bench.py,sha256=
-mlxsmith/cli.py,sha256=
+mlxsmith/bench.py,sha256=b2hNRT_lT1dhjlOr_5vn1HIk6dTz3CmtrrP3Qypu6bc,4048
+mlxsmith/cli.py,sha256=GpG-4B8R_dIp73hbl_P5Tii5ARbEzeeubI0y2YCFFBE,42935
 mlxsmith/config.py,sha256=K1VbN-3WoWf4vzZ6BNeTgEz2DFH8s9YminqwyNBVLj0,16389
-mlxsmith/config_models.py,sha256=
+mlxsmith/config_models.py,sha256=c51kbH57Nlj7iO5haGqsXS7ZSoHTMJCEiRGMd62LMOM,8912
 mlxsmith/data.py,sha256=3ZlNS8bnD7LlWACEmULbf8RGQzCuf0QulFpI1PWvNuI,16160
 mlxsmith/eval.py,sha256=nSARSEKKwZM8Ot5rUDDpGikaClGNxvg0ifgGkTA6mM0,3792
 mlxsmith/infer.py,sha256=ekpHhTird0dnTJzFOc-O98rjwkEKgAr9AFicKlaB3MA,4610
 mlxsmith/models.py,sha256=BRaPTxzqy-5KEKdccveMgjpbRP0ZmbRnA_su8rz2P4k,8033
 mlxsmith/runs.py,sha256=2voYBryGGMlAKskHJ7TDiIPQL2_fFxSQ8RgtfGZ7ccg,1409
 mlxsmith/server.py,sha256=Fk-i9xK_Teq0Z0m-W1GRJVtcG0nYvd4bQ85lnAUuT1w,10690
-mlxsmith/util.py,sha256=
-mlxsmith/accel/__init__.py,sha256=
+mlxsmith/util.py,sha256=Cke2FxIVNmvpW9ElPxL5bc0JO_YFVHWtBwpR3QRtfWQ,4410
+mlxsmith/accel/__init__.py,sha256=fBY65q66jql1q5YMT9aIo96dgvVLHqS_LoJbVNA2xHY,201
 mlxsmith/accel/base.py,sha256=o0kmxV68KbxOmucC3eDjKbFA8dfTT2ETqN0XD_l3mM0,435
 mlxsmith/accel/none.py,sha256=WhxECIBv-pE63Vh1Iv86ObgT9JHOi4hA4BUyJc__sKU,362
-mlxsmith/accel/zmlx_backend.py,sha256=JfzQ44v9hrCJgcqU018ZD7qLNlubIe09CwYRpKyfwR8,1529
 mlxsmith/api/__init__.py,sha256=IrpIXDkUJm4BZqahYOK_0BkxvomlscEvCoLCm4GDxo8,998
 mlxsmith/api/handlers.py,sha256=94Spq4glFp7mRwmKrFqt7erd1Af_PxVP_vpxCo2UFdQ,46896
 mlxsmith/api/schemas.py,sha256=Q18kF9FKtvT1vdnXy6feSNTtCV2FiRWDzfdsPzc0te8,19316
 mlxsmith/envs/__init__.py,sha256=t7QiEHtfyP1dUCj-4TJUN0hd9lRqBKYd5Ek7dgEwus4,671
 mlxsmith/envs/system.py,sha256=2bChkOxm2S7d0WCrweHGhoI6-xOYDxlC0YbHH6Ibjq4,12782
 mlxsmith/envs/token_env.py,sha256=rhv2o3eI1GyTtfAXG72z-31amNGaLv0KW56mEsWkXlY,6709
+mlxsmith/integrations/__init__.py,sha256=TjPvsGDnw-nma0OHXqiUPWciKhCiLZ325_j3fsgtjnE,685
+mlxsmith/integrations/mlx_lm_lora.py,sha256=KXyoqzWbBR2XEAlJuHHr-x2SwEgKJsOZUA--hdQfEwc,3296
 mlxsmith/llm/__init__.py,sha256=jWEkXGdvwZ8tUYHVqWW3SYHXG-LSWaGbdwOR0mF_4Zw,396
-mlxsmith/llm/backend.py,sha256=
+mlxsmith/llm/backend.py,sha256=m0TdgtNWBbwzsB9riHu1sKWuXnNAVkRbHrv1RZI3XZA,4458
 mlxsmith/llm/interface.py,sha256=udQl_R7ecmM4Nh20P50Nmnv2h853ByrgevjQIRDxX4g,6601
-mlxsmith/llm/mlx_lm_backend.py,sha256=
-mlxsmith/llm/mock_backend.py,sha256=
+mlxsmith/llm/mlx_lm_backend.py,sha256=Obop9oFmuncxaNty7QnykGai_Ed55JbQrgjjQLhq4W8,20278
+mlxsmith/llm/mock_backend.py,sha256=4xFjU-36ewZZTMMhf5IEDLcRDYw2IsNhpHt8gn-ZOz4,7562
 mlxsmith/llm/registry.py,sha256=ZmYE-WclyMo6z0HwUufqt3tKT4E84xZ6I-PFu1Z5nws,309
+mlxsmith/optim/__init__.py,sha256=60kanLQV28xaVe2xR7M__r0IhFjfFkDuzHjFC3K6Z3g,65
+mlxsmith/optim/muon.py,sha256=h0ZNnR3EITlYFfb_7HtpnFnPWc0nvtT9mYqr-GThLbk,2857
 mlxsmith/orchestrator/__init__.py,sha256=oc4qIkISZMGMvYeOqU8lDDmFL3uxDYJHsv_rra9DH-E,792
-mlxsmith/orchestrator/daemon.py,sha256=
+mlxsmith/orchestrator/daemon.py,sha256=_kTQ8QKIDPoo2znlHtB8jjsLbvbQ0m_EkxWspit1uGQ,3960
 mlxsmith/orchestrator/inference_worker.py,sha256=PfmsanrBnx9HZNqG00jTQQTKqDa2bl-wUtYAWtxfzvs,17963
 mlxsmith/orchestrator/queue.py,sha256=E8VymvJi2zEpuTwsG7JB-vROJGGS5evPPhIpkmdwtq4,11286
-mlxsmith/orchestrator/trainer_worker.py,sha256=
+mlxsmith/orchestrator/trainer_worker.py,sha256=MssjXrUcB7bOyajuP8W1eokibCrC0GTTGGeHAmiSXxg,16215
 mlxsmith/rlm/__init__.py,sha256=Q09oRONXWTFXuWwMJOpGWg0I-UDkuib0OA1O_cNFp2U,236
 mlxsmith/rlm/corpus.py,sha256=-p12H650_ybe2kXC219M4wXYpD08QHUpY92ErVjSfX8,2112
 mlxsmith/rlm/gating.py,sha256=L18niYKEezphASdsgzW6pz3PN7ylA-L5Wu4_GLLVfHw,2455
 mlxsmith/rlm/generate.py,sha256=q1v_TP8sqVj05omhoF60Ns1iX6yClgc7lP6njz4lK18,7601
 mlxsmith/rlm/history.py,sha256=Vm4JtWqsZnqB-fuo3zWfweeogmmLTL3VHaYZ45vrkz8,299
 mlxsmith/rlm/inference.py,sha256=ntCEKxD1KrkIXgZNQbD4jhS5rJPtwcVYc8qLc5E5cnc,5297
-mlxsmith/rlm/loop.py,sha256=
+mlxsmith/rlm/loop.py,sha256=hfajubtzBnwBof_ZE41sdKFY4G8WXu16dr8m-gNJFO0,47853
 mlxsmith/rlm/mutate.py,sha256=_NUNMpVCRaEDgtzI8J2NOTcj5NnycZnP_UoUpFacjTs,2553
 mlxsmith/rlm/trainer.py,sha256=RRXPlJy4SySpLZGge0ORMYs7HeiWgfGQNeMBOBfG4Ys,3014
 mlxsmith/rlm/weights.py,sha256=tgl4Uc80QF9YpCCr3ewBmL7uru9As2fDA1Z1SgZn-e4,8455
-mlxsmith/sdk/__init__.py,sha256=
+mlxsmith/sdk/__init__.py,sha256=QzwWDSjtVi2iewp-hUI3MfE85xX3oK3PlrYXSAUAzBE,11105
 mlxsmith/sdk/future.py,sha256=WmYB-fDstaEuv-FUNX_S7IJSENbVEsYYEEVzH02ImLk,16868
-mlxsmith/sdk/losses.py,sha256=
+mlxsmith/sdk/losses.py,sha256=Cq2udU9S1fn-glV-RRZBe1_4BtPqjzjucWiTy2cnClY,10243
 mlxsmith/sdk/sampling_client.py,sha256=o7jfgYpVWXrrIHo4-SrGAJx4FAlYdo198da27Jp0Yj4,24899
-mlxsmith/sdk/training_client.py,sha256=
+mlxsmith/sdk/training_client.py,sha256=XVpl5kbpmpKWu-vecyMDJhf0zvoqQQiTpawqokU9ejE,24924
 mlxsmith/train/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mlxsmith/train/distill.py,sha256=
+mlxsmith/train/distill.py,sha256=2yOr0QrSyQ3ZYEMtrtgb32rzVGvc-z_wLY9sxPuwUCY,10513
 mlxsmith/train/lora.py,sha256=k3aNqBjMyE6rPGS2CAJRSDsTJiUa1ztjrA3k9N87IjY,9046
-mlxsmith/train/
-mlxsmith/train/
-mlxsmith/train/
+mlxsmith/train/online_dpo.py,sha256=48RPHyIeTUP3dtLsgHgPrLwDwf80Fjwa1Tdspt9R3L4,8837
+mlxsmith/train/pref.py,sha256=1g3w5OS7evxP33bFPpF_oKjwxUiqpA5mhD-t6iWA6uw,6646
+mlxsmith/train/rft.py,sha256=w7k8a_QYot7zCZAsI73vuuxIJJzrBuX9ot9TXmWsuYg,21957
+mlxsmith/train/self_verify.py,sha256=dxoSBsIFt3mgMie3esr2v2qEdAw7ZMGZ8Cui6iADP7g,6994
+mlxsmith/train/sft.py,sha256=naf2ny8kQQDU31TQZMBxC_9X6T0TajYtE7bofHgFw2o,5897
 mlxsmith/verifiers/__init__.py,sha256=HXS9XWsPUYZ9WJaxVIPa1TWzwx948uymj23YIu4EW6Q,60
 mlxsmith/verifiers/compose.py,sha256=jQlsBByNvniEsOVh8Ulvjb7L-VcPrxqjld5b1sjBc8c,3374
 mlxsmith/verifiers/docker_verifier.py,sha256=BIqu2VVLsC8owNpiNbOZNmTirfpvuWNJS4F6UZYni1s,2925
 mlxsmith/verifiers/jsonschema.py,sha256=hG_8c07Hwv-tpN2g0oxELwmLRxS8QGzRFwabmo4yY8Y,1324
+mlxsmith/verifiers/llm_judge.py,sha256=g95reNiZ_oYsd4CNo7lViYk2UScMjmDT_qn3ztUnf9o,8557
+mlxsmith/verifiers/prime.py,sha256=1CAJSmqMLxh8GTNyBeZDxPBn3h-R02qtDwjgKumhyTE,3946
 mlxsmith/verifiers/pytest_verifier.py,sha256=ARNajzxUPNwtzSow6I2d0mLopZyvY29_d3F1sYVwEUY,2514
 mlxsmith/verifiers/regex.py,sha256=N7z3koE8Iy-a4DBs4404iQCNX2WGxequm5g4umric2Y,524
 mlxsmith/verifiers/types.py,sha256=FytBxB1OnNX1EcqZXSSs3WvL0GRv7byW4mfBMf6xP68,240
-mlxsmith-0.1.
-mlxsmith-0.1.
-mlxsmith-0.1.
-mlxsmith-0.1.
-mlxsmith-0.1.
-mlxsmith-0.1.
+mlxsmith-0.1.3.dist-info/licenses/LICENSE,sha256=ESYyLizI0WWtxMeS7rGVcX3ivMezm-HOd5WdeOh-9oU,1056
+mlxsmith-0.1.3.dist-info/METADATA,sha256=oVGtFReV_z88H31sGtEwBvyRVbJATH0vEhtxKad9z9o,9970
+mlxsmith-0.1.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+mlxsmith-0.1.3.dist-info/entry_points.txt,sha256=ys7GcKEjhzhkkTMBmmaNavTgsjqOuFnCKIG2w8Wcn6w,46
+mlxsmith-0.1.3.dist-info/top_level.txt,sha256=hKBwc8bn7uoI-_5Yhcq1T3IuChFhUFdzItIkZK1up6A,9
+mlxsmith-0.1.3.dist-info/RECORD,,
mlxsmith/accel/zmlx_backend.py DELETED
@@ -1,42 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict
-from .base import AccelStats
-
-class ZMLXBackend:
-    name = "zmlx"
-
-    def __init__(self):
-        self._available = False
-        self._notes = {}
-        try:
-            import zmlx  # type: ignore
-            self._available = True
-            self._notes["zmlx_version"] = getattr(zmlx, "__version__", None)
-        except Exception as e:
-            self._available = False
-            self._notes["error"] = f"{type(e).__name__}: {e}"
-
-    def patch(self) -> None:
-        if not self._available:
-            # soft fail; caller should report status
-            return
-        # ZMLX can patch ops/modules. We keep this intentionally minimal and safe.
-        try:
-            import zmlx  # type: ignore
-            # If ZMLX provides a global patch hook, call it; otherwise, no-op.
-            patch_fn = getattr(zmlx, "patch", None)
-            if callable(patch_fn):
-                patch_fn()
-                self._notes["patched"] = True
-            else:
-                self._notes["patched"] = False
-                self._notes["hint"] = "No zmlx.patch() found; implement patch hook or integrate per-module."
-        except Exception as e:
-            self._notes["patched_error"] = f"{type(e).__name__}: {e}"
-
-    def warmup(self, model: Any, example_batch: Any) -> Dict[str, Any]:
-        return {"warmup": "not_implemented", "notes": self._notes}
-
-    def stats(self) -> AccelStats:
-        return AccelStats(backend="zmlx", notes=self._notes)
{mlxsmith-0.1.1.dist-info → mlxsmith-0.1.3.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt: files without changes (renamed with the dist-info directory only).