@mlx-node/cli 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,190 @@
1
+ # @mlx-node/cli
2
+
3
+ Command-line tool for downloading models and datasets from HuggingFace Hub and converting model weights for use with `@mlx-node/*` packages.
4
+
5
+ ## Requirements
6
+
7
+ - macOS with Apple Silicon (M1 or later)
8
+ - Node.js 18+
9
+
10
+ ## Installation
11
+
12
+ ```bash
13
+ npm install -g @mlx-node/cli
14
+ ```
15
+
16
+ Or run via your package manager:
17
+
18
+ ```bash
19
+ npx @mlx-node/cli download model --model Qwen/Qwen3-0.6B
20
+ ```
21
+
22
+ ## Commands
23
+
24
+ ### Download Model
25
+
26
+ Download model weights and tokenizer files from HuggingFace Hub:
27
+
28
+ ```bash
29
+ mlx download model --model Qwen/Qwen3-0.6B
30
+ ```
31
+
32
+ Downloads to `.cache/models/<model-slug>` by default. Skips if already downloaded.
33
+
34
+ #### Options
35
+
36
+ | Flag | Short | Default | Description |
37
+ | ------------- | ----- | ---------------------- | ----------------------------------------- |
38
+ | `--model` | `-m` | `Qwen/Qwen3-0.6B` | HuggingFace model name |
39
+ | `--output` | `-o` | `.cache/models/<slug>` | Output directory |
40
+ | `--glob` | `-g` | (all supported files) | Filter files by glob pattern (repeatable) |
41
+ | `--set-token` | | | Set up HuggingFace authentication |
42
+
43
+ #### Authentication
44
+
45
+ For gated or private models, set up your HuggingFace token:
46
+
47
+ ```bash
48
+ mlx download model --set-token
49
+ ```
50
+
51
+ This validates the token against the HuggingFace API and stores it securely in your OS keychain. The token is automatically used for subsequent downloads.
52
+
53
+ #### File Filtering
54
+
55
+ By default, downloads config files, tokenizer files, and weight files (`.safetensors`, `.json`, `.pdiparams`, `.yml`). Use `--glob` to filter specific files:
56
+
57
+ ```bash
58
+ # Download only bf16 safetensors shards
59
+ mlx download model --model Qwen/Qwen3-7B --glob "*.bf16*.safetensors"
60
+
61
+ # Download specific files
62
+ mlx download model --model org/model --glob "*.safetensors" --glob "*.json"
63
+ ```
64
+
65
+ Core config and tokenizer files are always included regardless of glob filters.
66
+
67
+ ### Download Dataset
68
+
69
+ Download datasets from HuggingFace Hub with automatic Parquet-to-JSONL conversion:
70
+
71
+ ```bash
72
+ mlx download dataset
73
+ ```
74
+
75
+ #### Options
76
+
77
+ | Flag | Short | Default | Env Override | Description |
78
+ | ------------ | ----- | -------------- | ------------------ | ------------------------ |
79
+ | `--dataset` | `-d` | `openai/gsm8k` | `MLX_DATASET` | HuggingFace dataset name |
80
+ | `--revision` | `-r` | `main` | `MLX_DATASET_REVISION` | Dataset revision/branch |
81
+ | `--output` | `-o` | `data/<dataset-slug>` | `MLX_DATASET_OUTPUT` | Output directory |
82
+
83
+ Produces `train.jsonl` and `test.jsonl` in the output directory. Automatically converts Parquet files to JSONL if the dataset doesn't include JSONL directly.
84
+
85
+ ### Convert Weights
86
+
87
+ Convert model weights between formats with optional quantization:
88
+
89
+ ```bash
90
+ # Dtype conversion (SafeTensors)
91
+ mlx convert --input ./model --output ./model-bf16 --dtype bfloat16
92
+
93
+ # Quantization
94
+ mlx convert --input ./model --output ./model-q4 --quantize --q-bits 4
95
+
96
+ # Mixed-precision quantization recipe
97
+ mlx convert --input ./model --output ./model-mixed --quantize --q-recipe mixed_4_6
98
+
99
+ # MXFP8 quantization
100
+ mlx convert --input ./model --output ./model-fp8 --quantize --q-mode mxfp8
101
+
102
+ # GGUF to SafeTensors
103
+ mlx convert --input ./model.gguf --output ./model-safetensors
104
+
105
+ # GGUF with vision encoder
106
+ mlx convert --input ./model.gguf --output ./model-vlm --mmproj ./mmproj.gguf
107
+
108
+ # imatrix AWQ pre-scaling with unsloth dynamic quantization
109
+ mlx convert --input ./model --output ./model-unsloth --quantize --q-recipe unsloth --imatrix-path ./imatrix.gguf
110
+ ```
111
+
112
+ #### Options
113
+
114
+ | Flag | Short | Default | Description |
115
+ | ---------------- | ----- | ------------- | ---------------------------------------------- |
116
+ | `--input` | `-i` | _required_ | Input model directory or `.gguf` file |
117
+ | `--output` | `-o` | _required_ | Output directory |
118
+ | `--dtype` | `-d` | `bfloat16` | Target dtype: `float32`, `float16`, `bfloat16` |
119
+ | `--model-type` | `-m` | auto-detected | Model type override |
120
+ | `--verbose` | `-v` | `false` | Verbose logging |
121
+ | `--quantize` | `-q` | `false` | Enable quantization |
122
+ | `--q-bits` | | `4` | Quantization bits (4 or 8) |
123
+ | `--q-group-size` | | `64` | Quantization group size |
124
+ | `--q-mode` | | `affine` | Mode: `affine` or `mxfp8` |
125
+ | `--q-recipe` | | | Per-layer mixed-bit recipe |
126
+ | `--imatrix-path` | | | imatrix GGUF for AWQ pre-scaling |
127
+ | `--mmproj` | | | mmproj GGUF for vision encoder weights |
128
+
129
+ #### Model Types
130
+
131
+ Auto-detected from `config.json` when not specified:
132
+
133
+ | Type | Description |
134
+ | -------------- | ------------------------------------------------------- |
135
+ | (default) | Standard SafeTensors dtype conversion |
136
+ | `qwen3_5` | Qwen3.5 Dense with FP8 dequant and key remapping |
137
+ | `qwen3_5_moe` | Qwen3.5 MoE with expert stacking |
138
+ | `paddleocr-vl` | PaddleOCR-VL weight sanitization |
139
+ | `pp-lcnet-ori` | PP-LCNet orientation classifier (Paddle to SafeTensors) |
140
+ | `uvdoc` | UVDoc dewarping model (Paddle/PyTorch to SafeTensors) |
141
+
142
+ #### Quantization Recipes
143
+
144
+ | Recipe | Description |
145
+ | ----------- | ---------------------------------- |
146
+ | `mixed_2_6` | 2-bit embeddings, 6-bit layers |
147
+ | `mixed_3_4` | 3-bit embeddings, 4-bit layers |
148
+ | `mixed_3_6` | 3-bit embeddings, 6-bit layers |
149
+ | `mixed_4_6` | 4-bit embeddings, 6-bit layers |
150
+ | `qwen3_5` | Optimized for Qwen3.5 architecture |
151
+ | `unsloth` | Unsloth dynamic quantization |
152
+
153
+ ## Examples
154
+
155
+ ### Full Workflow
156
+
157
+ ```bash
158
+ # 1. Set up authentication (one-time)
159
+ mlx download model --set-token
160
+
161
+ # 2. Download a model
162
+ mlx download model --model Qwen/Qwen3-0.6B
163
+
164
+ # 3. Download training data
165
+ mlx download dataset --dataset openai/gsm8k
166
+
167
+ # 4. Quantize the model
168
+ mlx convert \
169
+ --input .cache/models/Qwen-Qwen3-0.6B \
170
+ --output .cache/models/Qwen3-0.6B-q4 \
171
+ --quantize --q-bits 4
172
+
173
+ # 5. Use in your application
174
+ # import { loadModel } from '@mlx-node/lm';
175
+ # const model = await loadModel('.cache/models/Qwen3-0.6B-q4');
176
+ ```
177
+
178
+ ### GGUF Conversion
179
+
180
+ ```bash
181
+ # Download a GGUF model
182
+ mlx download model --model user/model-gguf --glob "*.gguf"
183
+
184
+ # Convert to SafeTensors
185
+ mlx convert --input .cache/models/model-gguf/model.gguf --output ./model-converted
186
+ ```
187
+
188
+ ## License
189
+
190
+ [MIT](https://github.com/mlx-node/mlx-node/blob/main/LICENSE)
package/dist/cli.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ #!/usr/bin/env node
2
+ export {};
3
+ //# sourceMappingURL=cli.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"cli.d.ts","sourceRoot":"","sources":["../src/cli.ts"],"names":[],"mappings":""}
package/dist/cli.js ADDED
@@ -0,0 +1,80 @@
1
+ #!/usr/bin/env node
2
+ import pkgJson from '../package.json' with { type: 'json' };
3
// Raw CLI arguments with the node binary and script path stripped.
const args = process.argv.slice(2);
// First positional is the top-level command (download | convert | a flag);
// second is the subcommand used by `download` (model | dataset).
const command = args[0];
const subcommand = args[1];
6
/**
 * Prints the top-level usage text for the `mlx` CLI, including the
 * current package version, to stdout.
 *
 * Fix: the convert example previously used `-d bf16`, which is not one of
 * the dtype options documented by `mlx convert --help` and the README
 * (`float32`, `float16`, `bfloat16`). Use the documented spelling.
 */
function printHelp() {
    console.log(`
mlx - MLX-Node CLI v${pkgJson.version}

Usage:
  mlx <command> [options]

Commands:
  download model      Download a model from HuggingFace
  download dataset    Download a dataset from HuggingFace
  convert             Convert model weights to MLX format

Options:
  -h, --help          Show this help message
  -v, --version       Show version number

Examples:
  mlx download model -m Qwen/Qwen3-0.6B
  mlx download dataset -d openai/gsm8k
  mlx convert -i .cache/models/qwen3-0.6b -o .cache/models/qwen3-0.6b-mlx -d bfloat16
`);
}
28
/**
 * CLI entry point: dispatches the top-level command to its lazily
 * imported command module. Unknown commands/subcommands print an error
 * (plus help for unknown top-level commands) and exit with code 1.
 */
async function main() {
    // No command, or an explicit help flag, shows top-level usage.
    if (!command || command === '--help' || command === '-h') {
        printHelp();
        return;
    }
    if (command === '--version' || command === '-v') {
        console.log(pkgJson.version);
        return;
    }
    if (command === 'download') {
        if (!subcommand || subcommand === '--help' || subcommand === '-h') {
            console.log(`
Usage:
  mlx download model      Download a model from HuggingFace
  mlx download dataset    Download a dataset from HuggingFace

Run mlx download <subcommand> --help for more information.
`);
            return;
        }
        // Everything after `download <subcommand>` is forwarded verbatim.
        const forwardedArgs = args.slice(2);
        if (subcommand === 'model') {
            const { run } = await import('./commands/download-model.js');
            await run(forwardedArgs);
        } else if (subcommand === 'dataset') {
            const { run } = await import('./commands/download-dataset.js');
            await run(forwardedArgs);
        } else {
            console.error(`Unknown download subcommand: ${subcommand}`);
            console.error('Available: model, dataset');
            process.exit(1);
        }
        return;
    }
    if (command === 'convert') {
        // `convert` takes everything after the command itself.
        const { run } = await import('./commands/convert.js');
        await run(args.slice(1));
        return;
    }
    console.error(`Unknown command: ${command}`);
    printHelp();
    process.exit(1);
}
main().catch((error) => {
    console.error(error);
    process.exit(1);
});
@@ -0,0 +1,2 @@
1
+ export declare function run(argv: string[]): Promise<void>;
2
+ //# sourceMappingURL=convert.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"convert.d.ts","sourceRoot":"","sources":["../../src/commands/convert.ts"],"names":[],"mappings":"AAiEA,wBAAsB,GAAG,CAAC,IAAI,EAAE,MAAM,EAAE,iBAqRvC"}
@@ -0,0 +1,316 @@
1
+ import { parseArgs } from 'node:util';
2
+ import { resolve } from 'node:path';
3
+ import { readFileSync, existsSync } from 'node:fs';
4
+ import { convertModel, convertForeignWeights, convertGgufToSafetensors } from '@mlx-node/core';
5
/**
 * Prints the full usage/help text for `mlx convert` to stdout.
 * The flag list here mirrors the parseArgs options declared in run().
 */
function printHelp() {
    console.log(`
Convert Model Weights to MLX Format

Usage:
  mlx convert --input <path> --output <dir> [options]

Required Arguments:
  --input, -i <path>     Input model directory or .gguf file
  --output, -o <dir>     Output directory for converted model

Optional Arguments:
  --dtype, -d <type>     Target dtype (default: bfloat16)
                         Options: float32, float16, bfloat16
  --model-type, -m       Model type (auto-detected if not specified)
                         Options: paddleocr-vl, pp-lcnet-ori, uvdoc, qwen3_5, qwen3_5_moe
  --verbose, -v          Enable verbose logging
  --help, -h             Show this help message

Vision Arguments:
  --mmproj <path>        Path to mmproj GGUF file (vision encoder weights)
                         Converts and merges vision weights into output directory

Quantization Arguments:
  --quantize, -q         Enable quantization of converted weights
  --q-bits <int>         Quantization bits (default: 4 for affine, 8 for mxfp8)
  --q-group-size <int>   Group size (default: 64 for affine, 32 for mxfp8)
  --q-mode <string>      Mode: "affine" (default) or "mxfp8"
  --q-recipe <string>    Per-layer mixed-bit quantization recipe
                         Options: mixed_2_6, mixed_3_4, mixed_3_6, mixed_4_6, qwen3_5, unsloth
  --imatrix-path <path>  imatrix GGUF file for AWQ-style pre-scaling
                         Improves quantization quality using calibration data

Model Types:
  (default)       SafeTensors dtype conversion (HuggingFace models)
  paddleocr-vl    PaddleOCR-VL weight sanitization
  qwen3_5         Qwen3.5 dense model (FP8 dequant, key remapping)
  qwen3_5_moe     Qwen3.5 MoE model (FP8 dequant, expert stacking)
  pp-lcnet-ori    PP-LCNet orientation classifier (Paddle -> SafeTensors)
  uvdoc           UVDoc unwarping model (Paddle/PyTorch -> SafeTensors)

GGUF Support:
  When --input points to a .gguf file, the converter automatically parses the
  GGUF binary format and converts tensors to SafeTensors. Supports BF16, F16,
  F32, Q4_0, Q4_1, and Q8_0 tensor types. Tokenizer files are copied from
  alongside the GGUF file if present.

Examples:
  mlx convert -i .cache/models/qwen3-0.6b -o .cache/models/qwen3-0.6b-mlx
  mlx convert -i .cache/models/Qwen3.5-35B-A3B-FP8 -o .cache/models/Qwen3.5-35B-A3B-4bit -m qwen3_5_moe -q --q-bits 4
  mlx convert -m pp-lcnet-ori -i .cache/models/PP-LCNet -o ./models/PP-LCNet_x1_0_doc_ori/
  mlx convert -i model.gguf -o ./models/converted-mlx
  mlx convert -i model-BF16.gguf -o ./models/converted-4bit -q --q-bits 4
  mlx convert -i model-BF16.gguf -o ./models/mixed-4-6 -q --q-recipe mixed_4_6
  mlx convert -i .cache/models/qwen3.5-9b -o ./models/qwen35-recipe -q --q-recipe qwen3_5 -m qwen3_5
  mlx convert -i model-BF16.gguf -o ./models/awq-4bit -q --q-recipe unsloth --imatrix-path imatrix.gguf
  mlx convert -i .cache/models/Qwen3.5-27B -o ./models/qwen3.5-unsloth -q --q-recipe unsloth --mmproj mmproj-BF16.gguf
`);
}
64
/**
 * Entry point for `mlx convert`.
 *
 * Parses and validates CLI flags, then dispatches to one of three paths:
 *  1. GGUF input (`--input *.gguf`)  -> convertGgufToSafetensors, with an
 *     optional second pass for a --mmproj vision-encoder GGUF.
 *  2. Foreign weights (model type pp-lcnet-ori / uvdoc) -> convertForeignWeights.
 *  3. Default: SafeTensors dtype conversion via convertModel, with the model
 *     type auto-detected from config.json when not given.
 *
 * @param argv CLI arguments after the `convert` command.
 * Exits the process with code 1 on any validation or conversion failure.
 */
export async function run(argv) {
    const { values: args } = parseArgs({
        args: argv,
        options: {
            input: { type: 'string', short: 'i' },
            output: { type: 'string', short: 'o' },
            dtype: { type: 'string', short: 'd' },
            'model-type': { type: 'string', short: 'm' },
            verbose: { type: 'boolean', short: 'v', default: false },
            quantize: { type: 'boolean', short: 'q', default: false },
            'q-bits': { type: 'string' },
            'q-group-size': { type: 'string' },
            'q-mode': { type: 'string' },
            'q-recipe': { type: 'string' },
            'imatrix-path': { type: 'string' },
            mmproj: { type: 'string' },
            help: { type: 'boolean', short: 'h', default: false },
        },
    });
    if (args.help) {
        printHelp();
        return;
    }
    if (!args.input || !args.output) {
        console.error('Error: Both --input and --output are required\n');
        console.error('Use --help for usage information');
        process.exit(1);
    }
    const inputPath = resolve(args.input);
    const outputDir = resolve(args.output);
    const verbose = args.verbose;
    // Validates numeric string flags; exits instead of returning on bad input
    // so callers below can rely on a number or undefined.
    const parsePositiveInt = (flag, raw) => {
        if (raw === undefined)
            return undefined;
        if (!/^[1-9]\d*$/.test(raw)) {
            console.error(`Error: ${flag} requires a positive integer value`);
            process.exit(1);
        }
        return Number(raw);
    };
    const quantBits = parsePositiveInt('--q-bits', args['q-bits']);
    const quantGroupSize = parsePositiveInt('--q-group-size', args['q-group-size']);
    const quantMode = args['q-mode'];
    if (quantMode !== undefined && quantMode !== 'affine' && quantMode !== 'mxfp8') {
        console.error('Error: --q-mode must be "affine" or "mxfp8"');
        process.exit(1);
    }
    const quantRecipe = args['q-recipe'];
    const validRecipes = ['mixed_2_6', 'mixed_3_4', 'mixed_3_6', 'mixed_4_6', 'qwen3_5', 'unsloth'];
    // Recipes only make sense with quantization enabled, and only in affine mode.
    if (quantRecipe !== undefined) {
        if (!args.quantize) {
            console.error('Error: --q-recipe requires --quantize (-q) to be enabled');
            process.exit(1);
        }
        if (quantMode === 'mxfp8') {
            console.error('Error: --q-recipe is incompatible with --q-mode mxfp8');
            process.exit(1);
        }
        if (!validRecipes.includes(quantRecipe)) {
            console.error(`Error: Unknown recipe "${quantRecipe}". Available: ${validRecipes.join(', ')}`);
            process.exit(1);
        }
    }
    // Both GGUF side-inputs must exist and carry a .gguf extension.
    const mmprojPath = args.mmproj ? resolve(args.mmproj) : undefined;
    if (mmprojPath !== undefined) {
        if (!existsSync(mmprojPath)) {
            console.error(`Error: mmproj file not found: ${mmprojPath}`);
            process.exit(1);
        }
        if (!mmprojPath.endsWith('.gguf')) {
            console.error('Error: --mmproj must point to a .gguf file');
            process.exit(1);
        }
    }
    const imatrixPath = args['imatrix-path'] ? resolve(args['imatrix-path']) : undefined;
    if (imatrixPath !== undefined) {
        if (!existsSync(imatrixPath)) {
            console.error(`Error: imatrix file not found: ${imatrixPath}`);
            process.exit(1);
        }
        if (!imatrixPath.endsWith('.gguf')) {
            console.error('Error: --imatrix-path must point to a .gguf file');
            process.exit(1);
        }
    }
    const startTime = Date.now();
    // Path 1: GGUF file detection (routed purely on the input file extension).
    if (inputPath.endsWith('.gguf')) {
        if (!existsSync(inputPath)) {
            console.error(`Error: GGUF file not found: ${inputPath}`);
            process.exit(1);
        }
        const dtype = args.dtype || 'bfloat16';
        console.log(`Converting GGUF to SafeTensors`);
        console.log(`Input: ${inputPath}`);
        console.log(`Output: ${outputDir}`);
        console.log(`Dtype: ${dtype}`);
        if (args.quantize) {
            // Effective defaults depend on the mode: mxfp8 -> 8 bits / group 32,
            // affine -> 4 bits / group 64 (matches printHelp).
            const qMode = quantMode || 'affine';
            const qBits = quantBits || (qMode === 'mxfp8' ? 8 : 4);
            const qGs = quantGroupSize || (qMode === 'mxfp8' ? 32 : 64);
            console.log(`Quantize: ${qBits}-bit ${qMode} (group_size=${qGs})${quantRecipe ? `, recipe=${quantRecipe}` : ''}`);
        }
        if (imatrixPath) {
            console.log(`imatrix: ${imatrixPath}`);
        }
        if (mmprojPath) {
            console.log(`mmproj: ${mmprojPath}`);
        }
        console.log('');
        try {
            const result = await convertGgufToSafetensors({
                inputPath,
                outputDir,
                dtype,
                verbose,
                quantize: args.quantize,
                quantBits,
                quantGroupSize,
                quantMode,
                quantRecipe,
                imatrixPath,
                // When a vision encoder will be merged, ask core to prefix
                // language-model keys (VLM layout).
                vlmKeyPrefix: !!mmprojPath,
            });
            const duration = ((Date.now() - startTime) / 1000).toFixed(2);
            console.log(`\n✓ Converted ${result.numTensors} tensors (source: ${result.sourceFormat})`);
            console.log(`✓ Total parameters: ${result.numParameters.toLocaleString()}`);
            console.log(`✓ Output directory: ${result.outputPath}`);
            console.log(`✓ Duration: ${duration}s`);
            if (verbose) {
                console.log('\nConverted tensors:');
                for (const name of result.tensorNames) {
                    console.log(`  - ${name}`);
                }
            }
            // Convert mmproj (vision encoder) if provided; written alongside the
            // main weights as vision.safetensors, never quantized.
            if (mmprojPath) {
                console.log('\nConverting mmproj (vision encoder)...');
                const visionResult = await convertGgufToSafetensors({
                    inputPath: mmprojPath,
                    outputDir,
                    dtype: 'bfloat16',
                    verbose,
                    quantize: false,
                    outputFilename: 'vision.safetensors',
                });
                console.log(`✓ Converted ${visionResult.numTensors} vision tensors`);
            }
        }
        catch (error) {
            console.error('\nGGUF conversion failed:', error.message);
            if (error.stack && verbose) {
                console.error('\nStack trace:', error.stack);
            }
            process.exit(1);
        }
        return;
    }
    // Auto-detect model type from config.json if not specified. Only the
    // types listed here are detected; anything else falls through to the
    // default SafeTensors path.
    let modelType = args['model-type'];
    if (!modelType) {
        try {
            const configPath = resolve(inputPath, 'config.json');
            const config = JSON.parse(readFileSync(configPath, 'utf-8'));
            if (config.model_type === 'paddleocr_vl') {
                modelType = 'paddleocr-vl';
                console.log(`Auto-detected model type: ${modelType} (from config.json)`);
            }
            else if (config.model_type === 'qwen3_5_moe' || config.model_type === 'qwen3_5') {
                modelType = config.model_type;
                console.log(`Auto-detected model type: ${modelType} (from config.json)`);
            }
        }
        catch {
            // config.json not found or invalid — deliberately non-fatal.
        }
    }
    // Path 2: foreign weight formats (Paddle .pdparams/.pdiparams, PyTorch .pkl).
    if (modelType === 'pp-lcnet-ori' || modelType === 'uvdoc') {
        if (!existsSync(inputPath)) {
            console.error(`Error: Input path not found: ${inputPath}`);
            process.exit(1);
        }
        const label = modelType === 'pp-lcnet-ori'
            ? 'PP-LCNet Orientation Classifier (Paddle -> SafeTensors)'
            : 'UVDoc Unwarping Model (-> SafeTensors)';
        console.log(`Converting: ${label}`);
        console.log(`Input: ${inputPath}`);
        console.log(`Output: ${outputDir}\n`);
        // NOTE(review): convertForeignWeights is called without await —
        // presumably a synchronous native binding; confirm against @mlx-node/core.
        const result = convertForeignWeights({
            inputPath,
            outputDir,
            modelType,
            verbose,
        });
        const duration = ((Date.now() - startTime) / 1000).toFixed(2);
        console.log(`\n✓ Converted ${result.numTensors} tensors`);
        console.log(`✓ Output directory: ${result.outputPath}`);
        console.log(`✓ Duration: ${duration}s`);
        return;
    }
    // Path 3 (default): SafeTensors dtype conversion, optionally quantized.
    // NOTE(review): args.dtype is passed through unvalidated here — values
    // outside the documented float32/float16/bfloat16 set reach core as-is.
    const dtype = args.dtype || 'bfloat16';
    console.log(`Input: ${inputPath}`);
    console.log(`Output: ${outputDir}`);
    console.log(`Dtype: ${dtype}`);
    if (modelType) {
        console.log(`Model Type: ${modelType}`);
    }
    if (args.quantize) {
        const qMode = quantMode || 'affine';
        const qBits = quantBits || (qMode === 'mxfp8' ? 8 : 4);
        const qGs = quantGroupSize || (qMode === 'mxfp8' ? 32 : 64);
        console.log(`Quantize: ${qBits}-bit ${qMode} (group_size=${qGs})${quantRecipe ? `, recipe=${quantRecipe}` : ''}`);
    }
    if (imatrixPath) {
        console.log(`imatrix: ${imatrixPath}`);
    }
    console.log('');
    try {
        const result = await convertModel({
            inputDir: inputPath,
            outputDir,
            dtype,
            verbose,
            modelType,
            quantize: args.quantize,
            quantBits,
            quantGroupSize,
            quantMode,
            quantRecipe,
            imatrixPath,
        });
        const duration = ((Date.now() - startTime) / 1000).toFixed(2);
        console.log(`\n✓ Converted ${result.numTensors} tensors`);
        console.log(`✓ Total parameters: ${result.numParameters.toLocaleString()}`);
        console.log(`✓ Output directory: ${result.outputPath}`);
        console.log(`✓ Duration: ${duration}s`);
        if (verbose) {
            console.log('\nConverted tensors:');
            for (const name of result.tensorNames) {
                console.log(`  - ${name}`);
            }
        }
    }
    catch (error) {
        console.error('\nConversion failed:', error.message);
        if (error.stack && verbose) {
            console.error('\nStack trace:', error.stack);
        }
        process.exit(1);
    }
}
@@ -0,0 +1,2 @@
1
+ export declare function run(argv: string[]): Promise<void>;
2
+ //# sourceMappingURL=download-dataset.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"download-dataset.d.ts","sourceRoot":"","sources":["../../src/commands/download-dataset.ts"],"names":[],"mappings":"AA+DA,wBAAsB,GAAG,CAAC,IAAI,EAAE,MAAM,EAAE,iBAmFvC"}
@@ -0,0 +1,121 @@
1
+ import { readdir, stat, copyFile } from 'node:fs/promises';
2
+ import { parseArgs } from 'node:util';
3
+ import { homedir } from 'node:os';
4
+ import { join, dirname, resolve } from 'node:path';
5
+ import { snapshotDownload } from '@huggingface/hub';
6
+ import { convertParquetToJsonl } from '@mlx-node/core';
7
+ import { ensureDir } from '../utils.js';
8
// Defaults used when neither CLI flags nor the MLX_DATASET* environment
// variables are provided.
const DEFAULT_DATASET = 'openai/gsm8k';
const DEFAULT_REVISION = 'main';
// Shared HuggingFace snapshot cache (overridable via --cache-dir).
const DEFAULT_CACHE_DIR = join(homedir(), '.cache', 'huggingface');
// Output files to produce: each is copied straight from the snapshot when
// present, otherwise converted from a Parquet shard matching parquetPrefix.
const FILE_SPECS = [
    { output: 'train.jsonl', parquetPrefix: 'train-' },
    { output: 'test.jsonl', parquetPrefix: 'test-' },
];
15
/**
 * Turns a HuggingFace dataset name into a filesystem-friendly slug,
 * e.g. "openai/gsm8k" -> "openai-gsm8k".
 */
function datasetSlug(name) {
    return name.split('/').join('-').toLowerCase();
}
18
/**
 * Prints the usage text for `mlx download dataset` to stdout.
 * Defaults shown are interpolated from the module constants above.
 */
function printHelp() {
    console.log(`
Download a dataset from HuggingFace

Usage:
  mlx download dataset [options]

Options:
  -d, --dataset <name>   HuggingFace dataset name (default: ${DEFAULT_DATASET})
  -r, --revision <rev>   Dataset revision (default: ${DEFAULT_REVISION})
  -o, --output <dir>     Output directory (default: data/<dataset-slug>)
  --cache-dir <dir>      HuggingFace cache directory (default: ~/.cache/huggingface)
  -h, --help             Show this help message

Examples:
  mlx download dataset
  mlx download dataset --dataset openai/gsm8k
  mlx download dataset --dataset tatsu-lab/alpaca --output data/alpaca
`);
}
38
/**
 * Depth-first search for the first file beneath `root` accepted by
 * `predicate(name, fullPath)`. Files (and symlinks) at the current level
 * are tested before any subdirectory is descended into.
 *
 * @returns the full path of the first match, or null when none exists.
 */
async function findFirstMatch(root, predicate) {
    const entries = await readdir(root, { withFileTypes: true });
    const subdirectories = [];
    for (const entry of entries) {
        const entryPath = join(root, entry.name);
        if (entry.isDirectory()) {
            // Defer directories so all files at this level are checked first.
            subdirectories.push(entryPath);
        } else if ((entry.isFile() || entry.isSymbolicLink()) && predicate(entry.name, entryPath)) {
            return entryPath;
        }
    }
    for (const directory of subdirectories) {
        const match = await findFirstMatch(directory, predicate);
        if (match !== null) {
            return match;
        }
    }
    return null;
}
56
/**
 * Entry point for `mlx download dataset`.
 *
 * Downloads a full dataset snapshot from HuggingFace Hub, then materializes
 * train.jsonl and test.jsonl in the output directory — copied directly when
 * the snapshot already contains them, otherwise converted from the first
 * matching Parquet shard.
 *
 * Flag defaults can be overridden via MLX_DATASET, MLX_DATASET_REVISION and
 * MLX_DATASET_OUTPUT environment variables (CLI flags take precedence).
 *
 * @param argv CLI arguments after `download dataset`.
 * @throws Error when neither a JSONL file nor a matching Parquet shard is
 *         found in the snapshot for one of the expected splits.
 */
export async function run(argv) {
    const { values: args } = parseArgs({
        args: argv,
        options: {
            dataset: {
                type: 'string',
                short: 'd',
                default: process.env.MLX_DATASET ?? DEFAULT_DATASET,
            },
            revision: {
                type: 'string',
                short: 'r',
                default: process.env.MLX_DATASET_REVISION ?? DEFAULT_REVISION,
            },
            output: {
                type: 'string',
                short: 'o',
            },
            'cache-dir': {
                type: 'string',
            },
            help: {
                type: 'boolean',
                short: 'h',
                default: false,
            },
        },
    });
    if (args.help) {
        printHelp();
        return;
    }
    const dataset = args.dataset;
    const revision = args.revision;
    // Output resolution order: --output flag, env var, then data/<slug>.
    const outputDir = resolve(args.output ?? process.env.MLX_DATASET_OUTPUT ?? join('data', datasetSlug(dataset)));
    console.log(`Downloading ${dataset}@${revision} snapshot from Hugging Face…`);
    const cacheDir = args['cache-dir'] ? resolve(args['cache-dir']) : DEFAULT_CACHE_DIR;
    const snapshotPath = await snapshotDownload({
        repo: { type: 'dataset', name: dataset },
        revision,
        cacheDir,
    });
    console.log(`Snapshot available at ${snapshotPath}`);
    await ensureDir(outputDir);
    for (const spec of FILE_SPECS) {
        const destinationPath = join(outputDir, spec.output);
        await ensureDir(dirname(destinationPath));
        // Prefer an existing JSONL file anywhere in the snapshot tree.
        const original = await findFirstMatch(snapshotPath, (name) => name === spec.output);
        if (original) {
            await copyFile(original, destinationPath);
            const stats = await stat(destinationPath);
            console.log(`Copied ${spec.output} (${Math.round(stats.size / 1024)} KiB) → ${destinationPath}`);
            continue;
        }
        // Fall back to the first Parquet shard of this split (e.g. "train-").
        const parquetSource = await findFirstMatch(snapshotPath, (name) => name.endsWith('.parquet') && name.startsWith(spec.parquetPrefix));
        if (!parquetSource) {
            throw new Error(`Could not locate ${spec.output} or matching Parquet file (prefix ${spec.parquetPrefix}) inside snapshot ${snapshotPath}`);
        }
        console.log(`Converting ${parquetSource} → ${destinationPath}`);
        // NOTE(review): convertParquetToJsonl is not awaited — presumably a
        // synchronous native binding; confirm, otherwise the stat below could
        // run before the file is fully written.
        convertParquetToJsonl(parquetSource, destinationPath);
        const stats = await stat(destinationPath);
        console.log(`Saved ${spec.output} (${Math.round(stats.size / 1024)} KiB) → ${destinationPath}`);
    }
    console.log('Done.');
    console.log(`Dataset files stored under ${outputDir}`);
}
@@ -0,0 +1,2 @@
1
+ export declare function run(argv: string[]): Promise<void>;
2
+ //# sourceMappingURL=download-model.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"download-model.d.ts","sourceRoot":"","sources":["../../src/commands/download-model.ts"],"names":[],"mappings":"AAmLA,wBAAsB,GAAG,CAAC,IAAI,EAAE,MAAM,EAAE,iBAmLvC"}
@@ -0,0 +1,327 @@
1
+ import { readdir, copyFile } from 'node:fs/promises';
2
+ import { parseArgs } from 'node:util';
3
+ import { existsSync } from 'node:fs';
4
+ import { homedir } from 'node:os';
5
+ import { join, resolve, dirname } from 'node:path';
6
+ import { listFiles, whoAmI, downloadFileToCacheDir } from '@huggingface/hub';
7
+ import { AsyncEntry } from '@napi-rs/keyring';
8
+ import { input } from '@inquirer/prompts';
9
+ import { ensureDir, formatBytes } from '../utils.js';
10
// Default HuggingFace cache location (mirrors the hub's own default of ~/.cache/huggingface).
const DEFAULT_CACHE_DIR = join(homedir(), '.cache', 'huggingface');
// Model downloaded when no --model flag is given.
const DEFAULT_MODEL = 'Qwen/Qwen3-0.6B';
// OS-keychain entry used to persist the HuggingFace access token (see --set-token).
const keyringEntry = new AsyncEntry('mlx-node', 'huggingface-token');
13
// Print CLI usage for the "download model" subcommand to stdout.
// The template literal below is user-facing output; keep wording in sync
// with the flags registered in run()'s parseArgs options.
function printHelp() {
    console.log(`
Download a model from HuggingFace

Usage:
  mlx download model [options]

Options:
  -m, --model <name>    HuggingFace model name (default: ${DEFAULT_MODEL})
  -o, --output <dir>    Output directory (default: .cache/models/<model-slug>)
  -g, --glob <pattern>  Filter files by glob pattern (can be repeated)
  --cache-dir <dir>     HuggingFace cache directory (default: ~/.cache/huggingface)
  -h, --help            Show this help message
  --set-token           Set HuggingFace token

Glob Filtering:
  Use --glob to download only specific files from a repo. This is especially
  useful for GGUF repos that contain many quantization variants. Patterns use
  simple wildcard matching (* matches any characters).

  Multiple --glob flags can be combined; a file is included if it matches ANY
  of the patterns.

Examples:
  mlx download model
  mlx download model --model Qwen/Qwen3-1.7B --output .cache/models/qwen3-1.7b

  # Download only the BF16 GGUF variant
  mlx download model -m unsloth/Qwen3.5-9B-GGUF -g "*BF16*"

  # Download only Q4_K_M and Q8_0 variants
  mlx download model -m unsloth/Qwen3.5-9B-GGUF -g "*Q4_K_M*" -g "*Q8_0*"

  # Download all .gguf files (skip everything else)
  mlx download model -m unsloth/Qwen3.5-9B-GGUF -g "*.gguf"
`);
}
50
/**
 * Interactively prompt for a HuggingFace access token, validate it
 * against the HuggingFace API (whoAmI), and persist it in the OS
 * keychain on success. Re-prompts on validation failure.
 */
async function setToken() {
    // Validator returns `true` on success or an error message string,
    // per @inquirer/prompts' validate contract.
    const validateToken = async (value) => {
        if (!value) {
            return 'Token is required';
        }
        if (!value.startsWith('hf_')) {
            return 'HuggingFace token must start with "hf_"';
        }
        try {
            const { auth } = await whoAmI({ accessToken: value });
            return auth ? true : 'Invalid token';
        }
        catch {
            // Network/auth failures are reported the same as a bad token.
            return 'Invalid token';
        }
    };
    const token = await input({
        message: 'Enter your HuggingFace token:',
        required: true,
        theme: { validationFailureMode: 'clear' },
        validate: validateToken,
    });
    if (token) {
        await keyringEntry.setPassword(token);
    }
}
80
// Essential metadata files (model config + tokenizer assets) that are
// always downloaded, even when a --glob filter would otherwise exclude them.
const CORE_FILES = [
    'config.json',
    'tokenizer.json',
    'tokenizer_config.json',
    'special_tokens_map.json',
    'vocab.json',
    'merges.txt',
];
88
/**
 * Convert a simple glob pattern to a case-insensitive, fully-anchored RegExp.
 * Only `*` is a wildcard (matches any run of characters); every other
 * character — including `?` — is matched literally.
 */
function globToRegex(pattern) {
    // Escape all regex metacharacters except `*`, which becomes `.*`.
    // Bug fix: the previous escape class omitted `?`, so a literal `?`
    // in a pattern acted as a regex "optional" quantifier (e.g. "a?b"
    // incorrectly matched "b").
    const escaped = pattern.replace(/[.+?^${}()|[\]\\]/g, '\\$&').replace(/\*/g, '.*');
    return new RegExp(`^${escaped}$`, 'i');
}
93
/** True when `filename` matches at least one of the compiled glob RegExps. */
function matchesAnyGlob(filename, patterns) {
    for (const pattern of patterns) {
        if (pattern.test(filename)) {
            return true;
        }
    }
    return false;
}
97
/**
 * List all files in a HuggingFace model repo and select the subset to
 * download, returning the selection, the full listing, and the total
 * byte size of the selected files.
 *
 * Selection rules:
 *  - With glob patterns: a file is included when its basename or full
 *    path matches any pattern, OR when it is a core config/tokenizer
 *    file (CORE_FILES) so metadata is always fetched.
 *  - Without globs: core files plus known weight/config extensions.
 */
async function getModelFiles(modelName, accessToken, globPatterns) {
    // Compile glob patterns up front (undefined means "no filtering").
    const globs = globPatterns?.map(globToRegex);
    const defaultExtensions = ['.safetensors', '.json', '.pdiparams', '.yml'];
    const shouldDownload = (file) => {
        if (globs) {
            const basename = file.path.split('/').pop() || file.path;
            return (matchesAnyGlob(basename, globs) ||
                matchesAnyGlob(file.path, globs) ||
                CORE_FILES.includes(basename));
        }
        return (CORE_FILES.includes(file.path) ||
            defaultExtensions.some((ext) => file.path.endsWith(ext)));
    };
    const allFiles = [];
    const filesToDownload = [];
    let totalSize = 0;
    for await (const file of listFiles({ repo: { type: 'model', name: modelName }, accessToken })) {
        allFiles.push(file);
        if (shouldDownload(file)) {
            filesToDownload.push(file);
            totalSize += file.size ?? 0;
        }
    }
    return { totalSize, filesToDownload, allFiles };
}
137
/**
 * Verify that config.json and every expected weight file exist in
 * `outputDir`, printing a per-file status line. Returns true only when
 * config.json is present AND weightFiles is non-empty AND every weight
 * file exists on disk.
 */
async function verifyDownload(outputDir, weightFiles) {
    console.log('\nVerifying download...');
    let allPresent = true;
    if (existsSync(join(outputDir, 'config.json'))) {
        console.log(' ✓ config.json');
    }
    else {
        console.error(' ✗ Missing required file: config.json');
        allPresent = false;
    }
    if (weightFiles.length === 0) {
        console.error(' ✗ No weight files found');
        allPresent = false;
    }
    for (const file of weightFiles) {
        if (existsSync(join(outputDir, file))) {
            console.log(` ✓ ${file}`);
        }
        else {
            console.error(` ✗ Missing weight file: ${file}`);
            allPresent = false;
        }
    }
    return allPresent;
}
164
/**
 * Entry point for `mlx download model`.
 *
 * Parses CLI flags, short-circuits for --help / --set-token, skips the
 * download when the model already appears present in the output
 * directory, then downloads the selected files into the HuggingFace
 * cache and copies them into the output directory. Exits the process
 * with code 1 when nothing matched or verification fails.
 *
 * @param argv CLI arguments for this subcommand (everything after "model").
 */
export async function run(argv) {
    const { values: args } = parseArgs({
        args: argv,
        options: {
            model: {
                type: 'string',
                short: 'm',
                default: DEFAULT_MODEL,
            },
            output: {
                type: 'string',
                short: 'o',
            },
            glob: {
                type: 'string',
                short: 'g',
                multiple: true,
            },
            help: {
                type: 'boolean',
                short: 'h',
                default: false,
            },
            'set-token': {
                type: 'boolean',
                default: false,
            },
            'cache-dir': {
                type: 'string',
            },
        },
    });
    if (args.help) {
        printHelp();
        return;
    }
    if (args['set-token']) {
        await setToken();
        return;
    }
    const modelName = args.model;
    const globPatterns = args.glob;
    // e.g. "Qwen/Qwen3-0.6B" -> "qwen3-0.6b"; used for the default output dir.
    const modelSlug = modelName.split('/').pop().toLowerCase();
    const outputDir = resolve(args.output ?? join('.cache', 'models', modelSlug));
    // Token comes from the OS keychain (set via --set-token); downloads
    // fall back to anonymous access when absent.
    const HUGGINGFACE_TOKEN = (await keyringEntry.getPassword()) ?? undefined;
    if (!HUGGINGFACE_TOKEN) {
        console.warn('No HuggingFace token found, the model will download with anonymous access');
    }
    // Render a centered banner box sized to the title (min width 58).
    const title = `${modelName} Model Download from HuggingFace`;
    const boxWidth = Math.max(title.length + 6, 58);
    const padding = Math.floor((boxWidth - title.length - 2) / 2);
    const rightPadding = boxWidth - title.length - padding;
    console.log('╔' + '═'.repeat(boxWidth) + '╗');
    console.log('║' + ' '.repeat(padding) + title + ' '.repeat(rightPadding) + '║');
    console.log('╚' + '═'.repeat(boxWidth) + '╝\n');
    console.log(`Model: ${modelName}`);
    if (globPatterns?.length) {
        console.log(`Filter: ${globPatterns.join(', ')}`);
    }
    console.log(`Output: ${outputDir}\n`);
    // Check if already downloaded
    if (existsSync(outputDir)) {
        const files = await readdir(outputDir);
        const hasConfig = files.includes('config.json');
        const hasSingleModel = files.includes('model.safetensors');
        const hasShardedModel = files.includes('model.safetensors.index.json');
        const hasPaddleModel = files.includes('inference.pdiparams');
        const hasGguf = files.some((f) => f.endsWith('.gguf'));
        // A complete non-GGUF download needs config.json plus one weight layout.
        if (hasConfig && (hasSingleModel || hasShardedModel || hasPaddleModel)) {
            console.log('Model already downloaded!\n');
            console.log('To re-download, delete the output directory first:');
            console.log(` rm -rf ${outputDir}\n`);
            return;
        }
        // GGUF repos carry no config.json, so any .gguf counts as "done"
        // when no glob filter narrows the expectation.
        if (hasGguf && !globPatterns?.length) {
            console.log('GGUF file(s) already downloaded!\n');
            console.log('To re-download, delete the output directory first:');
            console.log(` rm -rf ${outputDir}\n`);
            return;
        }
        // For glob downloads, check if all glob-matched files are present
        if (hasGguf && globPatterns?.length) {
            const globs = globPatterns.map(globToRegex);
            const matchedExisting = files.filter((f) => matchesAnyGlob(f, globs) || CORE_FILES.includes(f));
            // NOTE(review): "> 1" is a heuristic (more than one matched/core
            // file present), not an exact completeness check against the repo.
            if (matchedExisting.length > 1) {
                console.log('Matched files already downloaded!\n');
                console.log('To re-download, delete the output directory first:');
                console.log(` rm -rf ${outputDir}\n`);
                return;
            }
        }
    }
    await ensureDir(outputDir);
    console.log('Fetching file list from HuggingFace...\n');
    const { totalSize, filesToDownload, allFiles } = await getModelFiles(modelName, HUGGINGFACE_TOKEN, globPatterns);
    if (filesToDownload.length === 0) {
        console.error('No files matched the given criteria.\n');
        // Help the user pick a working --glob by listing the repo's GGUF variants.
        if (globPatterns?.length) {
            const ggufFiles = allFiles.filter((f) => f.path.endsWith('.gguf'));
            if (ggufFiles.length > 0) {
                console.log('Available GGUF files in this repo:');
                for (const f of ggufFiles) {
                    console.log(` ${f.path} (${formatBytes(f.size)})`);
                }
                console.log(`\nTry: mlx download model -m ${modelName} -g "<pattern>"`);
            }
        }
        process.exit(1);
    }
    // Show what will be downloaded
    if (globPatterns?.length) {
        console.log(`Matched ${filesToDownload.length} file(s):`);
        for (const f of filesToDownload) {
            console.log(` ${f.path} (${formatBytes(f.size)})`);
        }
        console.log('');
    }
    const sizeStr = formatBytes(totalSize);
    console.log(`Downloading ${filesToDownload.length} file(s) (~${sizeStr})...\n`);
    const cacheDir = args['cache-dir'] ? resolve(args['cache-dir']) : DEFAULT_CACHE_DIR;
    const weightFiles = [];
    const total = filesToDownload.length;
    // Download each file into the HF cache, then copy it into outputDir
    // (preserving repo-relative subdirectories).
    for (let i = 0; i < total; i++) {
        const file = filesToDownload[i];
        const fileSizeStr = file.size ? formatBytes(file.size) : '';
        console.log(` [${i + 1}/${total}] ${file.path}${fileSizeStr ? ` (${fileSizeStr})` : ''}...`);
        const snapshotPath = await downloadFileToCacheDir({
            repo: { type: 'model', name: modelName },
            path: file.path,
            cacheDir,
            accessToken: HUGGINGFACE_TOKEN,
        });
        const destPath = join(outputDir, file.path);
        await ensureDir(dirname(destPath));
        await copyFile(snapshotPath, destPath);
        // Track weight files for post-download verification / hints below.
        if (file.path.endsWith('.safetensors') || file.path.endsWith('.pdiparams') || file.path.endsWith('.gguf')) {
            weightFiles.push(file.path);
        }
    }
    // For GGUF downloads, skip strict verification (no config.json required in GGUF repos)
    const hasGgufFiles = weightFiles.some((f) => f.endsWith('.gguf'));
    if (hasGgufFiles) {
        console.log(`\nDownload complete! ${weightFiles.length} file(s) saved to ${outputDir}\n`);
        console.log('To convert GGUF to MLX SafeTensors format:');
        for (const wf of weightFiles) {
            const ggufPath = join(outputDir, wf);
            console.log(` mlx convert -i ${ggufPath} -o ${outputDir}-mlx`);
        }
        console.log('');
    }
    else {
        console.log(`Format: Base model (needs MLX conversion)`);
        console.log('Note: After download, convert to MLX format:');
        console.log(` mlx convert --input ${outputDir} --output ${outputDir}-mlx-bf16\n`);
        const success = await verifyDownload(outputDir, weightFiles);
        if (success) {
            console.log('\nModel downloaded successfully!\n');
        }
        else {
            console.error('\nDownload incomplete. Please try again.\n');
            process.exit(1);
        }
    }
}
@@ -0,0 +1,3 @@
1
+ export declare function ensureDir(path: string): Promise<void>;
2
+ export declare function formatBytes(bytes: number): string;
3
+ //# sourceMappingURL=utils.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"utils.d.ts","sourceRoot":"","sources":["../src/utils.ts"],"names":[],"mappings":"AAGA,wBAAsB,SAAS,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAI3D;AAED,wBAAgB,WAAW,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CASjD"}
package/dist/utils.js ADDED
@@ -0,0 +1,17 @@
1
+ import { mkdir } from 'node:fs/promises';
2
+ import { existsSync } from 'node:fs';
3
/** Create `path` (including parent directories) if it does not already exist. */
export async function ensureDir(path) {
    if (existsSync(path)) {
        return;
    }
    await mkdir(path, { recursive: true });
}
8
/**
 * Format a byte count as a human-readable string, e.g. 1536 -> "1.50 KB".
 * Caps at GB (the largest supported unit).
 *
 * Callers pass HuggingFace file sizes that may be absent (`file.size`
 * is optional — note the `file.size ? formatBytes(...) : ''` guards at
 * some call sites but not others), so non-finite input (undefined/NaN)
 * returns "0.00 B" instead of throwing.
 */
export function formatBytes(bytes) {
    // Bug fix: previously `undefined.toFixed(2)` threw a TypeError when
    // a caller passed a missing size.
    if (!Number.isFinite(bytes)) {
        return '0.00 B';
    }
    const units = ['B', 'KB', 'MB', 'GB'];
    let size = bytes;
    let unitIndex = 0;
    while (size >= 1024 && unitIndex < units.length - 1) {
        size /= 1024;
        unitIndex++;
    }
    return `${size.toFixed(2)} ${units[unitIndex]}`;
}
package/package.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "name": "@mlx-node/cli",
3
+ "version": "0.0.0",
4
+ "homepage": "https://github.com/mlx-node/mlx-node",
5
+ "bugs": {
6
+ "url": "https://github.com/mlx-node/mlx-node/issues"
7
+ },
8
+ "license": "MIT",
9
+ "repository": {
10
+ "type": "git",
11
+ "url": "https://github.com/mlx-node/mlx-node.git",
12
+ "directory": "packages/cli"
13
+ },
14
+ "bin": {
15
+ "mlx": "./dist/cli.js"
16
+ },
17
+ "files": [
18
+ "dist"
19
+ ],
20
+ "type": "module",
21
+ "scripts": {
22
+ "build": "tsc -b"
23
+ },
24
+ "dependencies": {
25
+ "@huggingface/hub": "^2.10.7",
26
+ "@inquirer/prompts": "^8.3.0",
27
+ "@mlx-node/core": "workspace:*",
28
+ "@napi-rs/keyring": "^1.2.0"
29
+ }
30
+ }