@vespermcp/mcp-server 1.2.20 → 1.2.22

This diff shows the content of publicly released package versions as published to their public registry. It is provided for informational purposes only.
Files changed (38)
  1. package/README.md +49 -0
  2. package/build/cloud/adapters/supabase.js +49 -0
  3. package/build/cloud/storage-manager.js +6 -0
  4. package/build/export/exporter.js +22 -9
  5. package/build/gateway/unified-dataset-gateway.js +410 -0
  6. package/build/index.js +1592 -837
  7. package/build/ingestion/hf-downloader.js +12 -2
  8. package/build/ingestion/ingestor.js +19 -9
  9. package/build/install/install-service.js +11 -6
  10. package/build/lib/supabase.js +3 -0
  11. package/build/metadata/scraper.js +85 -14
  12. package/build/python/asset_downloader_engine.py +22 -1
  13. package/build/python/convert_engine.py +92 -0
  14. package/build/python/export_engine.py +45 -0
  15. package/build/python/hf_fallback.py +196 -45
  16. package/build/python/kaggle_engine.py +77 -5
  17. package/build/python/normalize_engine.py +83 -0
  18. package/build/python/vesper/core/asset_downloader.py +238 -48
  19. package/build/search/engine.js +43 -5
  20. package/build/search/jit-orchestrator.js +18 -14
  21. package/build/search/query-intent.js +509 -0
  22. package/build/tools/formatter.js +6 -3
  23. package/build/utils/python-runtime.js +130 -0
  24. package/package.json +7 -5
  25. package/scripts/postinstall.cjs +87 -31
  26. package/scripts/wizard.cjs +601 -0
  27. package/scripts/wizard.js +306 -12
  28. package/src/python/__pycache__/config.cpython-312.pyc +0 -0
  29. package/src/python/__pycache__/kaggle_engine.cpython-312.pyc +0 -0
  30. package/src/python/asset_downloader_engine.py +22 -1
  31. package/src/python/convert_engine.py +92 -0
  32. package/src/python/export_engine.py +45 -0
  33. package/src/python/hf_fallback.py +196 -45
  34. package/src/python/kaggle_engine.py +77 -5
  35. package/src/python/normalize_engine.py +83 -0
  36. package/src/python/requirements.txt +12 -0
  37. package/src/python/vesper/core/asset_downloader.py +238 -48
  38. package/wizard.cjs +3 -0
@@ -94,8 +94,18 @@ export class HFDownloader {
  }
  catch (error) {
      const msg = String(error?.message || error);
-     if (msg.includes("401") || msg.includes("403") || msg.toLowerCase().includes("unauthorized")) {
-         throw new Error("Hugging Face gated/private dataset requires token. Run 'vespermcp config keys' to set HF token.");
+     if (msg.includes("401") || msg.toLowerCase().includes("unauthorized")) {
+         throw new Error(`Authentication required for dataset '${repoId}'. ` +
+             `This dataset may be gated or private. ` +
+             `Use the configure_keys tool to set your HF_TOKEN, then retry.`);
+     }
+     if (msg.includes("403") || msg.toLowerCase().includes("forbidden")) {
+         throw new Error(`Access denied for dataset '${repoId}'. ` +
+             `You may need to accept the dataset's usage agreement on huggingface.co, ` +
+             `then set HF_TOKEN via configure_keys tool.`);
+     }
+     if (msg.includes("404") || msg.toLowerCase().includes("not found")) {
+         throw new Error(`Dataset '${repoId}' not found on HuggingFace. Check the dataset ID.`);
      }
      console.error(`[HF] Failed to list files for ${repoId}:`, msg);
      return null;
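The change splits the old combined 401/403 branch into three distinct, actionable failures (auth missing, gated access, wrong ID). A minimal sketch of the resulting mapping, using a hypothetical classify helper that is not part of the package, only an illustration of the taxonomy:

// Hypothetical helper mirroring the 401/403/404 taxonomy above.
function classifyHfError(msg, repoId) {
    const lower = msg.toLowerCase();
    if (msg.includes("401") || lower.includes("unauthorized"))
        return { kind: "auth", hint: `Set HF_TOKEN via configure_keys, then retry '${repoId}'.` };
    if (msg.includes("403") || lower.includes("forbidden"))
        return { kind: "gated", hint: `Accept the usage agreement on huggingface.co for '${repoId}'.` };
    if (msg.includes("404") || lower.includes("not found"))
        return { kind: "missing", hint: `Check the dataset ID '${repoId}'.` };
    return { kind: "unknown", hint: msg };
}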
@@ -46,6 +46,9 @@ export class DataIngestor {
  getKaggleCredentialError() {
      return "Kaggle support requires API key. Run 'vespermcp config keys' (30 seconds), or provide ~/.kaggle/kaggle.json.";
  }
+ toSafeDatasetPath(datasetId) {
+     return datasetId.replace(/[:\/]/g, "_");
+ }
  /**
   * Ensures a dataset is available locally
   */
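toSafeDatasetPath consolidates the three slightly different replace() calls that previously sanitized IDs per source (HuggingFace's owner/name, OpenML's prefix:id, data.world's owner/name). A quick sketch of the expected behavior:

const toSafeDatasetPath = (datasetId) => datasetId.replace(/[:\/]/g, "_");
console.log(toSafeDatasetPath("owner/dataset"));    // "owner_dataset"
console.log(toSafeDatasetPath("openml:31"));        // "openml_31"
console.log(toSafeDatasetPath("owner/ds:latest"));  // "owner_ds_latest"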
@@ -81,13 +84,20 @@ export class DataIngestor {
          return resolvedPath;
      }
      catch (e) {
-         this.failDownload(datasetId, e.message);
-         throw e;
+         const msg = String(e?.message || e);
+         // If auth error, propagate immediately with helpful message
+         if (msg.includes("401") || msg.includes("403") || msg.includes("Authentication") || msg.includes("Access denied")) {
+             this.failDownload(datasetId, msg);
+             throw e;
+         }
+         // For other download errors, try the fallback
+         onProgress?.(`Direct download failed (${msg}), trying datasets library fallback...`);
      }
  }
- else {
-     // Fallback: Use Python datasets library to download and convert
-     onProgress?.("No raw files found. Using HuggingFace datasets library to download...");
+ // Fallback: Use Python datasets library to download and convert
+ // This runs when findBestFile returns null OR when direct download fails (non-auth)
+ if (!fs.existsSync(this.getTargetPath(datasetId, "parquet")) || !this.store.getDownloadStatus(datasetId)?.status?.includes("completed")) {
+     onProgress?.("Using HuggingFace datasets library to download...");
      const targetPath = this.getTargetPath(datasetId, "parquet");
      this.store.registerDownload(datasetId, targetPath, "downloading");
      try {
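The fallback is no longer an else branch: direct-download errors other than auth now fall through to the datasets-library path, which is guarded by a completion check so a successful direct download is not redone. A condensed control-flow sketch, assuming the class's own helpers (the method names here are hypothetical stand-ins):

async function ensureDataset(ingestor, datasetId, onProgress) {
    try {
        return await ingestor.tryDirectDownload(datasetId); // hypothetical name
    }
    catch (e) {
        const msg = String(e?.message || e);
        if (/401|403|Authentication|Access denied/.test(msg))
            throw e; // auth problems are never retried via fallback
        onProgress?.(`Direct download failed (${msg}), trying datasets library fallback...`);
    }
    // Reached when no raw file exists OR direct download failed (non-auth).
    return ingestor.downloadViaDatasetsLibrary(datasetId); // hypothetical name
}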
@@ -108,7 +118,7 @@ export class DataIngestor {
          this.failDownload(datasetId, errorMsg);
          throw new Error(errorMsg);
      }
-     const targetDir = path.join(this.rawDataDir, datasetId.replace(/\//g, "_"));
+     const targetDir = path.join(this.rawDataDir, this.toSafeDatasetPath(datasetId));
      this.store.registerDownload(datasetId, targetDir, "downloading");
      try {
          onProgress?.("Downloading from Kaggle...");
@@ -124,7 +134,7 @@ export class DataIngestor {
      }
  }
  else if (source === "openml") {
-     const targetDir = path.join(this.rawDataDir, datasetId.replace(/:/g, "_"));
+     const targetDir = path.join(this.rawDataDir, this.toSafeDatasetPath(datasetId));
      this.store.registerDownload(datasetId, targetDir, "downloading");
      try {
          onProgress?.("Downloading from OpenML...");
@@ -140,7 +150,7 @@ export class DataIngestor {
      }
  }
  else if (source === "dataworld") {
-     const targetDir = path.join(this.rawDataDir, datasetId.replace(/[:\/]/g, "_"));
+     const targetDir = path.join(this.rawDataDir, this.toSafeDatasetPath(datasetId));
      this.store.registerDownload(datasetId, targetDir, "downloading");
      try {
          onProgress?.("Downloading from data.world...");
@@ -174,7 +184,7 @@ export class DataIngestor {
   * Generates a safe local filename for a dataset ID
   */
  getTargetPath(datasetId, extension = "parquet") {
-     const safeId = datasetId.replace(/\//g, "_").replace(/:/g, "_");
+     const safeId = this.toSafeDatasetPath(datasetId);
      return path.join(this.rawDataDir, `${safeId}.${extension}`);
  }
  /**
@@ -18,12 +18,15 @@ export class InstallService {
          throw new Error(`Source file not found for installation: ${sourcePath}`);
      }
      const dataset = this.metadataStore.getDataset(datasetId);
-     if (!dataset) {
-         throw new Error(`Dataset metadata not found for ${datasetId}`);
-     }
      // Create target directory
-     const sanitizedName = dataset.name.replace(/[^a-z0-9]/gi, "_").toLowerCase();
-     const installDir = targetDir || path.join(this.projectRoot, "datasets", sanitizedName);
+     const installLabel = dataset?.name || datasetId;
+     const sanitizedName = installLabel.replace(/[^a-z0-9]/gi, "_").toLowerCase();
+     // If caller specified a target dir, use it directly
+     // Otherwise use the current working directory
+     const installDir = targetDir
+         ? path.resolve(targetDir)
+         : path.resolve(process.cwd(), sanitizedName);
+     console.error(`[InstallService] Resolved install directory: ${installDir}`);
      if (!fs.existsSync(installDir)) {
          fs.mkdirSync(installDir, { recursive: true });
      }
@@ -34,7 +37,9 @@ export class InstallService {
          fs.copyFileSync(sourcePath, targetPath);
          // Update metadata
          const absolutePath = path.resolve(targetPath);
-         this.metadataStore.updateInstallPath(datasetId, absolutePath);
+         if (dataset) {
+             this.metadataStore.updateInstallPath(datasetId, absolutePath);
+         }
          console.error(`[InstallService] Dataset ${datasetId} installed to ${absolutePath}`);
          return absolutePath;
      }
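Installation no longer requires catalog metadata and now defaults to the caller's working directory instead of projectRoot/datasets. A sketch of the new resolution rule in isolation:

import path from "node:path";

// Mirrors the install-dir resolution above: an explicit targetDir wins,
// otherwise a sanitized label under the current working directory.
function resolveInstallDir(targetDir, dataset, datasetId) {
    const label = dataset?.name || datasetId; // metadata is now optional
    const sanitized = label.replace(/[^a-z0-9]/gi, "_").toLowerCase();
    return targetDir ? path.resolve(targetDir) : path.resolve(process.cwd(), sanitized);
}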
@@ -0,0 +1,3 @@
+ import { createClient } from '@supabase/supabase-js';
+ export const supabase = createClient(process.env.SUPABASE_URL, process.env.SUPABASE_SERVICE_ROLE_KEY // for MCP, use service_role, not anon
+ );
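The new module builds the client unconditionally from two environment variables; @supabase/supabase-js throws if either is undefined. A defensive variant one might wrap around it, not what the package ships:

import { createClient } from '@supabase/supabase-js';

// Guarded variant: returns null instead of throwing when env vars are absent.
export function tryCreateSupabase() {
    const url = process.env.SUPABASE_URL;
    const key = process.env.SUPABASE_SERVICE_ROLE_KEY; // service_role, not anon
    return url && key ? createClient(url, key) : null;
}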
@@ -3,22 +3,29 @@ import { categorizeLicense } from "./license.js";
  import { calculateQualityScore } from "./quality.js";
  import { classifyDomain } from "./domain.js";
  import { retryWithBackoff, delayBetweenRequests } from "./rate-limiter.js";
+ import { analyzeDatasetQuery, buildIntentSearchQuery, buildHuggingFaceFilterTags, scoreDatasetAgainstIntent, shouldExcludeByLanguage } from "../search/query-intent.js";
  export class HuggingFaceScraper {
      /**
       * Bulk discovery: Fetch many datasets quickly without deep details.
       * Hits the 25k target in minutes.
       */
-     async scrapeBulk(limit = 1000, query) {
+     async scrapeBulk(limit = 1000, queryOrIntent) {
+         const intent = typeof queryOrIntent === "string"
+             ? await analyzeDatasetQuery(queryOrIntent)
+             : queryOrIntent;
+         const query = typeof queryOrIntent === "string" ? queryOrIntent : intent?.searchQuery;
+         const hfQuery = intent ? buildIntentSearchQuery(intent) : query;
          const filterMsg = query ? `, query: ${query}` : "";
          console.error(`[Bulk Scraper] Fetching datasets (target limit: ${limit}${filterMsg})...`);
          const results = [];
          let processed = 0;
          try {
              const hfToken = process.env.HF_TOKEN || process.env.HUGGINGFACE_TOKEN;
+             const hfFilterTags = intent ? buildHuggingFaceFilterTags(intent) : [];
              for await (const ds of listDatasets({
                  limit: limit,
                  additionalFields: ["description", "tags", "downloadsAllTime", "createdAt"],
-                 search: { query: query },
+                 search: { query: hfQuery, tags: hfFilterTags.length > 0 ? hfFilterTags : undefined },
                  ...(hfToken ? { accessToken: hfToken } : {})
              })) {
                  if (results.length >= limit)
@@ -78,6 +85,9 @@ export class HuggingFaceScraper {
                  has_readme: false,
                  is_incomplete: true // Flag for Phase 2
              };
+             // Hard language exclusion
+             if (intent && shouldExcludeByLanguage(metadata, intent))
+                 continue;
              results.push(metadata);
          }
      }
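scrapeBulk now accepts either a raw query string or a pre-built intent object; a string is analyzed on the fly via analyzeDatasetQuery. A sketch of both call shapes, where the intent literal's field names are assumptions for illustration:

const scraper = new HuggingFaceScraper();

// String form: the query is analyzed into an intent internally.
await scraper.scrapeBulk(1000, "french sentiment reviews");

// Object form: a caller that already ran analyzeDatasetQuery passes the
// intent straight through (field names here are assumptions).
await scraper.scrapeBulk(1000, { searchQuery: "sentiment", languages: ["fr"] });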
@@ -86,8 +96,12 @@ export class HuggingFaceScraper {
          }
          return results;
      }
-     async scrape(limit = 100, applyMVPFilters = true, query // Use as general search query
-     ) {
+     async scrape(limit = 100, applyMVPFilters = true, queryOrIntent) {
+         const intent = typeof queryOrIntent === "string"
+             ? await analyzeDatasetQuery(queryOrIntent)
+             : queryOrIntent;
+         const query = typeof queryOrIntent === "string" ? queryOrIntent : intent?.searchQuery;
+         const hfQuery = intent ? buildIntentSearchQuery(intent) : query;
          const filterMsg = query ? `, query: ${query}` : "";
          console.error(`Fetching datasets (target limit: ${limit}, MVP filters: ${applyMVPFilters}${filterMsg})...`);
          const results = [];
@@ -110,10 +124,11 @@ export class HuggingFaceScraper {
      }
      // Add delay between batches to avoid rate limits
      const BATCH_DELAY = hfToken ? 500 : 2000;
+     const hfFilterTags = intent ? buildHuggingFaceFilterTags(intent) : [];
      for await (const ds of listDatasets({
          limit: fetchLimit,
          additionalFields: ["description", "tags"],
-         search: { query: query },
+         search: { query: hfQuery, tags: hfFilterTags.length > 0 ? hfFilterTags : undefined },
          ...(hfToken ? { accessToken: hfToken } : {})
      })) {
          if (results.length >= limit)
@@ -150,18 +165,61 @@ export class HuggingFaceScraper {
          initialDelay: 2000, // Start with 2 seconds for HF API
          maxDelay: 30000 // Max 30 seconds
      });
-     const splits = fullInfo.splits?.map((s) => ({
+     const cardData = fullInfo.cardData || {};
+     // Extract splits from cardData.dataset_info (where HF actually stores them)
+     // cardData.dataset_info can be an object (single config) or array (multi-config)
+     let rawSplits = [];
+     const datasetInfoField = cardData.dataset_info;
+     if (datasetInfoField) {
+         const configs = Array.isArray(datasetInfoField) ? datasetInfoField : [datasetInfoField];
+         for (const config of configs) {
+             if (config?.splits && Array.isArray(config.splits)) {
+                 rawSplits = rawSplits.concat(config.splits);
+             }
+         }
+     }
+     // Fallback: try top-level splits from the SDK (rarely populated)
+     if (rawSplits.length === 0 && fullInfo.splits) {
+         rawSplits = fullInfo.splits;
+     }
+     const splits = rawSplits.map((s) => ({
          name: s.name,
-         num_examples: s.numExamples || 0,
-         size_bytes: s.sizeBytes
-     })) || [];
-     const totalExamples = splits.reduce((sum, s) => sum + (s.num_examples || 0), 0);
+         num_examples: s.num_examples || s.numExamples || 0,
+         size_bytes: s.num_bytes || s.sizeBytes || 0
+     }));
+     let totalExamples = splits.reduce((sum, s) => sum + (s.num_examples || 0), 0);
      const totalSizeBytes = splits.reduce((sum, s) => sum + (s.size_bytes || 0), 0);
+     // Fallback: estimate from size_categories when splits give 0
+     if (totalExamples === 0) {
+         const sizeCategories = cardData.size_categories;
+         if (Array.isArray(sizeCategories) && sizeCategories.length > 0) {
+             const cat = sizeCategories[0];
+             const rangeMatch = cat.match(/([\d.]+[KMB]?)\s*<\s*n\s*<\s*([\d.]+[KMB]?)/i);
+             if (rangeMatch) {
+                 const parseHumanNum = (s) => {
+                     const m = s.match(/^([\d.]+)([KMB])?$/i);
+                     if (!m)
+                         return 0;
+                     const base = parseFloat(m[1]);
+                     const suffix = (m[2] || '').toUpperCase();
+                     if (suffix === 'K')
+                         return base * 1000;
+                     if (suffix === 'M')
+                         return base * 1_000_000;
+                     if (suffix === 'B')
+                         return base * 1_000_000_000;
+                     return base;
+                 };
+                 const lo = parseHumanNum(rangeMatch[1]);
+                 const hi = parseHumanNum(rangeMatch[2]);
+                 totalExamples = Math.round((lo + hi) / 2);
+             }
+         }
+     }
      const totalSizeMB = totalSizeBytes ? Math.round(totalSizeBytes / (1024 * 1024) * 100) / 100 : undefined;
      const hasValidationSplit = splits.some((s) => s.name === "validation" || s.name === "val");
      const licenseTag = tags.find(t => t.startsWith("license:"));
      const licenseId = licenseTag ? licenseTag.replace("license:", "") : fullInfo.license;
-     const cardData = fullInfo.cardData || {};
      const licenseUrl = cardData.license?.[0]?.link || cardData.license_link;
      const license = categorizeLicense(licenseId, licenseUrl);
      if (license.category === "restricted") {
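When splits yield zero examples, the scraper now falls back to the card's size_categories tag (values like "10K&lt;n&lt;100K") and takes the midpoint of the range. A worked sketch of that arithmetic, condensed but equivalent to the parsing above:

// "10K<n<100K" -> lo 10,000, hi 100,000 -> midpoint 55,000.
const parseHumanNum = (s) => {
    const m = s.match(/^([\d.]+)([KMB])?$/i);
    if (!m) return 0;
    const mult = { K: 1e3, M: 1e6, B: 1e9 }[(m[2] || "").toUpperCase()] || 1;
    return parseFloat(m[1]) * mult;
};
const range = "10K<n<100K".match(/([\d.]+[KMB]?)\s*<\s*n\s*<\s*([\d.]+[KMB]?)/i);
const estimate = Math.round((parseHumanNum(range[1]) + parseHumanNum(range[2])) / 2);
console.log(estimate); // 55000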
@@ -247,7 +305,16 @@ export class HuggingFaceScraper {
          description_length: description.length,
          has_readme: !!(cardData.readme || cardData.readme_content)
      };
-     results.push(metadata);
+     // Hard language exclusion — drop bilingual/multilingual for single-language queries
+     if (intent && shouldExcludeByLanguage(metadata, intent)) {
+         // skip — do not push
+     }
+     else {
+         if (intent) {
+             metadata.intent_score = scoreDatasetAgainstIntent(metadata, intent);
+         }
+         results.push(metadata);
+     }
      }
      catch (e) {
          // Track all errors for user feedback
@@ -297,8 +364,12 @@ export class HuggingFaceScraper {
      if (otherErrors > 0) {
          console.error(`[HF Scraper] ⚠️ ${otherErrors} datasets skipped due to errors`);
      }
-     // Sort by downloads descending
-     return results.sort((a, b) => b.downloads - a.downloads);
+     return results.sort((a, b) => {
+         const intentDelta = Number(b.intent_score || 0) - Number(a.intent_score || 0);
+         if (intentDelta !== 0)
+             return intentDelta;
+         return b.downloads - a.downloads;
+     });
  }
  extractTask(tags) {
      const taskTags = [
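Ranking now prefers intent relevance, with downloads as the tiebreaker. A small demonstration of the comparator on made-up rows:

const rows = [
    { id: "a", intent_score: 0.2, downloads: 900 },
    { id: "b", intent_score: 0.8, downloads: 100 },
    { id: "c", intent_score: 0.8, downloads: 500 },
];
rows.sort((a, b) =>
    (Number(b.intent_score || 0) - Number(a.intent_score || 0)) || (b.downloads - a.downloads));
console.log(rows.map(r => r.id)); // ["c", "b", "a"]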
@@ -3,9 +3,14 @@ import asyncio
  import json
  import os
  import sys
+ import warnings
  from pathlib import Path
  from typing import Any, Dict

+ # Suppress noisy HF warnings
+ warnings.filterwarnings("ignore", message=".*trust_remote_code.*")
+ warnings.filterwarnings("ignore", message=".*legacy.*")
+
  CURRENT_DIR = Path(__file__).resolve().parent
  if str(CURRENT_DIR) not in sys.path:
      sys.path.insert(0, str(CURRENT_DIR))
@@ -21,9 +26,15 @@ def _print(payload: Dict[str, Any]) -> None:
  async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
      payload = json.loads(args.payload)
      output_root = payload.get("output_root") or str(Path.home() / ".vesper" / "data" / "assets")
+     output_dir = payload.get("output_dir")
      workers = int(payload.get("workers") or 8)
      recipes_dir = payload.get("recipes_dir")

+     # Auto-set HF token from payload if provided
+     token = payload.get("token") or payload.get("hf_token")
+     if token:
+         os.environ["HF_TOKEN"] = str(token)
+
      downloader = AssetDownloader(output_root=output_root, workers=workers, recipes_dir=recipes_dir)

      result = await downloader.download_assets(
@@ -33,6 +44,7 @@ async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
          kaggle_ref=payload.get("kaggle_ref"),
          urls=payload.get("urls"),
          output_format=payload.get("output_format", "webdataset"),
+         output_dir=str(output_dir) if output_dir else None,
          max_items=payload.get("max_items"),
          image_column=payload.get("image_column"),
      )
@@ -66,7 +78,16 @@ def main() -> None:

          _print({"ok": False, "error": f"Unknown action: {args.action}"})
      except Exception as e:
-         _print({"ok": False, "error": str(e)})
+         error_msg = str(e)
+         # Provide actionable error messages
+         if "401" in error_msg or "403" in error_msg or "Unauthorized" in error_msg:
+             error_msg = (
+                 "Authentication required. This dataset may be gated/private. "
+                 "Use configure_keys tool to set HF_TOKEN, then retry."
+             )
+         elif "No image column" in error_msg:
+             error_msg += " Hint: specify image_column parameter with the name of the column containing images."
+         _print({"ok": False, "error": error_msg})


  if __name__ == "__main__":
@@ -0,0 +1,92 @@
+ """
+ Convert a dataset file between formats (CSV, Parquet, JSON, JSONL).
+ Usage: convert_engine.py <input_path> <output_path>
+ Outputs JSON: {"ok": true, "output_path": "...", "rows": N, "columns": N} or {"ok": false, "error": "..."}
+ """
+ import sys
+ import json
+ import os
+
+ try:
+     import polars as pl
+ except Exception:
+     print(json.dumps({"ok": False, "error": "polars is required. Install with: pip install polars"}))
+     sys.exit(1)
+
+
+ def _load(src: str) -> pl.DataFrame:
+     ext = os.path.splitext(src)[1].lower()
+     if ext == ".csv":
+         return pl.read_csv(src, ignore_errors=True, infer_schema_length=10000)
+     if ext in (".tsv", ".tab"):
+         return pl.read_csv(src, separator="\t", ignore_errors=True, infer_schema_length=10000)
+     if ext in (".parquet", ".pq"):
+         return pl.read_parquet(src)
+     if ext in (".feather", ".ftr", ".arrow", ".ipc"):
+         return pl.read_ipc(src)
+     if ext in (".jsonl", ".ndjson"):
+         return pl.read_ndjson(src)
+     if ext == ".json":
+         raw = open(src, "r", encoding="utf-8").read().strip()
+         if raw.startswith("["):
+             return pl.read_json(src)
+         if "\n" in raw and raw.split("\n")[0].strip().startswith("{"):
+             return pl.read_ndjson(src)
+         obj = json.loads(raw)
+         if isinstance(obj, dict):
+             for key in ("data", "rows", "items", "records", "results", "entries", "samples"):
+                 if key in obj and isinstance(obj[key], list):
+                     return pl.DataFrame(obj[key])
+             for v in obj.values():
+                 if isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict):
+                     return pl.DataFrame(v)
+         return pl.read_json(src)
+     # Fallback: try csv
+     return pl.read_csv(src, ignore_errors=True, infer_schema_length=10000)
+
+
+ def _write(df: pl.DataFrame, dst: str) -> None:
+     ext = os.path.splitext(dst)[1].lower()
+     os.makedirs(os.path.dirname(dst) or ".", exist_ok=True)
+     if ext in (".parquet", ".pq"):
+         df.write_parquet(dst)
+     elif ext == ".csv":
+         df.write_csv(dst)
+     elif ext == ".json":
+         df.write_json(dst, row_oriented=True)
+     elif ext in (".jsonl", ".ndjson"):
+         df.write_ndjson(dst)
+     else:
+         raise ValueError(f"Unsupported output format: {ext}")
+
+
+ def main():
+     if len(sys.argv) < 3:
+         print(json.dumps({"ok": False, "error": "Usage: convert_engine.py <input> <output>"}))
+         sys.exit(1)
+
+     input_path = sys.argv[1]
+     output_path = sys.argv[2]
+
+     if not os.path.exists(input_path):
+         print(json.dumps({"ok": False, "error": f"File not found: {input_path}"}))
+         sys.exit(1)
+
+     try:
+         df = _load(input_path)
+         _write(df, output_path)
+         size_mb = round(os.path.getsize(output_path) / (1024 * 1024), 2)
+         print(json.dumps({
+             "ok": True,
+             "output_path": output_path,
+             "rows": df.height,
+             "columns": df.width,
+             "size_mb": size_mb,
+         }))
+     except Exception as e:
+         print(json.dumps({"ok": False, "error": str(e)}))
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     main()
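The engine emits line-oriented JSON on stdout even on failure, so the Node side can shell out and parse the last line. A hedged sketch of such a caller; the actual wrapper lives in the build output and may differ:

import { execFile } from "node:child_process";

// Assumed invocation shape: python3 convert_engine.py <input> <output>.
function convert(enginePath, input, output) {
    return new Promise((resolve, reject) => {
        execFile("python3", [enginePath, input, output], (err, stdout) => {
            try {
                const res = JSON.parse(stdout.trim().split("\n").pop());
                return res.ok ? resolve(res) : reject(new Error(res.error));
            } catch {
                reject(err || new Error("convert_engine produced no JSON"));
            }
        });
    });
}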
@@ -50,6 +50,51 @@ def _load(file_path: str, options: dict) -> pl.DataFrame:
      df = pl.read_ipc(file_path)
  elif ext == ".jsonl":
      df = pl.read_ndjson(file_path)
+ elif ext == ".json":
+     # Auto-detect: array-of-objects vs NDJSON vs nested structures
+     try:
+         import json as _json
+         with open(file_path, "r", encoding="utf-8", errors="ignore") as fh:
+             raw_text = fh.read(512)  # peek
+         stripped = raw_text.lstrip()
+         if stripped.startswith("["):
+             # Array of objects — standard JSON
+             with open(file_path, "r", encoding="utf-8", errors="ignore") as fh:
+                 data = _json.load(fh)
+             if isinstance(data, list) and len(data) > 0:
+                 df = pl.DataFrame(data)
+             else:
+                 raise ValueError("JSON file is empty or not an array of objects")
+         elif stripped.startswith("{"):
+             # Could be NDJSON or a single object wrapping rows
+             try:
+                 df = pl.read_ndjson(file_path)
+             except Exception:
+                 with open(file_path, "r", encoding="utf-8", errors="ignore") as fh:
+                     data = _json.load(fh)
+                 # Try common wrapper patterns: {"data": [...]}, {"rows": [...]}, etc.
+                 rows = None
+                 if isinstance(data, dict):
+                     for key in ("data", "rows", "records", "items", "results", "entries"):
+                         if key in data and isinstance(data[key], list):
+                             rows = data[key]
+                             break
+                 if rows is None:
+                     # Last resort: try to use the dict values
+                     rows = [data]
+                 if rows and len(rows) > 0:
+                     df = pl.DataFrame(rows)
+                 else:
+                     raise ValueError("Could not parse JSON structure into tabular data")
+         else:
+             raise ValueError("JSON file does not start with [ or {")
+     except pl.exceptions.ComputeError as ce:
+         raise ValueError(f"Failed to parse JSON: {ce}")
+ elif ext == ".xlsx":
+     try:
+         df = pl.read_excel(file_path)
+     except Exception as e:
+         raise ValueError(f"Failed to read Excel file: {e}")
  else:
      raise ValueError(f"Unsupported input format: {ext}")
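For reference, the input shapes the new .json branch accepts, sketched as sample payloads; wrapper keys like "data" and "rows" come straight from the key list above:

// 1. Array of objects: parsed directly with json.load + DataFrame.
const arrayForm = '[{"text": "hi", "label": 1}]';
// 2. NDJSON: one object per line, tried via read_ndjson first.
const ndjsonForm = '{"text": "hi"}\n{"text": "bye"}';
// 3. Wrapper object: rows pulled out of a known key such as "data".
const wrapperForm = '{"data": [{"text": "hi", "label": 1}]}';
// Anything else falls back to treating the object as a single row, or raises.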