@vespermcp/mcp-server 1.2.21 → 1.2.24

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
Files changed (47)
  1. package/README.md +49 -0
  2. package/build/cache/service.js +7 -0
  3. package/build/cloud/adapters/supabase.js +49 -0
  4. package/build/cloud/storage-manager.js +6 -0
  5. package/build/export/exporter.js +22 -9
  6. package/build/gateway/unified-dataset-gateway.js +441 -0
  7. package/build/index.js +1815 -839
  8. package/build/ingestion/ingestor.js +7 -4
  9. package/build/install/install-service.js +11 -6
  10. package/build/lib/supabase.js +3 -0
  11. package/build/metadata/arxiv-source.js +229 -0
  12. package/build/metadata/circuit-breaker.js +62 -0
  13. package/build/metadata/github-source.js +203 -0
  14. package/build/metadata/hackernews-source.js +123 -0
  15. package/build/metadata/quality.js +27 -0
  16. package/build/metadata/scraper.js +85 -14
  17. package/build/metadata/semantic-scholar-source.js +138 -0
  18. package/build/python/asset_downloader_engine.py +2 -0
  19. package/build/python/convert_engine.py +92 -0
  20. package/build/python/export_engine.py +45 -0
  21. package/build/python/kaggle_engine.py +77 -5
  22. package/build/python/normalize_engine.py +83 -0
  23. package/build/python/vesper/core/asset_downloader.py +5 -1
  24. package/build/scripts/test-phase1-webcore-quality.js +104 -0
  25. package/build/search/engine.js +45 -6
  26. package/build/search/jit-orchestrator.js +18 -14
  27. package/build/search/query-intent.js +509 -0
  28. package/build/tools/formatter.js +6 -3
  29. package/build/utils/python-runtime.js +130 -0
  30. package/build/web/extract-web.js +297 -0
  31. package/build/web/fusion-engine.js +457 -0
  32. package/build/web/types.js +1 -0
  33. package/build/web/web-core.js +242 -0
  34. package/package.json +12 -5
  35. package/scripts/postinstall.cjs +87 -31
  36. package/scripts/wizard.cjs +652 -0
  37. package/scripts/wizard.js +338 -12
  38. package/src/python/__pycache__/config.cpython-312.pyc +0 -0
  39. package/src/python/__pycache__/kaggle_engine.cpython-312.pyc +0 -0
  40. package/src/python/asset_downloader_engine.py +2 -0
  41. package/src/python/convert_engine.py +92 -0
  42. package/src/python/export_engine.py +45 -0
  43. package/src/python/kaggle_engine.py +77 -5
  44. package/src/python/normalize_engine.py +83 -0
  45. package/src/python/requirements.txt +12 -0
  46. package/src/python/vesper/core/asset_downloader.py +5 -1
  47. package/wizard.cjs +3 -0
package/build/metadata/hackernews-source.js
@@ -0,0 +1,123 @@
+ import { rateLimitedFetch } from "./rate-limiter.js";
+ import { CircuitBreaker } from "./circuit-breaker.js";
+ import { estimateQualityScore } from "./quality.js";
+ export class HackerNewsSource {
+     cache;
+     breaker = new CircuitBreaker("hackernews", {
+         failureThreshold: 5,
+         openDurationMs: 30_000,
+         halfOpenSuccessesToClose: 2,
+     });
+     constructor(cache) {
+         this.cache = cache;
+     }
+     async discover(query, limit = 20) {
+         const out = await this.discoverWithTelemetry(query, limit);
+         return out.results;
+     }
+     async discoverWithTelemetry(query, limit = 20) {
+         const start = Date.now();
+         const cleanQuery = String(query || "").trim();
+         if (!cleanQuery)
+             return { results: [], cacheHit: false, latencyMs: Date.now() - start };
+         const perPage = Math.max(1, Math.min(100, Number(limit || 20)));
+         const cacheKey = `webcore:hackernews:discover:${cleanQuery.toLowerCase()}:hitsPerPage=${perPage}`;
+         const cached = await this.cache?.getJson(cacheKey);
+         if (cached)
+             return { results: cached, cacheHit: true, latencyMs: Date.now() - start };
+         if (!this.breaker.canAttempt()) {
+             throw new Error("Hacker News connector is temporarily unavailable (circuit open).");
+         }
+         const url = new URL("https://hn.algolia.com/api/v1/search");
+         url.searchParams.set("query", cleanQuery);
+         url.searchParams.set("hitsPerPage", String(perPage));
+         url.searchParams.set("page", "0");
+         const response = await rateLimitedFetch(url.toString(), {
+             headers: {
+                 "Accept": "application/json",
+                 "User-Agent": "vesper/2.0 (phase1-hackernews-connector)",
+             },
+         }, { maxRetries: 5, initialDelay: 750, maxDelay: 15000 }).catch((e) => {
+             this.breaker.onFailure();
+             throw e;
+         });
+         const data = await response.json().catch((e) => {
+             this.breaker.onFailure();
+             throw new Error(`Hacker News JSON parse failed: ${e?.message || String(e)}`);
+         });
+         const hits = Array.isArray(data?.hits) ? data.hits : [];
+         const result = hits.slice(0, perPage).map((h) => this.toDatasetMetadata(h));
+         this.breaker.onSuccess();
+         await this.cache?.setJson(cacheKey, result, 7200); // 2h
+         return { results: result, cacheHit: false, latencyMs: Date.now() - start };
+     }
+     toDatasetMetadata(hit) {
+         const objectID = String(hit.objectID ?? "").trim();
+         const title = String(hit.title || "").trim() || `HN item ${objectID}`;
+         const createdAt = hit.created_at ? String(hit.created_at) : new Date().toISOString();
+         const author = hit.author ? String(hit.author).trim() : "";
+         const points = Number(hit.points || 0);
+         const comments = Number(hit.num_comments || 0);
+         const storyText = String(hit.story_text || "").trim();
+         const qualityWarnings = [];
+         if (!storyText)
+             qualityWarnings.push("Missing story text in Hacker News response");
+         const abstractLength = (storyText || title).length;
+         const authorsPresent = !!author;
+         const datePresent = !!createdAt;
+         const contentDepth = abstractLength;
+         const quality01 = estimateQualityScore({
+             abstractLength,
+             authorsPresent,
+             datePresent,
+             contentDepth,
+         });
+         const itemUrl = hit.url ? String(hit.url) : (objectID ? `https://news.ycombinator.com/item?id=${objectID}` : "");
+         return {
+             id: objectID || title,
+             source: "hackernews",
+             name: title,
+             description: storyText || title,
+             authors: author ? [author] : undefined,
+             downloads: comments,
+             likes: points,
+             stars: points,
+             tags: author ? ["hackernews", `author:${author}`] : ["hackernews"],
+             last_updated: createdAt,
+             task: "thread",
+             languages: [],
+             domain: "tech",
+             splits: [],
+             license: {
+                 id: "unknown",
+                 category: "unknown",
+                 usage_restrictions: [],
+                 warnings: [],
+             },
+             quality_score: Math.round(quality01 * 100),
+             quality_warnings: qualityWarnings,
+             download_url: itemUrl,
+             format: "TEXT",
+             total_examples: 1,
+             total_size_bytes: undefined,
+             total_size_mb: undefined,
+             columns: [
+                 { name: "title", type: "string" },
+                 { name: "text", type: "string" },
+             ],
+             is_structured: true,
+             has_target_column: false,
+             is_safe_source: true,
+             has_personal_data: false,
+             is_paywalled: false,
+             is_scraped_web_data: false,
+             uses_https: true,
+             has_train_split: false,
+             has_test_split: false,
+             has_validation_split: false,
+             description_length: (storyText || title).length,
+             has_readme: false,
+             metadata_url: itemUrl,
+         };
+     }
+ }
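
The connector's only collaborator is a cache exposing getJson/setJson, both visible in the hunk above, so wiring it up takes little code. A minimal usage sketch, assuming an in-memory stand-in for the package's real cache service:

import { HackerNewsSource } from "./build/metadata/hackernews-source.js";

// Stand-in cache implementing just the two methods the connector calls
// (TTL ignored); the package's own cache (build/cache/service.js) would
// normally go here.
const store = new Map();
const cache = {
    async getJson(key) { return store.get(key); },
    async setJson(key, value, _ttlSeconds) { store.set(key, value); },
};

const hn = new HackerNewsSource(cache);
const { results, cacheHit, latencyMs } = await hn.discoverWithTelemetry("vector database", 10);
console.error(`${results.length} hits, cacheHit=${cacheHit}, ${latencyMs}ms`);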
package/build/metadata/quality.js
@@ -46,3 +46,30 @@ export function calculateQualityScore(data) {
      score += 2;
      return Math.min(100, score);
  }
+ /**
+  * Phase 1 Web Core quality estimator (document-first).
+  *
+  * Returns a score in the range [0.3, 1.0] (never 0) using weighted signals:
+  * - abstract length (text richness)
+  * - authors presence (provenance)
+  * - date presence (freshness / completeness)
+  * - content depth (optional full text / README length)
+  */
+ export function estimateQualityScore(input) {
+     const abstractLen = Math.max(0, Number(input.abstractLength || 0));
+     const contentDepth = Math.max(0, Number(input.contentDepth || 0));
+     // Logistic-ish squashing: near-1 once abstract is moderately long.
+     const abstractScore = 1 / (1 + Math.exp(-(abstractLen - 200) / 120));
+     const depthScore = 1 / (1 + Math.exp(-(contentDepth - 1200) / 500));
+     const authorsScore = input.authorsPresent ? 1 : 0;
+     const dateScore = input.datePresent ? 1 : 0;
+     // Base is intentionally high so that for complete records the score clusters near 1.0.
+     const base = 0.3;
+     const score = base +
+         0.25 * abstractScore +
+         0.25 * depthScore +
+         0.1 * authorsScore +
+         0.1 * dateScore;
+     // Clamp to [0.3, 1.0]
+     return Math.max(0.3, Math.min(1.0, score));
+ }
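
To make the weighting concrete, here is the estimator evaluated at two operating points (the arithmetic is mine, not output from the package's tests):

import { estimateQualityScore } from "./build/metadata/quality.js";

// Sparse record: 50-char abstract, no authors, no date, no depth signal.
// abstractScore = 1/(1+e^((200-50)/120)) ≈ 0.22 and depthScore ≈ 0.09,
// so the score lands near the floor: 0.3 + 0.25*0.22 + 0.25*0.09 ≈ 0.38.
estimateQualityScore({ abstractLength: 50, authorsPresent: false, datePresent: false, contentDepth: 50 });

// Complete record: both logistic terms saturate near 1, so the score
// approaches 0.3 + 0.25 + 0.25 + 0.1 + 0.1 = 1.0 (quality_score 100).
estimateQualityScore({ abstractLength: 800, authorsPresent: true, datePresent: true, contentDepth: 4000 });

Note that the Math.max(0.3, ...) floor never actually binds: the base already contributes 0.3 and every weighted term is non-negative.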
package/build/metadata/scraper.js
@@ -3,22 +3,29 @@ import { categorizeLicense } from "./license.js";
  import { calculateQualityScore } from "./quality.js";
  import { classifyDomain } from "./domain.js";
  import { retryWithBackoff, delayBetweenRequests } from "./rate-limiter.js";
+ import { analyzeDatasetQuery, buildIntentSearchQuery, buildHuggingFaceFilterTags, scoreDatasetAgainstIntent, shouldExcludeByLanguage } from "../search/query-intent.js";
  export class HuggingFaceScraper {
      /**
       * Bulk discovery: Fetch many datasets quickly without deep details.
       * Hits the 25k target in minutes.
       */
-     async scrapeBulk(limit = 1000, query) {
+     async scrapeBulk(limit = 1000, queryOrIntent) {
+         const intent = typeof queryOrIntent === "string"
+             ? await analyzeDatasetQuery(queryOrIntent)
+             : queryOrIntent;
+         const query = typeof queryOrIntent === "string" ? queryOrIntent : intent?.searchQuery;
+         const hfQuery = intent ? buildIntentSearchQuery(intent) : query;
          const filterMsg = query ? `, query: ${query}` : "";
          console.error(`[Bulk Scraper] Fetching datasets (target limit: ${limit}${filterMsg})...`);
          const results = [];
          let processed = 0;
          try {
              const hfToken = process.env.HF_TOKEN || process.env.HUGGINGFACE_TOKEN;
+             const hfFilterTags = intent ? buildHuggingFaceFilterTags(intent) : [];
              for await (const ds of listDatasets({
                  limit: limit,
                  additionalFields: ["description", "tags", "downloadsAllTime", "createdAt"],
-                 search: { query: query },
+                 search: { query: hfQuery, tags: hfFilterTags.length > 0 ? hfFilterTags : undefined },
                  ...(hfToken ? { accessToken: hfToken } : {})
              })) {
                  if (results.length >= limit)
@@ -78,6 +85,9 @@ export class HuggingFaceScraper {
                      has_readme: false,
                      is_incomplete: true // Flag for Phase 2
                  };
+                 // Hard language exclusion
+                 if (intent && shouldExcludeByLanguage(metadata, intent))
+                     continue;
                  results.push(metadata);
              }
          }
@@ -86,8 +96,12 @@ export class HuggingFaceScraper {
          }
          return results;
      }
-     async scrape(limit = 100, applyMVPFilters = true, query // Use as general search query
-     ) {
+     async scrape(limit = 100, applyMVPFilters = true, queryOrIntent) {
+         const intent = typeof queryOrIntent === "string"
+             ? await analyzeDatasetQuery(queryOrIntent)
+             : queryOrIntent;
+         const query = typeof queryOrIntent === "string" ? queryOrIntent : intent?.searchQuery;
+         const hfQuery = intent ? buildIntentSearchQuery(intent) : query;
          const filterMsg = query ? `, query: ${query}` : "";
          console.error(`Fetching datasets (target limit: ${limit}, MVP filters: ${applyMVPFilters}${filterMsg})...`);
          const results = [];
@@ -110,10 +124,11 @@ export class HuggingFaceScraper {
          }
          // Add delay between batches to avoid rate limits
          const BATCH_DELAY = hfToken ? 500 : 2000;
+         const hfFilterTags = intent ? buildHuggingFaceFilterTags(intent) : [];
          for await (const ds of listDatasets({
              limit: fetchLimit,
              additionalFields: ["description", "tags"],
-             search: { query: query },
+             search: { query: hfQuery, tags: hfFilterTags.length > 0 ? hfFilterTags : undefined },
              ...(hfToken ? { accessToken: hfToken } : {})
          })) {
              if (results.length >= limit)
@@ -150,18 +165,61 @@ export class HuggingFaceScraper {
              initialDelay: 2000, // Start with 2 seconds for HF API
              maxDelay: 30000 // Max 30 seconds
          });
-         const splits = fullInfo.splits?.map((s) => ({
+         const cardData = fullInfo.cardData || {};
+         // Extract splits from cardData.dataset_info (where HF actually stores them)
+         // cardData.dataset_info can be an object (single config) or array (multi-config)
+         let rawSplits = [];
+         const datasetInfoField = cardData.dataset_info;
+         if (datasetInfoField) {
+             const configs = Array.isArray(datasetInfoField) ? datasetInfoField : [datasetInfoField];
+             for (const config of configs) {
+                 if (config?.splits && Array.isArray(config.splits)) {
+                     rawSplits = rawSplits.concat(config.splits);
+                 }
+             }
+         }
+         // Fallback: try top-level splits from the SDK (rarely populated)
+         if (rawSplits.length === 0 && fullInfo.splits) {
+             rawSplits = fullInfo.splits;
+         }
+         const splits = rawSplits.map((s) => ({
              name: s.name,
-             num_examples: s.numExamples || 0,
-             size_bytes: s.sizeBytes
-         })) || [];
-         const totalExamples = splits.reduce((sum, s) => sum + (s.num_examples || 0), 0);
+             num_examples: s.num_examples || s.numExamples || 0,
+             size_bytes: s.num_bytes || s.sizeBytes || 0
+         }));
+         let totalExamples = splits.reduce((sum, s) => sum + (s.num_examples || 0), 0);
          const totalSizeBytes = splits.reduce((sum, s) => sum + (s.size_bytes || 0), 0);
+         // Fallback: estimate from size_categories when splits give 0
+         if (totalExamples === 0) {
+             const sizeCategories = cardData.size_categories;
+             if (Array.isArray(sizeCategories) && sizeCategories.length > 0) {
+                 const cat = sizeCategories[0];
+                 const rangeMatch = cat.match(/([\d.]+[KMB]?)\s*<\s*n\s*<\s*([\d.]+[KMB]?)/i);
+                 if (rangeMatch) {
+                     const parseHumanNum = (s) => {
+                         const m = s.match(/^([\d.]+)([KMB])?$/i);
+                         if (!m)
+                             return 0;
+                         const base = parseFloat(m[1]);
+                         const suffix = (m[2] || '').toUpperCase();
+                         if (suffix === 'K')
+                             return base * 1000;
+                         if (suffix === 'M')
+                             return base * 1_000_000;
+                         if (suffix === 'B')
+                             return base * 1_000_000_000;
+                         return base;
+                     };
+                     const lo = parseHumanNum(rangeMatch[1]);
+                     const hi = parseHumanNum(rangeMatch[2]);
+                     totalExamples = Math.round((lo + hi) / 2);
+                 }
+             }
+         }
          const totalSizeMB = totalSizeBytes ? Math.round(totalSizeBytes / (1024 * 1024) * 100) / 100 : undefined;
          const hasValidationSplit = splits.some((s) => s.name === "validation" || s.name === "val");
          const licenseTag = tags.find(t => t.startsWith("license:"));
          const licenseId = licenseTag ? licenseTag.replace("license:", "") : fullInfo.license;
-         const cardData = fullInfo.cardData || {};
          const licenseUrl = cardData.license?.[0]?.link || cardData.license_link;
          const license = categorizeLicense(licenseId, licenseUrl);
          if (license.category === "restricted") {
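
The size_categories fallback targets Hugging Face's bucketed size tags and reports the bucket midpoint. Tracing it on one representative tag (the tag value is an example; the arithmetic follows the code above):

// "10K<n<100K" matches the range regex with groups "10K" and "100K";
// parseHumanNum maps them to 10_000 and 100_000, so the estimate is
// Math.round((10_000 + 100_000) / 2) = 55_000 examples.
const rangeMatch = "10K<n<100K".match(/([\d.]+[KMB]?)\s*<\s*n\s*<\s*([\d.]+[KMB]?)/i);
console.error(rangeMatch[1], rangeMatch[2]); // "10K" "100K"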
@@ -247,7 +305,16 @@ export class HuggingFaceScraper {
              description_length: description.length,
              has_readme: !!(cardData.readme || cardData.readme_content)
          };
-         results.push(metadata);
+         // Hard language exclusion — drop bilingual/multilingual for single-language queries
+         if (intent && shouldExcludeByLanguage(metadata, intent)) {
+             // skip — do not push
+         }
+         else {
+             if (intent) {
+                 metadata.intent_score = scoreDatasetAgainstIntent(metadata, intent);
+             }
+             results.push(metadata);
+         }
      }
      catch (e) {
          // Track all errors for user feedback
@@ -297,8 +364,12 @@ export class HuggingFaceScraper {
          if (otherErrors > 0) {
              console.error(`[HF Scraper] ⚠️ ${otherErrors} datasets skipped due to errors`);
          }
-         // Sort by downloads descending
-         return results.sort((a, b) => b.downloads - a.downloads);
+         return results.sort((a, b) => {
+             const intentDelta = Number(b.intent_score || 0) - Number(a.intent_score || 0);
+             if (intentDelta !== 0)
+                 return intentDelta;
+             return b.downloads - a.downloads;
+         });
      }
      extractTask(tags) {
          const taskTags = [
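
The new comparator makes intent relevance the primary sort key and keeps downloads only as a tie-breaker; a quick check with illustrative records:

// Same two-key ordering as the comparator above, applied to made-up rows.
const rows = [
    { id: "a", intent_score: 0.4, downloads: 9000 },
    { id: "b", intent_score: 0.9, downloads: 10 },
    { id: "c", intent_score: 0.9, downloads: 500 },
];
rows.sort((a, b) =>
    (Number(b.intent_score || 0) - Number(a.intent_score || 0)) || (b.downloads - a.downloads));
console.error(rows.map(r => r.id)); // ["c", "b", "a"]: relevance outranks raw popularity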
package/build/metadata/semantic-scholar-source.js
@@ -0,0 +1,138 @@
+ import { rateLimitedFetch } from "./rate-limiter.js";
+ import { CircuitBreaker } from "./circuit-breaker.js";
+ import { estimateQualityScore } from "./quality.js";
+ export class SemanticScholarSource {
+     cache;
+     breaker = new CircuitBreaker("semantic_scholar", {
+         failureThreshold: 5,
+         openDurationMs: 30_000,
+         halfOpenSuccessesToClose: 2,
+     });
+     constructor(cache) {
+         this.cache = cache;
+     }
+     async discover(query, limit = 20) {
+         const out = await this.discoverWithTelemetry(query, limit);
+         return out.results;
+     }
+     async discoverWithTelemetry(query, limit = 20) {
+         const start = Date.now();
+         const cleanQuery = String(query || "").trim();
+         if (!cleanQuery)
+             return { results: [], cacheHit: false, latencyMs: Date.now() - start };
+         const perPage = Math.max(1, Math.min(100, Number(limit || 20)));
+         const cacheKey = `webcore:semantic_scholar:discover:${cleanQuery.toLowerCase()}:limit=${perPage}`;
+         const cached = await this.cache?.getJson(cacheKey);
+         if (cached)
+             return { results: cached, cacheHit: true, latencyMs: Date.now() - start };
+         if (!this.breaker.canAttempt()) {
+             throw new Error("Semantic Scholar connector is temporarily unavailable (circuit open).");
+         }
+         const url = new URL("https://api.semanticscholar.org/graph/v1/paper/search");
+         url.searchParams.set("query", cleanQuery);
+         url.searchParams.set("limit", String(perPage));
+         url.searchParams.set("fields", [
+             "paperId",
+             "title",
+             "abstract",
+             "url",
+             "venue",
+             "year",
+             "citationCount",
+             "authors",
+             "publicationTypes",
+             "openAccessPdf",
+         ].join(","));
+         const response = await rateLimitedFetch(url.toString(), {
+             headers: {
+                 "Accept": "application/json",
+                 "User-Agent": "vesper/2.0 (phase1-semantic-scholar-connector)",
+             },
+         }, { maxRetries: 5, initialDelay: 1000, maxDelay: 20000 }).catch((e) => {
+             this.breaker.onFailure();
+             throw e;
+         });
+         const data = await response.json().catch((e) => {
+             this.breaker.onFailure();
+             throw new Error(`Semantic Scholar JSON parse failed: ${e?.message || String(e)}`);
+         });
+         const papers = Array.isArray(data?.data) ? data.data : [];
+         const result = papers.map((p) => this.toDatasetMetadata(p)).filter(Boolean);
+         this.breaker.onSuccess();
+         await this.cache?.setJson(cacheKey, result, 86400); // 24h
+         return { results: result, cacheHit: false, latencyMs: Date.now() - start };
+     }
+     toDatasetMetadata(paper) {
+         const paperId = String(paper.paperId || paper.externalIds?.DOI || "").trim();
+         const title = String(paper.title || "").trim();
+         const abstract = String(paper.abstract || "").trim();
+         const url = String(paper.url || "").trim();
+         const authors = Array.isArray(paper.authors) ? paper.authors.map((a) => String(a.name || "").trim()).filter(Boolean) : [];
+         const tags = [
+             ...(Array.isArray(paper.publicationTypes) ? paper.publicationTypes.filter(Boolean).map(String) : []),
+             ...(paper.venue ? [String(paper.venue)] : []),
+         ];
+         const citationCount = Number(paper.citationCount || 0);
+         const qualityWarnings = [];
+         if (!abstract)
+             qualityWarnings.push("Missing abstract from Semantic Scholar response");
+         const abstractLength = (abstract || title).length;
+         const authorsPresent = authors.length > 0;
+         const datePresent = paper.year !== undefined && paper.year !== null;
+         const contentDepth = abstractLength;
+         const quality01 = estimateQualityScore({
+             abstractLength,
+             authorsPresent,
+             datePresent,
+             contentDepth,
+         });
+         const openAccessPdfUrl = paper.openAccessPdf?.url ? String(paper.openAccessPdf.url).trim() : undefined;
+         const downloadUrl = openAccessPdfUrl || url || (paperId ? `https://www.semanticscholar.org/paper/${paperId}` : "");
+         return {
+             id: paperId || url || title,
+             source: "semantic_scholar",
+             name: title || "Untitled",
+             description: abstract || title,
+             authors,
+             downloads: 0,
+             likes: citationCount,
+             stars: citationCount,
+             tags,
+             last_updated: new Date().toISOString(),
+             task: "research-paper",
+             languages: [],
+             domain: "research",
+             splits: [],
+             license: {
+                 id: "unknown",
+                 category: "unknown",
+                 usage_restrictions: [],
+                 warnings: [],
+             },
+             quality_score: Math.round(quality01 * 100),
+             quality_warnings: qualityWarnings,
+             download_url: downloadUrl,
+             format: "PDF",
+             total_examples: 1,
+             total_size_bytes: undefined,
+             total_size_mb: undefined,
+             columns: [
+                 { name: "title", type: "string" },
+                 { name: "abstract", type: "string" },
+             ],
+             is_structured: true,
+             has_target_column: false,
+             is_safe_source: true,
+             has_personal_data: false,
+             is_paywalled: false,
+             is_scraped_web_data: false,
+             uses_https: true,
+             has_train_split: false,
+             has_test_split: false,
+             has_validation_split: false,
+             description_length: (abstract || title).length,
+             has_readme: false,
+             metadata_url: url || (paperId ? `https://www.semanticscholar.org/paper/${paperId}` : ""),
+         };
+     }
+ }
package/build/python/asset_downloader_engine.py
@@ -26,6 +26,7 @@ def _print(payload: Dict[str, Any]) -> None:
  async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
      payload = json.loads(args.payload)
      output_root = payload.get("output_root") or str(Path.home() / ".vesper" / "data" / "assets")
+     output_dir = payload.get("output_dir")
      workers = int(payload.get("workers") or 8)
      recipes_dir = payload.get("recipes_dir")
  
@@ -43,6 +44,7 @@ async def _run_download(args: argparse.Namespace) -> Dict[str, Any]:
          kaggle_ref=payload.get("kaggle_ref"),
          urls=payload.get("urls"),
          output_format=payload.get("output_format", "webdataset"),
+         output_dir=str(output_dir) if output_dir else None,
          max_items=payload.get("max_items"),
          image_column=payload.get("image_column"),
      )
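
The new output_dir travels in the same JSON payload the engine already parses from args.payload. A sketch of the caller-side object, with field names taken from the two hunks above and values purely illustrative (the real caller sits behind build/utils/python-runtime.js):

// Serialized into the engine's payload argument; output_dir (new in this
// release) overrides the default location under output_root.
const payload = JSON.stringify({
    kaggle_ref: "someuser/some-dataset", // hypothetical Kaggle ref
    output_format: "webdataset",
    output_dir: "/tmp/vesper-assets",    // without it, files land under output_root (~/.vesper/data/assets)
    workers: 8,
    max_items: 1000,
    image_column: "image",
});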
package/build/python/convert_engine.py
@@ -0,0 +1,92 @@
+ """
+ Convert a dataset file between formats (CSV, Parquet, JSON, JSONL).
+ Usage: convert_engine.py <input_path> <output_path>
+ Outputs JSON: {"ok": true, "output_path": "...", "rows": N, "columns": N} or {"ok": false, "error": "..."}
+ """
+ import sys
+ import json
+ import os
+ 
+ try:
+     import polars as pl
+ except Exception:
+     print(json.dumps({"ok": False, "error": "polars is required. Install with: pip install polars"}))
+     sys.exit(1)
+ 
+ 
+ def _load(src: str) -> pl.DataFrame:
+     ext = os.path.splitext(src)[1].lower()
+     if ext == ".csv":
+         return pl.read_csv(src, ignore_errors=True, infer_schema_length=10000)
+     if ext in (".tsv", ".tab"):
+         return pl.read_csv(src, separator="\t", ignore_errors=True, infer_schema_length=10000)
+     if ext in (".parquet", ".pq"):
+         return pl.read_parquet(src)
+     if ext in (".feather", ".ftr", ".arrow", ".ipc"):
+         return pl.read_ipc(src)
+     if ext in (".jsonl", ".ndjson"):
+         return pl.read_ndjson(src)
+     if ext == ".json":
+         raw = open(src, "r", encoding="utf-8").read().strip()
+         if raw.startswith("["):
+             return pl.read_json(src)
+         if "\n" in raw and raw.split("\n")[0].strip().startswith("{"):
+             return pl.read_ndjson(src)
+         obj = json.loads(raw)
+         if isinstance(obj, dict):
+             for key in ("data", "rows", "items", "records", "results", "entries", "samples"):
+                 if key in obj and isinstance(obj[key], list):
+                     return pl.DataFrame(obj[key])
+             for v in obj.values():
+                 if isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict):
+                     return pl.DataFrame(v)
+         return pl.read_json(src)
+     # Fallback: try csv
+     return pl.read_csv(src, ignore_errors=True, infer_schema_length=10000)
+ 
+ 
+ def _write(df: pl.DataFrame, dst: str) -> None:
+     ext = os.path.splitext(dst)[1].lower()
+     os.makedirs(os.path.dirname(dst) or ".", exist_ok=True)
+     if ext in (".parquet", ".pq"):
+         df.write_parquet(dst)
+     elif ext == ".csv":
+         df.write_csv(dst)
+     elif ext == ".json":
+         df.write_json(dst, row_oriented=True)
+     elif ext in (".jsonl", ".ndjson"):
+         df.write_ndjson(dst)
+     else:
+         raise ValueError(f"Unsupported output format: {ext}")
+ 
+ 
+ def main():
+     if len(sys.argv) < 3:
+         print(json.dumps({"ok": False, "error": "Usage: convert_engine.py <input> <output>"}))
+         sys.exit(1)
+ 
+     input_path = sys.argv[1]
+     output_path = sys.argv[2]
+ 
+     if not os.path.exists(input_path):
+         print(json.dumps({"ok": False, "error": f"File not found: {input_path}"}))
+         sys.exit(1)
+ 
+     try:
+         df = _load(input_path)
+         _write(df, output_path)
+         size_mb = round(os.path.getsize(output_path) / (1024 * 1024), 2)
+         print(json.dumps({
+             "ok": True,
+             "output_path": output_path,
+             "rows": df.height,
+             "columns": df.width,
+             "size_mb": size_mb,
+         }))
+     except Exception as e:
+         print(json.dumps({"ok": False, "error": str(e)}))
+         sys.exit(1)
+ 
+ 
+ if __name__ == "__main__":
+     main()
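
The engine's contract is fully described by its docstring: two positional paths in, one JSON line out on both success and failure. A hedged invocation sketch from the Node side (the python3 path is illustrative; the package resolves its interpreter via build/utils/python-runtime.js):

import { execFile } from "node:child_process";

execFile("python3", ["build/python/convert_engine.py", "data.csv", "data.parquet"],
    (_err, stdout) => {
        // The script prints JSON on success and on failure (exit code 1),
        // so parse stdout either way.
        const result = JSON.parse(stdout);
        console.error(result.ok
            ? `wrote ${result.rows} rows -> ${result.output_path}`
            : `convert failed: ${result.error}`);
    });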
package/build/python/normalize_engine.py
@@ -50,6 +50,51 @@ def _load(file_path: str, options: dict) -> pl.DataFrame:
          df = pl.read_ipc(file_path)
      elif ext == ".jsonl":
          df = pl.read_ndjson(file_path)
+     elif ext == ".json":
+         # Auto-detect: array-of-objects vs NDJSON vs nested structures
+         try:
+             import json as _json
+             with open(file_path, "r", encoding="utf-8", errors="ignore") as fh:
+                 raw_text = fh.read(512)  # peek
+             stripped = raw_text.lstrip()
+             if stripped.startswith("["):
+                 # Array of objects — standard JSON
+                 with open(file_path, "r", encoding="utf-8", errors="ignore") as fh:
+                     data = _json.load(fh)
+                 if isinstance(data, list) and len(data) > 0:
+                     df = pl.DataFrame(data)
+                 else:
+                     raise ValueError("JSON file is empty or not an array of objects")
+             elif stripped.startswith("{"):
+                 # Could be NDJSON or a single object wrapping rows
+                 try:
+                     df = pl.read_ndjson(file_path)
+                 except Exception:
+                     with open(file_path, "r", encoding="utf-8", errors="ignore") as fh:
+                         data = _json.load(fh)
+                     # Try common wrapper patterns: {"data": [...]}, {"rows": [...]}, etc.
+                     rows = None
+                     if isinstance(data, dict):
+                         for key in ("data", "rows", "records", "items", "results", "entries"):
+                             if key in data and isinstance(data[key], list):
+                                 rows = data[key]
+                                 break
+                     if rows is None:
+                         # Last resort: try to use the dict values
+                         rows = [data]
+                     if rows and len(rows) > 0:
+                         df = pl.DataFrame(rows)
+                     else:
+                         raise ValueError("Could not parse JSON structure into tabular data")
+             else:
+                 raise ValueError("JSON file does not start with [ or {")
+         except pl.exceptions.ComputeError as ce:
+             raise ValueError(f"Failed to parse JSON: {ce}")
+     elif ext == ".xlsx":
+         try:
+             df = pl.read_excel(file_path)
+         except Exception as e:
+             raise ValueError(f"Failed to read Excel file: {e}")
      else:
          raise ValueError(f"Unsupported input format: {ext}")
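
For reference, the three .json shapes the new branch accepts, written out as tiny sample files (contents are illustrative):

import { writeFileSync } from "node:fs";

writeFileSync("array.json", JSON.stringify([{ a: 1 }, { a: 2 }]));             // json.load path
writeFileSync("nd.json", '{"a": 1}\n{"a": 2}\n');                              // pl.read_ndjson path
writeFileSync("wrapped.json", JSON.stringify({ data: [{ a: 1 }, { a: 2 }] })); // wrapper-key path
// A lone object matching none of data/rows/records/items/results/entries
// falls back to a single-row frame via rows = [data].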