vesper-wizard 2.0.5 → 2.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. package/LICENSE +21 -0
  2. package/README.md +300 -37
  3. package/build/cache/cdn.js +34 -0
  4. package/build/cache/service.js +63 -0
  5. package/build/cleaning/cleaner.js +81 -0
  6. package/build/cleaning/evaluator.js +89 -0
  7. package/build/cleaning/executor.js +62 -0
  8. package/build/cleaning/exporter.js +87 -0
  9. package/build/cleaning/planner.js +127 -0
  10. package/build/cleaning/rules.js +57 -0
  11. package/build/cleaning/types.js +1 -0
  12. package/build/cloud/adapters/local.js +37 -0
  13. package/build/cloud/adapters/s3.js +24 -0
  14. package/build/cloud/adapters/supabase.js +49 -0
  15. package/build/cloud/storage-manager.js +26 -0
  16. package/build/cloud/types.js +1 -0
  17. package/build/compliance/service.js +73 -0
  18. package/build/compliance/store.js +80 -0
  19. package/build/compliance/types.js +1 -0
  20. package/build/config/config-manager.js +221 -0
  21. package/build/config/secure-keys.js +51 -0
  22. package/build/config/user-config.js +48 -0
  23. package/build/data/processing-worker.js +23 -0
  24. package/build/data/streaming.js +38 -0
  25. package/build/data/worker-pool.js +39 -0
  26. package/build/export/exporter.js +69 -0
  27. package/build/export/packager.js +100 -0
  28. package/build/export/types.js +1 -0
  29. package/build/fusion/aligner.js +56 -0
  30. package/build/fusion/deduplicator.js +69 -0
  31. package/build/fusion/engine.js +69 -0
  32. package/build/fusion/harmonizer.js +39 -0
  33. package/build/fusion/orchestrator.js +86 -0
  34. package/build/fusion/types.js +1 -0
  35. package/build/gateway/unified-dataset-gateway.js +409 -0
  36. package/build/index.js +2704 -0
  37. package/build/ingestion/hf-downloader.js +171 -0
  38. package/build/ingestion/ingestor.js +271 -0
  39. package/build/ingestion/kaggle-downloader.js +102 -0
  40. package/build/install/install-service.js +41 -0
  41. package/build/jobs/manager.js +136 -0
  42. package/build/jobs/queue.js +59 -0
  43. package/build/jobs/types.js +1 -0
  44. package/build/lib/supabase.js +3 -0
  45. package/build/metadata/dataworld-source.js +89 -0
  46. package/build/metadata/domain.js +147 -0
  47. package/build/metadata/github-scraper.js +47 -0
  48. package/build/metadata/institutional-scrapers.js +49 -0
  49. package/build/metadata/kaggle-scraper.js +182 -0
  50. package/build/metadata/kaggle-source.js +70 -0
  51. package/build/metadata/license.js +68 -0
  52. package/build/metadata/monitoring-service.js +107 -0
  53. package/build/metadata/monitoring-store.js +78 -0
  54. package/build/metadata/monitoring-types.js +1 -0
  55. package/build/metadata/openml-source.js +87 -0
  56. package/build/metadata/quality.js +48 -0
  57. package/build/metadata/rate-limiter.js +128 -0
  58. package/build/metadata/scraper.js +377 -0
  59. package/build/metadata/store.js +340 -0
  60. package/build/metadata/types.js +1 -0
  61. package/build/metadata/uci-scraper.js +49 -0
  62. package/build/monitoring/observability.js +76 -0
  63. package/build/preparation/target-detector.js +75 -0
  64. package/build/python/__pycache__/config.cpython-312.pyc +0 -0
  65. package/build/python/asset_downloader_engine.py +92 -0
  66. package/build/python/cleaner.py +226 -0
  67. package/build/python/config.py +263 -0
  68. package/build/python/dataworld_engine.py +208 -0
  69. package/build/python/export_engine.py +243 -0
  70. package/build/python/framework_adapters.py +100 -0
  71. package/build/python/fusion_engine.py +368 -0
  72. package/build/python/github_adapter.py +106 -0
  73. package/build/python/hf_fallback.py +298 -0
  74. package/build/python/image_engine.py +86 -0
  75. package/build/python/kaggle_engine.py +295 -0
  76. package/build/python/media_engine.py +133 -0
  77. package/build/python/nasa_adapter.py +82 -0
  78. package/build/python/openml_engine.py +146 -0
  79. package/build/python/quality_engine.py +267 -0
  80. package/build/python/row_count.py +54 -0
  81. package/build/python/splitter_engine.py +283 -0
  82. package/build/python/target_engine.py +154 -0
  83. package/build/python/test_framework_adapters.py +61 -0
  84. package/build/python/test_fusion_engine.py +89 -0
  85. package/build/python/uci_adapter.py +94 -0
  86. package/build/python/vesper/__init__.py +1 -0
  87. package/build/python/vesper/__pycache__/__init__.cpython-312.pyc +0 -0
  88. package/build/python/vesper/core/__init__.py +1 -0
  89. package/build/python/vesper/core/__pycache__/__init__.cpython-312.pyc +0 -0
  90. package/build/python/vesper/core/__pycache__/asset_downloader.cpython-312.pyc +0 -0
  91. package/build/python/vesper/core/__pycache__/download_recipe.cpython-312.pyc +0 -0
  92. package/build/python/vesper/core/asset_downloader.py +675 -0
  93. package/build/python/vesper/core/download_recipe.py +104 -0
  94. package/build/python/worldbank_adapter.py +99 -0
  95. package/build/quality/analyzer.js +93 -0
  96. package/build/quality/image-analyzer.js +114 -0
  97. package/build/quality/media-analyzer.js +115 -0
  98. package/build/quality/quality-orchestrator.js +162 -0
  99. package/build/quality/types.js +1 -0
  100. package/build/scripts/build-index.js +54 -0
  101. package/build/scripts/check-db.js +73 -0
  102. package/build/scripts/check-jobs.js +24 -0
  103. package/build/scripts/check-naruto.js +17 -0
  104. package/build/scripts/cleanup-kaggle.js +41 -0
  105. package/build/scripts/demo-full-pipeline.js +62 -0
  106. package/build/scripts/demo-ui.js +58 -0
  107. package/build/scripts/e2e-demo.js +72 -0
  108. package/build/scripts/massive-scrape.js +103 -0
  109. package/build/scripts/ops-dashboard.js +33 -0
  110. package/build/scripts/repro-bug.js +37 -0
  111. package/build/scripts/repro-export-bug.js +56 -0
  112. package/build/scripts/scrape-metadata.js +100 -0
  113. package/build/scripts/search-cli.js +26 -0
  114. package/build/scripts/test-bias.js +45 -0
  115. package/build/scripts/test-caching.js +51 -0
  116. package/build/scripts/test-cleaning.js +76 -0
  117. package/build/scripts/test-cloud-storage.js +48 -0
  118. package/build/scripts/test-compliance.js +58 -0
  119. package/build/scripts/test-conversion.js +64 -0
  120. package/build/scripts/test-custom-rules.js +58 -0
  121. package/build/scripts/test-db-opt.js +63 -0
  122. package/build/scripts/test-export-custom.js +33 -0
  123. package/build/scripts/test-exporter.js +53 -0
  124. package/build/scripts/test-fusion.js +61 -0
  125. package/build/scripts/test-github.js +27 -0
  126. package/build/scripts/test-group-split.js +52 -0
  127. package/build/scripts/test-hf-download.js +29 -0
  128. package/build/scripts/test-holdout-manager.js +61 -0
  129. package/build/scripts/test-hybrid-search.js +41 -0
  130. package/build/scripts/test-image-analysis.js +50 -0
  131. package/build/scripts/test-ingestion-infra.js +39 -0
  132. package/build/scripts/test-install.js +40 -0
  133. package/build/scripts/test-institutional.js +26 -0
  134. package/build/scripts/test-integrity.js +41 -0
  135. package/build/scripts/test-jit.js +42 -0
  136. package/build/scripts/test-job-queue.js +62 -0
  137. package/build/scripts/test-kaggle-download.js +34 -0
  138. package/build/scripts/test-large-data.js +50 -0
  139. package/build/scripts/test-mcp-v5.js +74 -0
  140. package/build/scripts/test-media-analysis.js +61 -0
  141. package/build/scripts/test-monitoring.js +91 -0
  142. package/build/scripts/test-observability.js +106 -0
  143. package/build/scripts/test-packager.js +55 -0
  144. package/build/scripts/test-pipeline.js +50 -0
  145. package/build/scripts/test-planning.js +64 -0
  146. package/build/scripts/test-privacy.js +38 -0
  147. package/build/scripts/test-production-sync.js +36 -0
  148. package/build/scripts/test-quality.js +43 -0
  149. package/build/scripts/test-robust-ingestion.js +41 -0
  150. package/build/scripts/test-schema.js +45 -0
  151. package/build/scripts/test-split-validation.js +40 -0
  152. package/build/scripts/test-splitter.js +93 -0
  153. package/build/scripts/test-target-detector.js +29 -0
  154. package/build/scripts/test-uci.js +27 -0
  155. package/build/scripts/test-unified-quality.js +86 -0
  156. package/build/scripts/test-write.js +14 -0
  157. package/build/scripts/verify-integration.js +57 -0
  158. package/build/scripts/verify-priority.js +33 -0
  159. package/build/search/embedder.js +34 -0
  160. package/build/search/engine.js +152 -0
  161. package/build/search/jit-orchestrator.js +258 -0
  162. package/build/search/vector-store.js +123 -0
  163. package/build/splitting/splitter.js +82 -0
  164. package/build/splitting/types.js +1 -0
  165. package/build/tools/formatter.js +251 -0
  166. package/build/utils/downloader.js +52 -0
  167. package/build/utils/selector.js +69 -0
  168. package/mcp-config-template.json +18 -0
  169. package/package.json +101 -29
  170. package/scripts/postinstall.cjs +114 -0
  171. package/scripts/preindex_registry.cjs +157 -0
  172. package/scripts/refresh-index.cjs +87 -0
  173. package/{wizard.js → scripts/wizard.js} +99 -21
  174. package/src/python/__pycache__/config.cpython-312.pyc +0 -0
  175. package/src/python/__pycache__/export_engine.cpython-312.pyc +0 -0
  176. package/src/python/__pycache__/framework_adapters.cpython-312.pyc +0 -0
  177. package/src/python/__pycache__/fusion_engine.cpython-312.pyc +0 -0
  178. package/src/python/__pycache__/kaggle_engine.cpython-312.pyc +0 -0
  179. package/src/python/asset_downloader_engine.py +92 -0
  180. package/src/python/cleaner.py +226 -0
  181. package/src/python/config.py +263 -0
  182. package/src/python/dataworld_engine.py +208 -0
  183. package/src/python/export_engine.py +243 -0
  184. package/src/python/framework_adapters.py +100 -0
  185. package/src/python/fusion_engine.py +368 -0
  186. package/src/python/github_adapter.py +106 -0
  187. package/src/python/hf_fallback.py +298 -0
  188. package/src/python/image_engine.py +86 -0
  189. package/src/python/kaggle_engine.py +295 -0
  190. package/src/python/media_engine.py +133 -0
  191. package/src/python/nasa_adapter.py +82 -0
  192. package/src/python/openml_engine.py +146 -0
  193. package/src/python/quality_engine.py +267 -0
  194. package/src/python/row_count.py +54 -0
  195. package/src/python/splitter_engine.py +283 -0
  196. package/src/python/target_engine.py +154 -0
  197. package/src/python/test_framework_adapters.py +61 -0
  198. package/src/python/test_fusion_engine.py +89 -0
  199. package/src/python/uci_adapter.py +94 -0
  200. package/src/python/vesper/__init__.py +1 -0
  201. package/src/python/vesper/core/__init__.py +1 -0
  202. package/src/python/vesper/core/asset_downloader.py +675 -0
  203. package/src/python/vesper/core/download_recipe.py +104 -0
  204. package/src/python/worldbank_adapter.py +99 -0
  205. package/vesper-mcp-config.json +0 -6
@@ -0,0 +1,675 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import mimetypes
+import os
+import shutil
+import tempfile
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional
+from urllib.parse import urlparse
+
+import aiohttp
+
+from vesper.core.download_recipe import get_download_recipe
+
+try:
+    import aiofiles
+except Exception:  # pragma: no cover
+    aiofiles = None
+
+try:
+    import webdataset as wds
+except Exception:  # pragma: no cover
+    wds = None
+
+
+IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".webp", ".bmp", ".gif", ".tiff", ".tif", ".svg"}
+
+
+@dataclass
+class DownloadResult:
+    dataset_id: str
+    source: str
+    output_dir: str
+    downloaded_assets: int
+    failed_assets: int
+    errors_file: str
+    metadata_file: str
+    output_format: str
+
+
+class AssetDownloader:
+    def __init__(
+        self,
+        output_root: str,
+        workers: int = 8,
+        recipes_dir: Optional[str] = None,
+        progress_callback: Optional[Callable[[str, Dict[str, Any]], Awaitable[None] | None]] = None,
+    ) -> None:
+        self.output_root = Path(output_root)
+        self.workers = max(1, min(workers, 32))
+        self.recipes_dir = recipes_dir
+        self.progress_callback = progress_callback
+
+    async def _emit(self, stage: str, payload: Dict[str, Any]) -> None:
+        if not self.progress_callback:
+            return
+        maybe = self.progress_callback(stage, payload)
+        if asyncio.iscoroutine(maybe):
+            await maybe
+
+    @staticmethod
+    def _hydrate_kaggle_credentials() -> None:
+        try:
+            from config import get_all  # type: ignore
+            keys = get_all() or {}
+        except Exception:
+            keys = {}
+
+        username = keys.get("kaggle_username") or os.getenv("KAGGLE_USERNAME")
+        key = keys.get("kaggle_key") or os.getenv("KAGGLE_KEY")
+
+        if username:
+            os.environ["KAGGLE_USERNAME"] = str(username)
+        if key:
+            os.environ["KAGGLE_KEY"] = str(key)
+
+        username = os.getenv("KAGGLE_USERNAME")
+        key = os.getenv("KAGGLE_KEY")
+        if not username or not key:
+            return
+
+        kaggle_dir = Path.home() / ".kaggle"
+        kaggle_file = kaggle_dir / "kaggle.json"
+        try:
+            kaggle_dir.mkdir(parents=True, exist_ok=True)
+            kaggle_file.write_text(
+                json.dumps({"username": username, "key": key}, ensure_ascii=False),
+                encoding="utf-8",
+            )
+            try:
+                os.chmod(kaggle_file, 0o600)
+            except Exception:
+                pass
+        except Exception:
+            pass
+
+    @staticmethod
+    def find_image_column(dataset: Any) -> Optional[str]:
+        """Auto-detect the image column in a HuggingFace dataset.
+
+        Detection strategy (in priority order):
+        1. HF Feature type: columns with Image() feature type
+        2. Known column names: 'image', 'img', 'photo', 'image_url', etc.
+        3. URL pattern detection: columns containing image URLs (http(s)://...jpg)
+        4. Path pattern detection: columns with file paths ending in image extensions
+        """
+        # Strategy 1: Check HF Feature types (most reliable)
+        features = getattr(dataset, "features", None)
+        if features:
+            for name, feature in features.items():
+                feat_cls = feature.__class__.__name__.lower()
+                feat_str = str(feature).lower()
+                if feat_cls == "image" or "image(" in feat_str:
+                    return str(name)
+
+        # Strategy 2: Check known column names
+        cols = getattr(dataset, "column_names", []) or []
+
+        # Exact match first (highest priority names)
+        priority_exact = ["image", "img", "photo", "picture", "images"]
+        for c in priority_exact:
+            if c in cols:
+                return c
+
+        # Case-insensitive exact match against common image-related column names
+        priority_partial = [
+            "image_path", "image_url", "img_path", "img_url",
+            "image_file", "file_name", "filepath", "filename",
+            "photo_url", "picture_url", "thumbnail",
+            "url", "path", "file",
+        ]
+        for target in priority_partial:
+            for c in cols:
+                if c.lower() == target:
+                    return c
+
+        # Strategy 3: Sample values to detect URL/path patterns
+        try:
+            sample_size = min(5, len(dataset)) if hasattr(dataset, "__len__") else 5
+            if sample_size > 0:
+                for c in cols:
+                    is_image_col = False
+                    for i in range(sample_size):
+                        try:
+                            val = dataset[i][c]
+                        except Exception:
+                            break
+
+                        if val is None:
+                            continue
+
+                        # PIL Image object
+                        if hasattr(val, "save") and hasattr(val, "size"):
+                            is_image_col = True
+                            break
+
+                        # Dict with image data
+                        if isinstance(val, dict) and any(k in val for k in ("bytes", "path", "url")):
+                            is_image_col = True
+                            break
+
+                        # String: URL or file path
+                        if isinstance(val, str):
+                            val_lower = val.lower()
+                            # Check for image URLs
+                            if val_lower.startswith(("http://", "https://")) and any(
+                                ext in val_lower.split("?")[0] for ext in IMAGE_EXTENSIONS
+                            ):
+                                is_image_col = True
+                                break
+                            # Check for file paths with image extensions
+                            if any(val_lower.endswith(ext) for ext in IMAGE_EXTENSIONS):
+                                is_image_col = True
+                                break
+
+                    if is_image_col:
+                        return c
+        except Exception:
+            pass
+
+        return None
+
+    async def download_assets(
+        self,
+        dataset_id: str,
+        source: Optional[str] = None,
+        repo_id: Optional[str] = None,
+        kaggle_ref: Optional[str] = None,
+        urls: Optional[List[str]] = None,
+        output_format: str = "webdataset",
+        max_items: Optional[int] = None,
+        image_column: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        recipe = get_download_recipe(dataset_id, self.recipes_dir)
+        if recipe:
+            source = source or recipe.get("source")
+            repo_id = repo_id or recipe.get("repo_id")
+            image_column = image_column or recipe.get("image_column")
+
+        source = (source or "").lower()
+        if source not in {"huggingface", "kaggle", "url"}:
+            raise ValueError("source must be one of: huggingface, kaggle, url")
+
+        # --- Validate imports and args BEFORE creating any directories ---
+        if source == "huggingface":
+            if not repo_id:
+                raise ValueError("repo_id is required for source=huggingface")
+            try:
+                from datasets import load_dataset as _ld  # noqa: F401
+            except Exception as e:
+                raise RuntimeError(
+                    f"datasets package is required for HuggingFace downloads. "
+                    f"Install with: pip install datasets. Details: {e}"
+                )
+        elif source == "kaggle":
+            ref = kaggle_ref or repo_id
+            if not ref:
+                raise ValueError("kaggle_ref is required for source=kaggle")
+            try:
+                from kaggle.api.kaggle_api_extended import KaggleApi as _Ka  # noqa: F401
+            except Exception as e:
+                raise RuntimeError(
+                    f"kaggle package is required for Kaggle downloads. "
+                    f"Install with: pip install kaggle. Details: {e}"
+                )
+        else:
+            if not urls:
+                raise ValueError("urls are required for source=url")
+
+        # --- Now safe to create directories ---
+        dataset_dir = self.output_root / dataset_id.replace("/", "_").replace(":", "_")
+        images_dir = dataset_dir / "images"
+        dataset_dir.mkdir(parents=True, exist_ok=True)
+        images_dir.mkdir(parents=True, exist_ok=True)
+
+        errors_file = dataset_dir / "errors.jsonl"
+        metadata_file = dataset_dir / "metadata.jsonl"
+
+        try:
+            if source == "huggingface":
+                summary = await self._download_huggingface(repo_id, dataset_id, images_dir, metadata_file, errors_file, max_items, image_column)
+            elif source == "kaggle":
+                ref = kaggle_ref or repo_id
+                summary = await self._download_kaggle(ref, dataset_id, images_dir, metadata_file, errors_file, max_items)
+            else:
+                summary = await self._download_urls(urls, dataset_id, images_dir, metadata_file, errors_file, max_items)
+        except Exception:
+            # Clean up empty directories on failure so we don't leave ghost artifacts
+            if images_dir.exists() and not any(images_dir.iterdir()):
+                shutil.rmtree(dataset_dir, ignore_errors=True)
+            raise
+
+        if output_format == "webdataset":
+            await self._write_webdataset(dataset_dir, images_dir, metadata_file)
+        elif output_format == "parquet":
+            await self._write_parquet(dataset_dir, metadata_file)
+
+        result = DownloadResult(
+            dataset_id=dataset_id,
+            source=source,
+            output_dir=str(dataset_dir),
+            downloaded_assets=summary["downloaded"],
+            failed_assets=summary["failed"],
+            errors_file=str(errors_file),
+            metadata_file=str(metadata_file),
+            output_format=output_format,
+        )
+        return result.__dict__
+
+    async def _download_huggingface(
+        self,
+        repo_id: str,
+        dataset_id: str,
+        images_dir: Path,
+        metadata_file: Path,
+        errors_file: Path,
+        max_items: Optional[int],
+        image_column: Optional[str],
+    ) -> Dict[str, int]:
+        from datasets import load_dataset  # validated in download_assets()
+        import warnings
+        warnings.filterwarnings("ignore", message=".*trust_remote_code.*")
+
+        await self._emit("start", {"source": "huggingface", "repo_id": repo_id})
+
+        token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN") or None
+
+        # Try loading with multiple strategies
+        ds = None
+        load_errors = []
+
+        for trust_rc in [True, False]:
+            for split_name in ["train", "test", "validation"]:
+                try:
+                    kwargs = {"path": repo_id, "split": split_name}
+                    if trust_rc:
+                        kwargs["trust_remote_code"] = True
+                    if token:
+                        kwargs["token"] = token
+                    ds = load_dataset(**kwargs)
+                    break
+                except Exception as e:
+                    msg = str(e)
+                    # Immediately raise auth errors
+                    if any(x in msg for x in ["401", "403", "gated", "Unauthorized"]):
+                        raise RuntimeError(
+                            f"Authentication required for '{repo_id}'. "
+                            "This dataset may be gated or private. "
+                            "Use the configure_keys tool to set HF_TOKEN, then retry."
+                        )
+                    load_errors.append(msg)
+                    continue
+            if ds is not None:
+                break
+
+        # Fallback: load without split
+        if ds is None:
+            try:
+                kwargs = {"path": repo_id, "trust_remote_code": True}
+                if token:
+                    kwargs["token"] = token
+                dd = load_dataset(**kwargs)
+                from datasets import DatasetDict
+                if isinstance(dd, DatasetDict):
+                    first_split = list(dd.keys())[0]
+                    ds = dd[first_split]
+                else:
+                    ds = dd
+            except Exception as e:
+                msg = str(e)
+                if any(x in msg for x in ["401", "403", "gated", "Unauthorized"]):
+                    raise RuntimeError(
+                        f"Authentication required for '{repo_id}'. "
+                        "Use the configure_keys tool to set HF_TOKEN, then retry."
+                    )
+                combined = "; ".join(load_errors[:3])
+                raise RuntimeError(
+                    f"Failed to load HuggingFace dataset '{repo_id}': {msg}. "
+                    f"Previous attempts: {combined}"
+                )
+
+        col = image_column or self.find_image_column(ds)
+        if not col:
+            raise RuntimeError(
+                f"No image column detected in HuggingFace dataset '{repo_id}'. "
+                "Available columns: " + ", ".join(getattr(ds, "column_names", [])) + ". "
+                "Provide image_column parameter explicitly."
+            )
+
+        total = len(ds) if hasattr(ds, "__len__") else 0
+        target = min(total, max_items) if max_items and total else (max_items or total or 0)
+
+        downloaded = 0
+        failed = 0
+
+        # Lazily create an aiohttp session for URL-based images
+        session = None
+
+        try:
+            with metadata_file.open("w", encoding="utf-8") as mf, errors_file.open("w", encoding="utf-8") as ef:
+                for idx, row in enumerate(ds):
+                    if max_items and idx >= max_items:
+                        break
+                    try:
+                        out_name = f"{idx:08d}.jpg"
+                        out_path = images_dir / out_name
+                        value = row.get(col)
+
+                        # Handle URL-based images inline
+                        if isinstance(value, dict) and value.get("url") and not value.get("bytes") and not value.get("path"):
+                            url = value["url"]
+                            if session is None:
+                                session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=60))
+                            await self._download_image_from_url(session, url, out_path)
+                        elif isinstance(value, str) and value.startswith(("http://", "https://")):
+                            if session is None:
+                                session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=60))
+                            await self._download_image_from_url(session, value, out_path)
+                        else:
+                            self._save_image_value(value, out_path)
+
+                        record = {
+                            "dataset_id": dataset_id,
+                            "index": idx,
+                            "image_path": str(out_path),
+                            "source": "huggingface",
+                            "repo_id": repo_id,
+                        }
+                        mf.write(json.dumps(record, ensure_ascii=False) + "\n")
+                        downloaded += 1
+                        if downloaded % 50 == 0:
+                            await self._emit("progress", {"downloaded": downloaded, "failed": failed, "target": target})
+                    except Exception as e:
+                        failed += 1
+                        ef.write(json.dumps({"index": idx, "error": str(e)}, ensure_ascii=False) + "\n")
+        finally:
+            if session is not None:
+                await session.close()
+
+        await self._emit("done", {"downloaded": downloaded, "failed": failed})
+        return {"downloaded": downloaded, "failed": failed}
+
+    async def _download_image_from_url(self, session: aiohttp.ClientSession, url: str, out_path: Path) -> None:
+        """Download an image from a URL to a local path."""
+        async with session.get(url) as response:
+            if response.status != 200:
+                raise RuntimeError(f"HTTP {response.status} downloading {url}")
+            data = await response.read()
+            if not data:
+                raise RuntimeError(f"Empty response from {url}")
+            out_path.write_bytes(data)
+
+    async def _download_kaggle(
+        self,
+        kaggle_ref: str,
+        dataset_id: str,
+        images_dir: Path,
+        metadata_file: Path,
+        errors_file: Path,
+        max_items: Optional[int],
+    ) -> Dict[str, int]:
+        from kaggle.api.kaggle_api_extended import KaggleApi  # validated in download_assets()
+
+        await self._emit("start", {"source": "kaggle", "dataset": kaggle_ref})
+
+        self._hydrate_kaggle_credentials()
+
+        api = KaggleApi()
+        try:
+            api.authenticate()
+        except Exception as e:
+            raise RuntimeError(
+                "Kaggle authentication failed. Run 'configure_kaggle' or 'configure_keys' with "
+                "kaggle_username and kaggle_key, then retry. "
+                f"Details: {e}"
+            )
+
+        tmp_dir = Path(tempfile.mkdtemp(prefix="vesper_kaggle_assets_"))
+        downloaded = 0
+        failed = 0
+
+        try:
+            api.dataset_download_files(kaggle_ref, path=str(tmp_dir), unzip=True, quiet=True)
+            candidates = [p for p in tmp_dir.rglob("*") if p.is_file() and p.suffix.lower() in IMAGE_EXTENSIONS]
+            if max_items:
+                candidates = candidates[:max_items]
+
+            with metadata_file.open("w", encoding="utf-8") as mf, errors_file.open("w", encoding="utf-8") as ef:
+                for idx, src_path in enumerate(candidates):
+                    try:
+                        out_name = f"{idx:08d}{src_path.suffix.lower()}"
+                        out_path = images_dir / out_name
+                        shutil.copy2(src_path, out_path)
+                        record = {
+                            "dataset_id": dataset_id,
+                            "index": idx,
+                            "image_path": str(out_path),
+                            "source": "kaggle",
+                            "repo_id": kaggle_ref,
+                        }
+                        mf.write(json.dumps(record, ensure_ascii=False) + "\n")
+                        downloaded += 1
+                    except Exception as e:
+                        failed += 1
+                        ef.write(json.dumps({"file": str(src_path), "error": str(e)}, ensure_ascii=False) + "\n")
+        finally:
+            shutil.rmtree(tmp_dir, ignore_errors=True)
+
+        await self._emit("done", {"downloaded": downloaded, "failed": failed})
+        return {"downloaded": downloaded, "failed": failed}
+
+    async def _download_urls(
+        self,
+        urls: List[str],
+        dataset_id: str,
+        images_dir: Path,
+        metadata_file: Path,
+        errors_file: Path,
+        max_items: Optional[int],
+    ) -> Dict[str, int]:
+        if aiofiles is None:
+            raise RuntimeError("aiofiles is required for URL downloads. Install with: pip install aiofiles")
+
+        selected = urls[:max_items] if max_items else urls
+        sem = asyncio.Semaphore(self.workers)
+
+        downloaded = 0
+        failed = 0
+        metadata_lock = asyncio.Lock()
+
+        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=180)) as session:
+            async def worker(idx: int, url: str) -> None:
+                nonlocal downloaded, failed
+                async with sem:
+                    try:
+                        local_path = await self._download_one_url(session, idx, url, images_dir)
+                        async with metadata_lock:
+                            async with aiofiles.open(metadata_file, "a", encoding="utf-8") as mf:
+                                await mf.write(json.dumps({
+                                    "dataset_id": dataset_id,
+                                    "index": idx,
+                                    "image_path": str(local_path),
+                                    "source": "url",
+                                    "url": url,
+                                }, ensure_ascii=False) + "\n")
+                        downloaded += 1
+                    except Exception as e:
+                        failed += 1
+                        async with metadata_lock:
+                            async with aiofiles.open(errors_file, "a", encoding="utf-8") as ef:
+                                await ef.write(json.dumps({"index": idx, "url": url, "error": str(e)}, ensure_ascii=False) + "\n")
+
+            tasks = [asyncio.create_task(worker(i, u)) for i, u in enumerate(selected)]
+            await asyncio.gather(*tasks)
+
+        await self._emit("done", {"downloaded": downloaded, "failed": failed})
+        return {"downloaded": downloaded, "failed": failed}
+
+    async def _download_one_url(self, session: aiohttp.ClientSession, idx: int, url: str, images_dir: Path) -> Path:
+        ext = Path(url.split("?")[0]).suffix.lower()
+        if ext not in IMAGE_EXTENSIONS:
+            ext = ".jpg"
+        out_path = images_dir / f"{idx:08d}{ext}"
+
+        existing_size = out_path.stat().st_size if out_path.exists() else 0
+        headers: Dict[str, str] = {}
+        if existing_size > 0:
+            headers["Range"] = f"bytes={existing_size}-"
+
+        async with session.get(url, headers=headers) as response:
+            if response.status not in (200, 206):
+                raise RuntimeError(f"HTTP {response.status}")
+
+            mode = "ab" if response.status == 206 and existing_size > 0 else "wb"
+            async with aiofiles.open(out_path, mode) as f:
+                async for chunk in response.content.iter_chunked(1024 * 256):
+                    await f.write(chunk)
+
+        return out_path
+
+    @staticmethod
+    def _save_image_value(value: Any, out_path: Path) -> None:
+        """Save an image value to disk. Handles multiple image representations:
+        - PIL Image objects (have .save method)
+        - dict with 'bytes' key (raw image bytes)
+        - dict with 'path' key (local file path)
+        - bytes/bytearray (raw image data)
+        - str (local file path)
+        """
+        if value is None:
+            raise ValueError("empty image value")
+
+        # PIL Image object
+        if hasattr(value, "save") and hasattr(value, "size"):
+            value.save(out_path)
+            return
+
+        # Raw bytes
+        if isinstance(value, (bytes, bytearray)):
+            out_path.write_bytes(value)
+            return
+
+        # Dict with image data
+        if isinstance(value, dict):
+            if value.get("bytes"):
+                raw = value["bytes"]
+                if isinstance(raw, (bytes, bytearray)):
+                    out_path.write_bytes(raw)
+                else:
+                    # Could be a list of ints
+                    out_path.write_bytes(bytes(raw))
+                return
+            if value.get("path"):
+                p = str(value["path"])
+                if os.path.exists(p):
+                    shutil.copy2(p, out_path)
+                    return
+                raise ValueError(f"Image path not found: {p}")
+            if value.get("url"):
+                raise ValueError("image URL detected; use async URL downloader")
+
+        # String: local file path
+        if isinstance(value, str):
+            if os.path.exists(value):
+                shutil.copy2(value, out_path)
+                return
+            if value.startswith(("http://", "https://")):
+                raise ValueError("image URL detected; use async URL downloader")
+            raise ValueError(f"Image path not found: {value}")
+
+        # numpy array (common in some datasets)
+        try:
+            import numpy as np
+            if isinstance(value, np.ndarray):
+                from PIL import Image
+                img = Image.fromarray(value)
+                img.save(out_path)
+                return
+        except Exception:
+            pass
+
+        raise ValueError(f"Unsupported image value type: {type(value).__name__}")
+
+    async def _write_webdataset(self, dataset_dir: Path, images_dir: Path, metadata_file: Path) -> None:
+        """Write a webdataset-compatible tar archive.
+
+        Uses Python's built-in tarfile module instead of wds.ShardWriter to
+        avoid the gopen() handler issue on Windows (backslash paths).
+        The resulting .tar files are fully compatible with webdataset readers.
+        """
+        import io
+        import tarfile as _tarfile
+
+        max_per_shard = 5000
+        shard_idx = 0
+        count_in_shard = 0
+        current_tar: _tarfile.TarFile | None = None
+
+        def _open_shard() -> _tarfile.TarFile:
+            nonlocal shard_idx
+            shard_path = dataset_dir / f"shard-{shard_idx:06d}.tar"
+            shard_idx += 1
+            return _tarfile.open(str(shard_path), "w")
+
+        try:
+            current_tar = _open_shard()
+
+            with metadata_file.open("r", encoding="utf-8") as mf:
+                for line in mf:
+                    row = json.loads(line)
+                    image_path = Path(row["image_path"])
+                    if not image_path.exists():
+                        continue
+
+                    key = image_path.stem
+                    ext = image_path.suffix.lstrip(".") or "jpg"
+
+                    # Add image file
+                    img_data = image_path.read_bytes()
+                    img_info = _tarfile.TarInfo(name=f"{key}.{ext}")
+                    img_info.size = len(img_data)
+                    current_tar.addfile(img_info, io.BytesIO(img_data))
+
+                    # Add JSON metadata sidecar
+                    json_data = json.dumps(row, ensure_ascii=False).encode("utf-8")
+                    json_info = _tarfile.TarInfo(name=f"{key}.json")
+                    json_info.size = len(json_data)
+                    current_tar.addfile(json_info, io.BytesIO(json_data))
+
+                    count_in_shard += 1
+                    if count_in_shard >= max_per_shard:
+                        current_tar.close()
+                        current_tar = _open_shard()
+                        count_in_shard = 0
+        finally:
+            if current_tar is not None:
+                current_tar.close()
+
+    async def _write_parquet(self, dataset_dir: Path, metadata_file: Path) -> None:
+        try:
+            import pyarrow as pa
+            import pyarrow.parquet as pq
+        except Exception as e:
+            raise RuntimeError(f"pyarrow is required for parquet output: {e}")
+
+        rows: List[Dict[str, Any]] = []
+        with metadata_file.open("r", encoding="utf-8") as mf:
+            for line in mf:
+                rows.append(json.loads(line))
+
+        table = pa.Table.from_pylist(rows)
+        pq.write_table(table, str(dataset_dir / "metadata.parquet"))
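
For orientation, the new asset-downloader API added above can be driven as follows. This is a minimal sketch, assuming the package's Python sources are importable and the optional datasets dependency is installed; the dataset_id, repo_id, and print-based progress callback below are hypothetical, not taken from the package.

import asyncio

from vesper.core.asset_downloader import AssetDownloader

def on_progress(stage, payload):
    # _emit() accepts both sync and async callables, so a plain function works.
    print(f"[{stage}] {payload}")

async def main() -> None:
    downloader = AssetDownloader(output_root="./assets", workers=8, progress_callback=on_progress)
    # Writes ./assets/demo_cats/images/*.jpg plus metadata.jsonl and errors.jsonl,
    # then packs shard-*.tar in webdataset layout (the default output_format).
    result = await downloader.download_assets(
        dataset_id="demo/cats",           # hypothetical id; "/" becomes "_" in the output dir
        source="huggingface",
        repo_id="some-org/cat-images",    # hypothetical HF repo
        output_format="webdataset",
        max_items=100,
    )
    print(result["downloaded_assets"], "ok,", result["failed_assets"], "failed")

asyncio.run(main())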
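
find_image_column only duck-types its argument (it touches features, column_names, __len__, and indexing), so the documented detection order can be checked without datasets installed. A small sketch with a hypothetical stand-in object:

from vesper.core.asset_downloader import AssetDownloader

class FakeDataset:
    # No HF features, so strategy 1 is skipped; "photo_url" is then matched
    # by the case-insensitive known-name list in strategy 2.
    features = None
    column_names = ["caption", "photo_url"]

    def __len__(self) -> int:
        return 1

    def __getitem__(self, i):
        return {"caption": "a cat", "photo_url": "https://example.com/cat.jpg"}

print(AssetDownloader.find_image_column(FakeDataset()))  # -> photo_url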
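
Because _write_webdataset emits plain tar shards (shard-000000.tar and so on, at most 5000 samples each) whose members pair a <key>.<ext> image with a <key>.json sidecar, the output can be read back with only the standard library. A sketch, with a hypothetical shard path:

import json
import tarfile

with tarfile.open("assets/demo_cats/shard-000000.tar") as tar:  # hypothetical path
    members = {m.name: m for m in tar.getmembers()}
    for name, member in members.items():
        if name.endswith(".json"):
            continue  # sidecars are looked up by key below
        key = name.rsplit(".", 1)[0]
        meta = json.loads(tar.extractfile(members[key + ".json"]).read())
        image_bytes = tar.extractfile(member).read()
        print(key, meta["source"], len(image_bytes), "bytes")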